mirror of
https://github.com/dolphin-emu/dolphin.git
synced 2025-01-09 07:39:26 +01:00
Remove un-needed files
This commit is contained in:
parent
6cad635bd8
commit
a20b576d86
777
Externals/ffmpeg/dev/doc/developer.html
vendored
777
Externals/ffmpeg/dev/doc/developer.html
vendored
@ -1,777 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
Developer Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
Developer Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Developers-Guide" href="#Developers-Guide">1 Developers Guide</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Notes-for-external-developers" href="#Notes-for-external-developers">1.1 Notes for external developers</a></li>
|
||||
<li><a name="toc-Contributing" href="#Contributing">1.2 Contributing</a></li>
|
||||
<li><a name="toc-Coding-Rules-1" href="#Coding-Rules-1">1.3 Coding Rules</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Code-formatting-conventions" href="#Code-formatting-conventions">1.3.1 Code formatting conventions</a></li>
|
||||
<li><a name="toc-Comments" href="#Comments">1.3.2 Comments</a></li>
|
||||
<li><a name="toc-C-language-features" href="#C-language-features">1.3.3 C language features</a></li>
|
||||
<li><a name="toc-Naming-conventions" href="#Naming-conventions">1.3.4 Naming conventions</a></li>
|
||||
<li><a name="toc-Miscellaneous-conventions" href="#Miscellaneous-conventions">1.3.5 Miscellaneous conventions</a></li>
|
||||
<li><a name="toc-Editor-configuration" href="#Editor-configuration">1.3.6 Editor configuration</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Development-Policy" href="#Development-Policy">1.4 Development Policy</a></li>
|
||||
<li><a name="toc-Submitting-patches-1" href="#Submitting-patches-1">1.5 Submitting patches</a></li>
|
||||
<li><a name="toc-New-codecs-or-formats-checklist" href="#New-codecs-or-formats-checklist">1.6 New codecs or formats checklist</a></li>
|
||||
<li><a name="toc-patch-submission-checklist" href="#patch-submission-checklist">1.7 patch submission checklist</a></li>
|
||||
<li><a name="toc-Patch-review-process" href="#Patch-review-process">1.8 Patch review process</a></li>
|
||||
<li><a name="toc-Regression-tests-1" href="#Regression-tests-1">1.9 Regression tests</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Adding-files-to-the-fate_002dsuite-dataset" href="#Adding-files-to-the-fate_002dsuite-dataset">1.9.1 Adding files to the fate-suite dataset</a></li>
|
||||
<li><a name="toc-Visualizing-Test-Coverage" href="#Visualizing-Test-Coverage">1.9.2 Visualizing Test Coverage</a></li>
|
||||
<li><a name="toc-Using-Valgrind" href="#Using-Valgrind">1.9.3 Using Valgrind</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Release-process-1" href="#Release-process-1">1.10 Release process</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Criteria-for-Point-Releases-1" href="#Criteria-for-Point-Releases-1">1.10.1 Criteria for Point Releases</a></li>
|
||||
<li><a name="toc-Release-Checklist" href="#Release-Checklist">1.10.2 Release Checklist</a></li>
|
||||
</ul></li>
|
||||
</ul></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Developers-Guide"></a>
|
||||
<h2 class="chapter">1 Developers Guide<span class="pull-right"><a class="anchor hidden-xs" href="#Developers-Guide" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Developers-Guide" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<a name="Notes-for-external-developers"></a>
|
||||
<h3 class="section">1.1 Notes for external developers<span class="pull-right"><a class="anchor hidden-xs" href="#Notes-for-external-developers" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Notes-for-external-developers" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>This document is mostly useful for internal FFmpeg developers.
|
||||
External developers who need to use the API in their application should
|
||||
refer to the API doxygen documentation in the public headers, and
|
||||
check the examples in <samp>doc/examples</samp> and in the source code to
|
||||
see how the public API is employed.
|
||||
</p>
|
||||
<p>You can use the FFmpeg libraries in your commercial program, but you
|
||||
are encouraged to <em>publish any patch you make</em>. In this case the
|
||||
best way to proceed is to send your patches to the ffmpeg-devel
|
||||
mailing list following the guidelines illustrated in the remainder of
|
||||
this document.
|
||||
</p>
|
||||
<p>For more detailed legal information about the use of FFmpeg in
|
||||
external programs read the <samp>LICENSE</samp> file in the source tree and
|
||||
consult <a href="http://ffmpeg.org/legal.html">http://ffmpeg.org/legal.html</a>.
|
||||
</p>
|
||||
<a name="Contributing"></a>
|
||||
<h3 class="section">1.2 Contributing<span class="pull-right"><a class="anchor hidden-xs" href="#Contributing" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Contributing" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>There are 3 ways by which code gets into ffmpeg.
|
||||
</p><ul>
|
||||
<li> Submitting Patches to the main developer mailing list
|
||||
see <a href="#Submitting-patches">Submitting patches</a> for details.
|
||||
</li><li> Directly committing changes to the main tree.
|
||||
</li><li> Committing changes to a git clone, for example on github.com or
|
||||
gitorious.org. And asking us to merge these changes.
|
||||
</li></ul>
|
||||
|
||||
<p>Whichever way, changes should be reviewed by the maintainer of the code
|
||||
before they are committed. And they should follow the <a href="#Coding-Rules">Coding Rules</a>.
|
||||
The developer making the commit and the author are responsible for their changes
|
||||
and should try to fix issues their commit causes.
|
||||
</p>
|
||||
<a name="Coding-Rules"></a><a name="Coding-Rules-1"></a>
|
||||
<h3 class="section">1.3 Coding Rules<span class="pull-right"><a class="anchor hidden-xs" href="#Coding-Rules-1" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Coding-Rules-1" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<a name="Code-formatting-conventions"></a>
|
||||
<h4 class="subsection">1.3.1 Code formatting conventions<span class="pull-right"><a class="anchor hidden-xs" href="#Code-formatting-conventions" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Code-formatting-conventions" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>There are the following guidelines regarding the indentation in files:
|
||||
</p>
|
||||
<ul>
|
||||
<li> Indent size is 4.
|
||||
|
||||
</li><li> The TAB character is forbidden outside of Makefiles as is any
|
||||
form of trailing whitespace. Commits containing either will be
|
||||
rejected by the git repository.
|
||||
|
||||
</li><li> You should try to limit your code lines to 80 characters; however, do so if
|
||||
and only if this improves readability.
|
||||
</li></ul>
|
||||
<p>The presentation is one inspired by ’indent -i4 -kr -nut’.
|
||||
</p>
|
||||
<p>The main priority in FFmpeg is simplicity and small code size in order to
|
||||
minimize the bug count.
|
||||
</p>
|
||||
<a name="Comments"></a>
|
||||
<h4 class="subsection">1.3.2 Comments<span class="pull-right"><a class="anchor hidden-xs" href="#Comments" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Comments" aria-hidden="true">TOC</a></span></h4>
|
||||
<p>Use the JavaDoc/Doxygen format (see examples below) so that code documentation
|
||||
can be generated automatically. All nontrivial functions should have a comment
|
||||
above them explaining what the function does, even if it is just one sentence.
|
||||
All structures and their member variables should be documented, too.
|
||||
</p>
|
||||
<p>Avoid Qt-style and similar Doxygen syntax with <code>!</code> in it, i.e. replace
|
||||
<code>//!</code> with <code>///</code> and similar. Also @ syntax should be employed
|
||||
for markup commands, i.e. use <code>@param</code> and not <code>\param</code>.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">/**
|
||||
* @file
|
||||
* MPEG codec.
|
||||
* @author ...
|
||||
*/
|
||||
|
||||
/**
|
||||
* Summary sentence.
|
||||
* more text ...
|
||||
* ...
|
||||
*/
|
||||
typedef struct Foobar {
|
||||
int var1; /**< var1 description */
|
||||
int var2; ///< var2 description
|
||||
/** var3 description */
|
||||
int var3;
|
||||
} Foobar;
|
||||
|
||||
/**
|
||||
* Summary sentence.
|
||||
* more text ...
|
||||
* ...
|
||||
* @param my_parameter description of my_parameter
|
||||
* @return return value description
|
||||
*/
|
||||
int myfunc(int my_parameter)
|
||||
...
|
||||
</pre></div>
|
||||
|
||||
<a name="C-language-features"></a>
|
||||
<h4 class="subsection">1.3.3 C language features<span class="pull-right"><a class="anchor hidden-xs" href="#C-language-features" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-C-language-features" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>FFmpeg is programmed in the ISO C90 language with a few additional
|
||||
features from ISO C99, namely:
|
||||
</p>
|
||||
<ul>
|
||||
<li> the ‘<samp>inline</samp>’ keyword;
|
||||
|
||||
</li><li> ‘<samp>//</samp>’ comments;
|
||||
|
||||
</li><li> designated struct initializers (‘<samp>struct s x = { .i = 17 };</samp>’)
|
||||
|
||||
</li><li> compound literals (‘<samp>x = (struct s) { 17, 23 };</samp>’)
|
||||
</li></ul>
|
||||
|
||||
<p>These features are supported by all compilers we care about, so we will not
|
||||
accept patches to remove their use unless they absolutely do not impair
|
||||
clarity and performance.
|
||||
</p>
|
||||
<p>All code must compile with recent versions of GCC and a number of other
|
||||
currently supported compilers. To ensure compatibility, please do not use
|
||||
additional C99 features or GCC extensions. Especially watch out for:
|
||||
</p>
|
||||
<ul>
|
||||
<li> mixing statements and declarations;
|
||||
|
||||
</li><li> ‘<samp>long long</samp>’ (use ‘<samp>int64_t</samp>’ instead);
|
||||
|
||||
</li><li> ‘<samp>__attribute__</samp>’ not protected by ‘<samp>#ifdef __GNUC__</samp>’ or similar;
|
||||
|
||||
</li><li> GCC statement expressions (‘<samp>(x = ({ int y = 4; y; })</samp>’).
|
||||
</li></ul>
|
||||
|
||||
<a name="Naming-conventions"></a>
|
||||
<h4 class="subsection">1.3.4 Naming conventions<span class="pull-right"><a class="anchor hidden-xs" href="#Naming-conventions" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Naming-conventions" aria-hidden="true">TOC</a></span></h4>
|
||||
<p>All names should be composed with underscores (_), not CamelCase. For example,
|
||||
‘<samp>avfilter_get_video_buffer</samp>’ is an acceptable function name and
|
||||
‘<samp>AVFilterGetVideo</samp>’ is not. The exception from this are type names, like
|
||||
for example structs and enums; they should always be in the CamelCase
|
||||
</p>
|
||||
<p>There are the following conventions for naming variables and functions:
|
||||
</p>
|
||||
<ul>
|
||||
<li> For local variables no prefix is required.
|
||||
|
||||
</li><li> For file-scope variables and functions declared as <code>static</code>, no prefix
|
||||
is required.
|
||||
|
||||
</li><li> For variables and functions visible outside of file scope, but only used
|
||||
internally by a library, an <code>ff_</code> prefix should be used,
|
||||
e.g. ‘<samp>ff_w64_demuxer</samp>’.
|
||||
|
||||
</li><li> For variables and functions visible outside of file scope, used internally
|
||||
across multiple libraries, use <code>avpriv_</code> as prefix, for example,
|
||||
‘<samp>avpriv_aac_parse_header</samp>’.
|
||||
|
||||
</li><li> Each library has its own prefix for public symbols, in addition to the
|
||||
commonly used <code>av_</code> (<code>avformat_</code> for libavformat,
|
||||
<code>avcodec_</code> for libavcodec, <code>swr_</code> for libswresample, etc).
|
||||
Check the existing code and choose names accordingly.
|
||||
Note that some symbols without these prefixes are also exported for
|
||||
retro-compatibility reasons. These exceptions are declared in the
|
||||
<code>lib<name>/lib<name>.v</code> files.
|
||||
</li></ul>
|
||||
|
||||
<p>Furthermore, name space reserved for the system should not be invaded.
|
||||
Identifiers ending in <code>_t</code> are reserved by
|
||||
<a href="http://pubs.opengroup.org/onlinepubs/007904975/functions/xsh_chap02_02.html#tag_02_02_02">POSIX</a>.
|
||||
Also avoid names starting with <code>__</code> or <code>_</code> followed by an uppercase
|
||||
letter as they are reserved by the C standard. Names starting with <code>_</code>
|
||||
are reserved at the file level and may not be used for externally visible
|
||||
symbols. If in doubt, just avoid names starting with <code>_</code> altogether.
|
||||
</p>
|
||||
<a name="Miscellaneous-conventions"></a>
|
||||
<h4 class="subsection">1.3.5 Miscellaneous conventions<span class="pull-right"><a class="anchor hidden-xs" href="#Miscellaneous-conventions" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Miscellaneous-conventions" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<ul>
|
||||
<li> fprintf and printf are forbidden in libavformat and libavcodec,
|
||||
please use av_log() instead.
|
||||
|
||||
</li><li> Casts should be used only when necessary. Unneeded parentheses
|
||||
should also be avoided if they don’t make the code easier to understand.
|
||||
</li></ul>
|
||||
|
||||
<a name="Editor-configuration"></a>
|
||||
<h4 class="subsection">1.3.6 Editor configuration<span class="pull-right"><a class="anchor hidden-xs" href="#Editor-configuration" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Editor-configuration" aria-hidden="true">TOC</a></span></h4>
|
||||
<p>In order to configure Vim to follow FFmpeg formatting conventions, paste
|
||||
the following snippet into your <samp>.vimrc</samp>:
|
||||
</p><div class="example">
|
||||
<pre class="example">" indentation rules for FFmpeg: 4 spaces, no tabs
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
" Do not highlight spaces at the end of line while typing on that line.
|
||||
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@<!$/
|
||||
</pre></div>
|
||||
|
||||
<p>For Emacs, add these roughly equivalent lines to your <samp>.emacs.d/init.el</samp>:
|
||||
</p><div class="example">
|
||||
<pre class="example">(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
</pre></div>
|
||||
|
||||
<a name="Development-Policy"></a>
|
||||
<h3 class="section">1.4 Development Policy<span class="pull-right"><a class="anchor hidden-xs" href="#Development-Policy" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Development-Policy" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<ol>
|
||||
<li> Contributions should be licensed under the
|
||||
<a href="http://www.gnu.org/licenses/lgpl-2.1.html">LGPL 2.1</a>,
|
||||
including an "or any later version" clause, or, if you prefer
|
||||
a gift-style license, the
|
||||
<a href="http://opensource.org/licenses/isc-license.txt">ISC</a> or
|
||||
<a href="http://mit-license.org/">MIT</a> license.
|
||||
<a href="http://www.gnu.org/licenses/gpl-2.0.html">GPL 2</a> including
|
||||
an "or any later version" clause is also acceptable, but LGPL is
|
||||
preferred.
|
||||
If you add a new file, give it a proper license header. Do not copy and
|
||||
paste it from a random place, use an existing file as template.
|
||||
|
||||
</li><li> You must not commit code which breaks FFmpeg! (Meaning unfinished but
|
||||
enabled code which breaks compilation or compiles but does not work or
|
||||
breaks the regression tests)
|
||||
You can commit unfinished stuff (for testing etc), but it must be disabled
|
||||
(#ifdef etc) by default so it does not interfere with other developers’
|
||||
work.
|
||||
|
||||
</li><li> The commit message should have a short first line in the form of
|
||||
a ‘<samp>topic: short description</samp>’ as a header, separated by a newline
|
||||
from the body consisting of an explanation of why the change is necessary.
|
||||
If the commit fixes a known bug on the bug tracker, the commit message
|
||||
should include its bug ID. Referring to the issue on the bug tracker does
|
||||
not exempt you from writing an excerpt of the bug in the commit message.
|
||||
|
||||
</li><li> You do not have to over-test things. If it works for you, and you think it
|
||||
should work for others, then commit. If your code has problems
|
||||
(portability, triggers compiler bugs, unusual environment etc) they will be
|
||||
reported and eventually fixed.
|
||||
|
||||
</li><li> Do not commit unrelated changes together, split them into self-contained
|
||||
pieces. Also do not forget that if part B depends on part A, but A does not
|
||||
depend on B, then A can and should be committed first and separate from B.
|
||||
Keeping changes well split into self-contained parts makes reviewing and
|
||||
understanding them on the commit log mailing list easier. This also helps
|
||||
in case of debugging later on.
|
||||
Also if you have doubts about splitting or not splitting, do not hesitate to
|
||||
ask/discuss it on the developer mailing list.
|
||||
|
||||
</li><li> Do not change behavior of the programs (renaming options etc) or public
|
||||
API or ABI without first discussing it on the ffmpeg-devel mailing list.
|
||||
Do not remove functionality from the code. Just improve!
|
||||
|
||||
<p>Note: Redundant code can be removed.
|
||||
</p>
|
||||
</li><li> Do not commit changes to the build system (Makefiles, configure script)
|
||||
which change behavior, defaults etc, without asking first. The same
|
||||
applies to compiler warning fixes, trivial looking fixes and to code
|
||||
maintained by other developers. We usually have a reason for doing things
|
||||
the way we do. Send your changes as patches to the ffmpeg-devel mailing
|
||||
list, and if the code maintainers say OK, you may commit. This does not
|
||||
apply to files you wrote and/or maintain.
|
||||
|
||||
</li><li> We refuse source indentation and other cosmetic changes if they are mixed
|
||||
with functional changes, such commits will be rejected and removed. Every
|
||||
developer has his own indentation style, you should not change it. Of course
|
||||
if you (re)write something, you can use your own style, even though we would
|
||||
prefer if the indentation throughout FFmpeg was consistent (Many projects
|
||||
force a given indentation style - we do not.). If you really need to make
|
||||
indentation changes (try to avoid this), separate them strictly from real
|
||||
changes.
|
||||
|
||||
<p>NOTE: If you had to put if(){ .. } over a large (> 5 lines) chunk of code,
|
||||
then either do NOT change the indentation of the inner part within (do not
|
||||
move it to the right)! or do so in a separate commit
|
||||
</p>
|
||||
</li><li> Always fill out the commit log message. Describe in a few lines what you
|
||||
changed and why. You can refer to mailing list postings if you fix a
|
||||
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
|
||||
Recommended format:
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">area changed: Short 1 line description
|
||||
|
||||
details describing what and why and giving references.
|
||||
</pre></div>
|
||||
|
||||
</li><li> Make sure the author of the commit is set correctly. (see git commit –author)
|
||||
If you apply a patch, send an
|
||||
answer to ffmpeg-devel (or wherever you got the patch from) saying that
|
||||
you applied the patch.
|
||||
|
||||
</li><li> When applying patches that have been discussed (at length) on the mailing
|
||||
list, reference the thread in the log message.
|
||||
|
||||
</li><li> Do NOT commit to code actively maintained by others without permission.
|
||||
Send a patch to ffmpeg-devel instead. If no one answers within a reasonable
|
||||
timeframe (12h for build failures and security fixes, 3 days small changes,
|
||||
1 week for big patches) then commit your patch if you think it is OK.
|
||||
Also note, the maintainer can simply ask for more time to review!
|
||||
|
||||
</li><li> Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits
|
||||
are sent there and reviewed by all the other developers. Bugs and possible
|
||||
improvements or general questions regarding commits are discussed there. We
|
||||
expect you to react if problems with your code are uncovered.
|
||||
|
||||
</li><li> Update the documentation if you change behavior or add features. If you are
|
||||
unsure how best to do this, send a patch to ffmpeg-devel, the documentation
|
||||
maintainer(s) will review and commit your stuff.
|
||||
|
||||
</li><li> Try to keep important discussions and requests (also) on the public
|
||||
developer mailing list, so that all developers can benefit from them.
|
||||
|
||||
</li><li> Never write to unallocated memory, never write over the end of arrays,
|
||||
always check values read from some untrusted source before using them
|
||||
as array index or other risky things.
|
||||
|
||||
</li><li> Remember to check if you need to bump versions for the specific libav*
|
||||
parts (libavutil, libavcodec, libavformat) you are changing. You need
|
||||
to change the version integer.
|
||||
Incrementing the first component means no backward compatibility to
|
||||
previous versions (e.g. removal of a function from the public API).
|
||||
Incrementing the second component means backward compatible change
|
||||
(e.g. addition of a function to the public API or extension of an
|
||||
existing data structure).
|
||||
Incrementing the third component means a noteworthy binary compatible
|
||||
change (e.g. encoder bug fix that matters for the decoder). The third
|
||||
component always starts at 100 to distinguish FFmpeg from Libav.
|
||||
|
||||
</li><li> Compiler warnings indicate potential bugs or code with bad style. If a type of
|
||||
warning always points to correct and clean code, that warning should
|
||||
be disabled, not the code changed.
|
||||
Thus the remaining warnings can either be bugs or correct code.
|
||||
If it is a bug, the bug has to be fixed. If it is not, the code should
|
||||
be changed to not generate a warning unless that causes a slowdown
|
||||
or obfuscates the code.
|
||||
|
||||
</li><li> Make sure that no parts of the codebase that you maintain are missing from the
|
||||
<samp>MAINTAINERS</samp> file. If something that you want to maintain is missing add it with
|
||||
your name after it.
|
||||
If at some point you no longer want to maintain some code, then please help
|
||||
finding a new maintainer and also don’t forget updating the <samp>MAINTAINERS</samp> file.
|
||||
</li></ol>
|
||||
|
||||
<p>We think our rules are not too hard. If you have comments, contact us.
|
||||
</p>
|
||||
<a name="Submitting-patches"></a><a name="Submitting-patches-1"></a>
|
||||
<h3 class="section">1.5 Submitting patches<span class="pull-right"><a class="anchor hidden-xs" href="#Submitting-patches-1" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Submitting-patches-1" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>First, read the <a href="#Coding-Rules">Coding Rules</a> above if you did not yet, in particular
|
||||
the rules regarding patch submission.
|
||||
</p>
|
||||
<p>When you submit your patch, please use <code>git format-patch</code> or
|
||||
<code>git send-email</code>. We cannot read other diffs :-)
|
||||
</p>
|
||||
<p>Also please do not submit a patch which contains several unrelated changes.
|
||||
Split it into separate, self-contained pieces. This does not mean splitting
|
||||
file by file. Instead, make the patch as small as possible while still
|
||||
keeping it as a logical unit that contains an individual change, even
|
||||
if it spans multiple files. This makes reviewing your patches much easier
|
||||
for us and greatly increases your chances of getting your patch applied.
|
||||
</p>
|
||||
<p>Use the patcheck tool of FFmpeg to check your patch.
|
||||
The tool is located in the tools directory.
|
||||
</p>
|
||||
<p>Run the <a href="#Regression-tests">Regression tests</a> before submitting a patch in order to verify
|
||||
it does not cause unexpected problems.
|
||||
</p>
|
||||
<p>It also helps quite a bit if you tell us what the patch does (for example
|
||||
’replaces lrint by lrintf’), and why (for example ’*BSD isn’t C99 compliant
|
||||
and has no lrint()’)
|
||||
</p>
|
||||
<p>Also please if you send several patches, send each patch as a separate mail,
|
||||
do not attach several unrelated patches to the same mail.
|
||||
</p>
|
||||
<p>Patches should be posted to the
|
||||
<a href="http://lists.ffmpeg.org/mailman/listinfo/ffmpeg-devel">ffmpeg-devel</a>
|
||||
mailing list. Use <code>git send-email</code> when possible since it will properly
|
||||
send patches without requiring extra care. If you cannot, then send patches
|
||||
as base64-encoded attachments, so your patch is not trashed during
|
||||
transmission.
|
||||
</p>
|
||||
<p>Your patch will be reviewed on the mailing list. You will likely be asked
|
||||
to make some changes and are expected to send in an improved version that
|
||||
incorporates the requests from the review. This process may go through
|
||||
several iterations. Once your patch is deemed good enough, some developer
|
||||
will pick it up and commit it to the official FFmpeg tree.
|
||||
</p>
|
||||
<p>Give us a few days to react. But if some time passes without reaction,
|
||||
send a reminder by email. Your patch should eventually be dealt with.
|
||||
</p>
|
||||
|
||||
<a name="New-codecs-or-formats-checklist"></a>
|
||||
<h3 class="section">1.6 New codecs or formats checklist<span class="pull-right"><a class="anchor hidden-xs" href="#New-codecs-or-formats-checklist" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-New-codecs-or-formats-checklist" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<ol>
|
||||
<li> Did you use av_cold for codec initialization and close functions?
|
||||
|
||||
</li><li> Did you add a long_name under NULL_IF_CONFIG_SMALL to the AVCodec or
|
||||
AVInputFormat/AVOutputFormat struct?
|
||||
|
||||
</li><li> Did you bump the minor version number (and reset the micro version
|
||||
number) in <samp>libavcodec/version.h</samp> or <samp>libavformat/version.h</samp>?
|
||||
|
||||
</li><li> Did you register it in <samp>allcodecs.c</samp> or <samp>allformats.c</samp>?
|
||||
|
||||
</li><li> Did you add the AVCodecID to <samp>avcodec.h</samp>?
|
||||
When adding new codec IDs, also add an entry to the codec descriptor
|
||||
list in <samp>libavcodec/codec_desc.c</samp>.
|
||||
|
||||
</li><li> If it has a FourCC, did you add it to <samp>libavformat/riff.c</samp>,
|
||||
even if it is only a decoder?
|
||||
|
||||
</li><li> Did you add a rule to compile the appropriate files in the Makefile?
|
||||
Remember to do this even if you’re just adding a format to a file that is
|
||||
already being compiled by some other rule, like a raw demuxer.
|
||||
|
||||
</li><li> Did you add an entry to the table of supported formats or codecs in
|
||||
<samp>doc/general.texi</samp>?
|
||||
|
||||
</li><li> Did you add an entry in the Changelog?
|
||||
|
||||
</li><li> If it depends on a parser or a library, did you add that dependency in
|
||||
configure?
|
||||
|
||||
</li><li> Did you <code>git add</code> the appropriate files before committing?
|
||||
|
||||
</li><li> Did you make sure it compiles standalone, i.e. with
|
||||
<code>configure --disable-everything --enable-decoder=foo</code>
|
||||
(or <code>--enable-demuxer</code> or whatever your component is)?
|
||||
</li></ol>
|
||||
|
||||
|
||||
<a name="patch-submission-checklist"></a>
|
||||
<h3 class="section">1.7 patch submission checklist<span class="pull-right"><a class="anchor hidden-xs" href="#patch-submission-checklist" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-patch-submission-checklist" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<ol>
|
||||
<li> Does <code>make fate</code> pass with the patch applied?
|
||||
|
||||
</li><li> Was the patch generated with git format-patch or send-email?
|
||||
|
||||
</li><li> Did you sign off your patch? (git commit -s)
|
||||
See <a href="http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches">http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches</a> for the meaning
|
||||
of sign off.
|
||||
|
||||
</li><li> Did you provide a clear git commit log message?
|
||||
|
||||
</li><li> Is the patch against latest FFmpeg git master branch?
|
||||
|
||||
</li><li> Are you subscribed to ffmpeg-devel?
|
||||
(the list is subscribers only due to spam)
|
||||
|
||||
</li><li> Have you checked that the changes are minimal, so that the same cannot be
|
||||
achieved with a smaller patch and/or simpler final code?
|
||||
|
||||
</li><li> If the change is to speed critical code, did you benchmark it?
|
||||
|
||||
</li><li> If you did any benchmarks, did you provide them in the mail?
|
||||
|
||||
</li><li> Have you checked that the patch does not introduce buffer overflows or
|
||||
other security issues?
|
||||
|
||||
</li><li> Did you test your decoder or demuxer against damaged data? If no, see
|
||||
tools/trasher, the noise bitstream filter, and
|
||||
<a href="http://caca.zoy.org/wiki/zzuf">zzuf</a>. Your decoder or demuxer
|
||||
should not crash, end in a (near) infinite loop, or allocate ridiculous
|
||||
amounts of memory when fed damaged data.
|
||||
|
||||
</li><li> Does the patch not mix functional and cosmetic changes?
|
||||
|
||||
</li><li> Did you add tabs or trailing whitespace to the code? Both are forbidden.
|
||||
|
||||
</li><li> Is the patch attached to the email you send?
|
||||
|
||||
</li><li> Is the mime type of the patch correct? It should be text/x-diff or
|
||||
text/x-patch or at least text/plain and not application/octet-stream.
|
||||
|
||||
</li><li> If the patch fixes a bug, did you provide a verbose analysis of the bug?
|
||||
|
||||
</li><li> If the patch fixes a bug, did you provide enough information, including
|
||||
a sample, so the bug can be reproduced and the fix can be verified?
|
||||
Note please do not attach samples >100k to mails but rather provide a
|
||||
URL, you can upload to ftp://upload.ffmpeg.org
|
||||
|
||||
</li><li> Did you provide a verbose summary about what the patch does change?
|
||||
|
||||
</li><li> Did you provide a verbose explanation why it changes things like it does?
|
||||
|
||||
</li><li> Did you provide a verbose summary of the user visible advantages and
|
||||
disadvantages if the patch is applied?
|
||||
|
||||
</li><li> Did you provide an example so we can verify the new feature added by the
|
||||
patch easily?
|
||||
|
||||
</li><li> If you added a new file, did you insert a license header? It should be
|
||||
taken from FFmpeg, not randomly copied and pasted from somewhere else.
|
||||
|
||||
</li><li> You should maintain alphabetical order in alphabetically ordered lists as
|
||||
long as doing so does not break API/ABI compatibility.
|
||||
|
||||
</li><li> Lines with similar content should be aligned vertically when doing so
|
||||
improves readability.
|
||||
|
||||
</li><li> Consider to add a regression test for your code.
|
||||
|
||||
</li><li> If you added YASM code please check that things still work with –disable-yasm
|
||||
|
||||
</li><li> Make sure you check the return values of function and return appropriate
|
||||
error codes. Especially memory allocation functions like <code>av_malloc()</code>
|
||||
are notoriously left unchecked, which is a serious problem.
|
||||
|
||||
</li><li> Test your code with valgrind and or Address Sanitizer to ensure it’s free
|
||||
of leaks, out of array accesses, etc.
|
||||
</li></ol>
|
||||
|
||||
<a name="Patch-review-process"></a>
|
||||
<h3 class="section">1.8 Patch review process<span class="pull-right"><a class="anchor hidden-xs" href="#Patch-review-process" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Patch-review-process" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>All patches posted to ffmpeg-devel will be reviewed, unless they contain a
|
||||
clear note that the patch is not for the git master branch.
|
||||
Reviews and comments will be posted as replies to the patch on the
|
||||
mailing list. The patch submitter then has to take care of every comment,
|
||||
that can be by resubmitting a changed patch or by discussion. Resubmitted
|
||||
patches will themselves be reviewed like any other patch. If at some point
|
||||
a patch passes review with no comments then it is approved, that can for
|
||||
simple and small patches happen immediately while large patches will generally
|
||||
have to be changed and reviewed many times before they are approved.
|
||||
After a patch is approved it will be committed to the repository.
|
||||
</p>
|
||||
<p>We will review all submitted patches, but sometimes we are quite busy so
|
||||
especially for large patches this can take several weeks.
|
||||
</p>
|
||||
<p>If you feel that the review process is too slow and you are willing to try to
|
||||
take over maintainership of the area of code you change then just clone
|
||||
git master and maintain the area of code there. We will merge each area from
|
||||
where its best maintained.
|
||||
</p>
|
||||
<p>When resubmitting patches, please do not make any significant changes
|
||||
not related to the comments received during review. Such patches will
|
||||
be rejected. Instead, submit significant changes or new features as
|
||||
separate patches.
|
||||
</p>
|
||||
<a name="Regression-tests"></a><a name="Regression-tests-1"></a>
|
||||
<h3 class="section">1.9 Regression tests<span class="pull-right"><a class="anchor hidden-xs" href="#Regression-tests-1" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Regression-tests-1" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Before submitting a patch (or committing to the repository), you should at least
|
||||
test that you did not break anything.
|
||||
</p>
|
||||
<p>Running ’make fate’ accomplishes this, please see <a href="fate.html">fate.html</a> for details.
|
||||
</p>
|
||||
<p>[Of course, some patches may change the results of the regression tests. In
|
||||
this case, the reference results of the regression tests shall be modified
|
||||
accordingly].
|
||||
</p>
|
||||
<a name="Adding-files-to-the-fate_002dsuite-dataset"></a>
|
||||
<h4 class="subsection">1.9.1 Adding files to the fate-suite dataset<span class="pull-right"><a class="anchor hidden-xs" href="#Adding-files-to-the-fate_002dsuite-dataset" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Adding-files-to-the-fate_002dsuite-dataset" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>When there is no muxer or encoder available to generate test media for a
|
||||
specific test then the media has to be included in the fate-suite.
|
||||
First please make sure that the sample file is as small as possible to test the
|
||||
respective decoder or demuxer sufficiently. Large files increase network
|
||||
bandwidth and disk space requirements.
|
||||
Once you have a working fate test and fate sample, provide in the commit
|
||||
message or introductory message for the patch series that you post to
|
||||
the ffmpeg-devel mailing list, a direct link to download the sample media.
|
||||
</p>
|
||||
|
||||
<a name="Visualizing-Test-Coverage"></a>
|
||||
<h4 class="subsection">1.9.2 Visualizing Test Coverage<span class="pull-right"><a class="anchor hidden-xs" href="#Visualizing-Test-Coverage" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Visualizing-Test-Coverage" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>The FFmpeg build system allows visualizing the test coverage in an easy
|
||||
manner with the coverage tools <code>gcov</code>/<code>lcov</code>. This involves
|
||||
the following steps:
|
||||
</p>
|
||||
<ol>
|
||||
<li> Configure to compile with instrumentation enabled:
|
||||
<code>configure --toolchain=gcov</code>.
|
||||
|
||||
</li><li> Run your test case, either manually or via FATE. This can be either
|
||||
the full FATE regression suite, or any arbitrary invocation of any
|
||||
front-end tool provided by FFmpeg, in any combination.
|
||||
|
||||
</li><li> Run <code>make lcov</code> to generate coverage data in HTML format.
|
||||
|
||||
</li><li> View <code>lcov/index.html</code> in your preferred HTML viewer.
|
||||
</li></ol>
|
||||
|
||||
<p>You can use the command <code>make lcov-reset</code> to reset the coverage
|
||||
measurements. You will need to rerun <code>make lcov</code> after running a
|
||||
new test.
|
||||
</p>
|
||||
<a name="Using-Valgrind"></a>
|
||||
<h4 class="subsection">1.9.3 Using Valgrind<span class="pull-right"><a class="anchor hidden-xs" href="#Using-Valgrind" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Using-Valgrind" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>The configure script provides a shortcut for using valgrind to spot bugs
|
||||
related to memory handling. Just add the option
|
||||
<code>--toolchain=valgrind-memcheck</code> or <code>--toolchain=valgrind-massif</code>
|
||||
to your configure line, and reasonable defaults will be set for running
|
||||
FATE under the supervision of either the <strong>memcheck</strong> or the
|
||||
<strong>massif</strong> tool of the valgrind suite.
|
||||
</p>
|
||||
<p>In case you need finer control over how valgrind is invoked, use the
|
||||
<code>--target-exec='valgrind <your_custom_valgrind_options></code> option in
|
||||
your configure line instead.
|
||||
</p>
|
||||
<a name="Release-process"></a><a name="Release-process-1"></a>
|
||||
<h3 class="section">1.10 Release process<span class="pull-right"><a class="anchor hidden-xs" href="#Release-process-1" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Release-process-1" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg maintains a set of <strong>release branches</strong>, which are the
|
||||
recommended deliverable for system integrators and distributors (such as
|
||||
Linux distributions, etc.). At regular times, a <strong>release
|
||||
manager</strong> prepares, tests and publishes tarballs on the
|
||||
<a href="http://ffmpeg.org">http://ffmpeg.org</a> website.
|
||||
</p>
|
||||
<p>There are two kinds of releases:
|
||||
</p>
|
||||
<ol>
|
||||
<li> <strong>Major releases</strong> always include the latest and greatest
|
||||
features and functionality.
|
||||
|
||||
</li><li> <strong>Point releases</strong> are cut from <strong>release</strong> branches,
|
||||
which are named <code>release/X</code>, with <code>X</code> being the release
|
||||
version number.
|
||||
</li></ol>
|
||||
|
||||
<p>Note that we promise to our users that shared libraries from any FFmpeg
|
||||
release never break programs that have been <strong>compiled</strong> against
|
||||
previous versions of <strong>the same release series</strong> in any case!
|
||||
</p>
|
||||
<p>However, from time to time, we do make API changes that require adaptations
|
||||
in applications. Such changes are only allowed in (new) major releases and
|
||||
require further steps such as bumping library version numbers and/or
|
||||
adjustments to the symbol versioning file. Please discuss such changes
|
||||
on the <strong>ffmpeg-devel</strong> mailing list in time to allow forward planning.
|
||||
</p>
|
||||
<a name="Criteria-for-Point-Releases"></a><a name="Criteria-for-Point-Releases-1"></a>
|
||||
<h4 class="subsection">1.10.1 Criteria for Point Releases<span class="pull-right"><a class="anchor hidden-xs" href="#Criteria-for-Point-Releases-1" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Criteria-for-Point-Releases-1" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>Changes that match the following criteria are valid candidates for
|
||||
inclusion into a point release:
|
||||
</p>
|
||||
<ol>
|
||||
<li> Fixes a security issue, preferably identified by a <strong>CVE
|
||||
number</strong> issued by <a href="http://cve.mitre.org/">http://cve.mitre.org/</a>.
|
||||
|
||||
</li><li> Fixes a documented bug in <a href="https://trac.ffmpeg.org">https://trac.ffmpeg.org</a>.
|
||||
|
||||
</li><li> Improves the included documentation.
|
||||
|
||||
</li><li> Retains both source code and binary compatibility with previous
|
||||
point releases of the same release branch.
|
||||
</li></ol>
|
||||
|
||||
<p>The order for checking the rules is (1 OR 2 OR 3) AND 4.
|
||||
</p>
|
||||
|
||||
<a name="Release-Checklist"></a>
|
||||
<h4 class="subsection">1.10.2 Release Checklist<span class="pull-right"><a class="anchor hidden-xs" href="#Release-Checklist" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Release-Checklist" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>The release process involves the following steps:
|
||||
</p>
|
||||
<ol>
|
||||
<li> Ensure that the <samp>RELEASE</samp> file contains the version number for
|
||||
the upcoming release.
|
||||
|
||||
</li><li> Add the release at <a href="https://trac.ffmpeg.org/admin/ticket/versions">https://trac.ffmpeg.org/admin/ticket/versions</a>.
|
||||
|
||||
</li><li> Announce the intent to do a release to the mailing list.
|
||||
|
||||
</li><li> Make sure all relevant security fixes have been backported. See
|
||||
<a href="https://ffmpeg.org/security.html">https://ffmpeg.org/security.html</a>.
|
||||
|
||||
</li><li> Ensure that the FATE regression suite still passes in the release
|
||||
branch on at least <strong>i386</strong> and <strong>amd64</strong>
|
||||
(cf. <a href="#Regression-tests">Regression tests</a>).
|
||||
|
||||
</li><li> Prepare the release tarballs in <code>bz2</code> and <code>gz</code> formats, and
|
||||
supplementing files that contain <code>gpg</code> signatures
|
||||
|
||||
</li><li> Publish the tarballs at <a href="http://ffmpeg.org/releases">http://ffmpeg.org/releases</a>. Create and
|
||||
push an annotated tag in the form <code>nX</code>, with <code>X</code>
|
||||
containing the version number.
|
||||
|
||||
</li><li> Propose and send a patch to the <strong>ffmpeg-devel</strong> mailing list
|
||||
with a news entry for the website.
|
||||
|
||||
</li><li> Publish the news entry.
|
||||
|
||||
</li><li> Send announcement to the mailing list.
|
||||
</li></ol>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
44
Externals/ffmpeg/dev/doc/examples/Makefile
vendored
44
Externals/ffmpeg/dev/doc/examples/Makefile
vendored
@ -1,44 +0,0 @@
|
||||
# use pkg-config for getting CFLAGS and LDLIBS
|
||||
FFMPEG_LIBS= libavdevice \
|
||||
libavformat \
|
||||
libavfilter \
|
||||
libavcodec \
|
||||
libswresample \
|
||||
libswscale \
|
||||
libavutil \
|
||||
|
||||
CFLAGS += -Wall -g
|
||||
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
|
||||
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
|
||||
EXAMPLES= avio_reading \
|
||||
decoding_encoding \
|
||||
demuxing_decoding \
|
||||
extract_mvs \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
metadata \
|
||||
muxing \
|
||||
remuxing \
|
||||
resampling_audio \
|
||||
scaling_video \
|
||||
transcode_aac \
|
||||
transcoding \
|
||||
|
||||
OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
|
||||
# the following examples make explicit use of the math library
|
||||
avcodec: LDLIBS += -lm
|
||||
decoding_encoding: LDLIBS += -lm
|
||||
muxing: LDLIBS += -lm
|
||||
resampling_audio: LDLIBS += -lm
|
||||
|
||||
.phony: all clean-test clean
|
||||
|
||||
all: $(OBJS) $(EXAMPLES)
|
||||
|
||||
clean-test:
|
||||
$(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg
|
||||
|
||||
clean: clean-test
|
||||
$(RM) $(EXAMPLES) $(OBJS)
|
23
Externals/ffmpeg/dev/doc/examples/README
vendored
23
Externals/ffmpeg/dev/doc/examples/README
vendored
@ -1,23 +0,0 @@
|
||||
FFmpeg examples README
|
||||
----------------------
|
||||
|
||||
Both following use cases rely on pkg-config and make, thus make sure
|
||||
that you have them installed and working on your system.
|
||||
|
||||
|
||||
Method 1: build the installed examples in a generic read/write user directory
|
||||
|
||||
Copy to a read/write user directory and just use "make", it will link
|
||||
to the libraries on your system, assuming the PKG_CONFIG_PATH is
|
||||
correctly configured.
|
||||
|
||||
Method 2: build the examples in-tree
|
||||
|
||||
Assuming you are in the source FFmpeg checkout directory, you need to build
|
||||
FFmpeg (no need to make install in any prefix). Then just run "make examples".
|
||||
This will build the examples using the FFmpeg build system. You can clean those
|
||||
examples using "make examplesclean"
|
||||
|
||||
If you want to try the dedicated Makefile examples (to emulate the first
|
||||
method), go into doc/examples and run a command such as
|
||||
PKG_CONFIG_PATH=pc-uninstalled make.
|
134
Externals/ffmpeg/dev/doc/examples/avio_reading.c
vendored
134
Externals/ffmpeg/dev/doc/examples/avio_reading.c
vendored
@ -1,134 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat AVIOContext API example.
|
||||
*
|
||||
* Make libavformat demuxer access media content through a custom
|
||||
* AVIOContext read callback.
|
||||
* @example avio_reading.c
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
#include <libavutil/file.h>
|
||||
|
||||
struct buffer_data {
|
||||
uint8_t *ptr;
|
||||
size_t size; ///< size left in the buffer
|
||||
};
|
||||
|
||||
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
|
||||
{
|
||||
struct buffer_data *bd = (struct buffer_data *)opaque;
|
||||
buf_size = FFMIN(buf_size, bd->size);
|
||||
|
||||
printf("ptr:%p size:%zu\n", bd->ptr, bd->size);
|
||||
|
||||
/* copy internal buffer data to buf */
|
||||
memcpy(buf, bd->ptr, buf_size);
|
||||
bd->ptr += buf_size;
|
||||
bd->size -= buf_size;
|
||||
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
AVIOContext *avio_ctx = NULL;
|
||||
uint8_t *buffer = NULL, *avio_ctx_buffer = NULL;
|
||||
size_t buffer_size, avio_ctx_buffer_size = 4096;
|
||||
char *input_filename = NULL;
|
||||
int ret = 0;
|
||||
struct buffer_data bd = { 0 };
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "usage: %s input_file\n"
|
||||
"API example program to show how to read from a custom buffer "
|
||||
"accessed through AVIOContext.\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
input_filename = argv[1];
|
||||
|
||||
/* register codecs and formats and other lavf/lavc components*/
|
||||
av_register_all();
|
||||
|
||||
/* slurp file content into buffer */
|
||||
ret = av_file_map(input_filename, &buffer, &buffer_size, 0, NULL);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
/* fill opaque structure used by the AVIOContext read callback */
|
||||
bd.ptr = buffer;
|
||||
bd.size = buffer_size;
|
||||
|
||||
if (!(fmt_ctx = avformat_alloc_context())) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
avio_ctx_buffer = av_malloc(avio_ctx_buffer_size);
|
||||
if (!avio_ctx_buffer) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
|
||||
0, &bd, &read_packet, NULL, NULL);
|
||||
if (!avio_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
fmt_ctx->pb = avio_ctx;
|
||||
|
||||
ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open input\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avformat_find_stream_info(fmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
av_dump_format(fmt_ctx, 0, input_filename, 0);
|
||||
|
||||
end:
|
||||
avformat_close_input(&fmt_ctx);
|
||||
/* note: the internal buffer could have changed, and be != avio_ctx_buffer */
|
||||
if (avio_ctx) {
|
||||
av_freep(&avio_ctx->buffer);
|
||||
av_freep(&avio_ctx);
|
||||
}
|
||||
av_file_unmap(buffer, buffer_size);
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
@ -1,665 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2001 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavcodec API use example.
|
||||
*
|
||||
* @example decoding_encoding.c
|
||||
* Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
|
||||
* not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
|
||||
* format handling
|
||||
*/
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/common.h>
|
||||
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>

#define INBUF_SIZE 4096
#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096

/* check that a given sample format is supported by the encoder */
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
{
    const enum AVSampleFormat *p = codec->sample_fmts;

    while (*p != AV_SAMPLE_FMT_NONE) {
        if (*p == sample_fmt)
            return 1;
        p++;
    }
    return 0;
}

/* just pick the highest supported samplerate */
static int select_sample_rate(AVCodec *codec)
{
    const int *p;
    int best_samplerate = 0;

    if (!codec->supported_samplerates)
        return 44100;

    p = codec->supported_samplerates;
    while (*p) {
        best_samplerate = FFMAX(*p, best_samplerate);
        p++;
    }
    return best_samplerate;
}

/* select layout with the highest channel count */
static int select_channel_layout(AVCodec *codec)
{
    const uint64_t *p;
    uint64_t best_ch_layout = 0;
    int best_nb_channels = 0;

    if (!codec->channel_layouts)
        return AV_CH_LAYOUT_STEREO;

    p = codec->channel_layouts;
    while (*p) {
        int nb_channels = av_get_channel_layout_nb_channels(*p);

        if (nb_channels > best_nb_channels) {
            best_ch_layout   = *p;
            best_nb_channels = nb_channels;
        }
        p++;
    }
    return best_ch_layout;
}

/*
 * Audio encoding example
 */
static void audio_encode_example(const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    AVFrame *frame;
    AVPacket pkt;
    int i, j, k, ret, got_output;
    int buffer_size;
    FILE *f;
    uint16_t *samples;
    float t, tincr;

    printf("Encode audio file %s\n", filename);

    /* find the MP2 encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 64000;

    /* check that the encoder supports s16 pcm input */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    if (!check_sample_fmt(codec, c->sample_fmt)) {
        fprintf(stderr, "Encoder does not support sample format %s",
                av_get_sample_fmt_name(c->sample_fmt));
        exit(1);
    }

    /* select other audio parameters supported by the encoder */
    c->sample_rate    = select_sample_rate(codec);
    c->channel_layout = select_channel_layout(codec);
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    /* frame containing input raw audio */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate audio frame\n");
        exit(1);
    }

    frame->nb_samples     = c->frame_size;
    frame->format         = c->sample_fmt;
    frame->channel_layout = c->channel_layout;

    /* the codec gives us the frame size, in samples,
     * we calculate the size of the samples buffer in bytes */
    buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
                                             c->sample_fmt, 0);
    if (buffer_size < 0) {
        fprintf(stderr, "Could not get sample buffer size\n");
        exit(1);
    }
    samples = av_malloc(buffer_size);
    if (!samples) {
        fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
                buffer_size);
        exit(1);
    }
    /* setup the data pointers in the AVFrame */
    ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                                   (const uint8_t*)samples, buffer_size, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not setup audio frame\n");
        exit(1);
    }

    /* encode a single tone sound */
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for (i = 0; i < 200; i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        for (j = 0; j < c->frame_size; j++) {
            samples[2*j] = (int)(sin(t) * 10000);

            for (k = 1; k < c->channels; k++)
                samples[2*j + k] = samples[2*j];
            t += tincr;
        }
        /* encode the samples */
        ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }
        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }
    fclose(f);

    av_freep(&samples);
    av_frame_free(&frame);
    avcodec_close(c);
    av_free(c);
}

/*
 * Audio decoding.
 */
static void audio_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int len;
    FILE *f, *outfile;
    uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;
    AVFrame *decoded_frame = NULL;

    av_init_packet(&avpkt);

    printf("Decode audio file %s to %s\n", filename, outfilename);

    /* find the mpeg audio decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    outfile = fopen(outfilename, "wb");
    if (!outfile) {
        av_free(c);
        exit(1);
    }

    /* decode until eof */
    avpkt.data = inbuf;
    avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);

    while (avpkt.size > 0) {
        int i, ch;
        int got_frame = 0;

        if (!decoded_frame) {
            if (!(decoded_frame = av_frame_alloc())) {
                fprintf(stderr, "Could not allocate audio frame\n");
                exit(1);
            }
        }

        len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
        if (len < 0) {
            fprintf(stderr, "Error while decoding\n");
            exit(1);
        }
        if (got_frame) {
            /* if a frame has been decoded, output it */
            int data_size = av_get_bytes_per_sample(c->sample_fmt);
            if (data_size < 0) {
                /* This should not occur, checking just for paranoia */
                fprintf(stderr, "Failed to calculate data size\n");
                exit(1);
            }
            for (i = 0; i < decoded_frame->nb_samples; i++)
                for (ch = 0; ch < c->channels; ch++)
                    fwrite(decoded_frame->data[ch] + data_size*i, 1, data_size, outfile);
        }
        avpkt.size -= len;
        avpkt.data += len;
        avpkt.dts =
        avpkt.pts = AV_NOPTS_VALUE;
        if (avpkt.size < AUDIO_REFILL_THRESH) {
            /* Refill the input buffer, to avoid trying to decode
             * incomplete frames. Instead of this, one could also use
             * a parser, or use a proper container format through
             * libavformat. */
            memmove(inbuf, avpkt.data, avpkt.size);
            avpkt.data = inbuf;
            len = fread(avpkt.data + avpkt.size, 1,
                        AUDIO_INBUF_SIZE - avpkt.size, f);
            if (len > 0)
                avpkt.size += len;
        }
    }

    fclose(outfile);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_frame_free(&decoded_frame);
}

/*
 * Video encoding example
 */
static void video_encode_example(const char *filename, int codec_id)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int i, ret, x, y, got_output;
    FILE *f;
    AVFrame *frame;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    printf("Encode video file %s\n", filename);

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base = (AVRational){1,25};
    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    c->gop_size = 10;
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if (codec_id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;

    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                         c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }

    /* encode 1 second of video */
    for (i = 0; i < 25; i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        for (y = 0; y < c->height; y++) {
            for (x = 0; x < c->width; x++) {
                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for (y = 0; y < c->height/2; y++) {
            for (x = 0; x < c->width/2; x++) {
                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        frame->pts = i;

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);

        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    av_frame_free(&frame);
    printf("\n");
}

/*
 * Video decoding example
 */

static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
                     char *filename)
{
    FILE *f;
    int i;

    f = fopen(filename,"w");
    fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
    for (i = 0; i < ysize; i++)
        fwrite(buf + i * wrap, 1, xsize, f);
    fclose(f);
}

static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
                              AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
    int len, got_frame;
    char buf[1024];

    len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    if (len < 0) {
        fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
        return len;
    }
    if (got_frame) {
        printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
        fflush(stdout);

        /* the picture is allocated by the decoder, no need to free it */
        snprintf(buf, sizeof(buf), outfilename, *frame_count);
        pgm_save(frame->data[0], frame->linesize[0],
                 avctx->width, avctx->height, buf);
        (*frame_count)++;
    }
    if (pkt->data) {
        pkt->size -= len;
        pkt->data += len;
    }
    return 0;
}

static void video_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int frame_count;
    FILE *f;
    AVFrame *frame;
    uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;

    av_init_packet(&avpkt);

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    printf("Decode video file %s to %s\n", filename, outfilename);

    /* find the mpeg1 video decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    if (codec->capabilities & CODEC_CAP_TRUNCATED)
        c->flags |= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    frame_count = 0;
    for (;;) {
        avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
        if (avpkt.size == 0)
            break;

        /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
           and this is the only method to use them because you cannot
           know the compressed data size before analysing it.

           BUT some other codecs (msmpeg4, mpeg4) are inherently frame
           based, so you must call them with all the data for one
           frame exactly. You must also initialize 'width' and
           'height' before initializing them. */

        /* NOTE2: some codecs allow the raw parameters (frame size,
           sample rate) to be changed at any frame. We handle this, so
           you should also take care of it */

        /* here, we use a stream based decoder (mpeg1video), so we
           feed decoder and see if it could decode a frame */
        avpkt.data = inbuf;
        while (avpkt.size > 0)
            if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
                exit(1);
    }

    /* some codecs, such as MPEG, transmit the I and P frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video */
    avpkt.data = NULL;
    avpkt.size = 0;
    decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);

    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_frame_free(&frame);
    printf("\n");
}

int main(int argc, char **argv)
{
    const char *output_type;

    /* register all the codecs */
    avcodec_register_all();

    if (argc < 2) {
        printf("usage: %s output_type\n"
               "API example program to decode/encode a media stream with libavcodec.\n"
               "This program generates a synthetic stream and encodes it to a file\n"
               "named test.h264, test.mp2 or test.mpg depending on output_type.\n"
               "The encoded stream is then decoded and written to a raw data output.\n"
               "output_type must be chosen between 'h264', 'mp2', 'mpg'.\n",
               argv[0]);
        return 1;
    }
    output_type = argv[1];

    if (!strcmp(output_type, "h264")) {
        video_encode_example("test.h264", AV_CODEC_ID_H264);
    } else if (!strcmp(output_type, "mp2")) {
        audio_encode_example("test.mp2");
        audio_decode_example("test.pcm", "test.mp2");
    } else if (!strcmp(output_type, "mpg")) {
        video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
        video_decode_example("test%02d.pgm", "test.mpg");
    } else {
        fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
                output_type);
        return 1;
    }

    return 0;
}
@@ -1,386 +0,0 @@
/*
 * Copyright (c) 2012 Stefano Sabatini
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * Demuxing and decoding example.
 *
 * Show how to use the libavformat and libavcodec API to demux and
 * decode audio and video data.
 * @example demuxing_decoding.c
 */

#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>

static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static const char *video_dst_filename = NULL;
static const char *audio_dst_filename = NULL;
static FILE *video_dst_file = NULL;
static FILE *audio_dst_file = NULL;

static uint8_t *video_dst_data[4] = {NULL};
static int video_dst_linesize[4];
static int video_dst_bufsize;

static int video_stream_idx = -1, audio_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;

/* The different ways of decoding and managing data memory. You are not
 * supposed to support all the modes in your application but pick the one most
 * appropriate to your needs. Look for the use of api_mode in this example to
 * see what are the differences of API usage between them */
enum {
    API_MODE_OLD                  = 0, /* old method, deprecated */
    API_MODE_NEW_API_REF_COUNT    = 1, /* new method, using the frame reference counting */
    API_MODE_NEW_API_NO_REF_COUNT = 2, /* new method, without reference counting */
};

static int api_mode = API_MODE_OLD;

static int decode_packet(int *got_frame, int cached)
{
    int ret = 0;
    int decoded = pkt.size;

    *got_frame = 0;

    if (pkt.stream_index == video_stream_idx) {
        /* decode video frame */
        ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
            return ret;
        }

        if (*got_frame) {
            printf("video_frame%s n:%d coded_n:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   video_frame_count++, frame->coded_picture_number,
                   av_ts2timestr(frame->pts, &video_dec_ctx->time_base));

            /* copy decoded frame to destination buffer:
             * this is required since rawvideo expects non aligned data */
            av_image_copy(video_dst_data, video_dst_linesize,
                          (const uint8_t **)(frame->data), frame->linesize,
                          video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);

            /* write to rawvideo file */
            fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
        }
    } else if (pkt.stream_index == audio_stream_idx) {
        /* decode audio frame */
        ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
            return ret;
        }
        /* Some audio decoders decode only part of the packet, and have to be
         * called again with the remainder of the packet data.
         * Sample: fate-suite/lossless-audio/luckynight-partial.shn
         * Also, some decoders might over-read the packet. */
        decoded = FFMIN(ret, pkt.size);

        if (*got_frame) {
            size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
            printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   audio_frame_count++, frame->nb_samples,
                   av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));

            /* Write the raw audio data samples of the first plane. This works
             * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
             * most audio decoders output planar audio, which uses a separate
             * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
             * In other words, this code will write only the first audio channel
             * in these cases.
             * You should use libswresample or libavfilter to convert the frame
             * to packed data. */
            fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
        }
    }

    /* If we use the new API with reference counting, we own the data and need
     * to de-reference it when we don't use it anymore */
    if (*got_frame && api_mode == API_MODE_NEW_API_REF_COUNT)
        av_frame_unref(frame);

    return decoded;
}

static int open_codec_context(int *stream_idx,
                              AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
    AVDictionary *opts = NULL;

    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(type), src_filename);
        return ret;
    } else {
        *stream_idx = ret;
        st = fmt_ctx->streams[*stream_idx];

        /* find decoder for the stream */
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec) {
            fprintf(stderr, "Failed to find %s codec\n",
                    av_get_media_type_string(type));
            return AVERROR(EINVAL);
        }

        /* Init the decoders, with or without reference counting */
        if (api_mode == API_MODE_NEW_API_REF_COUNT)
            av_dict_set(&opts, "refcounted_frames", "1", 0);
        if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
            fprintf(stderr, "Failed to open %s codec\n",
                    av_get_media_type_string(type));
            return ret;
        }
    }

    return 0;
}

static int get_format_from_sample_fmt(const char **fmt,
                                      enum AVSampleFormat sample_fmt)
{
    int i;
    struct sample_fmt_entry {
        enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
    } sample_fmt_entries[] = {
        { AV_SAMPLE_FMT_U8,  "u8",    "u8"    },
        { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
        { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
        { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
        { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
    };
    *fmt = NULL;

    for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
        struct sample_fmt_entry *entry = &sample_fmt_entries[i];
        if (sample_fmt == entry->sample_fmt) {
            *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
            return 0;
        }
    }

    fprintf(stderr,
            "sample format %s is not supported as output format\n",
            av_get_sample_fmt_name(sample_fmt));
    return -1;
}

int main (int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 4 && argc != 5) {
        fprintf(stderr, "usage: %s [-refcount=<old|new_norefcount|new_refcount>] "
                "input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n\n"
                "If the -refcount option is specified, the program use the\n"
                "reference counting frame system which allows keeping a copy of\n"
                "the data for longer than one decode call. If unset, it's using\n"
                "the classic old method.\n"
                "\n", argv[0]);
        exit(1);
    }
    if (argc == 5) {
        const char *mode = argv[1] + strlen("-refcount=");
        if (!strcmp(mode, "old"))                 api_mode = API_MODE_OLD;
        else if (!strcmp(mode, "new_norefcount")) api_mode = API_MODE_NEW_API_NO_REF_COUNT;
        else if (!strcmp(mode, "new_refcount"))   api_mode = API_MODE_NEW_API_REF_COUNT;
        else {
            fprintf(stderr, "unknow mode '%s'\n", mode);
            exit(1);
        }
        argv++;
    }
    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];

    /* register all formats and codecs */
    av_register_all();

    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             video_dec_ctx->width, video_dec_ctx->height,
                             video_dec_ctx->pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }

    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            ret = 1;
            goto end;
        }
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    /* When using the new API, you need to use the libavutil/frame.h API, while
     * the classic frame management is available in libavcodec */
    if (api_mode == API_MODE_OLD)
        frame = avcodec_alloc_frame();
    else
        frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_free_packet(&orig_pkt);
    }

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
               video_dst_filename);
    }

    if (audio_stream) {
        enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
        int n_channels = audio_dec_ctx->channels;
        const char *fmt;

        if (av_sample_fmt_is_planar(sfmt)) {
            const char *packed = av_get_sample_fmt_name(sfmt);
            printf("Warning: the sample format the decoder produced is planar "
                   "(%s). This example will output the first channel only.\n",
                   packed ? packed : "?");
            sfmt = av_get_packed_sample_fmt(sfmt);
            n_channels = 1;
        }

        if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
            goto end;

        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, n_channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }

end:
    avcodec_close(video_dec_ctx);
    avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    if (api_mode == API_MODE_OLD)
        avcodec_free_frame(&frame);
    else
        av_frame_free(&frame);
    av_free(video_dst_data[0]);

    return ret < 0;
}
185 Externals/ffmpeg/dev/doc/examples/extract_mvs.c vendored
@@ -1,185 +0,0 @@
/*
 * Copyright (c) 2012 Stefano Sabatini
 * Copyright (c) 2014 Clément Bœsch
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <libavutil/motion_vector.h>
#include <libavformat/avformat.h>

static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL;
static AVStream *video_stream = NULL;
static const char *src_filename = NULL;

static int video_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;

static int decode_packet(int *got_frame, int cached)
{
    int decoded = pkt.size;

    *got_frame = 0;

    if (pkt.stream_index == video_stream_idx) {
        int ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
            return ret;
        }

        if (*got_frame) {
            int i;
            AVFrameSideData *sd;

            video_frame_count++;
            sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
            if (sd) {
                const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
                for (i = 0; i < sd->size / sizeof(*mvs); i++) {
                    const AVMotionVector *mv = &mvs[i];
                    printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
                           video_frame_count, mv->source,
                           mv->w, mv->h, mv->src_x, mv->src_y,
                           mv->dst_x, mv->dst_y, mv->flags);
                }
            }
        }
    }

    return decoded;
}

static int open_codec_context(int *stream_idx,
                              AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
    AVDictionary *opts = NULL;

    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(type), src_filename);
        return ret;
    } else {
        *stream_idx = ret;
        st = fmt_ctx->streams[*stream_idx];

        /* find decoder for the stream */
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec) {
            fprintf(stderr, "Failed to find %s codec\n",
                    av_get_media_type_string(type));
            return AVERROR(EINVAL);
        }

        /* Init the video decoder */
        av_dict_set(&opts, "flags2", "+export_mvs", 0);
        if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
            fprintf(stderr, "Failed to open %s codec\n",
                    av_get_media_type_string(type));
            return ret;
        }
    }

    return 0;
}

int main(int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 2) {
        fprintf(stderr, "Usage: %s <video>\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];

    av_register_all();

    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;
    }

    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!video_stream) {
        fprintf(stderr, "Could not find video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_free_packet(&orig_pkt);
    }

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

end:
    avcodec_close(video_dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    return ret < 0;
}
365 Externals/ffmpeg/dev/doc/examples/filter_audio.c vendored
@@ -1,365 +0,0 @@
/*
 * copyright (c) 2013 Andrew Kelley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * libavfilter API usage example.
 *
 * @example filter_audio.c
 * This example will generate a sine wave audio,
 * pass it through a simple filter chain, and then compute the MD5 checksum of
 * the output data.
 *
 * The filter chain it uses is:
 * (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
 *
 * abuffer: This provides the endpoint where you can feed the decoded samples.
 * volume: In this example we hardcode it to 0.90.
 * aformat: This converts the samples to the samplefreq, channel layout,
 *          and sample format required by the audio device.
 * abuffersink: This provides the endpoint where you can read the samples after
 *              they have passed through the filter chain.
 */

#include <inttypes.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include "libavutil/channel_layout.h"
#include "libavutil/md5.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"

#define INPUT_SAMPLERATE     48000
#define INPUT_FORMAT         AV_SAMPLE_FMT_FLTP
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0

#define VOLUME_VAL 0.90

static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
                             AVFilterContext **sink)
{
    AVFilterGraph *filter_graph;
    AVFilterContext *abuffer_ctx;
    AVFilter *abuffer;
    AVFilterContext *volume_ctx;
    AVFilter *volume;
    AVFilterContext *aformat_ctx;
    AVFilter *aformat;
    AVFilterContext *abuffersink_ctx;
    AVFilter *abuffersink;

    AVDictionary *options_dict = NULL;
    uint8_t options_str[1024];
    uint8_t ch_layout[64];

    int err;

    /* Create a new filtergraph, which will contain all the filters. */
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        fprintf(stderr, "Unable to create filter graph.\n");
        return AVERROR(ENOMEM);
    }

    /* Create the abuffer filter;
     * it will be used for feeding the data into the graph. */
    abuffer = avfilter_get_by_name("abuffer");
    if (!abuffer) {
        fprintf(stderr, "Could not find the abuffer filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
    if (!abuffer_ctx) {
        fprintf(stderr, "Could not allocate the abuffer instance.\n");
        return AVERROR(ENOMEM);
    }

    /* Set the filter options through the AVOptions API. */
    av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
    av_opt_set    (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
    av_opt_set    (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
    av_opt_set_q  (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(abuffer_ctx, "sample_rate", INPUT_SAMPLERATE, AV_OPT_SEARCH_CHILDREN);

    /* Now initialize the filter; we pass NULL options, since we have already
     * set all the options above. */
    err = avfilter_init_str(abuffer_ctx, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the abuffer filter.\n");
        return err;
    }

    /* Create volume filter. */
    volume = avfilter_get_by_name("volume");
    if (!volume) {
        fprintf(stderr, "Could not find the volume filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume");
    if (!volume_ctx) {
        fprintf(stderr, "Could not allocate the volume instance.\n");
        return AVERROR(ENOMEM);
    }

    /* A different way of passing the options is as key/value pairs in a
     * dictionary. */
    av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0);
    err = avfilter_init_dict(volume_ctx, &options_dict);
    av_dict_free(&options_dict);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the volume filter.\n");
        return err;
    }

    /* Create the aformat filter;
     * it ensures that the output is of the format we want. */
    aformat = avfilter_get_by_name("aformat");
    if (!aformat) {
        fprintf(stderr, "Could not find the aformat filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
    if (!aformat_ctx) {
        fprintf(stderr, "Could not allocate the aformat instance.\n");
        return AVERROR(ENOMEM);
    }

    /* A third way of passing the options is in a string of the form
     * key1=value1:key2=value2.... */
    snprintf(options_str, sizeof(options_str),
             "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
             av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
             (uint64_t)AV_CH_LAYOUT_STEREO);
    err = avfilter_init_str(aformat_ctx, options_str);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
        return err;
    }

    /* Finally create the abuffersink filter;
     * it will be used to get the filtered data out of the graph. */
    abuffersink = avfilter_get_by_name("abuffersink");
    if (!abuffersink) {
        fprintf(stderr, "Could not find the abuffersink filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
    if (!abuffersink_ctx) {
        fprintf(stderr, "Could not allocate the abuffersink instance.\n");
        return AVERROR(ENOMEM);
    }

    /* This filter takes no options. */
    err = avfilter_init_str(abuffersink_ctx, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the abuffersink instance.\n");
        return err;
    }

    /* Connect the filters;
     * in this simple case the filters just form a linear chain. */
    err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0);
    if (err >= 0)
        err = avfilter_link(volume_ctx, 0, aformat_ctx, 0);
    if (err >= 0)
        err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
    if (err < 0) {
        fprintf(stderr, "Error connecting filters\n");
        return err;
    }

    /* Configure the graph. */
    err = avfilter_graph_config(filter_graph, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
        return err;
    }

    *graph = filter_graph;
    *src   = abuffer_ctx;
    *sink  = abuffersink_ctx;

    return 0;
}

/* Do something useful with the filtered data: this simple
 * example just prints the MD5 checksum of each plane to stdout. */
static int process_output(struct AVMD5 *md5, AVFrame *frame)
{
    int planar     = av_sample_fmt_is_planar(frame->format);
    int channels   = av_get_channel_layout_nb_channels(frame->channel_layout);
    int planes     = planar ? channels : 1;
    int bps        = av_get_bytes_per_sample(frame->format);
    int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
    int i, j;

    for (i = 0; i < planes; i++) {
        uint8_t checksum[16];

        av_md5_init(md5);
        av_md5_sum(checksum, frame->extended_data[i], plane_size);

        fprintf(stdout, "plane %d: 0x", i);
        for (j = 0; j < sizeof(checksum); j++)
            fprintf(stdout, "%02X", checksum[j]);
        fprintf(stdout, "\n");
    }
    fprintf(stdout, "\n");

    return 0;
}

/* Construct a frame of audio data to be filtered;
 * this simple example just synthesizes a sine wave. */
static int get_input(AVFrame *frame, int frame_num)
{
    int err, i, j;

#define FRAME_SIZE 1024

    /* Set up the frame properties and allocate the buffer for the data. */
    frame->sample_rate    = INPUT_SAMPLERATE;
    frame->format         = INPUT_FORMAT;
    frame->channel_layout = INPUT_CHANNEL_LAYOUT;
    frame->nb_samples     = FRAME_SIZE;
    frame->pts            = frame_num * FRAME_SIZE;

    err = av_frame_get_buffer(frame, 0);
    if (err < 0)
        return err;

    /* Fill the data for each channel. */
    for (i = 0; i < 5; i++) {
        float *data = (float*)frame->extended_data[i];

        for (j = 0; j < frame->nb_samples; j++)
            data[j] = sin(2 * M_PI * (frame_num + j) * (i + 1) / FRAME_SIZE);
    }

    return 0;
}

int main(int argc, char *argv[])
{
    struct AVMD5 *md5;
    AVFilterGraph *graph;
    AVFilterContext *src, *sink;
    AVFrame *frame;
    uint8_t errstr[1024];
    float duration;
    int err, nb_frames, i;

    if (argc < 2) {
        fprintf(stderr, "Usage: %s <duration>\n", argv[0]);
        return 1;
    }

    duration  = atof(argv[1]);
    nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE;
    if (nb_frames <= 0) {
        fprintf(stderr, "Invalid duration: %s\n", argv[1]);
        return 1;
    }

    avfilter_register_all();

    /* Allocate the frame we will be using to store the data. */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Error allocating the frame\n");
        return 1;
    }

    md5 = av_md5_alloc();
    if (!md5) {
        fprintf(stderr, "Error allocating the MD5 context\n");
        return 1;
    }

    /* Set up the filtergraph. */
    err = init_filter_graph(&graph, &src, &sink);
    if (err < 0) {
        fprintf(stderr, "Unable to init filter graph:");
        goto fail;
    }

    /* the main filtering loop */
    for (i = 0; i < nb_frames; i++) {
        /* get an input frame to be filtered */
        err = get_input(frame, i);
        if (err < 0) {
            fprintf(stderr, "Error generating input frame:");
            goto fail;
        }

        /* Send the frame to the input of the filtergraph. */
        err = av_buffersrc_add_frame(src, frame);
        if (err < 0) {
            av_frame_unref(frame);
            fprintf(stderr, "Error submitting the frame to the filtergraph:");
            goto fail;
        }

        /* Get all the filtered output that is available. */
        while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
            /* now do something with our filtered frame */
            err = process_output(md5, frame);
            if (err < 0) {
                fprintf(stderr, "Error processing the filtered frame:");
                goto fail;
            }
            av_frame_unref(frame);
        }

        if (err == AVERROR(EAGAIN)) {
            /* Need to feed more frames in. */
            continue;
        } else if (err == AVERROR_EOF) {
            /* Nothing more to do, finish. */
            break;
        } else if (err < 0) {
            /* An error occurred. */
            fprintf(stderr, "Error filtering the data:");
            goto fail;
        }
    }

    avfilter_graph_free(&graph);
    av_frame_free(&frame);
    av_freep(&md5);

    return 0;

fail:
    av_strerror(err, errstr, sizeof(errstr));
    fprintf(stderr, "%s\n", errstr);
    return 1;
}
280 Externals/ffmpeg/dev/doc/examples/filtering_audio.c vendored
@@ -1,280 +0,0 @@
/*
 * Copyright (c) 2010 Nicolas George
 * Copyright (c) 2011 Stefano Sabatini
 * Copyright (c) 2012 Clément Bœsch
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * API example for audio decoding and filtering
 * @example filtering_audio.c
 */

#include <unistd.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>

static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
static const char *player       = "ffplay -f s16le -ar 8000 -ac 1 -";

static AVFormatContext *fmt_ctx;
static AVCodecContext *dec_ctx;
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
static int audio_stream_index = -1;

static int open_input_file(const char *filename)
{
    int ret;
    AVCodec *dec;

    if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    /* select the audio stream */
    ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find a audio stream in the input file\n");
        return ret;
    }
    audio_stream_index = ret;
    dec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
    av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);

    /* init the audio decoder */
    if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
        return ret;
    }

    return 0;
}

static int init_filters(const char *filters_descr)
{
    char args[512];
    int ret = 0;
    AVFilter *abuffersrc  = avfilter_get_by_name("abuffer");
    AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
    static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
    static const int out_sample_rates[] = { 8000, -1 };
    const AVFilterLink *outlink;
    AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;

    filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* buffer audio source: the decoded frames from the decoder will be inserted here. */
    if (!dec_ctx->channel_layout)
        dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
    snprintf(args, sizeof(args),
             "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
             time_base.num, time_base.den, dec_ctx->sample_rate,
             av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
        goto end;
    }

    /* buffer audio sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
                                       NULL, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
        goto end;
    }

    ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
                              AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
        goto end;
    }

    ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
                              AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
        goto end;
    }

    ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
                              AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                        &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    /* Print summary of the sink buffer
     * Note: args buffer is reused to store channel layout string */
    outlink = buffersink_ctx->inputs[0];
    av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
    av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
           (int)outlink->sample_rate,
           (char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
           args);

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

static void print_frame(const AVFrame *frame)
{
    const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));
    const uint16_t *p     = (uint16_t*)frame->data[0];
    const uint16_t *p_end = p + n;

    while (p < p_end) {
        fputc(*p    & 0xff, stdout);
        fputc(*p>>8 & 0xff, stdout);
        p++;
    }
    fflush(stdout);
}

int main(int argc, char **argv)
{
    int ret;
    AVPacket packet0, packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    int got_frame;

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
        exit(1);
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    packet0.data = NULL;
    packet.data = NULL;
    while (1) {
        if (!packet0.data) {
            if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
                break;
            packet0 = packet;
        }

        if (packet.stream_index == audio_stream_index) {
            got_frame = 0;
            ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
                continue;
            }
            packet.size -= ret;
            packet.data += ret;

            if (got_frame) {
                /* push the audio data from decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
                    break;
                }

                /* pull filtered audio from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    print_frame(filt_frame);
                    av_frame_unref(filt_frame);
                }
            }

            if (packet.size <= 0)
                av_free_packet(&packet0);
        } else {
            /* discard non-wanted packets */
            av_free_packet(&packet0);
        }
    }
end:
    avfilter_graph_free(&filter_graph);
    avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }

    exit(0);
}
262
Externals/ffmpeg/dev/doc/examples/filtering_video.c
vendored
@ -1,262 +0,0 @@
/*
 * Copyright (c) 2010 Nicolas George
 * Copyright (c) 2011 Stefano Sabatini
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * API example for decoding and filtering
 * @example filtering_video.c
 */

#define _XOPEN_SOURCE 600 /* for usleep */
#include <unistd.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>

const char *filter_descr = "scale=78:24";

static AVFormatContext *fmt_ctx;
static AVCodecContext *dec_ctx;
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
static int video_stream_index = -1;
static int64_t last_pts = AV_NOPTS_VALUE;

static int open_input_file(const char *filename)
{
    int ret;
    AVCodec *dec;

    if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    /* select the video stream */
    ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
        return ret;
    }
    video_stream_index = ret;
    dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
    av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);

    /* init the video decoder */
    if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
        return ret;
    }

    return 0;
}

static int init_filters(const char *filters_descr)
{
    char args[512];
    int ret = 0;
    AVFilter *buffersrc = avfilter_get_by_name("buffer");
    AVFilter *buffersink = avfilter_get_by_name("buffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };

    filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
             time_base.num, time_base.den,
             dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
        goto end;
    }

    /* buffer video sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       NULL, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
        goto end;
    }

    ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
                              AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                        &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

static void display_frame(const AVFrame *frame, AVRational time_base)
{
    int x, y;
    uint8_t *p0, *p;
    int64_t delay;

    if (frame->pts != AV_NOPTS_VALUE) {
        if (last_pts != AV_NOPTS_VALUE) {
            /* sleep roughly the right amount of time;
             * usleep is in microseconds, just like AV_TIME_BASE. */
            delay = av_rescale_q(frame->pts - last_pts,
                                 time_base, AV_TIME_BASE_Q);
            if (delay > 0 && delay < 1000000)
                usleep(delay);
        }
        last_pts = frame->pts;
    }

    /* Trivial ASCII grayscale display. */
    p0 = frame->data[0];
    puts("\033c");
    for (y = 0; y < frame->height; y++) {
        p = p0;
        for (x = 0; x < frame->width; x++)
            putchar(" .-+#"[*(p++) / 52]);
        putchar('\n');
        p0 += frame->linesize[0];
    }
    fflush(stdout);
}

int main(int argc, char **argv)
{
    int ret;
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    int got_frame;

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file\n", argv[0]);
        exit(1);
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index) {
            got_frame = 0;
            ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);

                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                /* pull filtered frames from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
                    av_frame_unref(filt_frame);
                }
                av_frame_unref(frame);
            }
        }
        av_free_packet(&packet);
    }
end:
    avfilter_graph_free(&filter_graph);
    avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }

    exit(0);
}
56
Externals/ffmpeg/dev/doc/examples/metadata.c
vendored
@ -1,56 +0,0 @@
/*
 * Copyright (c) 2011 Reinhard Tartler
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * Shows how the metadata API can be used in application programs.
 * @example metadata.c
 */

#include <stdio.h>

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int main (int argc, char **argv)
{
    AVFormatContext *fmt_ctx = NULL;
    AVDictionaryEntry *tag = NULL;
    int ret;

    if (argc != 2) {
        printf("usage: %s <input_file>\n"
               "example program to demonstrate the use of the libavformat metadata API.\n"
               "\n", argv[0]);
        return 1;
    }

    av_register_all();
    if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
        return ret;

    while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", tag->key, tag->value);

    avformat_close_input(&fmt_ctx);
    return 0;
}
670
Externals/ffmpeg/dev/doc/examples/muxing.c
vendored
@ -1,670 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat API example.
|
||||
*
|
||||
* Output a media file in any supported libavformat format. The default
|
||||
* codecs are used.
|
||||
* @example muxing.c
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/avassert.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
#define STREAM_DURATION 10.0
|
||||
#define STREAM_FRAME_RATE 25 /* 25 images/s */
|
||||
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
|
||||
|
||||
#define SCALE_FLAGS SWS_BICUBIC
|
||||
|
||||
// a wrapper around a single output AVStream
|
||||
typedef struct OutputStream {
|
||||
AVStream *st;
|
||||
|
||||
/* pts of the next frame that will be generated */
|
||||
int64_t next_pts;
|
||||
int samples_count;
|
||||
|
||||
AVFrame *frame;
|
||||
AVFrame *tmp_frame;
|
||||
|
||||
float t, tincr, tincr2;
|
||||
|
||||
struct SwsContext *sws_ctx;
|
||||
struct SwrContext *swr_ctx;
|
||||
} OutputStream;
|
||||
|
||||
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
{
|
||||
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
|
||||
|
||||
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
|
||||
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
|
||||
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
|
||||
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
|
||||
{
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, *time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, pkt);
|
||||
return av_interleaved_write_frame(fmt_ctx, pkt);
|
||||
}
|
||||
|
||||
/* Add an output stream. */
|
||||
static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
AVCodec **codec,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int i;
|
||||
|
||||
/* find the encoder */
|
||||
*codec = avcodec_find_encoder(codec_id);
|
||||
if (!(*codec)) {
|
||||
fprintf(stderr, "Could not find encoder for '%s'\n",
|
||||
avcodec_get_name(codec_id));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->st = avformat_new_stream(oc, *codec);
|
||||
if (!ost->st) {
|
||||
fprintf(stderr, "Could not allocate stream\n");
|
||||
exit(1);
|
||||
}
|
||||
ost->st->id = oc->nb_streams-1;
|
||||
c = ost->st->codec;
|
||||
|
||||
switch ((*codec)->type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
c->sample_fmt = (*codec)->sample_fmts ?
|
||||
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
if ((*codec)->supported_samplerates) {
|
||||
c->sample_rate = (*codec)->supported_samplerates[0];
|
||||
for (i = 0; (*codec)->supported_samplerates[i]; i++) {
|
||||
if ((*codec)->supported_samplerates[i] == 44100)
|
||||
c->sample_rate = 44100;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
if ((*codec)->channel_layouts) {
|
||||
c->channel_layout = (*codec)->channel_layouts[0];
|
||||
for (i = 0; (*codec)->channel_layouts[i]; i++) {
|
||||
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
ost->st->time_base = (AVRational){ 1, c->sample_rate };
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
c->codec_id = codec_id;
|
||||
|
||||
c->bit_rate = 400000;
|
||||
/* Resolution must be a multiple of two. */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* timebase: This is the fundamental unit of time (in seconds) in terms
|
||||
* of which frame timestamps are represented. For fixed-fps content,
|
||||
* timebase should be 1/framerate and timestamp increments should be
|
||||
* identical to 1. */
|
||||
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
|
||||
c->time_base = ost->st->time_base;
|
||||
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
|
||||
/* just for testing, we also add B frames */
|
||||
c->max_b_frames = 2;
|
||||
}
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
|
||||
/* Needed to avoid using macroblocks in which some coeffs overflow.
|
||||
* This does not happen with normal video, it just happens here as
|
||||
* the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision = 2;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Some formats want stream headers to be separate. */
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* audio output */
|
||||
|
||||
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
uint64_t channel_layout,
|
||||
int sample_rate, int nb_samples)
|
||||
{
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
int ret;
|
||||
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating an audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->format = sample_fmt;
|
||||
frame->channel_layout = channel_layout;
|
||||
frame->sample_rate = sample_rate;
|
||||
frame->nb_samples = nb_samples;
|
||||
|
||||
if (nb_samples) {
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error allocating an audio buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int nb_samples;
|
||||
int ret;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
c = ost->st->codec;
|
||||
|
||||
/* open it */
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* init signal generator */
|
||||
ost->t = 0;
|
||||
ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
|
||||
/* increment frequency by 110 Hz per second */
|
||||
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
nb_samples = 10000;
|
||||
else
|
||||
nb_samples = c->frame_size;
|
||||
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
|
||||
/* create resampler context */
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
* 'nb_channels' channels. */
|
||||
static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
{
|
||||
AVFrame *frame = ost->tmp_frame;
|
||||
int j, i, v;
|
||||
int16_t *q = (int16_t*)frame->data[0];
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
for (j = 0; j <frame->nb_samples; j++) {
|
||||
v = (int)(sin(ost->t) * 10000);
|
||||
for (i = 0; i < ost->st->codec->channels; i++)
|
||||
*q++ = v;
|
||||
ost->t += ost->tincr;
|
||||
ost->tincr += ost->tincr2;
|
||||
}
|
||||
|
||||
frame->pts = ost->next_pts;
|
||||
ost->next_pts += frame->nb_samples;
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one audio frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVPacket pkt = { 0 }; // data and size must be 0;
|
||||
AVFrame *frame;
|
||||
int ret;
|
||||
int got_packet;
|
||||
int dst_nb_samples;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
c = ost->st->codec;
|
||||
|
||||
frame = get_audio_frame(ost);
|
||||
|
||||
if (frame) {
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(ost->frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(ost->swr_ctx,
|
||||
ost->frame->data, dst_nb_samples,
|
||||
(const uint8_t **)frame->data, frame->nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
exit(1);
|
||||
}
|
||||
frame = ost->frame;
|
||||
|
||||
frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
|
||||
ost->samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing audio frame: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* video output */
|
||||
|
||||
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
AVFrame *picture;
|
||||
int ret;
|
||||
|
||||
picture = av_frame_alloc();
|
||||
if (!picture)
|
||||
return NULL;
|
||||
|
||||
picture->format = pix_fmt;
|
||||
picture->width = width;
|
||||
picture->height = height;
|
||||
|
||||
/* allocate the buffers for the frame data */
|
||||
ret = av_frame_get_buffer(picture, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate frame data.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return picture;
|
||||
}
|
||||
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = ost->st->codec;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
|
||||
/* open the codec */
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* allocate and init a re-usable frame */
|
||||
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
|
||||
if (!ost->frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* If the output format is not YUV420P, then a temporary YUV420P
|
||||
* picture is needed too. It is then converted to the required
|
||||
* output format. */
|
||||
ost->tmp_frame = NULL;
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (!ost->tmp_frame) {
|
||||
fprintf(stderr, "Could not allocate temporary picture\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a dummy image. */
|
||||
static void fill_yuv_image(AVFrame *pict, int frame_index,
|
||||
int width, int height)
|
||||
{
|
||||
int x, y, i, ret;
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(pict);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
i = frame_index;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
|
||||
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static AVFrame *get_video_frame(OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c = ost->st->codec;
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
/* as we only generate a YUV420P picture, we must convert it
|
||||
* to the codec pixel format if needed */
|
||||
if (!ost->sws_ctx) {
|
||||
ost->sws_ctx = sws_getContext(c->width, c->height,
|
||||
AV_PIX_FMT_YUV420P,
|
||||
c->width, c->height,
|
||||
c->pix_fmt,
|
||||
SCALE_FLAGS, NULL, NULL, NULL);
|
||||
if (!ost->sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Could not initialize the conversion context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
|
||||
sws_scale(ost->sws_ctx,
|
||||
(const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
|
||||
0, c->height, ost->frame->data, ost->frame->linesize);
|
||||
} else {
|
||||
fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
|
||||
}
|
||||
|
||||
ost->frame->pts = ost->next_pts++;
|
||||
|
||||
return ost->frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one video frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c;
|
||||
AVFrame *frame;
|
||||
int got_packet = 0;
|
||||
|
||||
c = ost->st->codec;
|
||||
|
||||
frame = get_video_frame(ost);
|
||||
|
||||
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
|
||||
/* a hack to avoid data copy with some raw video muxers */
|
||||
AVPacket pkt;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
if (!frame)
|
||||
return 1;
|
||||
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = ost->st->index;
|
||||
pkt.data = (uint8_t *)frame;
|
||||
pkt.size = sizeof(AVPicture);
|
||||
|
||||
pkt.pts = pkt.dts = frame->pts;
|
||||
av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
|
||||
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
} else {
|
||||
AVPacket pkt = { 0 };
|
||||
av_init_packet(&pkt);
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
|
||||
|
||||
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
avcodec_close(ost->st->codec);
|
||||
av_frame_free(&ost->frame);
|
||||
av_frame_free(&ost->tmp_frame);
|
||||
sws_freeContext(ost->sws_ctx);
|
||||
swr_free(&ost->swr_ctx);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* media file output */
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
OutputStream video_st = { 0 }, audio_st = { 0 };
|
||||
const char *filename;
|
||||
AVOutputFormat *fmt;
|
||||
AVFormatContext *oc;
|
||||
AVCodec *audio_codec, *video_codec;
|
||||
int ret;
|
||||
int have_video = 0, have_audio = 0;
|
||||
int encode_video = 0, encode_audio = 0;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
/* Initialize libavcodec, and register all codecs and formats. */
|
||||
av_register_all();
|
||||
|
||||
if (argc < 2) {
|
||||
printf("usage: %s output_file\n"
|
||||
"API example program to output a media file with libavformat.\n"
|
||||
"This program generates a synthetic audio and video stream, encodes and\n"
|
||||
"muxes them into a file named output_file.\n"
|
||||
"The output format is automatically guessed according to the file extension.\n"
|
||||
"Raw images can also be output by using '%%d' in the filename.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
filename = argv[1];
|
||||
if (argc > 3 && !strcmp(argv[2], "-flags")) {
|
||||
av_dict_set(&opt, argv[2]+1, argv[3], 0);
|
||||
}
|
||||
|
||||
/* allocate the output media context */
|
||||
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
|
||||
if (!oc) {
|
||||
printf("Could not deduce output format from file extension: using MPEG.\n");
|
||||
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
|
||||
}
|
||||
if (!oc)
|
||||
return 1;
|
||||
|
||||
fmt = oc->oformat;
|
||||
|
||||
/* Add the audio and video streams using the default format codecs
|
||||
* and initialize the codecs. */
|
||||
if (fmt->video_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&video_st, oc, &video_codec, fmt->video_codec);
|
||||
have_video = 1;
|
||||
encode_video = 1;
|
||||
}
|
||||
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
|
||||
have_audio = 1;
|
||||
encode_audio = 1;
|
||||
}
|
||||
|
||||
/* Now that all the parameters are set, we can open the audio and
|
||||
* video codecs and allocate the necessary encode buffers. */
|
||||
if (have_video)
|
||||
open_video(oc, video_codec, &video_st, opt);
|
||||
|
||||
if (have_audio)
|
||||
open_audio(oc, audio_codec, &audio_st, opt);
|
||||
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
/* open the output file, if needed */
|
||||
if (!(fmt->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open '%s': %s\n", filename,
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the stream header, if any. */
|
||||
ret = avformat_write_header(oc, &opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file: %s\n",
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
while (encode_video || encode_audio) {
|
||||
/* select the stream to encode */
|
||||
if (encode_video &&
|
||||
(!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
|
||||
audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
|
||||
encode_video = !write_video_frame(oc, &video_st);
|
||||
} else {
|
||||
encode_audio = !write_audio_frame(oc, &audio_st);
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the trailer, if any. The trailer must be written before you
|
||||
* close the CodecContexts open when you wrote the header; otherwise
|
||||
* av_write_trailer() may try to use memory that was freed on
|
||||
* av_codec_close(). */
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
if (have_video)
|
||||
close_stream(oc, &video_st);
|
||||
if (have_audio)
|
||||
close_stream(oc, &audio_st);
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
/* Close the output file. */
|
||||
avio_closep(&oc->pb);
|
||||
|
||||
/* free the stream */
|
||||
avformat_free_context(oc);
|
||||
|
||||
return 0;
|
||||
}
|
165
Externals/ffmpeg/dev/doc/examples/remuxing.c
vendored
@ -1,165 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat/libavcodec demuxing and muxing API example.
|
||||
*
|
||||
* Remux streams from one container format to another.
|
||||
* @example remuxing.c
|
||||
*/
|
||||
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
|
||||
{
|
||||
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
|
||||
|
||||
printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
|
||||
tag,
|
||||
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
|
||||
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
|
||||
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVOutputFormat *ofmt = NULL;
|
||||
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
|
||||
AVPacket pkt;
|
||||
const char *in_filename, *out_filename;
|
||||
int ret, i;
|
||||
|
||||
if (argc < 3) {
|
||||
printf("usage: %s input output\n"
|
||||
"API example program to remux a media file with libavformat and libavcodec.\n"
|
||||
"The output format is guessed according to the file extension.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
in_filename = argv[1];
|
||||
out_filename = argv[2];
|
||||
|
||||
av_register_all();
|
||||
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
|
||||
fprintf(stderr, "Could not open input file '%s'", in_filename);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
|
||||
fprintf(stderr, "Failed to retrieve input stream information");
|
||||
goto end;
|
||||
}
|
||||
|
||||
av_dump_format(ifmt_ctx, 0, in_filename, 0);
|
||||
|
||||
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
|
||||
if (!ofmt_ctx) {
|
||||
fprintf(stderr, "Could not create output context\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
ofmt = ofmt_ctx->oformat;
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
AVStream *in_stream = ifmt_ctx->streams[i];
|
||||
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
|
||||
if (!out_stream) {
|
||||
fprintf(stderr, "Failed allocating output stream\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
|
||||
goto end;
|
||||
}
|
||||
out_stream->codec->codec_tag = 0;
|
||||
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
}
|
||||
av_dump_format(ofmt_ctx, 0, out_filename, 1);
|
||||
|
||||
if (!(ofmt->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open output file '%s'", out_filename);
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
ret = avformat_write_header(ofmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
AVStream *in_stream, *out_stream;
|
||||
|
||||
ret = av_read_frame(ifmt_ctx, &pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
in_stream = ifmt_ctx->streams[pkt.stream_index];
|
||||
out_stream = ofmt_ctx->streams[pkt.stream_index];
|
||||
|
||||
log_packet(ifmt_ctx, &pkt, "in");
|
||||
|
||||
/* copy packet */
|
||||
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
|
||||
pkt.pos = -1;
|
||||
log_packet(ofmt_ctx, &pkt, "out");
|
||||
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error muxing packet\n");
|
||||
break;
|
||||
}
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
|
||||
av_write_trailer(ofmt_ctx);
|
||||
end:
|
||||
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
|
||||
/* close output */
|
||||
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
|
||||
avio_closep(&ofmt_ctx->pb);
|
||||
avformat_free_context(ofmt_ctx);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
214
Externals/ffmpeg/dev/doc/examples/resampling_audio.c
vendored
@ -1,214 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @example resampling_audio.c
|
||||
* libswresample API use example.
|
||||
*/
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"Sample format %s not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fill dst buffer with nb_samples, generated starting from t.
|
||||
*/
|
||||
static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
|
||||
{
|
||||
int i, j;
|
||||
double tincr = 1.0 / sample_rate, *dstp = dst;
|
||||
const double c = 2 * M_PI * 440.0;
|
||||
|
||||
/* generate sin tone with 440Hz frequency and duplicated channels */
|
||||
for (i = 0; i < nb_samples; i++) {
|
||||
*dstp = sin(c * *t);
|
||||
for (j = 1; j < nb_channels; j++)
|
||||
dstp[j] = dstp[0];
|
||||
dstp += nb_channels;
|
||||
*t += tincr;
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
|
||||
int src_rate = 48000, dst_rate = 44100;
|
||||
uint8_t **src_data = NULL, **dst_data = NULL;
|
||||
int src_nb_channels = 0, dst_nb_channels = 0;
|
||||
int src_linesize, dst_linesize;
|
||||
int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
|
||||
enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
const char *fmt;
|
||||
struct SwrContext *swr_ctx;
|
||||
double t;
|
||||
int ret;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s output_file\n"
|
||||
"API example program to show how to resample an audio stream with libswresample.\n"
|
||||
"This program generates a series of audio frames, resamples them to a specified "
|
||||
"output format and rate and saves them to an output file named output_file.\n",
|
||||
argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
swr_ctx = swr_alloc();
|
||||
if (!swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
|
||||
|
||||
av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination samples buffers */
|
||||
|
||||
src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
|
||||
src_nb_samples, src_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate source samples\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* compute the number of converted samples: buffering is avoided
|
||||
* ensuring that the output buffer will contain at least all the
|
||||
* converted input samples */
|
||||
max_dst_nb_samples = dst_nb_samples =
|
||||
av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
|
||||
/* buffer is going to be directly written to a rawaudio file, no alignment */
|
||||
dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate destination samples\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
t = 0;
|
||||
do {
|
||||
/* generate synthetic audio */
|
||||
fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);
|
||||
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
|
||||
src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
if (dst_nb_samples > max_dst_nb_samples) {
|
||||
av_freep(&dst_data[0]);
|
||||
ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 1);
|
||||
if (ret < 0)
|
||||
break;
|
||||
max_dst_nb_samples = dst_nb_samples;
|
||||
}
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
|
||||
ret, dst_sample_fmt, 1);
|
||||
if (dst_bufsize < 0) {
|
||||
fprintf(stderr, "Could not get sample buffer size\n");
|
||||
goto end;
|
||||
}
|
||||
printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
} while (t < 10);
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
|
||||
goto end;
|
||||
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
|
||||
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
|
||||
|
||||
end:
|
||||
fclose(dst_file);
|
||||
|
||||
if (src_data)
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&src_data);
|
||||
|
||||
if (dst_data)
|
||||
av_freep(&dst_data[0]);
|
||||
av_freep(&dst_data);
|
||||
|
||||
swr_free(&swr_ctx);
|
||||
return ret < 0;
|
||||
}
|
140
Externals/ffmpeg/dev/doc/examples/scaling_video.c
vendored
@ -1,140 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libswscale API use example.
|
||||
* @example scaling_video.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/parseutils.h>
|
||||
#include <libswscale/swscale.h>
|
||||
|
||||
static void fill_yuv_image(uint8_t *data[4], int linesize[4],
|
||||
int width, int height, int frame_index)
|
||||
{
|
||||
int x, y;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
data[0][y * linesize[0] + x] = x + y + frame_index * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
|
||||
data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
uint8_t *src_data[4], *dst_data[4];
|
||||
int src_linesize[4], dst_linesize[4];
|
||||
int src_w = 320, src_h = 240, dst_w, dst_h;
|
||||
enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
|
||||
const char *dst_size = NULL;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
struct SwsContext *sws_ctx;
|
||||
int i, ret;
|
||||
|
||||
if (argc != 3) {
|
||||
fprintf(stderr, "Usage: %s output_file output_size\n"
|
||||
"API example program to show how to scale an image with libswscale.\n"
|
||||
"This program generates a series of pictures, rescales them to the given "
|
||||
"output_size and saves them to an output file named output_file\n."
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
dst_size = argv[2];
|
||||
|
||||
if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
|
||||
fprintf(stderr,
|
||||
"Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
|
||||
dst_size);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create scaling context */
|
||||
sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
|
||||
dst_w, dst_h, dst_pix_fmt,
|
||||
SWS_BILINEAR, NULL, NULL, NULL);
|
||||
if (!sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Impossible to create scale context for the conversion "
|
||||
"fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
|
||||
av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
|
||||
ret = AVERROR(EINVAL);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination image buffers */
|
||||
if ((ret = av_image_alloc(src_data, src_linesize,
|
||||
src_w, src_h, src_pix_fmt, 16)) < 0) {
|
||||
fprintf(stderr, "Could not allocate source image\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer is going to be written to rawvideo file, no alignment */
|
||||
if ((ret = av_image_alloc(dst_data, dst_linesize,
|
||||
dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
|
||||
fprintf(stderr, "Could not allocate destination image\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = ret;
|
||||
|
||||
for (i = 0; i < 100; i++) {
|
||||
/* generate synthetic video */
|
||||
fill_yuv_image(src_data, src_linesize, src_w, src_h, i);
|
||||
|
||||
/* convert to destination format */
|
||||
sws_scale(sws_ctx, (const uint8_t * const*)src_data,
|
||||
src_linesize, 0, src_h, dst_data, dst_linesize);
|
||||
|
||||
/* write scaled image to file */
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
}
|
||||
|
||||
fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);
|
||||
|
||||
end:
|
||||
fclose(dst_file);
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&dst_data[0]);
|
||||
sws_freeContext(sws_ctx);
|
||||
return ret < 0;
|
||||
}
|
755
Externals/ffmpeg/dev/doc/examples/transcode_aac.c
vendored
@ -1,755 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* simple audio converter
|
||||
*
|
||||
* @example transcode_aac.c
|
||||
* Convert an input audio file to AAC in an MP4 container using FFmpeg.
|
||||
* @author Andreas Unterweger (dustsigns@gmail.com)
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/avio.h"
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
||||
#include "libavutil/audio_fifo.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/frame.h"
|
||||
#include "libavutil/opt.h"
|
||||
|
||||
#include "libswresample/swresample.h"
|
||||
|
||||
/** The output bit rate in kbit/s */
|
||||
#define OUTPUT_BIT_RATE 48000
|
||||
/** The number of output channels */
|
||||
#define OUTPUT_CHANNELS 2
|
||||
/** The audio sample output format */
|
||||
#define OUTPUT_SAMPLE_FORMAT AV_SAMPLE_FMT_S16
|
||||
|
||||
/**
|
||||
* Convert an error code into a text message.
|
||||
* @param error Error code to be converted
|
||||
* @return Corresponding error text (not thread-safe)
|
||||
*/
|
||||
static const char *get_error_text(const int error)
|
||||
{
|
||||
static char error_buffer[255];
|
||||
av_strerror(error, error_buffer, sizeof(error_buffer));
|
||||
return error_buffer;
|
||||
}
|
||||
|
||||
/** Open an input file and the required decoder. */
|
||||
static int open_input_file(const char *filename,
|
||||
AVFormatContext **input_format_context,
|
||||
AVCodecContext **input_codec_context)
|
||||
{
|
||||
AVCodec *input_codec;
|
||||
int error;
|
||||
|
||||
/** Open the input file to read from it. */
|
||||
if ((error = avformat_open_input(input_format_context, filename, NULL,
|
||||
NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open input file '%s' (error '%s')\n",
|
||||
filename, get_error_text(error));
|
||||
*input_format_context = NULL;
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Get information on the input file (number of streams etc.). */
|
||||
if ((error = avformat_find_stream_info(*input_format_context, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open find stream info (error '%s')\n",
|
||||
get_error_text(error));
|
||||
avformat_close_input(input_format_context);
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Make sure that there is only one stream in the input file. */
|
||||
if ((*input_format_context)->nb_streams != 1) {
|
||||
fprintf(stderr, "Expected one audio input stream, but found %d\n",
|
||||
(*input_format_context)->nb_streams);
|
||||
avformat_close_input(input_format_context);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Find a decoder for the audio stream. */
|
||||
if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codec->codec_id))) {
|
||||
fprintf(stderr, "Could not find input codec\n");
|
||||
avformat_close_input(input_format_context);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Open the decoder for the audio stream to use it later. */
|
||||
if ((error = avcodec_open2((*input_format_context)->streams[0]->codec,
|
||||
input_codec, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open input codec (error '%s')\n",
|
||||
get_error_text(error));
|
||||
avformat_close_input(input_format_context);
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Save the decoder context for easier access later. */
|
||||
*input_codec_context = (*input_format_context)->streams[0]->codec;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Open an output file and the required encoder.
|
||||
* Also set some basic encoder parameters.
|
||||
* Some of these parameters are based on the input file's parameters.
|
||||
*/
|
||||
static int open_output_file(const char *filename,
|
||||
AVCodecContext *input_codec_context,
|
||||
AVFormatContext **output_format_context,
|
||||
AVCodecContext **output_codec_context)
|
||||
{
|
||||
AVIOContext *output_io_context = NULL;
|
||||
AVStream *stream = NULL;
|
||||
AVCodec *output_codec = NULL;
|
||||
int error;
|
||||
|
||||
/** Open the output file to write to it. */
|
||||
if ((error = avio_open(&output_io_context, filename,
|
||||
AVIO_FLAG_WRITE)) < 0) {
|
||||
fprintf(stderr, "Could not open output file '%s' (error '%s')\n",
|
||||
filename, get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Create a new format context for the output container format. */
|
||||
if (!(*output_format_context = avformat_alloc_context())) {
|
||||
fprintf(stderr, "Could not allocate output format context\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/** Associate the output file (pointer) with the container format context. */
|
||||
(*output_format_context)->pb = output_io_context;
|
||||
|
||||
/** Guess the desired container format based on the file extension. */
|
||||
if (!((*output_format_context)->oformat = av_guess_format(NULL, filename,
|
||||
NULL))) {
|
||||
fprintf(stderr, "Could not find output file format\n");
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
av_strlcpy((*output_format_context)->filename, filename,
|
||||
sizeof((*output_format_context)->filename));
|
||||
|
||||
/** Find the encoder to be used by its name. */
|
||||
if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC))) {
|
||||
fprintf(stderr, "Could not find an AAC encoder.\n");
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/** Create a new audio stream in the output file container. */
|
||||
if (!(stream = avformat_new_stream(*output_format_context, output_codec))) {
|
||||
fprintf(stderr, "Could not create new stream\n");
|
||||
error = AVERROR(ENOMEM);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/** Save the encoder context for easier access later. */
|
||||
*output_codec_context = stream->codec;
|
||||
|
||||
/**
|
||||
* Set the basic encoder parameters.
|
||||
* The input file's sample rate is used to avoid a sample rate conversion.
|
||||
*/
|
||||
(*output_codec_context)->channels = OUTPUT_CHANNELS;
|
||||
(*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
|
||||
(*output_codec_context)->sample_rate = input_codec_context->sample_rate;
|
||||
(*output_codec_context)->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
(*output_codec_context)->bit_rate = OUTPUT_BIT_RATE;
|
||||
|
||||
/**
|
||||
* Some container formats (like MP4) require global headers to be present.
|
||||
* Mark the encoder so that it behaves accordingly.
|
||||
*/
|
||||
if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
(*output_codec_context)->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
/** Open the encoder for the audio stream to use it later. */
|
||||
if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open output codec (error '%s')\n",
|
||||
get_error_text(error));
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
avio_closep(&(*output_format_context)->pb);
|
||||
avformat_free_context(*output_format_context);
|
||||
*output_format_context = NULL;
|
||||
return error < 0 ? error : AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Initialize one data packet for reading or writing. */
|
||||
static void init_packet(AVPacket *packet)
|
||||
{
|
||||
av_init_packet(packet);
|
||||
/** Set the packet data and size so that it is recognized as being empty. */
|
||||
packet->data = NULL;
|
||||
packet->size = 0;
|
||||
}
|
||||
|
||||
/** Initialize one audio frame for reading from the input file */
|
||||
static int init_input_frame(AVFrame **frame)
|
||||
{
|
||||
if (!(*frame = av_frame_alloc())) {
|
||||
fprintf(stderr, "Could not allocate input frame\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the audio resampler based on the input and output codec settings.
|
||||
* If the input and output sample formats differ, a conversion is required.
|
||||
* libswresample takes care of this, but requires initialization.
|
||||
*/
|
||||
static int init_resampler(AVCodecContext *input_codec_context,
|
||||
AVCodecContext *output_codec_context,
|
||||
SwrContext **resample_context)
|
||||
{
|
||||
int error;
|
||||
|
||||
/**
|
||||
* Create a resampler context for the conversion.
|
||||
* Set the conversion parameters.
|
||||
* Default channel layouts based on the number of channels
|
||||
* are assumed for simplicity (they are sometimes not detected
|
||||
* properly by the demuxer and/or decoder).
|
||||
*/
|
||||
*resample_context = swr_alloc_set_opts(NULL,
|
||||
av_get_default_channel_layout(output_codec_context->channels),
|
||||
output_codec_context->sample_fmt,
|
||||
output_codec_context->sample_rate,
|
||||
av_get_default_channel_layout(input_codec_context->channels),
|
||||
input_codec_context->sample_fmt,
|
||||
input_codec_context->sample_rate,
|
||||
0, NULL);
|
||||
if (!*resample_context) {
|
||||
fprintf(stderr, "Could not allocate resample context\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
/**
|
||||
* Perform a sanity check so that the number of converted samples is
|
||||
* not greater than the number of samples to be converted.
|
||||
* If the sample rates differ, this case has to be handled differently.
|
||||
*/
|
||||
av_assert0(output_codec_context->sample_rate == input_codec_context->sample_rate);
|
||||
|
||||
/** Open the resampler with the specified parameters. */
|
||||
if ((error = swr_init(*resample_context)) < 0) {
|
||||
fprintf(stderr, "Could not open resample context\n");
|
||||
swr_free(resample_context);
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
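The sanity check above pins the input and output sample rates to the same value, so one decoded frame never produces more converted samples than it contains. As a hedged sketch of the "handled differently" case mentioned in the comment (not part of the removed example; the helper name is hypothetical), the usual approach when the rates differ is to size the destination buffer from the resampler delay:

#include "libavutil/mathematics.h"

/* Hypothetical helper: upper bound on the number of destination samples that
 * swr_convert() may produce for src_nb_samples input samples, accounting for
 * samples still buffered inside the resampler. */
static int max_dst_nb_samples(SwrContext *swr_ctx, int src_nb_samples,
                              int src_rate, int dst_rate)
{
    return (int)av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) + src_nb_samples,
                               dst_rate, src_rate, AV_ROUND_UP);
}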
|
||||
|
||||
/** Initialize a FIFO buffer for the audio samples to be encoded. */
|
||||
static int init_fifo(AVAudioFifo **fifo)
|
||||
{
|
||||
/** Create the FIFO buffer based on the specified output sample format. */
|
||||
if (!(*fifo = av_audio_fifo_alloc(OUTPUT_SAMPLE_FORMAT, OUTPUT_CHANNELS, 1))) {
|
||||
fprintf(stderr, "Could not allocate FIFO\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Write the header of the output file container. */
|
||||
static int write_output_file_header(AVFormatContext *output_format_context)
|
||||
{
|
||||
int error;
|
||||
if ((error = avformat_write_header(output_format_context, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not write output file header (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Decode one audio frame from the input file. */
|
||||
static int decode_audio_frame(AVFrame *frame,
|
||||
AVFormatContext *input_format_context,
|
||||
AVCodecContext *input_codec_context,
|
||||
int *data_present, int *finished)
|
||||
{
|
||||
/** Packet used for temporary storage. */
|
||||
AVPacket input_packet;
|
||||
int error;
|
||||
init_packet(&input_packet);
|
||||
|
||||
/** Read one audio frame from the input file into a temporary packet. */
|
||||
if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
|
||||
/** If we are at the end of the file, flush the decoder below. */
|
||||
if (error == AVERROR_EOF)
|
||||
*finished = 1;
|
||||
else {
|
||||
fprintf(stderr, "Could not read frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode the audio frame stored in the temporary packet.
|
||||
* The input audio stream decoder is used to do this.
|
||||
* If we are at the end of the file, pass an empty packet to the decoder
|
||||
* to flush it.
|
||||
*/
|
||||
if ((error = avcodec_decode_audio4(input_codec_context, frame,
|
||||
data_present, &input_packet)) < 0) {
|
||||
fprintf(stderr, "Could not decode frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_free_packet(&input_packet);
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* If the decoder has not been flushed completely, we are not finished,
|
||||
* so that this function has to be called again.
|
||||
*/
|
||||
if (*finished && *data_present)
|
||||
*finished = 0;
|
||||
av_free_packet(&input_packet);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize a temporary storage for the specified number of audio samples.
|
||||
* The conversion requires temporary storage due to the different format.
|
||||
* The number of audio samples to be allocated is specified in frame_size.
|
||||
*/
|
||||
static int init_converted_samples(uint8_t ***converted_input_samples,
|
||||
AVCodecContext *output_codec_context,
|
||||
int frame_size)
|
||||
{
|
||||
int error;
|
||||
|
||||
/**
|
||||
* Allocate as many pointers as there are audio channels.
|
||||
* Each pointer will later point to the audio samples of the corresponding
|
||||
* channel (although it may be NULL for interleaved formats).
|
||||
*/
|
||||
if (!(*converted_input_samples = calloc(output_codec_context->channels,
|
||||
sizeof(**converted_input_samples)))) {
|
||||
fprintf(stderr, "Could not allocate converted input sample pointers\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/**
|
||||
* Allocate memory for the samples of all channels in one consecutive
|
||||
* block for convenience.
|
||||
*/
|
||||
if ((error = av_samples_alloc(*converted_input_samples, NULL,
|
||||
output_codec_context->channels,
|
||||
frame_size,
|
||||
output_codec_context->sample_fmt, 0)) < 0) {
|
||||
fprintf(stderr,
|
||||
"Could not allocate converted input samples (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_freep(&(*converted_input_samples)[0]);
|
||||
free(*converted_input_samples);
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert the input audio samples into the output sample format.
|
||||
* The conversion happens on a per-frame basis, the size of which is specified
|
||||
* by frame_size.
|
||||
*/
|
||||
static int convert_samples(const uint8_t **input_data,
|
||||
uint8_t **converted_data, const int frame_size,
|
||||
SwrContext *resample_context)
|
||||
{
|
||||
int error;
|
||||
|
||||
/** Convert the samples using the resampler. */
|
||||
if ((error = swr_convert(resample_context,
|
||||
converted_data, frame_size,
|
||||
input_data , frame_size)) < 0) {
|
||||
fprintf(stderr, "Could not convert input samples (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Add converted input audio samples to the FIFO buffer for later processing. */
|
||||
static int add_samples_to_fifo(AVAudioFifo *fifo,
|
||||
uint8_t **converted_input_samples,
|
||||
const int frame_size)
|
||||
{
|
||||
int error;
|
||||
|
||||
/**
|
||||
* Make the FIFO as large as it needs to be to hold both
|
||||
* the old and the new samples.
|
||||
*/
|
||||
if ((error = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size)) < 0) {
|
||||
fprintf(stderr, "Could not reallocate FIFO\n");
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Store the new samples in the FIFO buffer. */
|
||||
if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
|
||||
frame_size) < frame_size) {
|
||||
fprintf(stderr, "Could not write data to FIFO\n");
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read one audio frame from the input file, decode, convert and store
|
||||
* it in the FIFO buffer.
|
||||
*/
|
||||
static int read_decode_convert_and_store(AVAudioFifo *fifo,
|
||||
AVFormatContext *input_format_context,
|
||||
AVCodecContext *input_codec_context,
|
||||
AVCodecContext *output_codec_context,
|
||||
SwrContext *resampler_context,
|
||||
int *finished)
|
||||
{
|
||||
/** Temporary storage of the input samples of the frame read from the file. */
|
||||
AVFrame *input_frame = NULL;
|
||||
/** Temporary storage for the converted input samples. */
|
||||
uint8_t **converted_input_samples = NULL;
|
||||
int data_present;
|
||||
int ret = AVERROR_EXIT;
|
||||
|
||||
/** Initialize temporary storage for one input frame. */
|
||||
if (init_input_frame(&input_frame))
|
||||
goto cleanup;
|
||||
/** Decode one frame worth of audio samples. */
|
||||
if (decode_audio_frame(input_frame, input_format_context,
|
||||
input_codec_context, &data_present, finished))
|
||||
goto cleanup;
|
||||
/**
|
||||
* If we are at the end of the file and the decoder holds no more
* delayed samples, we are actually finished.
|
||||
* This must not be treated as an error.
|
||||
*/
|
||||
if (*finished && !data_present) {
|
||||
ret = 0;
|
||||
goto cleanup;
|
||||
}
|
||||
/** If there is decoded data, convert and store it */
|
||||
if (data_present) {
|
||||
/** Initialize the temporary storage for the converted input samples. */
|
||||
if (init_converted_samples(&converted_input_samples, output_codec_context,
|
||||
input_frame->nb_samples))
|
||||
goto cleanup;
|
||||
|
||||
/**
|
||||
* Convert the input samples to the desired output sample format.
|
||||
* This requires a temporary storage provided by converted_input_samples.
|
||||
*/
|
||||
if (convert_samples((const uint8_t**)input_frame->extended_data, converted_input_samples,
|
||||
input_frame->nb_samples, resampler_context))
|
||||
goto cleanup;
|
||||
|
||||
/** Add the converted input samples to the FIFO buffer for later processing. */
|
||||
if (add_samples_to_fifo(fifo, converted_input_samples,
|
||||
input_frame->nb_samples))
|
||||
goto cleanup;
|
||||
ret = 0;
|
||||
}
|
||||
ret = 0;
|
||||
|
||||
cleanup:
|
||||
if (converted_input_samples) {
|
||||
av_freep(&converted_input_samples[0]);
|
||||
free(converted_input_samples);
|
||||
}
|
||||
av_frame_free(&input_frame);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize one input frame for writing to the output file.
|
||||
* The frame will be exactly frame_size samples large.
|
||||
*/
|
||||
static int init_output_frame(AVFrame **frame,
|
||||
AVCodecContext *output_codec_context,
|
||||
int frame_size)
|
||||
{
|
||||
int error;
|
||||
|
||||
/** Create a new frame to store the audio samples. */
|
||||
if (!(*frame = av_frame_alloc())) {
|
||||
fprintf(stderr, "Could not allocate output frame\n");
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the frame's parameters, especially its size and format.
|
||||
* av_frame_get_buffer needs this to allocate memory for the
|
||||
* audio samples of the frame.
|
||||
* Default channel layouts based on the number of channels
|
||||
* are assumed for simplicity.
|
||||
*/
|
||||
(*frame)->nb_samples = frame_size;
|
||||
(*frame)->channel_layout = output_codec_context->channel_layout;
|
||||
(*frame)->format = output_codec_context->sample_fmt;
|
||||
(*frame)->sample_rate = output_codec_context->sample_rate;
|
||||
|
||||
/**
|
||||
* Allocate the samples of the created frame. This call will make
|
||||
* sure that the audio frame can hold as many samples as specified.
|
||||
*/
|
||||
if ((error = av_frame_get_buffer(*frame, 0)) < 0) {
|
||||
fprintf(stderr, "Could allocate output frame samples (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_frame_free(frame);
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Encode one frame worth of audio to the output file. */
|
||||
static int encode_audio_frame(AVFrame *frame,
|
||||
AVFormatContext *output_format_context,
|
||||
AVCodecContext *output_codec_context,
|
||||
int *data_present)
|
||||
{
|
||||
/** Packet used for temporary storage. */
|
||||
AVPacket output_packet;
|
||||
int error;
|
||||
init_packet(&output_packet);
|
||||
|
||||
/**
|
||||
* Encode the audio frame and store it in the temporary packet.
|
||||
* The output audio stream encoder is used to do this.
|
||||
*/
|
||||
if ((error = avcodec_encode_audio2(output_codec_context, &output_packet,
|
||||
frame, data_present)) < 0) {
|
||||
fprintf(stderr, "Could not encode frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_free_packet(&output_packet);
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Write one audio frame from the temporary packet to the output file. */
|
||||
if (*data_present) {
|
||||
if ((error = av_write_frame(output_format_context, &output_packet)) < 0) {
|
||||
fprintf(stderr, "Could not write frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_free_packet(&output_packet);
|
||||
return error;
|
||||
}
|
||||
|
||||
av_free_packet(&output_packet);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Load one audio frame from the FIFO buffer, encode and write it to the
|
||||
* output file.
|
||||
*/
|
||||
static int load_encode_and_write(AVAudioFifo *fifo,
|
||||
AVFormatContext *output_format_context,
|
||||
AVCodecContext *output_codec_context)
|
||||
{
|
||||
/** Temporary storage of the output samples of the frame written to the file. */
|
||||
AVFrame *output_frame;
|
||||
/**
|
||||
* Use the maximum number of possible samples per frame.
|
||||
* If there is less than the maximum possible frame size in the FIFO
|
||||
* buffer, use this number. Otherwise, use the maximum possible frame size.
|
||||
*/
|
||||
const int frame_size = FFMIN(av_audio_fifo_size(fifo),
|
||||
output_codec_context->frame_size);
|
||||
int data_written;
|
||||
|
||||
/** Initialize temporary storage for one output frame. */
|
||||
if (init_output_frame(&output_frame, output_codec_context, frame_size))
|
||||
return AVERROR_EXIT;
|
||||
|
||||
/**
|
||||
* Read as many samples from the FIFO buffer as required to fill the frame.
|
||||
* The samples are stored in the frame temporarily.
|
||||
*/
|
||||
if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
|
||||
fprintf(stderr, "Could not read data from FIFO\n");
|
||||
av_frame_free(&output_frame);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Encode one frame worth of audio samples. */
|
||||
if (encode_audio_frame(output_frame, output_format_context,
|
||||
output_codec_context, &data_written)) {
|
||||
av_frame_free(&output_frame);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
av_frame_free(&output_frame);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Write the trailer of the output file container. */
|
||||
static int write_output_file_trailer(AVFormatContext *output_format_context)
|
||||
{
|
||||
int error;
|
||||
if ((error = av_write_trailer(output_format_context)) < 0) {
|
||||
fprintf(stderr, "Could not write output file trailer (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Convert an audio file to an AAC file in an MP4 container. */
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
|
||||
AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
|
||||
SwrContext *resample_context = NULL;
|
||||
AVAudioFifo *fifo = NULL;
|
||||
int ret = AVERROR_EXIT;
|
||||
|
||||
if (argc < 3) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/** Register all codecs and formats so that they can be used. */
|
||||
av_register_all();
|
||||
/** Open the input file for reading. */
|
||||
if (open_input_file(argv[1], &input_format_context,
|
||||
&input_codec_context))
|
||||
goto cleanup;
|
||||
/** Open the output file for writing. */
|
||||
if (open_output_file(argv[2], input_codec_context,
|
||||
&output_format_context, &output_codec_context))
|
||||
goto cleanup;
|
||||
/** Initialize the resampler to be able to convert audio sample formats. */
|
||||
if (init_resampler(input_codec_context, output_codec_context,
|
||||
&resample_context))
|
||||
goto cleanup;
|
||||
/** Initialize the FIFO buffer to store audio samples to be encoded. */
|
||||
if (init_fifo(&fifo))
|
||||
goto cleanup;
|
||||
/** Write the header of the output file container. */
|
||||
if (write_output_file_header(output_format_context))
|
||||
goto cleanup;
|
||||
|
||||
/**
|
||||
* Loop as long as we have input samples to read or output samples
|
||||
* to write; abort as soon as we have neither.
|
||||
*/
|
||||
while (1) {
|
||||
/** Use the encoder's desired frame size for processing. */
|
||||
const int output_frame_size = output_codec_context->frame_size;
|
||||
int finished = 0;
|
||||
|
||||
/**
|
||||
* Make sure that there is one frame worth of samples in the FIFO
|
||||
* buffer so that the encoder can do its work.
|
||||
* Since the decoder's and the encoder's frame size may differ, we
|
||||
* need the FIFO buffer to store as many frames' worth of input samples
* as are needed to make up at least one frame's worth of output samples.
|
||||
*/
|
||||
while (av_audio_fifo_size(fifo) < output_frame_size) {
|
||||
/**
|
||||
* Decode one frame worth of audio samples, convert it to the
|
||||
* output sample format and put it into the FIFO buffer.
|
||||
*/
|
||||
if (read_decode_convert_and_store(fifo, input_format_context,
|
||||
input_codec_context,
|
||||
output_codec_context,
|
||||
resample_context, &finished))
|
||||
goto cleanup;
|
||||
|
||||
/**
|
||||
* If we are at the end of the input file, we continue
|
||||
* encoding the remaining audio samples to the output file.
|
||||
*/
|
||||
if (finished)
|
||||
break;
|
||||
}
|
||||
|
||||
/**
|
||||
* If we have enough samples for the encoder, we encode them.
|
||||
* At the end of the file, we pass the remaining samples to
|
||||
* the encoder.
|
||||
*/
|
||||
while (av_audio_fifo_size(fifo) >= output_frame_size ||
|
||||
(finished && av_audio_fifo_size(fifo) > 0))
|
||||
/**
|
||||
* Take one frame worth of audio samples from the FIFO buffer,
|
||||
* encode it and write it to the output file.
|
||||
*/
|
||||
if (load_encode_and_write(fifo, output_format_context,
|
||||
output_codec_context))
|
||||
goto cleanup;
|
||||
|
||||
/**
|
||||
* If we are at the end of the input file and have encoded
|
||||
* all remaining samples, we can exit this loop and finish.
|
||||
*/
|
||||
if (finished) {
|
||||
int data_written;
|
||||
/** Flush the encoder as it may have delayed frames. */
|
||||
do {
|
||||
if (encode_audio_frame(NULL, output_format_context,
|
||||
output_codec_context, &data_written))
|
||||
goto cleanup;
|
||||
} while (data_written);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/** Write the trailer of the output file container. */
|
||||
if (write_output_file_trailer(output_format_context))
|
||||
goto cleanup;
|
||||
ret = 0;
|
||||
|
||||
cleanup:
|
||||
if (fifo)
|
||||
av_audio_fifo_free(fifo);
|
||||
swr_free(&resample_context);
|
||||
if (output_codec_context)
|
||||
avcodec_close(output_codec_context);
|
||||
if (output_format_context) {
|
||||
avio_closep(&output_format_context->pb);
|
||||
avformat_free_context(output_format_context);
|
||||
}
|
||||
if (input_codec_context)
|
||||
avcodec_close(input_codec_context);
|
||||
if (input_format_context)
|
||||
avformat_close_input(&input_format_context);
|
||||
|
||||
return ret;
|
||||
}
|
583 Externals/ffmpeg/dev/doc/examples/transcoding.c vendored
@@ -1,583 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2014 Andrey Utkin
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for demuxing, decoding, filtering, encoding and muxing
|
||||
* @example transcoding.c
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/pixdesc.h>
|
||||
|
||||
static AVFormatContext *ifmt_ctx;
|
||||
static AVFormatContext *ofmt_ctx;
|
||||
typedef struct FilteringContext {
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
} FilteringContext;
|
||||
static FilteringContext *filter_ctx;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
unsigned int i;
|
||||
|
||||
ifmt_ctx = NULL;
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
AVStream *stream;
|
||||
AVCodecContext *codec_ctx;
|
||||
stream = ifmt_ctx->streams[i];
|
||||
codec_ctx = stream->codec;
|
||||
/* Reencode video & audio and remux subtitles etc. */
|
||||
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|
||||
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
/* Open decoder */
|
||||
ret = avcodec_open2(codec_ctx,
|
||||
avcodec_find_decoder(codec_ctx->codec_id), NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
av_dump_format(ifmt_ctx, 0, filename, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int open_output_file(const char *filename)
|
||||
{
|
||||
AVStream *out_stream;
|
||||
AVStream *in_stream;
|
||||
AVCodecContext *dec_ctx, *enc_ctx;
|
||||
AVCodec *encoder;
|
||||
int ret;
|
||||
unsigned int i;
|
||||
|
||||
ofmt_ctx = NULL;
|
||||
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
|
||||
if (!ofmt_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
out_stream = avformat_new_stream(ofmt_ctx, NULL);
|
||||
if (!out_stream) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
|
||||
in_stream = ifmt_ctx->streams[i];
|
||||
dec_ctx = in_stream->codec;
|
||||
enc_ctx = out_stream->codec;
|
||||
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|
||||
|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
/* in this example, we choose transcoding to same codec */
|
||||
encoder = avcodec_find_encoder(dec_ctx->codec_id);
|
||||
if (!encoder) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Neccessary encoder not found\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* In this example, we transcode to same properties (picture size,
|
||||
* sample rate etc.). These properties can be changed for output
|
||||
* streams easily using filters */
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
enc_ctx->height = dec_ctx->height;
|
||||
enc_ctx->width = dec_ctx->width;
|
||||
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
|
||||
/* take first format from list of supported formats */
|
||||
enc_ctx->pix_fmt = encoder->pix_fmts[0];
|
||||
/* video time_base can be set to whatever is handy and supported by encoder */
|
||||
enc_ctx->time_base = dec_ctx->time_base;
|
||||
} else {
|
||||
enc_ctx->sample_rate = dec_ctx->sample_rate;
|
||||
enc_ctx->channel_layout = dec_ctx->channel_layout;
|
||||
enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
|
||||
/* take first format from list of supported formats */
|
||||
enc_ctx->sample_fmt = encoder->sample_fmts[0];
|
||||
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
|
||||
}
|
||||
|
||||
/* Third parameter can be used to pass settings to encoder */
|
||||
ret = avcodec_open2(enc_ctx, encoder, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
|
||||
return ret;
|
||||
}
|
||||
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else {
|
||||
/* if this stream must be remuxed */
|
||||
ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
|
||||
ifmt_ctx->streams[i]->codec);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
}
|
||||
av_dump_format(ofmt_ctx, 0, filename, 1);
|
||||
|
||||
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
/* init muxer, write output file header */
|
||||
ret = avformat_write_header(ofmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
||||
AVCodecContext *enc_ctx, const char *filter_spec)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
AVFilter *buffersrc = NULL;
|
||||
AVFilter *buffersink = NULL;
|
||||
AVFilterContext *buffersrc_ctx = NULL;
|
||||
AVFilterContext *buffersink_ctx = NULL;
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
AVFilterGraph *filter_graph = avfilter_graph_alloc();
|
||||
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
buffersrc = avfilter_get_by_name("buffer");
|
||||
buffersink = avfilter_get_by_name("buffersink");
|
||||
if (!buffersrc || !buffersink) {
|
||||
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num,
|
||||
dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
|
||||
(uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
|
||||
goto end;
|
||||
}
|
||||
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
buffersrc = avfilter_get_by_name("abuffer");
|
||||
buffersink = avfilter_get_by_name("abuffersink");
|
||||
if (!buffersrc || !buffersink) {
|
||||
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout =
|
||||
av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt),
|
||||
dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
|
||||
(uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
|
||||
(uint8_t*)&enc_ctx->channel_layout,
|
||||
sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
|
||||
(uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
goto end;
|
||||
}
|
||||
} else {
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if (!outputs->name || !inputs->name) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
/* Fill FilteringContext */
|
||||
fctx->buffersrc_ctx = buffersrc_ctx;
|
||||
fctx->buffersink_ctx = buffersink_ctx;
|
||||
fctx->filter_graph = filter_graph;
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int init_filters(void)
|
||||
{
|
||||
const char *filter_spec;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
|
||||
if (!filter_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
filter_ctx[i].buffersrc_ctx = NULL;
|
||||
filter_ctx[i].buffersink_ctx = NULL;
|
||||
filter_ctx[i].filter_graph = NULL;
|
||||
if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
|
||||
|| ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
|
||||
continue;
|
||||
|
||||
|
||||
if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
|
||||
filter_spec = "null"; /* passthrough (dummy) filter for video */
|
||||
else
|
||||
filter_spec = "anull"; /* passthrough (dummy) filter for audio */
|
||||
ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
|
||||
ofmt_ctx->streams[i]->codec, filter_spec);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
|
||||
int ret;
|
||||
int got_frame_local;
|
||||
AVPacket enc_pkt;
|
||||
int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
|
||||
(ifmt_ctx->streams[stream_index]->codec->codec_type ==
|
||||
AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
|
||||
|
||||
if (!got_frame)
|
||||
got_frame = &got_frame_local;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
|
||||
/* encode filtered frame */
|
||||
enc_pkt.data = NULL;
|
||||
enc_pkt.size = 0;
|
||||
av_init_packet(&enc_pkt);
|
||||
ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
|
||||
filt_frame, got_frame);
|
||||
av_frame_free(&filt_frame);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (!(*got_frame))
|
||||
return 0;
|
||||
|
||||
/* prepare packet for muxing */
|
||||
enc_pkt.stream_index = stream_index;
|
||||
av_packet_rescale_ts(&enc_pkt,
|
||||
ofmt_ctx->streams[stream_index]->codec->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base);
|
||||
|
||||
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
|
||||
/* mux encoded frame */
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
|
||||
{
|
||||
int ret;
|
||||
AVFrame *filt_frame;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
|
||||
/* push the decoded frame into the filtergraph */
|
||||
ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
|
||||
frame, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
filt_frame = av_frame_alloc();
|
||||
if (!filt_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
break;
|
||||
}
|
||||
av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
|
||||
ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
|
||||
filt_frame);
|
||||
if (ret < 0) {
|
||||
/* if no more frames for output - returns AVERROR(EAGAIN)
|
||||
* if flushed and no more frames for output - returns AVERROR_EOF
|
||||
* rewrite retcode to 0 to show it as normal procedure completion
|
||||
*/
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
ret = 0;
|
||||
av_frame_free(&filt_frame);
|
||||
break;
|
||||
}
|
||||
|
||||
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
|
||||
ret = encode_write_frame(filt_frame, stream_index, NULL);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int flush_encoder(unsigned int stream_index)
|
||||
{
|
||||
int ret;
|
||||
int got_frame;
|
||||
|
||||
if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
|
||||
CODEC_CAP_DELAY))
|
||||
return 0;
|
||||
|
||||
while (1) {
|
||||
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
|
||||
ret = encode_write_frame(NULL, stream_index, &got_frame);
|
||||
if (ret < 0)
|
||||
break;
|
||||
if (!got_frame)
|
||||
return 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet = { .data = NULL, .size = 0 };
|
||||
AVFrame *frame = NULL;
|
||||
enum AVMediaType type;
|
||||
unsigned int stream_index;
|
||||
unsigned int i;
|
||||
int got_frame;
|
||||
int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
|
||||
|
||||
if (argc != 3) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
av_register_all();
|
||||
avfilter_register_all();
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = open_output_file(argv[2])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters()) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
stream_index = packet.stream_index;
|
||||
type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
|
||||
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
|
||||
stream_index);
|
||||
|
||||
if (filter_ctx[stream_index].filter_graph) {
|
||||
av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
break;
|
||||
}
|
||||
av_packet_rescale_ts(&packet,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ifmt_ctx->streams[stream_index]->codec->time_base);
|
||||
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
|
||||
avcodec_decode_audio4;
|
||||
ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
|
||||
&got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_frame_free(&frame);
|
||||
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
|
||||
break;
|
||||
}
|
||||
|
||||
if (got_frame) {
|
||||
frame->pts = av_frame_get_best_effort_timestamp(frame);
|
||||
ret = filter_encode_write_frame(frame, stream_index);
|
||||
av_frame_free(&frame);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
} else {
|
||||
av_frame_free(&frame);
|
||||
}
|
||||
} else {
|
||||
/* remux this frame without reencoding */
|
||||
av_packet_rescale_ts(&packet,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base);
|
||||
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &packet);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
}
|
||||
av_free_packet(&packet);
|
||||
}
|
||||
|
||||
/* flush filters and encoders */
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
/* flush filter */
|
||||
if (!filter_ctx[i].filter_graph)
|
||||
continue;
|
||||
ret = filter_encode_write_frame(NULL, i);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* flush encoder */
|
||||
ret = flush_encoder(i);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
av_write_trailer(ofmt_ctx);
|
||||
end:
|
||||
av_free_packet(&packet);
|
||||
av_frame_free(&frame);
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
avcodec_close(ifmt_ctx->streams[i]->codec);
|
||||
if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
|
||||
avcodec_close(ofmt_ctx->streams[i]->codec);
|
||||
if (filter_ctx && filter_ctx[i].filter_graph)
|
||||
avfilter_graph_free(&filter_ctx[i].filter_graph);
|
||||
}
|
||||
av_free(filter_ctx);
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
|
||||
avio_closep(&ofmt_ctx->pb);
|
||||
avformat_free_context(ofmt_ctx);
|
||||
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
|
||||
|
||||
return ret ? 1 : 0;
|
||||
}
|
719 Externals/ffmpeg/dev/doc/faq.html vendored
@@ -1,719 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
FFmpeg FAQ
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
FFmpeg FAQ
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-General-Questions" href="#General-Questions">1 General Questions</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Why-doesn_0027t-FFmpeg-support-feature-_005bxyz_005d_003f" href="#Why-doesn_0027t-FFmpeg-support-feature-_005bxyz_005d_003f">1.1 Why doesn’t FFmpeg support feature [xyz]?</a></li>
|
||||
<li><a name="toc-FFmpeg-does-not-support-codec-XXX_002e-Can-you-include-a-Windows-DLL-loader-to-support-it_003f" href="#FFmpeg-does-not-support-codec-XXX_002e-Can-you-include-a-Windows-DLL-loader-to-support-it_003f">1.2 FFmpeg does not support codec XXX. Can you include a Windows DLL loader to support it?</a></li>
|
||||
<li><a name="toc-I-cannot-read-this-file-although-this-format-seems-to-be-supported-by-ffmpeg_002e" href="#I-cannot-read-this-file-although-this-format-seems-to-be-supported-by-ffmpeg_002e">1.3 I cannot read this file although this format seems to be supported by ffmpeg.</a></li>
|
||||
<li><a name="toc-Which-codecs-are-supported-by-Windows_003f" href="#Which-codecs-are-supported-by-Windows_003f">1.4 Which codecs are supported by Windows?</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Compilation" href="#Compilation">2 Compilation</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-error_003a-can_0027t-find-a-register-in-class-_0027GENERAL_005fREGS_0027-while-reloading-_0027asm_0027" href="#error_003a-can_0027t-find-a-register-in-class-_0027GENERAL_005fREGS_0027-while-reloading-_0027asm_0027">2.1 <code>error: can't find a register in class 'GENERAL_REGS' while reloading 'asm'</code></a></li>
|
||||
<li><a name="toc-I-have-installed-this-library-with-my-distro_0027s-package-manager_002e-Why-does-configure-not-see-it_003f" href="#I-have-installed-this-library-with-my-distro_0027s-package-manager_002e-Why-does-configure-not-see-it_003f">2.2 I have installed this library with my distro’s package manager. Why does <code>configure</code> not see it?</a></li>
|
||||
<li><a name="toc-How-do-I-make-pkg_002dconfig-find-my-libraries_003f" href="#How-do-I-make-pkg_002dconfig-find-my-libraries_003f">2.3 How do I make <code>pkg-config</code> find my libraries?</a></li>
|
||||
<li><a name="toc-How-do-I-use-pkg_002dconfig-when-cross_002dcompiling_003f" href="#How-do-I-use-pkg_002dconfig-when-cross_002dcompiling_003f">2.4 How do I use <code>pkg-config</code> when cross-compiling?</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Usage" href="#Usage">3 Usage</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-ffmpeg-does-not-work_003b-what-is-wrong_003f" href="#ffmpeg-does-not-work_003b-what-is-wrong_003f">3.1 ffmpeg does not work; what is wrong?</a></li>
|
||||
<li><a name="toc-How-do-I-encode-single-pictures-into-movies_003f" href="#How-do-I-encode-single-pictures-into-movies_003f">3.2 How do I encode single pictures into movies?</a></li>
|
||||
<li><a name="toc-How-do-I-encode-movie-to-single-pictures_003f" href="#How-do-I-encode-movie-to-single-pictures_003f">3.3 How do I encode movie to single pictures?</a></li>
|
||||
<li><a name="toc-Why-do-I-see-a-slight-quality-degradation-with-multithreaded-MPEG_002a-encoding_003f" href="#Why-do-I-see-a-slight-quality-degradation-with-multithreaded-MPEG_002a-encoding_003f">3.4 Why do I see a slight quality degradation with multithreaded MPEG* encoding?</a></li>
|
||||
<li><a name="toc-How-can-I-read-from-the-standard-input-or-write-to-the-standard-output_003f" href="#How-can-I-read-from-the-standard-input-or-write-to-the-standard-output_003f">3.5 How can I read from the standard input or write to the standard output?</a></li>
|
||||
<li><a name="toc-_002df-jpeg-doesn_0027t-work_002e" href="#g_t_002df-jpeg-doesn_0027t-work_002e">3.6 -f jpeg doesn’t work.</a></li>
|
||||
<li><a name="toc-Why-can-I-not-change-the-frame-rate_003f" href="#Why-can-I-not-change-the-frame-rate_003f">3.7 Why can I not change the frame rate?</a></li>
|
||||
<li><a name="toc-How-do-I-encode-Xvid-or-DivX-video-with-ffmpeg_003f" href="#How-do-I-encode-Xvid-or-DivX-video-with-ffmpeg_003f">3.8 How do I encode Xvid or DivX video with ffmpeg?</a></li>
|
||||
<li><a name="toc-Which-are-good-parameters-for-encoding-high-quality-MPEG_002d4_003f" href="#Which-are-good-parameters-for-encoding-high-quality-MPEG_002d4_003f">3.9 Which are good parameters for encoding high quality MPEG-4?</a></li>
|
||||
<li><a name="toc-Which-are-good-parameters-for-encoding-high-quality-MPEG_002d1_002fMPEG_002d2_003f" href="#Which-are-good-parameters-for-encoding-high-quality-MPEG_002d1_002fMPEG_002d2_003f">3.10 Which are good parameters for encoding high quality MPEG-1/MPEG-2?</a></li>
|
||||
<li><a name="toc-Interlaced-video-looks-very-bad-when-encoded-with-ffmpeg_002c-what-is-wrong_003f" href="#Interlaced-video-looks-very-bad-when-encoded-with-ffmpeg_002c-what-is-wrong_003f">3.11 Interlaced video looks very bad when encoded with ffmpeg, what is wrong?</a></li>
|
||||
<li><a name="toc-How-can-I-read-DirectShow-files_003f" href="#How-can-I-read-DirectShow-files_003f">3.12 How can I read DirectShow files?</a></li>
|
||||
<li><a name="toc-How-can-I-join-video-files_003f" href="#How-can-I-join-video-files_003f">3.13 How can I join video files?</a></li>
|
||||
<li><a name="toc-How-can-I-concatenate-video-files_003f" href="#How-can-I-concatenate-video-files_003f">3.14 How can I concatenate video files?</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Concatenating-using-the-concat-filter" href="#Concatenating-using-the-concat-filter">3.14.1 Concatenating using the concat <em>filter</em></a></li>
|
||||
<li><a name="toc-Concatenating-using-the-concat-demuxer" href="#Concatenating-using-the-concat-demuxer">3.14.2 Concatenating using the concat <em>demuxer</em></a></li>
|
||||
<li><a name="toc-Concatenating-using-the-concat-protocol-_0028file-level_0029" href="#Concatenating-using-the-concat-protocol-_0028file-level_0029">3.14.3 Concatenating using the concat <em>protocol</em> (file level)</a></li>
|
||||
<li><a name="toc-Concatenating-using-raw-audio-and-video" href="#Concatenating-using-raw-audio-and-video">3.14.4 Concatenating using raw audio and video</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Using-_002df-lavfi_002c-audio-becomes-mono-for-no-apparent-reason_002e" href="#Using-_002df-lavfi_002c-audio-becomes-mono-for-no-apparent-reason_002e">3.15 Using <samp>-f lavfi</samp>, audio becomes mono for no apparent reason.</a></li>
|
||||
<li><a name="toc-Why-does-FFmpeg-not-see-the-subtitles-in-my-VOB-file_003f" href="#Why-does-FFmpeg-not-see-the-subtitles-in-my-VOB-file_003f">3.16 Why does FFmpeg not see the subtitles in my VOB file?</a></li>
|
||||
<li><a name="toc-Why-was-the-ffmpeg-_002dsameq-option-removed_003f-What-to-use-instead_003f" href="#Why-was-the-ffmpeg-_002dsameq-option-removed_003f-What-to-use-instead_003f">3.17 Why was the <code>ffmpeg</code> <samp>-sameq</samp> option removed? What to use instead?</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Development" href="#Development">4 Development</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Are-there-examples-illustrating-how-to-use-the-FFmpeg-libraries_002c-particularly-libavcodec-and-libavformat_003f" href="#Are-there-examples-illustrating-how-to-use-the-FFmpeg-libraries_002c-particularly-libavcodec-and-libavformat_003f">4.1 Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?</a></li>
|
||||
<li><a name="toc-Can-you-support-my-C-compiler-XXX_003f" href="#Can-you-support-my-C-compiler-XXX_003f">4.2 Can you support my C compiler XXX?</a></li>
|
||||
<li><a name="toc-Is-Microsoft-Visual-C_002b_002b-supported_003f" href="#Is-Microsoft-Visual-C_002b_002b-supported_003f">4.3 Is Microsoft Visual C++ supported?</a></li>
|
||||
<li><a name="toc-Can-you-add-automake_002c-libtool-or-autoconf-support_003f" href="#Can-you-add-automake_002c-libtool-or-autoconf-support_003f">4.4 Can you add automake, libtool or autoconf support?</a></li>
|
||||
<li><a name="toc-Why-not-rewrite-FFmpeg-in-object_002doriented-C_002b_002b_003f" href="#Why-not-rewrite-FFmpeg-in-object_002doriented-C_002b_002b_003f">4.5 Why not rewrite FFmpeg in object-oriented C++?</a></li>
|
||||
<li><a name="toc-Why-are-the-ffmpeg-programs-devoid-of-debugging-symbols_003f" href="#Why-are-the-ffmpeg-programs-devoid-of-debugging-symbols_003f">4.6 Why are the ffmpeg programs devoid of debugging symbols?</a></li>
|
||||
<li><a name="toc-I-do-not-like-the-LGPL_002c-can-I-contribute-code-under-the-GPL-instead_003f" href="#I-do-not-like-the-LGPL_002c-can-I-contribute-code-under-the-GPL-instead_003f">4.7 I do not like the LGPL, can I contribute code under the GPL instead?</a></li>
|
||||
<li><a name="toc-I_0027m-using-FFmpeg-from-within-my-C-application-but-the-linker-complains-about-missing-symbols-from-the-libraries-themselves_002e" href="#I_0027m-using-FFmpeg-from-within-my-C-application-but-the-linker-complains-about-missing-symbols-from-the-libraries-themselves_002e">4.8 I’m using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves.</a></li>
|
||||
<li><a name="toc-I_0027m-using-FFmpeg-from-within-my-C_002b_002b-application-but-the-linker-complains-about-missing-symbols-which-seem-to-be-available_002e" href="#I_0027m-using-FFmpeg-from-within-my-C_002b_002b-application-but-the-linker-complains-about-missing-symbols-which-seem-to-be-available_002e">4.9 I’m using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available.</a></li>
|
||||
<li><a name="toc-I_0027m-using-libavutil-from-within-my-C_002b_002b-application-but-the-compiler-complains-about-_0027UINT64_005fC_0027-was-not-declared-in-this-scope" href="#I_0027m-using-libavutil-from-within-my-C_002b_002b-application-but-the-compiler-complains-about-_0027UINT64_005fC_0027-was-not-declared-in-this-scope">4.10 I’m using libavutil from within my C++ application but the compiler complains about ’UINT64_C’ was not declared in this scope</a></li>
|
||||
<li><a name="toc-I-have-a-file-in-memory-_002f-a-API-different-from-_002aopen_002f_002aread_002f-libc-how-do-I-use-it-with-libavformat_003f" href="#I-have-a-file-in-memory-_002f-a-API-different-from-_002aopen_002f_002aread_002f-libc-how-do-I-use-it-with-libavformat_003f">4.11 I have a file in memory / a API different from *open/*read/ libc how do I use it with libavformat?</a></li>
|
||||
<li><a name="toc-Where-is-the-documentation-about-ffv1_002c-msmpeg4_002c-asv1_002c-4xm_003f" href="#Where-is-the-documentation-about-ffv1_002c-msmpeg4_002c-asv1_002c-4xm_003f">4.12 Where is the documentation about ffv1, msmpeg4, asv1, 4xm?</a></li>
|
||||
<li><a name="toc-How-do-I-feed-H_002e263_002dRTP-_0028and-other-codecs-in-RTP_0029-to-libavcodec_003f" href="#How-do-I-feed-H_002e263_002dRTP-_0028and-other-codecs-in-RTP_0029-to-libavcodec_003f">4.13 How do I feed H.263-RTP (and other codecs in RTP) to libavcodec?</a></li>
|
||||
<li><a name="toc-AVStream_002er_005fframe_005frate-is-wrong_002c-it-is-much-larger-than-the-frame-rate_002e" href="#AVStream_002er_005fframe_005frate-is-wrong_002c-it-is-much-larger-than-the-frame-rate_002e">4.14 AVStream.r_frame_rate is wrong, it is much larger than the frame rate.</a></li>
|
||||
<li><a name="toc-Why-is-make-fate-not-running-all-tests_003f" href="#Why-is-make-fate-not-running-all-tests_003f">4.15 Why is <code>make fate</code> not running all tests?</a></li>
|
||||
<li><a name="toc-Why-is-make-fate-not-finding-the-samples_003f" href="#Why-is-make-fate-not-finding-the-samples_003f">4.16 Why is <code>make fate</code> not finding the samples?</a></li>
|
||||
</ul></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="General-Questions"></a>
|
||||
<h2 class="chapter">1 General Questions<span class="pull-right"><a class="anchor hidden-xs" href="#General-Questions" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-General-Questions" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<a name="Why-doesn_0027t-FFmpeg-support-feature-_005bxyz_005d_003f"></a>
|
||||
<h3 class="section">1.1 Why doesn’t FFmpeg support feature [xyz]?<span class="pull-right"><a class="anchor hidden-xs" href="#Why-doesn_0027t-FFmpeg-support-feature-_005bxyz_005d_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Why-doesn_0027t-FFmpeg-support-feature-_005bxyz_005d_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Because no one has taken on that task yet. FFmpeg development is
|
||||
driven by the tasks that are important to the individual developers.
|
||||
If there is a feature that is important to you, the best way to get
|
||||
it implemented is to undertake the task yourself or sponsor a developer.
|
||||
</p>
|
||||
<a name="FFmpeg-does-not-support-codec-XXX_002e-Can-you-include-a-Windows-DLL-loader-to-support-it_003f"></a>
|
||||
<h3 class="section">1.2 FFmpeg does not support codec XXX. Can you include a Windows DLL loader to support it?<span class="pull-right"><a class="anchor hidden-xs" href="#FFmpeg-does-not-support-codec-XXX_002e-Can-you-include-a-Windows-DLL-loader-to-support-it_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-FFmpeg-does-not-support-codec-XXX_002e-Can-you-include-a-Windows-DLL-loader-to-support-it_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>No. Windows DLLs are not portable; they are also bloated and often slow.
Moreover, FFmpeg strives to support all codecs natively.
A DLL loader is not conducive to that goal.
</p>
|
||||
<a name="I-cannot-read-this-file-although-this-format-seems-to-be-supported-by-ffmpeg_002e"></a>
|
||||
<h3 class="section">1.3 I cannot read this file although this format seems to be supported by ffmpeg.<span class="pull-right"><a class="anchor hidden-xs" href="#I-cannot-read-this-file-although-this-format-seems-to-be-supported-by-ffmpeg_002e" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-I-cannot-read-this-file-although-this-format-seems-to-be-supported-by-ffmpeg_002e" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Even if ffmpeg can read the container format, it may not support all its
|
||||
codecs. Please consult the supported codec list in the ffmpeg
|
||||
documentation.
|
||||
</p>
|
||||
<a name="Which-codecs-are-supported-by-Windows_003f"></a>
|
||||
<h3 class="section">1.4 Which codecs are supported by Windows?<span class="pull-right"><a class="anchor hidden-xs" href="#Which-codecs-are-supported-by-Windows_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Which-codecs-are-supported-by-Windows_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Windows does not support standard formats like MPEG very well, unless you
|
||||
install some additional codecs.
|
||||
</p>
|
||||
<p>The following list of video codecs should work on most Windows systems:
|
||||
</p><dl compact="compact">
|
||||
<dt><samp>msmpeg4v2</samp></dt>
|
||||
<dd><p>.avi/.asf
|
||||
</p></dd>
|
||||
<dt><samp>msmpeg4</samp></dt>
|
||||
<dd><p>.asf only
|
||||
</p></dd>
|
||||
<dt><samp>wmv1</samp></dt>
|
||||
<dd><p>.asf only
|
||||
</p></dd>
|
||||
<dt><samp>wmv2</samp></dt>
|
||||
<dd><p>.asf only
|
||||
</p></dd>
|
||||
<dt><samp>mpeg4</samp></dt>
|
||||
<dd><p>Only if you have some MPEG-4 codec like ffdshow or Xvid installed.
|
||||
</p></dd>
|
||||
<dt><samp>mpeg1video</samp></dt>
|
||||
<dd><p>.mpg only
|
||||
</p></dd>
|
||||
</dl>
|
||||
<p>Note that ASF files often have .wmv or .wma extensions in Windows. It should also
be mentioned that Microsoft claims a patent on the ASF format, and may sue
or threaten users who create ASF files with non-Microsoft software. It is
strongly advised to avoid ASF where possible.
</p>
|
||||
<p>The following list of audio codecs should work on most Windows systems:
|
||||
</p><dl compact="compact">
|
||||
<dt><samp>adpcm_ima_wav</samp></dt>
|
||||
<dt><samp>adpcm_ms</samp></dt>
|
||||
<dt><samp>pcm_s16le</samp></dt>
|
||||
<dd><p>always
|
||||
</p></dd>
|
||||
<dt><samp>libmp3lame</samp></dt>
|
||||
<dd><p>If some MP3 codec like LAME is installed.
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
|
||||
<a name="Compilation"></a>
|
||||
<h2 class="chapter">2 Compilation<span class="pull-right"><a class="anchor hidden-xs" href="#Compilation" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Compilation" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<a name="error_003a-can_0027t-find-a-register-in-class-_0027GENERAL_005fREGS_0027-while-reloading-_0027asm_0027"></a>
|
||||
<h3 class="section">2.1 <code>error: can't find a register in class 'GENERAL_REGS' while reloading 'asm'</code><span class="pull-right"><a class="anchor hidden-xs" href="#error_003a-can_0027t-find-a-register-in-class-_0027GENERAL_005fREGS_0027-while-reloading-_0027asm_0027" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-error_003a-can_0027t-find-a-register-in-class-_0027GENERAL_005fREGS_0027-while-reloading-_0027asm_0027" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>This is a bug in gcc. Do not report it to us. Instead, please report it to
|
||||
the gcc developers. Note that we will not add workarounds for gcc bugs.
|
||||
</p>
|
||||
<p>Also note that (some of) the gcc developers believe this is not a bug or
|
||||
not a bug they should fix:
|
||||
<a href="http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203">http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203</a>.
|
||||
Then again, some of them do not know the difference between an undecidable
|
||||
problem and an NP-hard problem...
|
||||
</p>
|
||||
<a name="I-have-installed-this-library-with-my-distro_0027s-package-manager_002e-Why-does-configure-not-see-it_003f"></a>
|
||||
<h3 class="section">2.2 I have installed this library with my distro’s package manager. Why does <code>configure</code> not see it?<span class="pull-right"><a class="anchor hidden-xs" href="#I-have-installed-this-library-with-my-distro_0027s-package-manager_002e-Why-does-configure-not-see-it_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-I-have-installed-this-library-with-my-distro_0027s-package-manager_002e-Why-does-configure-not-see-it_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Distributions usually split libraries into several packages. The main package
contains the files necessary to run programs using the library. The
development package contains the files necessary to build programs using the
library. Sometimes, docs and/or data are in a separate package too.
</p>
|
||||
<p>To build FFmpeg, you need to install the development package. It is usually
|
||||
called <samp>libfoo-dev</samp> or <samp>libfoo-devel</samp>. You can remove it after the
|
||||
build is finished, but be sure to keep the main package.
|
||||
</p>
|
||||
<a name="How-do-I-make-pkg_002dconfig-find-my-libraries_003f"></a>
|
||||
<h3 class="section">2.3 How do I make <code>pkg-config</code> find my libraries?<span class="pull-right"><a class="anchor hidden-xs" href="#How-do-I-make-pkg_002dconfig-find-my-libraries_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-How-do-I-make-pkg_002dconfig-find-my-libraries_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Somewhere along with your libraries, there is a <samp>.pc</samp> file (or several)
|
||||
in a <samp>pkgconfig</samp> directory. You need to set environment variables to
|
||||
point <code>pkg-config</code> to these files.
|
||||
</p>
|
||||
<p>If you need to <em>add</em> directories to <code>pkg-config</code>’s search list
|
||||
(typical use case: library installed separately), add it to
|
||||
<code>$PKG_CONFIG_PATH</code>:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">export PKG_CONFIG_PATH=/opt/x264/lib/pkgconfig:/opt/opus/lib/pkgconfig
|
||||
</pre></div>
|
||||
|
||||
<p>If you need to <em>replace</em> <code>pkg-config</code>’s search list
|
||||
(typical use case: cross-compiling), set it in
|
||||
<code>$PKG_CONFIG_LIBDIR</code>:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">export PKG_CONFIG_LIBDIR=/home/me/cross/usr/lib/pkgconfig:/home/me/cross/usr/local/lib/pkgconfig
|
||||
</pre></div>
|
||||
|
||||
<p>If you need to know the library’s internal dependencies (typical use: static
|
||||
linking), add the <code>--static</code> option to <code>pkg-config</code>:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">./configure --pkg-config-flags=--static
|
||||
</pre></div>
|
||||
|
||||
<a name="How-do-I-use-pkg_002dconfig-when-cross_002dcompiling_003f"></a>
|
||||
<h3 class="section">2.4 How do I use <code>pkg-config</code> when cross-compiling?<span class="pull-right"><a class="anchor hidden-xs" href="#How-do-I-use-pkg_002dconfig-when-cross_002dcompiling_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-How-do-I-use-pkg_002dconfig-when-cross_002dcompiling_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>The best way is to install <code>pkg-config</code> in your cross-compilation
|
||||
environment. It will automatically use the cross-compilation libraries.
|
||||
</p>
|
||||
<p>You can also use <code>pkg-config</code> from the host environment by
explicitly specifying <code>--pkg-config=pkg-config</code> to <code>configure</code>.
In that case, you must point <code>pkg-config</code> to the correct directories
using the <code>PKG_CONFIG_LIBDIR</code> environment variable, as explained in the previous entry.
</p>
|
||||
<p>As an intermediate solution, you can place in your cross-compilation
environment a script that calls the host <code>pkg-config</code> with
<code>PKG_CONFIG_LIBDIR</code> set. Such a script can look like this:
</p>
|
||||
<div class="example">
|
||||
<pre class="example">#!/bin/sh
|
||||
PKG_CONFIG_LIBDIR=/path/to/cross/lib/pkgconfig
|
||||
export PKG_CONFIG_LIBDIR
|
||||
exec /usr/bin/pkg-config "$@"
|
||||
</pre></div>
|
||||
|
||||
<a name="Usage"></a>
|
||||
<h2 class="chapter">3 Usage<span class="pull-right"><a class="anchor hidden-xs" href="#Usage" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Usage" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<a name="ffmpeg-does-not-work_003b-what-is-wrong_003f"></a>
|
||||
<h3 class="section">3.1 ffmpeg does not work; what is wrong?<span class="pull-right"><a class="anchor hidden-xs" href="#ffmpeg-does-not-work_003b-what-is-wrong_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-ffmpeg-does-not-work_003b-what-is-wrong_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Try a <code>make distclean</code> in the ffmpeg source directory before the build.
|
||||
If this does not help see
|
||||
(<a href="http://ffmpeg.org/bugreports.html">http://ffmpeg.org/bugreports.html</a>).
|
||||
</p>
|
||||
<a name="How-do-I-encode-single-pictures-into-movies_003f"></a>
|
||||
<h3 class="section">3.2 How do I encode single pictures into movies?<span class="pull-right"><a class="anchor hidden-xs" href="#How-do-I-encode-single-pictures-into-movies_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-How-do-I-encode-single-pictures-into-movies_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>First, rename your pictures to follow a numerical sequence.
|
||||
For example, img1.jpg, img2.jpg, img3.jpg,...
|
||||
Then you may run:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
|
||||
</pre></div>
|
||||
|
||||
<p>Notice that ‘<samp>%d</samp>’ is replaced by the image number.
|
||||
</p>
|
||||
<p><samp>img%03d.jpg</samp> means the sequence <samp>img001.jpg</samp>, <samp>img002.jpg</samp>, etc.
|
||||
</p>
|
||||
<p>Use the <samp>-start_number</samp> option to declare a starting number for
|
||||
the sequence. This is useful if your sequence does not start with
|
||||
<samp>img001.jpg</samp> but is still in a numerical order. The following
|
||||
example will start with <samp>img100.jpg</samp>:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
|
||||
</pre></div>
|
||||
|
||||
<p>If you have a large number of pictures to rename, you can use the
following command to ease the burden. The command, using Bourne
shell syntax, symbolically links all files in the current directory
that match <code>*jpg</code> to the <samp>/tmp</samp> directory in the sequence of
<samp>img001.jpg</samp>, <samp>img002.jpg</samp> and so on.
</p>
|
||||
<div class="example">
|
||||
<pre class="example">x=1; for i in *jpg; do counter=$(printf %03d $x); ln -s "$i" /tmp/img"$counter".jpg; x=$(($x+1)); done
|
||||
</pre></div>
|
||||
|
||||
<p>If you want to sequence them by oldest modified first, substitute
|
||||
<code>$(ls -r -t *jpg)</code> in place of <code>*jpg</code>.
|
||||
</p>
|
||||
<p>Then run:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">ffmpeg -f image2 -i /tmp/img%03d.jpg /tmp/a.mpg
|
||||
</pre></div>
|
||||
|
||||
<p>The same logic is used for any image format that ffmpeg reads.
|
||||
</p>
|
||||
<p>You can also use <code>cat</code> to pipe images to ffmpeg:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
|
||||
</pre></div>
|
||||
|
||||
<a name="How-do-I-encode-movie-to-single-pictures_003f"></a>
|
||||
<h3 class="section">3.3 How do I encode movie to single pictures?<span class="pull-right"><a class="anchor hidden-xs" href="#How-do-I-encode-movie-to-single-pictures_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-How-do-I-encode-movie-to-single-pictures_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Use:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">ffmpeg -i movie.mpg movie%d.jpg
|
||||
</pre></div>
|
||||
|
||||
<p>The <samp>movie.mpg</samp> used as input will be converted to
|
||||
<samp>movie1.jpg</samp>, <samp>movie2.jpg</samp>, etc...
|
||||
</p>
|
||||
<p>Instead of relying on file format self-recognition, you may also use
|
||||
</p><dl compact="compact">
|
||||
<dt><samp>-c:v ppm</samp></dt>
|
||||
<dt><samp>-c:v png</samp></dt>
|
||||
<dt><samp>-c:v mjpeg</samp></dt>
|
||||
</dl>
|
||||
<p>to force the encoding.
|
||||
</p>
|
||||
<p>Applying that to the previous example:
|
||||
</p><div class="example">
|
||||
<pre class="example">ffmpeg -i movie.mpg -f image2 -c:v mjpeg menu%d.jpg
|
||||
</pre></div>
|
||||
|
||||
<p>Beware that there is no "jpeg" codec. Use "mjpeg" instead.
|
||||
</p>
|
||||
<a name="Why-do-I-see-a-slight-quality-degradation-with-multithreaded-MPEG_002a-encoding_003f"></a>
|
||||
<h3 class="section">3.4 Why do I see a slight quality degradation with multithreaded MPEG* encoding?<span class="pull-right"><a class="anchor hidden-xs" href="#Why-do-I-see-a-slight-quality-degradation-with-multithreaded-MPEG_002a-encoding_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Why-do-I-see-a-slight-quality-degradation-with-multithreaded-MPEG_002a-encoding_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>For multithreaded MPEG* encoding, the encoded slices must be independent,
|
||||
otherwise thread n would practically have to wait for n-1 to finish, so it’s
|
||||
quite logical that there is a small reduction of quality. This is not a bug.
|
||||
</p>
|
||||
<a name="How-can-I-read-from-the-standard-input-or-write-to-the-standard-output_003f"></a>
|
||||
<h3 class="section">3.5 How can I read from the standard input or write to the standard output?<span class="pull-right"><a class="anchor hidden-xs" href="#How-can-I-read-from-the-standard-input-or-write-to-the-standard-output_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-How-can-I-read-from-the-standard-input-or-write-to-the-standard-output_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Use <samp>-</samp> as file name.
|
||||
</p>
|
||||
<a name="g_t_002df-jpeg-doesn_0027t-work_002e"></a>
|
||||
<h3 class="section">3.6 -f jpeg doesn’t work.<span class="pull-right"><a class="anchor hidden-xs" href="#_002df-jpeg-doesn_0027t-work_002e" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-_002df-jpeg-doesn_0027t-work_002e" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Try ’-f image2 test%d.jpg’.
|
||||
</p>
|
||||
<a name="Why-can-I-not-change-the-frame-rate_003f"></a>
|
||||
<h3 class="section">3.7 Why can I not change the frame rate?<span class="pull-right"><a class="anchor hidden-xs" href="#Why-can-I-not-change-the-frame-rate_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Why-can-I-not-change-the-frame-rate_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Some codecs, like MPEG-1/2, only allow a small number of fixed frame rates.
|
||||
Choose a different codec with the -c:v command line option.
|
||||
</p>
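<p>For example, a hedged sketch re-encoding to MPEG-4, which accepts arbitrary
frame rates (the file names and the rate are placeholders):
</p>
<div class="example">
<pre class="example">ffmpeg -i input.mpg -r 23.976 -c:v mpeg4 output.avi
</pre></div>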
|
||||
<a name="How-do-I-encode-Xvid-or-DivX-video-with-ffmpeg_003f"></a>
|
||||
<h3 class="section">3.8 How do I encode Xvid or DivX video with ffmpeg?<span class="pull-right"><a class="anchor hidden-xs" href="#How-do-I-encode-Xvid-or-DivX-video-with-ffmpeg_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-How-do-I-encode-Xvid-or-DivX-video-with-ffmpeg_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Both Xvid and DivX (version 4+) are implementations of the ISO MPEG-4
|
||||
standard (note that there are many other coding formats that use this
|
||||
same standard). Thus, use ’-c:v mpeg4’ to encode in these formats. The
|
||||
default fourcc stored in an MPEG-4-coded file will be ’FMP4’. If you want
|
||||
a different fourcc, use the ’-vtag’ option. E.g., ’-vtag xvid’ will
|
||||
force the fourcc ’xvid’ to be stored as the video fourcc rather than the
|
||||
default.
|
||||
</p>
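<p>For example (file names are placeholders):
</p>
<div class="example">
<pre class="example">ffmpeg -i input.avi -c:v mpeg4 -vtag xvid output.avi
</pre></div>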
|
||||
<a name="Which-are-good-parameters-for-encoding-high-quality-MPEG_002d4_003f"></a>
|
||||
<h3 class="section">3.9 Which are good parameters for encoding high quality MPEG-4?<span class="pull-right"><a class="anchor hidden-xs" href="#Which-are-good-parameters-for-encoding-high-quality-MPEG_002d4_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Which-are-good-parameters-for-encoding-high-quality-MPEG_002d4_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>’-mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2’,
|
||||
things to try: ’-bf 2’, ’-flags qprd’, ’-flags mv0’, ’-flags skiprd’.
|
||||
</p>
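<p>As a hedged sketch, those options could be combined into a two-pass command
like the following (file names and the bitrate are placeholders):
</p>
<div class="example">
<pre class="example">ffmpeg -i input.avi -c:v mpeg4 -mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -bf 2 -b:v 1200k -pass 1 -an -f null /dev/null
ffmpeg -i input.avi -c:v mpeg4 -mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -bf 2 -b:v 1200k -pass 2 output.avi
</pre></div>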
|
||||
<a name="Which-are-good-parameters-for-encoding-high-quality-MPEG_002d1_002fMPEG_002d2_003f"></a>
|
||||
<h3 class="section">3.10 Which are good parameters for encoding high quality MPEG-1/MPEG-2?<span class="pull-right"><a class="anchor hidden-xs" href="#Which-are-good-parameters-for-encoding-high-quality-MPEG_002d1_002fMPEG_002d2_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Which-are-good-parameters-for-encoding-high-quality-MPEG_002d1_002fMPEG_002d2_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>’-mbd rd -trellis 2 -cmp 2 -subcmp 2 -g 100 -pass 1/2’,
but beware that ’-g 100’ might cause problems with some decoders.
Things to try: ’-bf 2’, ’-flags qprd’, ’-flags mv0’, ’-flags skiprd’.
</p>
|
||||
<a name="Interlaced-video-looks-very-bad-when-encoded-with-ffmpeg_002c-what-is-wrong_003f"></a>
|
||||
<h3 class="section">3.11 Interlaced video looks very bad when encoded with ffmpeg, what is wrong?<span class="pull-right"><a class="anchor hidden-xs" href="#Interlaced-video-looks-very-bad-when-encoded-with-ffmpeg_002c-what-is-wrong_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Interlaced-video-looks-very-bad-when-encoded-with-ffmpeg_002c-what-is-wrong_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>You should use ’-flags +ilme+ildct’ and maybe ’-flags +alt’ for interlaced
|
||||
material, and try ’-top 0/1’ if the result looks really messed-up.
|
||||
</p>
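<p>For example, a hedged sketch for interlaced MPEG-2 (file names and the bitrate
are placeholders):
</p>
<div class="example">
<pre class="example">ffmpeg -i interlaced_input.vob -c:v mpeg2video -b:v 5000k -flags +ilme+ildct -top 1 output.mpg
</pre></div>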
|
||||
<a name="How-can-I-read-DirectShow-files_003f"></a>
|
||||
<h3 class="section">3.12 How can I read DirectShow files?<span class="pull-right"><a class="anchor hidden-xs" href="#How-can-I-read-DirectShow-files_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-How-can-I-read-DirectShow-files_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>If you have built FFmpeg with <code>./configure --enable-avisynth</code>
|
||||
(only possible on MinGW/Cygwin platforms),
|
||||
then you may use any file that DirectShow can read as input.
|
||||
</p>
|
||||
<p>Just create an "input.avs" text file with this single line ...
|
||||
</p><div class="example">
|
||||
<pre class="example">DirectShowSource("C:\path to your file\yourfile.asf")
|
||||
</pre></div>
|
||||
<p>... and then feed that text file to ffmpeg:
|
||||
</p><div class="example">
|
||||
<pre class="example">ffmpeg -i input.avs
|
||||
</pre></div>
|
||||
|
||||
<p>For ANY other help on AviSynth, please visit the
|
||||
<a href="http://www.avisynth.org/">AviSynth homepage</a>.
|
||||
</p>
|
||||
<a name="How-can-I-join-video-files_003f"></a>
|
||||
<h3 class="section">3.13 How can I join video files?<span class="pull-right"><a class="anchor hidden-xs" href="#How-can-I-join-video-files_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-How-can-I-join-video-files_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>To "join" video files is quite ambiguous. The following list explains the
|
||||
different kinds of "joining" and points out how those are addressed in
|
||||
FFmpeg. To join video files may mean:
|
||||
</p>
|
||||
<ul>
|
||||
<li> To put them one after the other: this is called to <em>concatenate</em> them
|
||||
(in short: concat) and is addressed
|
||||
<a href="#How-can-I-concatenate-video-files">in this very faq</a>.
|
||||
|
||||
</li><li> To put them together in the same file, to let the user choose between the
|
||||
different versions (example: different audio languages): this is called to
|
||||
<em>multiplex</em> them together (in short: mux), and is done by simply
|
||||
invoking ffmpeg with several <samp>-i</samp> options (see the example after this list).
|
||||
|
||||
</li><li> For audio, to put all channels together in a single stream (example: two
|
||||
mono streams into one stereo stream): this is sometimes called to
|
||||
<em>merge</em> them, and can be done using the
|
||||
<a href="http://ffmpeg.org/ffmpeg-filters.html#amerge"><code>amerge</code></a> filter.
|
||||
|
||||
</li><li> For audio, to play one on top of the other: this is called to <em>mix</em>
|
||||
them, and can be done by first merging them into a single stream and then
|
||||
using the <a href="http://ffmpeg.org/ffmpeg-filters.html#pan"><code>pan</code></a> filter to mix
|
||||
the channels at will.
|
||||
|
||||
</li><li> For video, to display both together, side by side or one on top of a part of
|
||||
the other; it can be done using the
|
||||
<a href="http://ffmpeg.org/ffmpeg-filters.html#overlay"><code>overlay</code></a> video filter.
|
||||
|
||||
</li></ul>
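<p>As an example of the multiplexing case, a hedged sketch that adds a second
audio track to a video (file names are placeholders):
</p>
<div class="example">
<pre class="example">ffmpeg -i input_video.mkv -i audio_french.mp3 -map 0 -map 1:a -c copy output.mkv
</pre></div>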
|
||||
|
||||
<a name="How-can-I-concatenate-video-files"></a><a name="How-can-I-concatenate-video-files_003f"></a>
|
||||
<h3 class="section">3.14 How can I concatenate video files?<span class="pull-right"><a class="anchor hidden-xs" href="#How-can-I-concatenate-video-files_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-How-can-I-concatenate-video-files_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>There are several solutions, depending on the exact circumstances.
|
||||
</p>
|
||||
<a name="Concatenating-using-the-concat-filter"></a>
|
||||
<h4 class="subsection">3.14.1 Concatenating using the concat <em>filter</em><span class="pull-right"><a class="anchor hidden-xs" href="#Concatenating-using-the-concat-filter" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Concatenating-using-the-concat-filter" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>FFmpeg has a <a href="http://ffmpeg.org/ffmpeg-filters.html#concat"><code>concat</code></a> filter designed specifically for that, with examples in the
|
||||
documentation. This operation is recommended if you need to re-encode.
|
||||
</p>
|
||||
<a name="Concatenating-using-the-concat-demuxer"></a>
|
||||
<h4 class="subsection">3.14.2 Concatenating using the concat <em>demuxer</em><span class="pull-right"><a class="anchor hidden-xs" href="#Concatenating-using-the-concat-demuxer" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Concatenating-using-the-concat-demuxer" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>FFmpeg has a <a href="http://www.ffmpeg.org/ffmpeg-formats.html#concat"><code>concat</code></a> demuxer which you can use when you want to avoid a re-encode and
|
||||
your format doesn’t support file level concatenation.
|
||||
</p>
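<p>A minimal sketch (file names are placeholders): list the inputs in a text file,
then pass that file to the demuxer and copy the streams.
</p>
<div class="example">
<pre class="example">echo "file 'input1.mp4'" > mylist.txt
echo "file 'input2.mp4'" >> mylist.txt
ffmpeg -f concat -i mylist.txt -c copy output.mp4
</pre></div>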
|
||||
<a name="Concatenating-using-the-concat-protocol-_0028file-level_0029"></a>
|
||||
<h4 class="subsection">3.14.3 Concatenating using the concat <em>protocol</em> (file level)<span class="pull-right"><a class="anchor hidden-xs" href="#Concatenating-using-the-concat-protocol-_0028file-level_0029" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Concatenating-using-the-concat-protocol-_0028file-level_0029" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>FFmpeg has a <a href="http://ffmpeg.org/ffmpeg-protocols.html#concat"><code>concat</code></a> protocol designed specifically for that, with examples in the
|
||||
documentation.
|
||||
</p>
|
||||
<p>A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow video to be
concatenated by merely concatenating the files containing them.
</p>
|
||||
<p>Hence you may concatenate your multimedia files by first transcoding them to
|
||||
these privileged formats, then using the humble <code>cat</code> command (or the
|
||||
equally humble <code>copy</code> under Windows), and finally transcoding back to your
|
||||
format of choice.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
|
||||
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
|
||||
cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg
|
||||
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
|
||||
</pre></div>
|
||||
|
||||
<p>Additionally, you can use the <code>concat</code> protocol instead of <code>cat</code> or
<code>copy</code>, which will avoid the creation of a potentially huge intermediate file.
</p>
|
||||
<div class="example">
|
||||
<pre class="example">ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
|
||||
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
|
||||
ffmpeg -i concat:"intermediate1.mpg|intermediate2.mpg" -c copy intermediate_all.mpg
|
||||
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
|
||||
</pre></div>
|
||||
|
||||
<p>Note that you may need to escape the character "|" which is special for many
|
||||
shells.
|
||||
</p>
|
||||
<p>Another option is usage of named pipes, should your platform support it:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">mkfifo intermediate1.mpg
|
||||
mkfifo intermediate2.mpg
|
||||
ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
|
||||
ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
|
||||
cat intermediate1.mpg intermediate2.mpg |\
|
||||
ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi
|
||||
</pre></div>
|
||||
|
||||
<a name="Concatenating-using-raw-audio-and-video"></a>
|
||||
<h4 class="subsection">3.14.4 Concatenating using raw audio and video<span class="pull-right"><a class="anchor hidden-xs" href="#Concatenating-using-raw-audio-and-video" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Concatenating-using-raw-audio-and-video" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>Similarly, the yuv4mpegpipe format and the raw video and raw audio codecs also
allow concatenation, and the transcoding step is almost lossless.
When using multiple yuv4mpegpipe streams, the first line needs to be discarded
from all but the first stream. This can be accomplished by piping through
<code>tail</code> as seen below. Note that when piping through <code>tail</code> you
must use command grouping, <code>{ ;}</code>, to background properly.
</p>
|
||||
<p>For example, let’s say we want to concatenate two FLV files into an
|
||||
output.flv file:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">mkfifo temp1.a
|
||||
mkfifo temp1.v
|
||||
mkfifo temp2.a
|
||||
mkfifo temp2.v
|
||||
mkfifo all.a
|
||||
mkfifo all.v
|
||||
ffmpeg -i input1.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp1.a < /dev/null &
|
||||
ffmpeg -i input2.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp2.a < /dev/null &
|
||||
ffmpeg -i input1.flv -an -f yuv4mpegpipe - > temp1.v < /dev/null &
|
||||
{ ffmpeg -i input2.flv -an -f yuv4mpegpipe - < /dev/null | tail -n +2 > temp2.v ; } &
|
||||
cat temp1.a temp2.a > all.a &
|
||||
cat temp1.v temp2.v > all.v &
|
||||
ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
|
||||
-f yuv4mpegpipe -i all.v \
|
||||
-y output.flv
|
||||
rm temp[12].[av] all.[av]
|
||||
</pre></div>
|
||||
|
||||
<a name="Using-_002df-lavfi_002c-audio-becomes-mono-for-no-apparent-reason_002e"></a>
|
||||
<h3 class="section">3.15 Using <samp>-f lavfi</samp>, audio becomes mono for no apparent reason.<span class="pull-right"><a class="anchor hidden-xs" href="#Using-_002df-lavfi_002c-audio-becomes-mono-for-no-apparent-reason_002e" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Using-_002df-lavfi_002c-audio-becomes-mono-for-no-apparent-reason_002e" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Use <samp>-dumpgraph -</samp> to find out exactly where the channel layout is
|
||||
lost.
|
||||
</p>
|
||||
<p>Most likely, it is through <code>auto-inserted aresample</code>. Try to understand
|
||||
why the converting filter was needed at that place.
|
||||
</p>
|
||||
<p>Just before the output is a likely place, as <samp>-f lavfi</samp> currently
only supports packed S16.
</p>
|
||||
<p>Then insert the correct <code>aformat</code> explicitly in the filtergraph,
|
||||
specifying the exact format.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">aformat=sample_fmts=s16:channel_layouts=stereo
|
||||
</pre></div>
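<p>As a hedged sketch using the <code>amovie</code> source (the file names are
placeholders), the filter can be appended at the end of the graph:
</p>
<div class="example">
<pre class="example">ffmpeg -f lavfi -i "amovie=input.mp3,aformat=sample_fmts=s16:channel_layouts=stereo" output.wav
</pre></div>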
|
||||
|
||||
<a name="Why-does-FFmpeg-not-see-the-subtitles-in-my-VOB-file_003f"></a>
|
||||
<h3 class="section">3.16 Why does FFmpeg not see the subtitles in my VOB file?<span class="pull-right"><a class="anchor hidden-xs" href="#Why-does-FFmpeg-not-see-the-subtitles-in-my-VOB-file_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Why-does-FFmpeg-not-see-the-subtitles-in-my-VOB-file_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>VOB and a few other formats do not have a global header that describes
|
||||
everything present in the file. Instead, applications are supposed to scan
|
||||
the file to see what it contains. Since VOB files are frequently large, only
|
||||
the beginning is scanned. If the subtitles happen only later in the file,
|
||||
they will not be initially detected.
|
||||
</p>
|
||||
<p>Some applications, including the <code>ffmpeg</code> command-line tool, can only
|
||||
work with streams that were detected during the initial scan; streams that
|
||||
are detected later are ignored.
|
||||
</p>
|
||||
<p>The size of the initial scan is controlled by two options: <code>probesize</code>
(default ~5 MB) and <code>analyzeduration</code> (default 5,000,000 µs = 5 s). For
the subtitle stream to be detected, both values must be large enough.
</p>
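<p>For example, raising both limits before opening the input (the values are only
an illustration):
</p>
<div class="example">
<pre class="example">ffmpeg -probesize 50M -analyzeduration 100M -i input.vob output.mkv
</pre></div>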
|
||||
<a name="Why-was-the-ffmpeg-_002dsameq-option-removed_003f-What-to-use-instead_003f"></a>
|
||||
<h3 class="section">3.17 Why was the <code>ffmpeg</code> <samp>-sameq</samp> option removed? What to use instead?<span class="pull-right"><a class="anchor hidden-xs" href="#Why-was-the-ffmpeg-_002dsameq-option-removed_003f-What-to-use-instead_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Why-was-the-ffmpeg-_002dsameq-option-removed_003f-What-to-use-instead_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>The <samp>-sameq</samp> option meant "same quantizer", and made sense only in a
|
||||
very limited set of cases. Unfortunately, a lot of people mistook it for
|
||||
"same quality" and used it in places where it did not make sense: it had
|
||||
roughly the expected visible effect, but achieved it in a very inefficient
|
||||
way.
|
||||
</p>
|
||||
<p>Each encoder has its own set of options to set the quality-vs-size balance;
use the options for the encoder you are using to set the quality level to a
point acceptable for your tastes. The most common options to do that are
<samp>-qscale</samp> and <samp>-qmax</samp>, but you should peruse the documentation
of the encoder you chose.
</p>
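<p>For example, with the native MPEG-4 encoder a fixed quantizer can be requested
like this (the value 2 is only an illustration; lower means better quality):
</p>
<div class="example">
<pre class="example">ffmpeg -i input.avi -c:v mpeg4 -qscale:v 2 output.avi
</pre></div>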
|
||||
<a name="Development"></a>
|
||||
<h2 class="chapter">4 Development<span class="pull-right"><a class="anchor hidden-xs" href="#Development" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Development" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<a name="Are-there-examples-illustrating-how-to-use-the-FFmpeg-libraries_002c-particularly-libavcodec-and-libavformat_003f"></a>
|
||||
<h3 class="section">4.1 Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?<span class="pull-right"><a class="anchor hidden-xs" href="#Are-there-examples-illustrating-how-to-use-the-FFmpeg-libraries_002c-particularly-libavcodec-and-libavformat_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Are-there-examples-illustrating-how-to-use-the-FFmpeg-libraries_002c-particularly-libavcodec-and-libavformat_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Yes. Check the <samp>doc/examples</samp> directory in the source
|
||||
repository, also available online at:
|
||||
<a href="https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples">https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples</a>.
|
||||
</p>
|
||||
<p>Examples are also installed by default, usually in
|
||||
<code>$PREFIX/share/ffmpeg/examples</code>.
|
||||
</p>
|
||||
<p>You may also read the Developers Guide of the FFmpeg documentation. Alternatively,
examine the source code for one of the many open source projects that
already incorporate FFmpeg, listed at <a href="projects.html">projects.html</a>.
</p>
|
||||
<a name="Can-you-support-my-C-compiler-XXX_003f"></a>
|
||||
<h3 class="section">4.2 Can you support my C compiler XXX?<span class="pull-right"><a class="anchor hidden-xs" href="#Can-you-support-my-C-compiler-XXX_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Can-you-support-my-C-compiler-XXX_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>It depends. If your compiler is C99-compliant, then patches to support
|
||||
it are likely to be welcome if they do not pollute the source code
|
||||
with <code>#ifdef</code>s related to the compiler.
|
||||
</p>
|
||||
<a name="Is-Microsoft-Visual-C_002b_002b-supported_003f"></a>
|
||||
<h3 class="section">4.3 Is Microsoft Visual C++ supported?<span class="pull-right"><a class="anchor hidden-xs" href="#Is-Microsoft-Visual-C_002b_002b-supported_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Is-Microsoft-Visual-C_002b_002b-supported_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Yes. Please see the <a href="platform.html">Microsoft Visual C++</a>
|
||||
section in the FFmpeg documentation.
|
||||
</p>
|
||||
<a name="Can-you-add-automake_002c-libtool-or-autoconf-support_003f"></a>
|
||||
<h3 class="section">4.4 Can you add automake, libtool or autoconf support?<span class="pull-right"><a class="anchor hidden-xs" href="#Can-you-add-automake_002c-libtool-or-autoconf-support_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Can-you-add-automake_002c-libtool-or-autoconf-support_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>No. These tools are too bloated and they complicate the build.
|
||||
</p>
|
||||
<a name="Why-not-rewrite-FFmpeg-in-object_002doriented-C_002b_002b_003f"></a>
|
||||
<h3 class="section">4.5 Why not rewrite FFmpeg in object-oriented C++?<span class="pull-right"><a class="anchor hidden-xs" href="#Why-not-rewrite-FFmpeg-in-object_002doriented-C_002b_002b_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Why-not-rewrite-FFmpeg-in-object_002doriented-C_002b_002b_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg is already organized in a highly modular manner and does not need to
|
||||
be rewritten in a formal object language. Further, many of the developers
|
||||
favor straight C; it works for them. For more arguments on this matter,
|
||||
read <a href="http://www.tux.org/lkml/#s15">"Programming Religion"</a>.
|
||||
</p>
|
||||
<a name="Why-are-the-ffmpeg-programs-devoid-of-debugging-symbols_003f"></a>
|
||||
<h3 class="section">4.6 Why are the ffmpeg programs devoid of debugging symbols?<span class="pull-right"><a class="anchor hidden-xs" href="#Why-are-the-ffmpeg-programs-devoid-of-debugging-symbols_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Why-are-the-ffmpeg-programs-devoid-of-debugging-symbols_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>The build process creates <code>ffmpeg_g</code>, <code>ffplay_g</code>, etc. which
|
||||
contain full debug information. Those binaries are stripped to create
|
||||
<code>ffmpeg</code>, <code>ffplay</code>, etc. If you need the debug information, use
|
||||
the *_g versions.
|
||||
</p>
|
||||
<a name="I-do-not-like-the-LGPL_002c-can-I-contribute-code-under-the-GPL-instead_003f"></a>
|
||||
<h3 class="section">4.7 I do not like the LGPL, can I contribute code under the GPL instead?<span class="pull-right"><a class="anchor hidden-xs" href="#I-do-not-like-the-LGPL_002c-can-I-contribute-code-under-the-GPL-instead_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-I-do-not-like-the-LGPL_002c-can-I-contribute-code-under-the-GPL-instead_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Yes, as long as the code is optional and can easily and cleanly be placed
|
||||
under #if CONFIG_GPL without breaking anything. So, for example, a new codec
|
||||
or filter would be OK under GPL while a bug fix to LGPL code would not.
|
||||
</p>
|
||||
<a name="I_0027m-using-FFmpeg-from-within-my-C-application-but-the-linker-complains-about-missing-symbols-from-the-libraries-themselves_002e"></a>
|
||||
<h3 class="section">4.8 I’m using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves.<span class="pull-right"><a class="anchor hidden-xs" href="#I_0027m-using-FFmpeg-from-within-my-C-application-but-the-linker-complains-about-missing-symbols-from-the-libraries-themselves_002e" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-I_0027m-using-FFmpeg-from-within-my-C-application-but-the-linker-complains-about-missing-symbols-from-the-libraries-themselves_002e" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg builds static libraries by default. In static libraries, dependencies
|
||||
are not handled. That has two consequences. First, you must specify the
|
||||
libraries in dependency order: <code>-lavdevice</code> must come before
|
||||
<code>-lavformat</code>, <code>-lavutil</code> must come after everything else, etc.
|
||||
Second, external libraries that are used in FFmpeg have to be specified too.
|
||||
</p>
|
||||
<p>An easy way to get the full list of required libraries in dependency order
|
||||
is to use <code>pkg-config</code>.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
|
||||
</pre></div>
|
||||
|
||||
<p>See <samp>doc/example/Makefile</samp> and <samp>doc/example/pc-uninstalled</samp> for
|
||||
more details.
|
||||
</p>
|
||||
<a name="I_0027m-using-FFmpeg-from-within-my-C_002b_002b-application-but-the-linker-complains-about-missing-symbols-which-seem-to-be-available_002e"></a>
|
||||
<h3 class="section">4.9 I’m using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available.<span class="pull-right"><a class="anchor hidden-xs" href="#I_0027m-using-FFmpeg-from-within-my-C_002b_002b-application-but-the-linker-complains-about-missing-symbols-which-seem-to-be-available_002e" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-I_0027m-using-FFmpeg-from-within-my-C_002b_002b-application-but-the-linker-complains-about-missing-symbols-which-seem-to-be-available_002e" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg is a pure C project, so to use the libraries within your C++ application
|
||||
you need to explicitly state that you are using a C library. You can do this by
|
||||
encompassing your FFmpeg includes using <code>extern "C"</code>.
|
||||
</p>
|
||||
<p>See <a href="http://www.parashift.com/c++-faq-lite/mixing-c-and-cpp.html#faq-32.3">http://www.parashift.com/c++-faq-lite/mixing-c-and-cpp.html#faq-32.3</a>
|
||||
</p>
|
||||
<a name="I_0027m-using-libavutil-from-within-my-C_002b_002b-application-but-the-compiler-complains-about-_0027UINT64_005fC_0027-was-not-declared-in-this-scope"></a>
|
||||
<h3 class="section">4.10 I’m using libavutil from within my C++ application but the compiler complains about ’UINT64_C’ was not declared in this scope<span class="pull-right"><a class="anchor hidden-xs" href="#I_0027m-using-libavutil-from-within-my-C_002b_002b-application-but-the-compiler-complains-about-_0027UINT64_005fC_0027-was-not-declared-in-this-scope" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-I_0027m-using-libavutil-from-within-my-C_002b_002b-application-but-the-compiler-complains-about-_0027UINT64_005fC_0027-was-not-declared-in-this-scope" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg is a pure C project using C99 math features; in order to enable C++
to use them, you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS.
</p>
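<p>For example (the file name is a placeholder):
</p>
<div class="example">
<pre class="example">g++ -D__STDC_CONSTANT_MACROS $(pkg-config --cflags libavutil) -c myapp.cpp
</pre></div>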
|
||||
<a name="I-have-a-file-in-memory-_002f-a-API-different-from-_002aopen_002f_002aread_002f-libc-how-do-I-use-it-with-libavformat_003f"></a>
|
||||
<h3 class="section">4.11 I have a file in memory / a API different from *open/*read/ libc how do I use it with libavformat?<span class="pull-right"><a class="anchor hidden-xs" href="#I-have-a-file-in-memory-_002f-a-API-different-from-_002aopen_002f_002aread_002f-libc-how-do-I-use-it-with-libavformat_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-I-have-a-file-in-memory-_002f-a-API-different-from-_002aopen_002f_002aread_002f-libc-how-do-I-use-it-with-libavformat_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>You have to create a custom AVIOContext using <code>avio_alloc_context</code>;
see <samp>libavformat/aviobuf.c</samp> in FFmpeg and <samp>libmpdemux/demux_lavf.c</samp> in the MPlayer or MPlayer2 sources.
</p>
|
||||
<a name="Where-is-the-documentation-about-ffv1_002c-msmpeg4_002c-asv1_002c-4xm_003f"></a>
|
||||
<h3 class="section">4.12 Where is the documentation about ffv1, msmpeg4, asv1, 4xm?<span class="pull-right"><a class="anchor hidden-xs" href="#Where-is-the-documentation-about-ffv1_002c-msmpeg4_002c-asv1_002c-4xm_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Where-is-the-documentation-about-ffv1_002c-msmpeg4_002c-asv1_002c-4xm_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>See <a href="http://www.ffmpeg.org/~michael/">http://www.ffmpeg.org/~michael/</a>.
</p>
|
||||
<a name="How-do-I-feed-H_002e263_002dRTP-_0028and-other-codecs-in-RTP_0029-to-libavcodec_003f"></a>
|
||||
<h3 class="section">4.13 How do I feed H.263-RTP (and other codecs in RTP) to libavcodec?<span class="pull-right"><a class="anchor hidden-xs" href="#How-do-I-feed-H_002e263_002dRTP-_0028and-other-codecs-in-RTP_0029-to-libavcodec_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-How-do-I-feed-H_002e263_002dRTP-_0028and-other-codecs-in-RTP_0029-to-libavcodec_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Even if peculiar since it is network oriented, RTP is a container like any
|
||||
other. You have to <em>demux</em> RTP before feeding the payload to libavcodec.
|
||||
In this specific case please look at RFC 4629 to see how it should be done.
|
||||
</p>
|
||||
<a name="AVStream_002er_005fframe_005frate-is-wrong_002c-it-is-much-larger-than-the-frame-rate_002e"></a>
|
||||
<h3 class="section">4.14 AVStream.r_frame_rate is wrong, it is much larger than the frame rate.<span class="pull-right"><a class="anchor hidden-xs" href="#AVStream_002er_005fframe_005frate-is-wrong_002c-it-is-much-larger-than-the-frame-rate_002e" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-AVStream_002er_005fframe_005frate-is-wrong_002c-it-is-much-larger-than-the-frame-rate_002e" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p><code>r_frame_rate</code> is NOT the average frame rate, it is the smallest frame rate
|
||||
that can accurately represent all timestamps. So no, it is not
|
||||
wrong if it is larger than the average!
|
||||
For example, if you have mixed 25 and 30 fps content, then <code>r_frame_rate</code>
|
||||
will be 150 (it is the least common multiple).
|
||||
If you are looking for the average frame rate, see <code>AVStream.avg_frame_rate</code>.
|
||||
</p>
|
||||
<a name="Why-is-make-fate-not-running-all-tests_003f"></a>
|
||||
<h3 class="section">4.15 Why is <code>make fate</code> not running all tests?<span class="pull-right"><a class="anchor hidden-xs" href="#Why-is-make-fate-not-running-all-tests_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Why-is-make-fate-not-running-all-tests_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Make sure you have the fate-suite samples, and that the <code>SAMPLES</code> Make variable,
the <code>FATE_SAMPLES</code> environment variable, or the <code>--samples</code>
<code>configure</code> option is set to the right path.
</p>
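<p>For example (the path is a placeholder):
</p>
<div class="example">
<pre class="example">make fate SAMPLES=/path/to/fate-suite/
</pre></div>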
|
||||
<a name="Why-is-make-fate-not-finding-the-samples_003f"></a>
|
||||
<h3 class="section">4.16 Why is <code>make fate</code> not finding the samples?<span class="pull-right"><a class="anchor hidden-xs" href="#Why-is-make-fate-not-finding-the-samples_003f" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Why-is-make-fate-not-finding-the-samples_003f" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Do you happen to have a <code>~</code> character in the samples path to indicate a
|
||||
home directory? The value is used in ways where the shell cannot expand it,
|
||||
causing FATE to not find files. Just replace <code>~</code> by the full path.
|
||||
</p>
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
286
Externals/ffmpeg/dev/doc/fate.html
vendored
286
Externals/ffmpeg/dev/doc/fate.html
vendored
@ -1,286 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
FFmpeg Automated Testing Environment
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
FFmpeg Automated Testing Environment
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="Top"></a>
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Introduction" href="#Introduction">1 Introduction</a></li>
|
||||
<li><a name="toc-Using-FATE-from-your-FFmpeg-source-directory" href="#Using-FATE-from-your-FFmpeg-source-directory">2 Using FATE from your FFmpeg source directory</a></li>
|
||||
<li><a name="toc-Submitting-the-results-to-the-FFmpeg-result-aggregation-server" href="#Submitting-the-results-to-the-FFmpeg-result-aggregation-server">3 Submitting the results to the FFmpeg result aggregation server</a></li>
|
||||
<li><a name="toc-FATE-makefile-targets-and-variables" href="#FATE-makefile-targets-and-variables">4 FATE makefile targets and variables</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Makefile-targets" href="#Makefile-targets">4.1 Makefile targets</a></li>
|
||||
<li><a name="toc-Makefile-variables" href="#Makefile-variables">4.2 Makefile variables</a></li>
|
||||
<li><a name="toc-Examples" href="#Examples">4.3 Examples</a></li>
|
||||
</ul></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Introduction"></a>
|
||||
<h2 class="chapter">1 Introduction<span class="pull-right"><a class="anchor hidden-xs" href="#Introduction" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Introduction" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>FATE is an extended regression suite on the client-side and a means
|
||||
for results aggregation and presentation on the server-side.
|
||||
</p>
|
||||
<p>The first part of this document explains how you can use FATE from
|
||||
your FFmpeg source directory to test your ffmpeg binary. The second
|
||||
part describes how you can run FATE to submit the results to FFmpeg’s
|
||||
FATE server.
|
||||
</p>
|
||||
<p>In any case, you can have a look at the publicly viewable FATE results
by visiting this website:
</p>
|
||||
<p><a href="http://fate.ffmpeg.org/">http://fate.ffmpeg.org/</a>
|
||||
</p>
|
||||
<p>This is especially recommended for all people contributing source
code to FFmpeg, as it makes it easy to see whether some test on some platform
broke with their recent contribution. This usually happens on the platforms
the developers could not test on.
</p>
|
||||
<p>The second part of this document describes how you can run FATE to
|
||||
submit your results to FFmpeg’s FATE server. If you want to submit your
|
||||
results be sure to check that your combination of CPU, OS and compiler
|
||||
is not already listed on the above mentioned website.
|
||||
</p>
|
||||
<p>In the third part you can find a comprehensive listing of FATE makefile
|
||||
targets and variables.
|
||||
</p>
|
||||
|
||||
<a name="Using-FATE-from-your-FFmpeg-source-directory"></a>
|
||||
<h2 class="chapter">2 Using FATE from your FFmpeg source directory<span class="pull-right"><a class="anchor hidden-xs" href="#Using-FATE-from-your-FFmpeg-source-directory" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Using-FATE-from-your-FFmpeg-source-directory" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>If you want to run FATE on your machine you need to have the samples
|
||||
in place. You can get the samples via the build target fate-rsync.
|
||||
Use this command from the top-level source directory:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">make fate-rsync SAMPLES=fate-suite/
|
||||
make fate SAMPLES=fate-suite/
|
||||
</pre></div>
|
||||
|
||||
<p>The above commands set the samples location by passing a makefile
|
||||
variable via command line. It is also possible to set the samples
|
||||
location at source configuration time by invoking configure with
|
||||
‘--samples=<path to the samples directory>’. Afterwards you can
|
||||
invoke the makefile targets without setting the SAMPLES makefile
|
||||
variable. This is illustrated by the following commands:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">./configure --samples=fate-suite/
|
||||
make fate-rsync
|
||||
make fate
|
||||
</pre></div>
|
||||
|
||||
<p>Yet another way to tell FATE about the location of the sample
|
||||
directory is by making sure the environment variable FATE_SAMPLES
|
||||
contains the path to your samples directory. This can be achieved
|
||||
by e.g. putting that variable in your shell profile or by setting
|
||||
it in your interactive session.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">FATE_SAMPLES=fate-suite/ make fate
|
||||
</pre></div>
|
||||
|
||||
<div class="info">
|
||||
<p>Do not put a ’~’ character in the samples path to indicate a home
|
||||
directory. Because of shell nuances, this will cause FATE to fail.
|
||||
</p></div>
|
||||
<p>To use a custom wrapper to run the test, pass <samp>--target-exec</samp> to
|
||||
<code>configure</code> or set the <var>TARGET_EXEC</var> Make variable.
|
||||
</p>
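<p>As an illustrative sketch (the wrapper command below is an assumption about a
local cross-testing setup, not something prescribed by this document), the two
forms might look like:
</p>
<div class="example">
<pre class="example">./configure --samples=fate-suite/ --target-exec='qemu-arm -L /usr/arm-linux-gnueabi'
make fate TARGET_EXEC='qemu-arm -L /usr/arm-linux-gnueabi'
</pre></div>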
|
||||
|
||||
<a name="Submitting-the-results-to-the-FFmpeg-result-aggregation-server"></a>
|
||||
<h2 class="chapter">3 Submitting the results to the FFmpeg result aggregation server<span class="pull-right"><a class="anchor hidden-xs" href="#Submitting-the-results-to-the-FFmpeg-result-aggregation-server" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Submitting-the-results-to-the-FFmpeg-result-aggregation-server" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>To submit your results to the server you should run fate through the
|
||||
shell script <samp>tests/fate.sh</samp> from the FFmpeg sources. This script needs
|
||||
to be invoked with a configuration file as its first argument.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">tests/fate.sh /path/to/fate_config
|
||||
</pre></div>
|
||||
|
||||
<p>A configuration file template with comments describing the individual
|
||||
configuration variables can be found at <samp>doc/fate_config.sh.template</samp>.
|
||||
</p>
|
||||
<p>The mentioned configuration template is also available here:
|
||||
</p><pre class="verbatim">slot= # some unique identifier
|
||||
repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
|
||||
samples= # path to samples directory
|
||||
workdir= # directory in which to do all the work
|
||||
#fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
|
||||
comment= # optional description
|
||||
build_only= # set to "yes" for a compile-only instance that skips tests
|
||||
|
||||
# the following are optional and map to configure options
|
||||
arch=
|
||||
cpu=
|
||||
cross_prefix=
|
||||
as=
|
||||
cc=
|
||||
ld=
|
||||
target_os=
|
||||
sysroot=
|
||||
target_exec=
|
||||
target_path=
|
||||
target_samples=
|
||||
extra_cflags=
|
||||
extra_ldflags=
|
||||
extra_libs=
|
||||
extra_conf= # extra configure options not covered above
|
||||
|
||||
#make= # name of GNU make if not 'make'
|
||||
makeopts= # extra options passed to 'make'
|
||||
#tar= # command to create a tar archive from its arguments on stdout,
|
||||
# defaults to 'tar c'
|
||||
</pre>
|
||||
<p>Create a configuration that suits your needs, based on the configuration
template. The ‘slot’ configuration variable can be any string that is not
yet used, but it is suggested that you name it adhering to the following
pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
itself will be sourced in a shell script, therefore all shell features may
be used. This enables you to set up the environment as you need it for your
build.
</p>
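<p>As a purely illustrative sketch (the values are hypothetical), a slot entry
following that pattern could look like:
</p>
<div class="example">
<pre class="example">slot=x86_64-linux-gcc-4.9
</pre></div>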
|
||||
<p>For your first test runs the ‘fate_recv’ variable should be empty or
|
||||
commented out. This will run everything as normal except that it will omit
|
||||
the submission of the results to the server. The following files should be
|
||||
present in $workdir as specified in the configuration file:
|
||||
</p>
|
||||
<ul>
|
||||
<li> configure.log
|
||||
</li><li> compile.log
|
||||
</li><li> test.log
|
||||
</li><li> report
|
||||
</li><li> version
|
||||
</li></ul>
|
||||
|
||||
<p>When you have everything working properly you can create an SSH key pair
|
||||
and send the public key to the FATE server administrator who can be contacted
|
||||
at the email address <a href="mailto:fate-admin@ffmpeg.org">fate-admin@ffmpeg.org</a>.
|
||||
</p>
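<p>A hypothetical sketch of creating such a key pair (the file name and comment
are arbitrary choices):
</p>
<div class="example">
<pre class="example">ssh-keygen -t rsa -f ~/.ssh/fate_rsa -C "x86_64-linux-gcc-4.9"
</pre></div>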
|
||||
<p>Configure your SSH client to use public key authentication with that key
|
||||
when connecting to the FATE server. Also do not forget to check the identity
|
||||
of the server and to accept its host key. This can usually be achieved by
|
||||
running your SSH client manually and killing it after you accepted the key.
|
||||
The FATE server’s fingerprint is:
|
||||
</p>
|
||||
<dl compact="compact">
|
||||
<dt><samp>RSA</samp></dt>
|
||||
<dd><p>d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51
|
||||
</p></dd>
|
||||
<dt><samp>ECDSA</samp></dt>
|
||||
<dd><p>76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
<p>If you have problems connecting to the FATE server, it may help to try out
|
||||
the <code>ssh</code> command with one or more <samp>-v</samp> options. You should
|
||||
get detailed output concerning your SSH configuration and the authentication
|
||||
process.
|
||||
</p>
|
||||
<p>The only thing left is to automate the execution of the fate.sh script and
|
||||
the synchronisation of the samples directory.
|
||||
</p>
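<p>A minimal, hypothetical way to do that (paths and schedule are assumptions about
the local setup) is a cron entry that periodically refreshes the samples and
re-runs the script:
</p>
<div class="example">
<pre class="example"># crontab entry, e.g. every six hours
0 */6 * * * make -C /path/to/ffmpeg fate-rsync && /path/to/ffmpeg/tests/fate.sh /path/to/fate_config
</pre></div>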
|
||||
|
||||
<a name="FATE-makefile-targets-and-variables"></a>
|
||||
<h2 class="chapter">4 FATE makefile targets and variables<span class="pull-right"><a class="anchor hidden-xs" href="#FATE-makefile-targets-and-variables" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-FATE-makefile-targets-and-variables" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<a name="Makefile-targets"></a>
|
||||
<h3 class="section">4.1 Makefile targets<span class="pull-right"><a class="anchor hidden-xs" href="#Makefile-targets" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Makefile-targets" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<dl compact="compact">
|
||||
<dt><samp>fate-rsync</samp></dt>
|
||||
<dd><p>Download/synchronize sample files to the configured samples directory.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>fate-list</samp></dt>
|
||||
<dd><p>List all fate/regression test targets.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>fate</samp></dt>
|
||||
<dd><p>Run the FATE test suite (requires the fate-suite dataset).
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
<a name="Makefile-variables"></a>
|
||||
<h3 class="section">4.2 Makefile variables<span class="pull-right"><a class="anchor hidden-xs" href="#Makefile-variables" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Makefile-variables" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<dl compact="compact">
|
||||
<dt><samp>V</samp></dt>
|
||||
<dd><p>Verbosity level, can be set to 0, 1 or 2.
|
||||
</p><ul>
|
||||
<li> 0: show just the test arguments
|
||||
</li><li> 1: show just the command used in the test
|
||||
</li><li> 2: show everything
|
||||
</li></ul>
|
||||
|
||||
</dd>
|
||||
<dt><samp>SAMPLES</samp></dt>
<dd><p>Specify or override the path to the FATE samples at make time; it has a
meaning only while running the regression tests.
</p>
</dd>
<dt><samp>THREADS</samp></dt>
<dd><p>Specify how many threads to use while running regression tests; it is
quite useful to detect thread-related regressions.
</p>
</dd>
<dt><samp>THREAD_TYPE</samp></dt>
<dd><p>Specify which threading strategy to test, either <var>slice</var> or <var>frame</var>;
by default <var>slice+frame</var> is used.
</p>
|
||||
</dd>
|
||||
<dt><samp>CPUFLAGS</samp></dt>
|
||||
<dd><p>Specify CPU flags.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>TARGET_EXEC</samp></dt>
|
||||
<dd><p>Specify or override the wrapper used to run the tests.
|
||||
The <var>TARGET_EXEC</var> option provides a way to run FATE wrapped in
|
||||
<code>valgrind</code>, <code>qemu-user</code> or <code>wine</code> or on remote targets
|
||||
through <code>ssh</code>.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>GEN</samp></dt>
|
||||
<dd><p>Set to <var>1</var> to generate the missing or mismatched references.
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
<a name="Examples"></a>
|
||||
<h3 class="section">4.3 Examples<span class="pull-right"><a class="anchor hidden-xs" href="#Examples" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Examples" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
|
||||
</pre></div>
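<p>A further sketch combining the variables described above (the thread count and
threading strategy are arbitrary example values):
</p>
<div class="example">
<pre class="example">make fate THREADS=4 THREAD_TYPE=frame
make fate GEN=1
</pre></div>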
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
27303
Externals/ffmpeg/dev/doc/ffmpeg-all.html
vendored
27303
Externals/ffmpeg/dev/doc/ffmpeg-all.html
vendored
File diff suppressed because it is too large
Load Diff
@ -1,261 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
FFmpeg Bitstream Filters Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
FFmpeg Bitstream Filters Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Description" href="#Description">1 Description</a></li>
|
||||
<li><a name="toc-Bitstream-Filters" href="#Bitstream-Filters">2 Bitstream Filters</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-aac_005fadtstoasc" href="#aac_005fadtstoasc">2.1 aac_adtstoasc</a></li>
|
||||
<li><a name="toc-chomp" href="#chomp">2.2 chomp</a></li>
|
||||
<li><a name="toc-dump_005fextra" href="#dump_005fextra">2.3 dump_extra</a></li>
|
||||
<li><a name="toc-h264_005fmp4toannexb" href="#h264_005fmp4toannexb">2.4 h264_mp4toannexb</a></li>
|
||||
<li><a name="toc-imxdump" href="#imxdump">2.5 imxdump</a></li>
|
||||
<li><a name="toc-mjpeg2jpeg" href="#mjpeg2jpeg">2.6 mjpeg2jpeg</a></li>
|
||||
<li><a name="toc-mjpega_005fdump_005fheader" href="#mjpega_005fdump_005fheader">2.7 mjpega_dump_header</a></li>
|
||||
<li><a name="toc-movsub" href="#movsub">2.8 movsub</a></li>
|
||||
<li><a name="toc-mp3_005fheader_005fdecompress" href="#mp3_005fheader_005fdecompress">2.9 mp3_header_decompress</a></li>
|
||||
<li><a name="toc-noise" href="#noise">2.10 noise</a></li>
|
||||
<li><a name="toc-remove_005fextra" href="#remove_005fextra">2.11 remove_extra</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-See-Also" href="#See-Also">3 See Also</a></li>
|
||||
<li><a name="toc-Authors" href="#Authors">4 Authors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Description"></a>
|
||||
<h2 class="chapter">1 Description<span class="pull-right"><a class="anchor hidden-xs" href="#Description" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Description" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>This document describes the bitstream filters provided by the
|
||||
libavcodec library.
|
||||
</p>
|
||||
<p>A bitstream filter operates on the encoded stream data, and performs
|
||||
bitstream level modifications without performing decoding.
|
||||
</p>
|
||||
|
||||
<a name="Bitstream-Filters"></a>
|
||||
<h2 class="chapter">2 Bitstream Filters<span class="pull-right"><a class="anchor hidden-xs" href="#Bitstream-Filters" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Bitstream-Filters" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>When you configure your FFmpeg build, all the supported bitstream
|
||||
filters are enabled by default. You can list all available ones using
|
||||
the configure option <code>--list-bsfs</code>.
|
||||
</p>
|
||||
<p>You can disable all the bitstream filters using the configure option
|
||||
<code>--disable-bsfs</code>, and selectively enable any bitstream filter using
|
||||
the option <code>--enable-bsf=BSF</code>, or you can disable a particular
|
||||
bitstream filter using the option <code>--disable-bsf=BSF</code>.
|
||||
</p>
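<p>For instance, a build could disable every bitstream filter and then re-enable
a single one; this is only a sketch, the chosen filter name is an example:
</p>
<div class="example">
<pre class="example">./configure --disable-bsfs --enable-bsf=h264_mp4toannexb
</pre></div>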
|
||||
<p>The option <code>-bsfs</code> of the ff* tools will display the list of
|
||||
all the supported bitstream filters included in your build.
|
||||
</p>
|
||||
<p>The ff* tools have a -bsf option applied per stream, taking a
|
||||
comma-separated list of filters, whose parameters follow the filter
|
||||
name after a ’=’.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
|
||||
</pre></div>
|
||||
|
||||
<p>Below is a description of the currently available bitstream filters,
|
||||
with their parameters, if any.
|
||||
</p>
|
||||
<a name="aac_005fadtstoasc"></a>
|
||||
<h3 class="section">2.1 aac_adtstoasc<span class="pull-right"><a class="anchor hidden-xs" href="#aac_005fadtstoasc" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-aac_005fadtstoasc" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
|
||||
bitstream filter.
|
||||
</p>
|
||||
<p>This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
|
||||
ADTS header and removes the ADTS header.
|
||||
</p>
|
||||
<p>This is required for example when copying an AAC stream from a raw
|
||||
ADTS AAC container to a FLV or a MOV/MP4 file.
|
||||
</p>
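<p>A typical invocation, sketched here with placeholder file names, copies an ADTS
AAC stream into an MP4 container while applying the filter to the audio stream:
</p>
<div class="example">
<pre class="example">ffmpeg -i INPUT.aac -c:a copy -bsf:a aac_adtstoasc OUTPUT.mp4
</pre></div>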
|
||||
<a name="chomp"></a>
|
||||
<h3 class="section">2.2 chomp<span class="pull-right"><a class="anchor hidden-xs" href="#chomp" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-chomp" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Remove zero padding at the end of a packet.
|
||||
</p>
|
||||
<a name="dump_005fextra"></a>
|
||||
<h3 class="section">2.3 dump_extra<span class="pull-right"><a class="anchor hidden-xs" href="#dump_005fextra" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-dump_005fextra" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Add extradata to the beginning of the filtered packets.
|
||||
</p>
|
||||
<p>The additional argument specifies which packets should be filtered.
|
||||
It accepts the values:
|
||||
</p><dl compact="compact">
|
||||
<dt>‘<samp>a</samp>’</dt>
|
||||
<dd><p>add extradata to all key packets, but only if <var>local_header</var> is
|
||||
set in the <samp>flags2</samp> codec context field
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>k</samp>’</dt>
|
||||
<dd><p>add extradata to all key packets
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>e</samp>’</dt>
|
||||
<dd><p>add extradata to all packets
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
<p>If not specified, ‘<samp>k</samp>’ is assumed.
|
||||
</p>
|
||||
<p>For example the following <code>ffmpeg</code> command forces a global
|
||||
header (thus disabling individual packet headers) in the H.264 packets
|
||||
generated by the <code>libx264</code> encoder, but corrects them by adding
|
||||
the header stored in extradata to the key packets:
|
||||
</p><div class="example">
|
||||
<pre class="example">ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
|
||||
</pre></div>
|
||||
|
||||
<a name="h264_005fmp4toannexb"></a>
|
||||
<h3 class="section">2.4 h264_mp4toannexb<span class="pull-right"><a class="anchor hidden-xs" href="#h264_005fmp4toannexb" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-h264_005fmp4toannexb" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Convert an H.264 bitstream from length prefixed mode to start code
|
||||
prefixed mode (as defined in the Annex B of the ITU-T H.264
|
||||
specification).
|
||||
</p>
|
||||
<p>This is required by some streaming formats, typically the MPEG-2
|
||||
transport stream format ("mpegts").
|
||||
</p>
|
||||
<p>For example to remux an MP4 file containing an H.264 stream to mpegts
|
||||
format with <code>ffmpeg</code>, you can use the command:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
|
||||
</pre></div>
|
||||
|
||||
<a name="imxdump"></a>
|
||||
<h3 class="section">2.5 imxdump<span class="pull-right"><a class="anchor hidden-xs" href="#imxdump" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-imxdump" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Modifies the bitstream to fit in MOV and to be usable by the Final Cut
|
||||
Pro decoder. This filter only applies to the mpeg2video codec, and is
|
||||
likely not needed for Final Cut Pro 7 and newer with the appropriate
|
||||
<samp>-tag:v</samp>.
|
||||
</p>
|
||||
<p>For example, to remux 30 MB/sec NTSC IMX to MOV:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
|
||||
</pre></div>
|
||||
|
||||
<a name="mjpeg2jpeg"></a>
|
||||
<h3 class="section">2.6 mjpeg2jpeg<span class="pull-right"><a class="anchor hidden-xs" href="#mjpeg2jpeg" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-mjpeg2jpeg" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
|
||||
</p>
|
||||
<p>MJPEG is a video codec wherein each video frame is essentially a
|
||||
JPEG image. The individual frames can be extracted without loss,
|
||||
e.g. by
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
|
||||
</pre></div>
|
||||
|
||||
<p>Unfortunately, these chunks are incomplete JPEG images, because
|
||||
they lack the DHT segment required for decoding. Quoting from
|
||||
<a href="http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml">http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml</a>:
|
||||
</p>
|
||||
<p>Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
|
||||
commented that "MJPEG, or at least the MJPEG in AVIs having the
|
||||
MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
|
||||
Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
|
||||
and it must use basic Huffman encoding, not arithmetic or
|
||||
progressive. . . . You can indeed extract the MJPEG frames and
|
||||
decode them with a regular JPEG decoder, but you have to prepend
|
||||
the DHT segment to them, or else the decoder won’t have any idea
|
||||
how to decompress the data. The exact table necessary is given in
|
||||
the OpenDML spec."
|
||||
</p>
|
||||
<p>This bitstream filter patches the header of frames extracted from an MJPEG
|
||||
stream (carrying the AVI1 header ID and lacking a DHT segment) to
|
||||
produce fully qualified JPEG images.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
|
||||
exiftran -i -9 frame*.jpg
|
||||
ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
|
||||
</pre></div>
|
||||
|
||||
<a name="mjpega_005fdump_005fheader"></a>
|
||||
<h3 class="section">2.7 mjpega_dump_header<span class="pull-right"><a class="anchor hidden-xs" href="#mjpega_005fdump_005fheader" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-mjpega_005fdump_005fheader" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<a name="movsub"></a>
|
||||
<h3 class="section">2.8 movsub<span class="pull-right"><a class="anchor hidden-xs" href="#movsub" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-movsub" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<a name="mp3_005fheader_005fdecompress"></a>
|
||||
<h3 class="section">2.9 mp3_header_decompress<span class="pull-right"><a class="anchor hidden-xs" href="#mp3_005fheader_005fdecompress" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-mp3_005fheader_005fdecompress" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<a name="noise"></a>
|
||||
<h3 class="section">2.10 noise<span class="pull-right"><a class="anchor hidden-xs" href="#noise" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-noise" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Damages the contents of packets without damaging the container. Can be
|
||||
used for fuzzing or testing error resilience/concealment.
|
||||
</p>
|
||||
<p>Parameters:
A numeral string, whose value determines how often output bytes are
modified. Values below or equal to 0 are forbidden; the lower the value,
the more frequently bytes are modified, with 1 meaning
every byte is modified.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
|
||||
</pre></div>
|
||||
<p>applies the modification to every byte.
|
||||
</p>
|
||||
<a name="remove_005fextra"></a>
|
||||
<h3 class="section">2.11 remove_extra<span class="pull-right"><a class="anchor hidden-xs" href="#remove_005fextra" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-remove_005fextra" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
|
||||
<a name="See-Also"></a>
|
||||
<h2 class="chapter">3 See Also<span class="pull-right"><a class="anchor hidden-xs" href="#See-Also" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-See-Also" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p><a href="ffmpeg.html">ffmpeg</a>, <a href="ffplay.html">ffplay</a>, <a href="ffprobe.html">ffprobe</a>, <a href="ffserver.html">ffserver</a>,
|
||||
<a href="libavcodec.html">libavcodec</a>
|
||||
</p>
|
||||
|
||||
<a name="Authors"></a>
|
||||
<h2 class="chapter">4 Authors<span class="pull-right"><a class="anchor hidden-xs" href="#Authors" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Authors" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg developers.
|
||||
</p>
|
||||
<p>For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
<code>git log</code> in the FFmpeg source directory, or browsing the
|
||||
online repository at <a href="http://source.ffmpeg.org">http://source.ffmpeg.org</a>.
|
||||
</p>
|
||||
<p>Maintainers for the specific components are listed in the file
|
||||
<samp>MAINTAINERS</samp> in the source code tree.
|
||||
</p>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
4474
Externals/ffmpeg/dev/doc/ffmpeg-codecs.html
vendored
4474
Externals/ffmpeg/dev/doc/ffmpeg-codecs.html
vendored
File diff suppressed because it is too large
Load Diff
1810
Externals/ffmpeg/dev/doc/ffmpeg-devices.html
vendored
1810
Externals/ffmpeg/dev/doc/ffmpeg-devices.html
vendored
File diff suppressed because it is too large
Load Diff
13382
Externals/ffmpeg/dev/doc/ffmpeg-filters.html
vendored
13382
Externals/ffmpeg/dev/doc/ffmpeg-filters.html
vendored
File diff suppressed because it is too large
Load Diff
2311
Externals/ffmpeg/dev/doc/ffmpeg-formats.html
vendored
2311
Externals/ffmpeg/dev/doc/ffmpeg-formats.html
vendored
File diff suppressed because it is too large
Load Diff
1545
Externals/ffmpeg/dev/doc/ffmpeg-protocols.html
vendored
1545
Externals/ffmpeg/dev/doc/ffmpeg-protocols.html
vendored
File diff suppressed because it is too large
Load Diff
357
Externals/ffmpeg/dev/doc/ffmpeg-resampler.html
vendored
357
Externals/ffmpeg/dev/doc/ffmpeg-resampler.html
vendored
@ -1,357 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
FFmpeg Resampler Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
FFmpeg Resampler Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Description" href="#Description">1 Description</a></li>
|
||||
<li><a name="toc-Resampler-Options" href="#Resampler-Options">2 Resampler Options</a></li>
|
||||
<li><a name="toc-See-Also" href="#See-Also">3 See Also</a></li>
|
||||
<li><a name="toc-Authors" href="#Authors">4 Authors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Description"></a>
|
||||
<h2 class="chapter">1 Description<span class="pull-right"><a class="anchor hidden-xs" href="#Description" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Description" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg resampler provides a high-level interface to the
|
||||
libswresample library audio resampling utilities. In particular it
|
||||
allows one to perform audio resampling, audio channel layout rematrixing,
and audio format and packing layout conversion.
|
||||
</p>
|
||||
|
||||
<a name="Resampler-Options"></a>
|
||||
<h2 class="chapter">2 Resampler Options<span class="pull-right"><a class="anchor hidden-xs" href="#Resampler-Options" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Resampler-Options" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The audio resampler supports the following named options.
|
||||
</p>
|
||||
<p>Options may be set by specifying -<var>option</var> <var>value</var> in the
|
||||
FFmpeg tools, <var>option</var>=<var>value</var> for the aresample filter,
|
||||
by setting the value explicitly in the
|
||||
<code>SwrContext</code> options or using the <samp>libavutil/opt.h</samp> API for
|
||||
programmatic use.
|
||||
</p>
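<p>For example, a couple of these options can be passed to the aresample filter on
the ffmpeg command line; this is a sketch and the file names are placeholders:
</p>
<div class="example">
<pre class="example">ffmpeg -i INPUT.wav -af aresample=out_sample_rate=48000:dither_method=triangular_hp OUTPUT.wav
</pre></div>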
|
||||
<dl compact="compact">
|
||||
<dt><samp>ich, in_channel_count</samp></dt>
|
||||
<dd><p>Set the number of input channels. Default value is 0. Setting this
|
||||
value is not mandatory if the corresponding channel layout
|
||||
<samp>in_channel_layout</samp> is set.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>och, out_channel_count</samp></dt>
|
||||
<dd><p>Set the number of output channels. Default value is 0. Setting this
|
||||
value is not mandatory if the corresponding channel layout
|
||||
<samp>out_channel_layout</samp> is set.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>uch, used_channel_count</samp></dt>
|
||||
<dd><p>Set the number of used input channels. Default value is 0. This option is
|
||||
only used for special remapping.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>isr, in_sample_rate</samp></dt>
|
||||
<dd><p>Set the input sample rate. Default value is 0.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>osr, out_sample_rate</samp></dt>
|
||||
<dd><p>Set the output sample rate. Default value is 0.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>isf, in_sample_fmt</samp></dt>
|
||||
<dd><p>Specify the input sample format. It is set by default to <code>none</code>.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>osf, out_sample_fmt</samp></dt>
|
||||
<dd><p>Specify the output sample format. It is set by default to <code>none</code>.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>tsf, internal_sample_fmt</samp></dt>
|
||||
<dd><p>Set the internal sample format. Default value is <code>none</code>.
|
||||
This will automatically be chosen when it is not explicitly set.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>icl, in_channel_layout</samp></dt>
|
||||
<dt><samp>ocl, out_channel_layout</samp></dt>
|
||||
<dd><p>Set the input/output channel layout.
|
||||
</p>
|
||||
<p>See <a href="ffmpeg-utils.html#channel-layout-syntax">(ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual</a>
|
||||
for the required syntax.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>clev, center_mix_level</samp></dt>
|
||||
<dd><p>Set the center mix level. It is a value expressed in deciBel, and must be
|
||||
in the interval [-32,32].
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>slev, surround_mix_level</samp></dt>
|
||||
<dd><p>Set the surround mix level. It is a value expressed in deciBel, and must
|
||||
be in the interval [-32,32].
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>lfe_mix_level</samp></dt>
|
||||
<dd><p>Set LFE mix into non LFE level. It is used when there is a LFE input but no
|
||||
LFE output. It is a value expressed in deciBel, and must
|
||||
be in the interval [-32,32].
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>rmvol, rematrix_volume</samp></dt>
|
||||
<dd><p>Set rematrix volume. Default value is 1.0.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>rematrix_maxval</samp></dt>
|
||||
<dd><p>Set maximum output value for rematrixing.
This can be used to prevent clipping vs. preventing volume reduction.
A value of 1.0 prevents clipping.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>flags, swr_flags</samp></dt>
|
||||
<dd><p>Set flags used by the converter. Default value is 0.
|
||||
</p>
|
||||
<p>It supports the following individual flags:
|
||||
</p><dl compact="compact">
|
||||
<dt><samp>res</samp></dt>
|
||||
<dd><p>force resampling, this flag forces resampling to be used even when the
|
||||
input and output sample rates match.
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
</dd>
|
||||
<dt><samp>dither_scale</samp></dt>
|
||||
<dd><p>Set the dither scale. Default value is 1.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>dither_method</samp></dt>
|
||||
<dd><p>Set dither method. Default value is 0.
|
||||
</p>
|
||||
<p>Supported values:
|
||||
</p><dl compact="compact">
|
||||
<dt>‘<samp>rectangular</samp>’</dt>
|
||||
<dd><p>select rectangular dither
|
||||
</p></dd>
|
||||
<dt>‘<samp>triangular</samp>’</dt>
|
||||
<dd><p>select triangular dither
|
||||
</p></dd>
|
||||
<dt>‘<samp>triangular_hp</samp>’</dt>
|
||||
<dd><p>select triangular dither with high pass
|
||||
</p></dd>
|
||||
<dt>‘<samp>lipshitz</samp>’</dt>
|
||||
<dd><p>select lipshitz noise shaping dither
|
||||
</p></dd>
|
||||
<dt>‘<samp>shibata</samp>’</dt>
|
||||
<dd><p>select shibata noise shaping dither
|
||||
</p></dd>
|
||||
<dt>‘<samp>low_shibata</samp>’</dt>
|
||||
<dd><p>select low shibata noise shaping dither
|
||||
</p></dd>
|
||||
<dt>‘<samp>high_shibata</samp>’</dt>
|
||||
<dd><p>select high shibata noise shaping dither
|
||||
</p></dd>
|
||||
<dt>‘<samp>f_weighted</samp>’</dt>
|
||||
<dd><p>select f-weighted noise shaping dither
|
||||
</p></dd>
|
||||
<dt>‘<samp>modified_e_weighted</samp>’</dt>
|
||||
<dd><p>select modified-e-weighted noise shaping dither
|
||||
</p></dd>
|
||||
<dt>‘<samp>improved_e_weighted</samp>’</dt>
|
||||
<dd><p>select improved-e-weighted noise shaping dither
|
||||
</p>
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
</dd>
|
||||
<dt><samp>resampler</samp></dt>
|
||||
<dd><p>Set resampling engine. Default value is swr.
|
||||
</p>
|
||||
<p>Supported values:
|
||||
</p><dl compact="compact">
|
||||
<dt>‘<samp>swr</samp>’</dt>
|
||||
<dd><p>select the native SW Resampler; filter options precision and cheby are not
|
||||
applicable in this case.
|
||||
</p></dd>
|
||||
<dt>‘<samp>soxr</samp>’</dt>
|
||||
<dd><p>select the SoX Resampler (where available); compensation and the filter options
filter_size, phase_shift, filter_type and kaiser_beta are not applicable in this
case.
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
</dd>
|
||||
<dt><samp>filter_size</samp></dt>
|
||||
<dd><p>For swr only, set resampling filter size, default value is 32.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>phase_shift</samp></dt>
|
||||
<dd><p>For swr only, set resampling phase shift, default value is 10, and must be in
|
||||
the interval [0,30].
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>linear_interp</samp></dt>
|
||||
<dd><p>Use Linear Interpolation if set to 1, default value is 0.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>cutoff</samp></dt>
|
||||
<dd><p>Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
|
||||
value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
|
||||
(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>precision</samp></dt>
|
||||
<dd><p>For soxr only, the precision in bits to which the resampled signal will be
|
||||
calculated. The default value of 20 (which, with suitable dithering, is
|
||||
appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
|
||||
value of 28 gives SoX’s ’Very High Quality’.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>cheby</samp></dt>
|
||||
<dd><p>For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
|
||||
approximation for ’irrational’ ratios. Default value is 0.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>async</samp></dt>
|
||||
<dd><p>For swr only, simple 1 parameter audio sync to timestamps using stretching,
|
||||
squeezing, filling and trimming. Setting this to 1 will enable filling and
trimming; larger values represent the maximum amount in samples that the data
|
||||
may be stretched or squeezed for each second.
|
||||
Default value is 0, thus no compensation is applied to make the samples match
|
||||
the audio timestamps.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>first_pts</samp></dt>
|
||||
<dd><p>For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
|
||||
This allows for padding/trimming at the start of stream. By default, no
|
||||
assumption is made about the first frame’s expected pts, so no padding or
|
||||
trimming is done. For example, this could be set to 0 to pad the beginning with
|
||||
silence if an audio stream starts after the video stream or to trim any samples
|
||||
with a negative pts due to encoder delay.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>min_comp</samp></dt>
|
||||
<dd><p>For swr only, set the minimum difference between timestamps and audio data (in
|
||||
seconds) to trigger stretching/squeezing/filling or trimming of the
|
||||
data to make it match the timestamps. The default is that
|
||||
stretching/squeezing/filling and trimming is disabled
|
||||
(<samp>min_comp</samp> = <code>FLT_MAX</code>).
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>min_hard_comp</samp></dt>
|
||||
<dd><p>For swr only, set the minimum difference between timestamps and audio data (in
|
||||
seconds) to trigger adding/dropping samples to make it match the
|
||||
timestamps. This option effectively is a threshold to select between
|
||||
hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
|
||||
all compensation is by default disabled through <samp>min_comp</samp>.
|
||||
The default is 0.1.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>comp_duration</samp></dt>
|
||||
<dd><p>For swr only, set duration (in seconds) over which data is stretched/squeezed
|
||||
to make it match the timestamps. Must be a non-negative double float value,
|
||||
default value is 1.0.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>max_soft_comp</samp></dt>
|
||||
<dd><p>For swr only, set maximum factor by which data is stretched/squeezed to make it
|
||||
match the timestamps. Must be a non-negative double float value, default value
|
||||
is 0.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>matrix_encoding</samp></dt>
|
||||
<dd><p>Select matrixed stereo encoding.
|
||||
</p>
|
||||
<p>It accepts the following values:
|
||||
</p><dl compact="compact">
|
||||
<dt>‘<samp>none</samp>’</dt>
|
||||
<dd><p>select none
|
||||
</p></dd>
|
||||
<dt>‘<samp>dolby</samp>’</dt>
|
||||
<dd><p>select Dolby
|
||||
</p></dd>
|
||||
<dt>‘<samp>dplii</samp>’</dt>
|
||||
<dd><p>select Dolby Pro Logic II
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
<p>Default value is <code>none</code>.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>filter_type</samp></dt>
|
||||
<dd><p>For swr only, select resampling filter type. This only affects resampling
|
||||
operations.
|
||||
</p>
|
||||
<p>It accepts the following values:
|
||||
</p><dl compact="compact">
|
||||
<dt>‘<samp>cubic</samp>’</dt>
|
||||
<dd><p>select cubic
|
||||
</p></dd>
|
||||
<dt>‘<samp>blackman_nuttall</samp>’</dt>
|
||||
<dd><p>select Blackman Nuttall Windowed Sinc
|
||||
</p></dd>
|
||||
<dt>‘<samp>kaiser</samp>’</dt>
|
||||
<dd><p>select Kaiser Windowed Sinc
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
</dd>
|
||||
<dt><samp>kaiser_beta</samp></dt>
|
||||
<dd><p>For swr only, set Kaiser Window Beta value. Must be an integer in the
|
||||
interval [2,16], default value is 9.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>output_sample_bits</samp></dt>
|
||||
<dd><p>For swr only, set number of used output sample bits for dithering. Must be an integer in the
|
||||
interval [0,64], default value is 0, which means it’s not used.
|
||||
</p>
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
|
||||
<a name="See-Also"></a>
|
||||
<h2 class="chapter">3 See Also<span class="pull-right"><a class="anchor hidden-xs" href="#See-Also" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-See-Also" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p><a href="ffmpeg.html">ffmpeg</a>, <a href="ffplay.html">ffplay</a>, <a href="ffprobe.html">ffprobe</a>, <a href="ffserver.html">ffserver</a>,
|
||||
<a href="libswresample.html">libswresample</a>
|
||||
</p>
|
||||
|
||||
<a name="Authors"></a>
|
||||
<h2 class="chapter">4 Authors<span class="pull-right"><a class="anchor hidden-xs" href="#Authors" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Authors" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg developers.
|
||||
</p>
|
||||
<p>For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
<code>git log</code> in the FFmpeg source directory, or browsing the
|
||||
online repository at <a href="http://source.ffmpeg.org">http://source.ffmpeg.org</a>.
|
||||
</p>
|
||||
<p>Maintainers for the specific components are listed in the file
|
||||
<samp>MAINTAINERS</samp> in the source code tree.
|
||||
</p>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
231
Externals/ffmpeg/dev/doc/ffmpeg-scaler.html
vendored
231
Externals/ffmpeg/dev/doc/ffmpeg-scaler.html
vendored
@ -1,231 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
FFmpeg Scaler Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
FFmpeg Scaler Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Description" href="#Description">1 Description</a></li>
|
||||
<li><a name="toc-Scaler-Options" href="#Scaler-Options">2 Scaler Options</a></li>
|
||||
<li><a name="toc-See-Also" href="#See-Also">3 See Also</a></li>
|
||||
<li><a name="toc-Authors" href="#Authors">4 Authors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Description"></a>
|
||||
<h2 class="chapter">1 Description<span class="pull-right"><a class="anchor hidden-xs" href="#Description" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Description" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg rescaler provides a high-level interface to the libswscale
|
||||
library image conversion utilities. In particular it allows one to perform
|
||||
image rescaling and pixel format conversion.
|
||||
</p>
|
||||
|
||||
<a name="scaler_005foptions"></a><a name="Scaler-Options"></a>
|
||||
<h2 class="chapter">2 Scaler Options<span class="pull-right"><a class="anchor hidden-xs" href="#Scaler-Options" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Scaler-Options" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The video scaler supports the following named options.
|
||||
</p>
|
||||
<p>Options may be set by specifying -<var>option</var> <var>value</var> in the
|
||||
FFmpeg tools. For programmatic use, they can be set explicitly in the
|
||||
<code>SwsContext</code> options or through the <samp>libavutil/opt.h</samp> API.
|
||||
</p>
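<p>As a sketch with placeholder file names, the scaling algorithm and additional
flags can be selected on the ffmpeg command line through <samp>sws_flags</samp>:
</p>
<div class="example">
<pre class="example">ffmpeg -i INPUT.mp4 -vf scale=1280:720 -sws_flags lanczos+accurate_rnd OUTPUT.mp4
</pre></div>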
|
||||
<dl compact="compact">
|
||||
<dd>
|
||||
<a name="sws_005fflags"></a></dd>
|
||||
<dt><samp>sws_flags</samp></dt>
|
||||
<dd><p>Set the scaler flags. This is also used to set the scaling
|
||||
algorithm. Only a single algorithm should be selected.
|
||||
</p>
|
||||
<p>It accepts the following values:
|
||||
</p><dl compact="compact">
|
||||
<dt>‘<samp>fast_bilinear</samp>’</dt>
|
||||
<dd><p>Select fast bilinear scaling algorithm.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>bilinear</samp>’</dt>
|
||||
<dd><p>Select bilinear scaling algorithm.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>bicubic</samp>’</dt>
|
||||
<dd><p>Select bicubic scaling algorithm.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>experimental</samp>’</dt>
|
||||
<dd><p>Select experimental scaling algorithm.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>neighbor</samp>’</dt>
|
||||
<dd><p>Select nearest neighbor rescaling algorithm.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>area</samp>’</dt>
|
||||
<dd><p>Select averaging area rescaling algorithm.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>bicublin</samp>’</dt>
|
||||
<dd><p>Select bicubic scaling algorithm for the luma component, bilinear for
|
||||
chroma components.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>gauss</samp>’</dt>
|
||||
<dd><p>Select Gaussian rescaling algorithm.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>sinc</samp>’</dt>
|
||||
<dd><p>Select sinc rescaling algorithm.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>lanczos</samp>’</dt>
|
||||
<dd><p>Select lanczos rescaling algorithm.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>spline</samp>’</dt>
|
||||
<dd><p>Select natural bicubic spline rescaling algorithm.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>print_info</samp>’</dt>
|
||||
<dd><p>Enable printing/debug logging.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>accurate_rnd</samp>’</dt>
|
||||
<dd><p>Enable accurate rounding.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>full_chroma_int</samp>’</dt>
|
||||
<dd><p>Enable full chroma interpolation.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>full_chroma_inp</samp>’</dt>
|
||||
<dd><p>Select full chroma input.
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>bitexact</samp>’</dt>
|
||||
<dd><p>Enable bitexact output.
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
</dd>
|
||||
<dt><samp>srcw</samp></dt>
|
||||
<dd><p>Set source width.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>srch</samp></dt>
|
||||
<dd><p>Set source height.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>dstw</samp></dt>
|
||||
<dd><p>Set destination width.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>dsth</samp></dt>
|
||||
<dd><p>Set destination height.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>src_format</samp></dt>
|
||||
<dd><p>Set source pixel format (must be expressed as an integer).
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>dst_format</samp></dt>
|
||||
<dd><p>Set destination pixel format (must be expressed as an integer).
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>src_range</samp></dt>
|
||||
<dd><p>Select source range.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>dst_range</samp></dt>
|
||||
<dd><p>Select destination range.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>param0, param1</samp></dt>
|
||||
<dd><p>Set scaling algorithm parameters. The specified values are specific to
some scaling algorithms and ignored by others. They
are floating point number values.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>sws_dither</samp></dt>
|
||||
<dd><p>Set the dithering algorithm. Accepts one of the following
|
||||
values. Default value is ‘<samp>auto</samp>’.
|
||||
</p>
|
||||
<dl compact="compact">
|
||||
<dt>‘<samp>auto</samp>’</dt>
|
||||
<dd><p>automatic choice
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>none</samp>’</dt>
|
||||
<dd><p>no dithering
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>bayer</samp>’</dt>
|
||||
<dd><p>bayer dither
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>ed</samp>’</dt>
|
||||
<dd><p>error diffusion dither
|
||||
</p>
|
||||
</dd>
|
||||
<dt>‘<samp>a_dither</samp>’</dt>
|
||||
<dd><p>arithmetic dither, based on addition
</p>
</dd>
<dt>‘<samp>x_dither</samp>’</dt>
<dd><p>arithmetic dither, based on xor (more random/less apparent patterning than
a_dither).
|
||||
</p>
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
|
||||
<a name="See-Also"></a>
|
||||
<h2 class="chapter">3 See Also<span class="pull-right"><a class="anchor hidden-xs" href="#See-Also" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-See-Also" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p><a href="ffmpeg.html">ffmpeg</a>, <a href="ffplay.html">ffplay</a>, <a href="ffprobe.html">ffprobe</a>, <a href="ffserver.html">ffserver</a>,
|
||||
<a href="libswscale.html">libswscale</a>
|
||||
</p>
|
||||
|
||||
<a name="Authors"></a>
|
||||
<h2 class="chapter">4 Authors<span class="pull-right"><a class="anchor hidden-xs" href="#Authors" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Authors" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg developers.
|
||||
</p>
|
||||
<p>For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
<code>git log</code> in the FFmpeg source directory, or browsing the
|
||||
online repository at <a href="http://source.ffmpeg.org">http://source.ffmpeg.org</a>.
|
||||
</p>
|
||||
<p>Maintainers for the specific components are listed in the file
|
||||
<samp>MAINTAINERS</samp> in the source code tree.
|
||||
</p>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
1468
Externals/ffmpeg/dev/doc/ffmpeg-utils.html
vendored
1468
Externals/ffmpeg/dev/doc/ffmpeg-utils.html
vendored
File diff suppressed because it is too large
Load Diff
2109
Externals/ffmpeg/dev/doc/ffmpeg.html
vendored
2109
Externals/ffmpeg/dev/doc/ffmpeg.html
vendored
File diff suppressed because it is too large
Load Diff
21308
Externals/ffmpeg/dev/doc/ffplay-all.html
vendored
21308
Externals/ffmpeg/dev/doc/ffplay-all.html
vendored
File diff suppressed because it is too large
Load Diff
745
Externals/ffmpeg/dev/doc/ffplay.html
vendored
745
Externals/ffmpeg/dev/doc/ffplay.html
vendored
@ -1,745 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
ffplay Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
ffplay Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Synopsis" href="#Synopsis">1 Synopsis</a></li>
|
||||
<li><a name="toc-Description" href="#Description">2 Description</a></li>
|
||||
<li><a name="toc-Options" href="#Options">3 Options</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Stream-specifiers-1" href="#Stream-specifiers-1">3.1 Stream specifiers</a></li>
|
||||
<li><a name="toc-Generic-options" href="#Generic-options">3.2 Generic options</a></li>
|
||||
<li><a name="toc-AVOptions" href="#AVOptions">3.3 AVOptions</a></li>
|
||||
<li><a name="toc-Main-options" href="#Main-options">3.4 Main options</a></li>
|
||||
<li><a name="toc-Advanced-options" href="#Advanced-options">3.5 Advanced options</a></li>
|
||||
<li><a name="toc-While-playing" href="#While-playing">3.6 While playing</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-See-Also" href="#See-Also">4 See Also</a></li>
|
||||
<li><a name="toc-Authors" href="#Authors">5 Authors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Synopsis"></a>
|
||||
<h2 class="chapter">1 Synopsis<span class="pull-right"><a class="anchor hidden-xs" href="#Synopsis" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Synopsis" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>ffplay [<var>options</var>] [<samp>input_file</samp>]
|
||||
</p>
|
||||
<a name="Description"></a>
|
||||
<h2 class="chapter">2 Description<span class="pull-right"><a class="anchor hidden-xs" href="#Description" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Description" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>FFplay is a very simple and portable media player using the FFmpeg
|
||||
libraries and the SDL library. It is mostly used as a testbed for the
|
||||
various FFmpeg APIs.
|
||||
</p>
|
||||
<a name="Options"></a>
|
||||
<h2 class="chapter">3 Options<span class="pull-right"><a class="anchor hidden-xs" href="#Options" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Options" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>All the numerical options, if not specified otherwise, accept a string
|
||||
representing a number as input, which may be followed by one of the SI
|
||||
unit prefixes, for example: ’K’, ’M’, or ’G’.
|
||||
</p>
|
||||
<p>If ’i’ is appended to the SI unit prefix, the complete prefix will be
|
||||
interpreted as a unit prefix for binary multiples, which are based on
|
||||
powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
|
||||
prefix multiplies the value by 8. This allows using, for example:
|
||||
’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
|
||||
</p>
|
||||
<p>Options which do not take arguments are boolean options, and set the
|
||||
corresponding value to true. They can be set to false by prefixing
|
||||
the option name with "no". For example using "-nofoo"
|
||||
will set the boolean option with name "foo" to false.
|
||||
</p>
|
||||
<a name="Stream-specifiers"></a><a name="Stream-specifiers-1"></a>
|
||||
<h3 class="section">3.1 Stream specifiers<span class="pull-right"><a class="anchor hidden-xs" href="#Stream-specifiers-1" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Stream-specifiers-1" aria-hidden="true">TOC</a></span></h3>
|
||||
<p>Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
|
||||
are used to precisely specify which stream(s) a given option belongs to.
|
||||
</p>
|
||||
<p>A stream specifier is a string generally appended to the option name and
|
||||
separated from it by a colon. E.g. <code>-codec:a:1 ac3</code> contains the
|
||||
<code>a:1</code> stream specifier, which matches the second audio stream. Therefore, it
|
||||
would select the ac3 codec for the second audio stream.
|
||||
</p>
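<p>Sketched as a full command (file names are placeholders), the same specifier
would copy everything while re-encoding only the second audio stream:
</p>
<div class="example">
<pre class="example">ffmpeg -i INPUT.mkv -map 0 -c copy -c:a:1 ac3 OUTPUT.mkv
</pre></div>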
|
||||
<p>A stream specifier can match several streams, so that the option is applied to all
|
||||
of them. E.g. the stream specifier in <code>-b:a 128k</code> matches all audio
|
||||
streams.
|
||||
</p>
|
||||
<p>An empty stream specifier matches all streams. For example, <code>-codec copy</code>
|
||||
or <code>-codec: copy</code> would copy all the streams without reencoding.
|
||||
</p>
|
||||
<p>Possible forms of stream specifiers are:
|
||||
</p><dl compact="compact">
|
||||
<dt><samp><var>stream_index</var></samp></dt>
|
||||
<dd><p>Matches the stream with this index. E.g. <code>-threads:1 4</code> would set the
|
||||
thread count for the second stream to 4.
|
||||
</p></dd>
|
||||
<dt><samp><var>stream_type</var>[:<var>stream_index</var>]</samp></dt>
|
||||
<dd><p><var>stream_type</var> is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
|
||||
’d’ for data, and ’t’ for attachments. If <var>stream_index</var> is given, then it matches
|
||||
stream number <var>stream_index</var> of this type. Otherwise, it matches all
|
||||
streams of this type.
|
||||
</p></dd>
|
||||
<dt><samp>p:<var>program_id</var>[:<var>stream_index</var>]</samp></dt>
|
||||
<dd><p>If <var>stream_index</var> is given, then it matches the stream with number <var>stream_index</var>
|
||||
in the program with the id <var>program_id</var>. Otherwise, it matches all streams in the
|
||||
program.
|
||||
</p></dd>
|
||||
<dt><samp>#<var>stream_id</var> or i:<var>stream_id</var></samp></dt>
|
||||
<dd><p>Match the stream by stream id (e.g. PID in MPEG-TS container).
|
||||
</p></dd>
|
||||
<dt><samp>m:<var>key</var>[:<var>value</var>]</samp></dt>
|
||||
<dd><p>Matches streams with the metadata tag <var>key</var> having the specified value. If
|
||||
<var>value</var> is not given, matches streams that contain the given tag with any
|
||||
value.
|
||||
</p>
|
||||
<p>Note that in <code>ffmpeg</code>, matching by metadata will only work properly for
|
||||
input files.
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
<a name="Generic-options"></a>
|
||||
<h3 class="section">3.2 Generic options<span class="pull-right"><a class="anchor hidden-xs" href="#Generic-options" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Generic-options" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>These options are shared amongst the ff* tools.
|
||||
</p>
|
||||
<dl compact="compact">
|
||||
<dt><samp>-L</samp></dt>
|
||||
<dd><p>Show license.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-h, -?, -help, --help [<var>arg</var>]</samp></dt>
|
||||
<dd><p>Show help. An optional parameter may be specified to print help about a specific
|
||||
item. If no argument is specified, only basic (non advanced) tool
|
||||
options are shown.
|
||||
</p>
|
||||
<p>Possible values of <var>arg</var> are:
|
||||
</p><dl compact="compact">
|
||||
<dt><samp>long</samp></dt>
|
||||
<dd><p>Print advanced tool options in addition to the basic tool options.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>full</samp></dt>
|
||||
<dd><p>Print complete list of options, including shared and private options
|
||||
for encoders, decoders, demuxers, muxers, filters, etc.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>decoder=<var>decoder_name</var></samp></dt>
|
||||
<dd><p>Print detailed information about the decoder named <var>decoder_name</var>. Use the
|
||||
<samp>-decoders</samp> option to get a list of all decoders.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>encoder=<var>encoder_name</var></samp></dt>
|
||||
<dd><p>Print detailed information about the encoder named <var>encoder_name</var>. Use the
|
||||
<samp>-encoders</samp> option to get a list of all encoders.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>demuxer=<var>demuxer_name</var></samp></dt>
|
||||
<dd><p>Print detailed information about the demuxer named <var>demuxer_name</var>. Use the
|
||||
<samp>-formats</samp> option to get a list of all demuxers and muxers.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>muxer=<var>muxer_name</var></samp></dt>
|
||||
<dd><p>Print detailed information about the muxer named <var>muxer_name</var>. Use the
|
||||
<samp>-formats</samp> option to get a list of all muxers and demuxers.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>filter=<var>filter_name</var></samp></dt>
|
||||
<dd><p>Print detailed information about the filter name <var>filter_name</var>. Use the
|
||||
<samp>-filters</samp> option to get a list of all filters.
|
||||
</p></dd>
|
||||
</dl>
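<p>For example, assuming an H.264 decoder is present in the build, its detailed
information can be printed with:
</p>
<div class="example">
<pre class="example">ffplay -h decoder=h264
</pre></div>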
|
||||
|
||||
</dd>
|
||||
<dt><samp>-version</samp></dt>
|
||||
<dd><p>Show version.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-formats</samp></dt>
|
||||
<dd><p>Show available formats (including devices).
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-devices</samp></dt>
|
||||
<dd><p>Show available devices.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-codecs</samp></dt>
|
||||
<dd><p>Show all codecs known to libavcodec.
|
||||
</p>
|
||||
<p>Note that the term ’codec’ is used throughout this documentation as a shortcut
|
||||
for what is more correctly called a media bitstream format.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-decoders</samp></dt>
|
||||
<dd><p>Show available decoders.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-encoders</samp></dt>
|
||||
<dd><p>Show all available encoders.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-bsfs</samp></dt>
|
||||
<dd><p>Show available bitstream filters.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-protocols</samp></dt>
|
||||
<dd><p>Show available protocols.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-filters</samp></dt>
|
||||
<dd><p>Show available libavfilter filters.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-pix_fmts</samp></dt>
|
||||
<dd><p>Show available pixel formats.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-sample_fmts</samp></dt>
|
||||
<dd><p>Show available sample formats.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-layouts</samp></dt>
|
||||
<dd><p>Show channel names and standard channel layouts.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-colors</samp></dt>
|
||||
<dd><p>Show recognized color names.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-sources <var>device</var>[,<var>opt1</var>=<var>val1</var>[,<var>opt2</var>=<var>val2</var>]...]</samp></dt>
|
||||
<dd><p>Show autodetected sources of the input device.
Some devices may provide system-dependent source names that cannot be autodetected.
The returned list cannot be assumed to be always complete.
|
||||
</p><div class="example">
|
||||
<pre class="example">ffmpeg -sources pulse,server=192.168.0.4
|
||||
</pre></div>
|
||||
|
||||
</dd>
|
||||
<dt><samp>-sinks <var>device</var>[,<var>opt1</var>=<var>val1</var>[,<var>opt2</var>=<var>val2</var>]...]</samp></dt>
|
||||
<dd><p>Show autodetected sinks of the output device.
|
||||
Some devices may provide system-dependent sink names that cannot be autodetected.
|
||||
The returned list cannot be assumed to be always complete.
|
||||
</p><div class="example">
|
||||
<pre class="example">ffmpeg -sinks pulse,server=192.168.0.4
|
||||
</pre></div>
|
||||
|
||||
</dd>
|
||||
<dt><samp>-loglevel [repeat+]<var>loglevel</var> | -v [repeat+]<var>loglevel</var></samp></dt>
|
||||
<dd><p>Set the logging level used by the library.
|
||||
Adding "repeat+" indicates that repeated log output should not be compressed
|
||||
to the first line and the "Last message repeated n times" line will be
|
||||
omitted. "repeat" can also be used alone.
|
||||
If "repeat" is used alone, and with no prior loglevel set, the default
|
||||
loglevel will be used. If multiple loglevel parameters are given, using
|
||||
’repeat’ will not change the loglevel.
|
||||
<var>loglevel</var> is a string or a number containing one of the following values:
|
||||
</p><dl compact="compact">
|
||||
<dt>‘<samp>quiet, -8</samp>’</dt>
|
||||
<dd><p>Show nothing at all; be silent.
|
||||
</p></dd>
|
||||
<dt>‘<samp>panic, 0</samp>’</dt>
|
||||
<dd><p>Only show fatal errors which could lead the process to crash, such as
an assert failure. This is not currently used for anything.
|
||||
</p></dd>
|
||||
<dt>‘<samp>fatal, 8</samp>’</dt>
|
||||
<dd><p>Only show fatal errors. These are errors after which the process absolutely
cannot continue.
|
||||
</p></dd>
|
||||
<dt>‘<samp>error, 16</samp>’</dt>
|
||||
<dd><p>Show all errors, including ones which can be recovered from.
|
||||
</p></dd>
|
||||
<dt>‘<samp>warning, 24</samp>’</dt>
|
||||
<dd><p>Show all warnings and errors. Any message related to possibly
|
||||
incorrect or unexpected events will be shown.
|
||||
</p></dd>
|
||||
<dt>‘<samp>info, 32</samp>’</dt>
|
||||
<dd><p>Show informative messages during processing. This is in addition to
|
||||
warnings and errors. This is the default value.
|
||||
</p></dd>
|
||||
<dt>‘<samp>verbose, 40</samp>’</dt>
|
||||
<dd><p>Same as <code>info</code>, except more verbose.
|
||||
</p></dd>
|
||||
<dt>‘<samp>debug, 48</samp>’</dt>
|
||||
<dd><p>Show everything, including debugging information.
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
<p>By default the program logs to stderr. If coloring is supported by the
terminal, colors are used to mark errors and warnings. Log coloring
can be disabled by setting the environment variable
<code>AV_LOG_FORCE_NOCOLOR</code> or <code>NO_COLOR</code>, or can be forced by setting
the environment variable <code>AV_LOG_FORCE_COLOR</code>.
|
||||
The use of the environment variable <code>NO_COLOR</code> is deprecated and
|
||||
will be dropped in a future FFmpeg version.
|
||||
</p>
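<p>A brief illustration (the file names are hypothetical): keep repeated messages
and raise verbosity, silence the tool completely, or disable log coloring for a
single run:
</p><div class="example">
<pre class="example">ffmpeg -loglevel repeat+verbose -i input.avi output.mkv
ffmpeg -loglevel quiet -i input.avi output.mkv
# assumes a POSIX shell; AV_LOG_FORCE_NOCOLOR is described above
AV_LOG_FORCE_NOCOLOR=1 ffmpeg -i input.avi output.mkv
</pre></div>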
|
||||
</dd>
|
||||
<dt><samp>-report</samp></dt>
|
||||
<dd><p>Dump full command line and console output to a file named
|
||||
<code><var>program</var>-<var>YYYYMMDD</var>-<var>HHMMSS</var>.log</code> in the current
|
||||
directory.
|
||||
This file can be useful for bug reports.
|
||||
It also implies <code>-loglevel verbose</code>.
|
||||
</p>
|
||||
<p>Setting the environment variable <code>FFREPORT</code> to any value has the
|
||||
same effect. If the value is a ’:’-separated key=value sequence, these
|
||||
options will affect the report; option values must be escaped if they
|
||||
contain special characters or the options delimiter ’:’ (see the
|
||||
“Quoting and escaping” section in the ffmpeg-utils manual).
|
||||
</p>
|
||||
<p>The following options are recognized:
|
||||
</p><dl compact="compact">
|
||||
<dt><samp>file</samp></dt>
|
||||
<dd><p>set the file name to use for the report; <code>%p</code> is expanded to the name
|
||||
of the program, <code>%t</code> is expanded to a timestamp, <code>%%</code> is expanded
|
||||
to a plain <code>%</code>
|
||||
</p></dd>
|
||||
<dt><samp>level</samp></dt>
|
||||
<dd><p>set the log verbosity level using a numerical value (see <code>-loglevel</code>).
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
<p>For example, to output a report to a file named <samp>ffreport.log</samp>
|
||||
using a log level of <code>32</code> (alias for log level <code>info</code>):
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
|
||||
</pre></div>
|
||||
|
||||
<p>Errors in parsing the environment variable are not fatal, and will not
|
||||
appear in the report.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-hide_banner</samp></dt>
|
||||
<dd><p>Suppress printing banner.
|
||||
</p>
|
||||
<p>All FFmpeg tools will normally show a copyright notice, build options
|
||||
and library versions. This option can be used to suppress printing
|
||||
this information.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-cpuflags flags (<em>global</em>)</samp></dt>
|
||||
<dd><p>Allows setting and clearing cpu flags. This option is intended
|
||||
for testing. Do not use it unless you know what you’re doing.
|
||||
</p><div class="example">
|
||||
<pre class="example">ffmpeg -cpuflags -sse+mmx ...
|
||||
ffmpeg -cpuflags mmx ...
|
||||
ffmpeg -cpuflags 0 ...
|
||||
</pre></div>
|
||||
<p>Possible flags for this option are:
|
||||
</p><dl compact="compact">
|
||||
<dt>‘<samp>x86</samp>’</dt>
|
||||
<dd><dl compact="compact">
|
||||
<dt>‘<samp>mmx</samp>’</dt>
|
||||
<dt>‘<samp>mmxext</samp>’</dt>
|
||||
<dt>‘<samp>sse</samp>’</dt>
|
||||
<dt>‘<samp>sse2</samp>’</dt>
|
||||
<dt>‘<samp>sse2slow</samp>’</dt>
|
||||
<dt>‘<samp>sse3</samp>’</dt>
|
||||
<dt>‘<samp>sse3slow</samp>’</dt>
|
||||
<dt>‘<samp>ssse3</samp>’</dt>
|
||||
<dt>‘<samp>atom</samp>’</dt>
|
||||
<dt>‘<samp>sse4.1</samp>’</dt>
|
||||
<dt>‘<samp>sse4.2</samp>’</dt>
|
||||
<dt>‘<samp>avx</samp>’</dt>
|
||||
<dt>‘<samp>xop</samp>’</dt>
|
||||
<dt>‘<samp>fma4</samp>’</dt>
|
||||
<dt>‘<samp>3dnow</samp>’</dt>
|
||||
<dt>‘<samp>3dnowext</samp>’</dt>
|
||||
<dt>‘<samp>cmov</samp>’</dt>
|
||||
</dl>
|
||||
</dd>
|
||||
<dt>‘<samp>ARM</samp>’</dt>
|
||||
<dd><dl compact="compact">
|
||||
<dt>‘<samp>armv5te</samp>’</dt>
|
||||
<dt>‘<samp>armv6</samp>’</dt>
|
||||
<dt>‘<samp>armv6t2</samp>’</dt>
|
||||
<dt>‘<samp>vfp</samp>’</dt>
|
||||
<dt>‘<samp>vfpv3</samp>’</dt>
|
||||
<dt>‘<samp>neon</samp>’</dt>
|
||||
</dl>
|
||||
</dd>
|
||||
<dt>‘<samp>PowerPC</samp>’</dt>
|
||||
<dd><dl compact="compact">
|
||||
<dt>‘<samp>altivec</samp>’</dt>
|
||||
</dl>
|
||||
</dd>
|
||||
<dt>‘<samp>Specific Processors</samp>’</dt>
|
||||
<dd><dl compact="compact">
|
||||
<dt>‘<samp>pentium2</samp>’</dt>
|
||||
<dt>‘<samp>pentium3</samp>’</dt>
|
||||
<dt>‘<samp>pentium4</samp>’</dt>
|
||||
<dt>‘<samp>k6</samp>’</dt>
|
||||
<dt>‘<samp>k62</samp>’</dt>
|
||||
<dt>‘<samp>athlon</samp>’</dt>
|
||||
<dt>‘<samp>athlonxp</samp>’</dt>
|
||||
<dt>‘<samp>k8</samp>’</dt>
|
||||
</dl>
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
</dd>
|
||||
<dt><samp>-opencl_bench</samp></dt>
|
||||
<dd><p>Benchmark all available OpenCL devices and show the results. This option
|
||||
is only available when FFmpeg has been compiled with <code>--enable-opencl</code>.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-opencl_options options (<em>global</em>)</samp></dt>
|
||||
<dd><p>Set OpenCL environment options. This option is only available when
|
||||
FFmpeg has been compiled with <code>--enable-opencl</code>.
|
||||
</p>
|
||||
<p><var>options</var> must be a list of <var>key</var>=<var>value</var> option pairs
|
||||
separated by ’:’. See the “OpenCL Options” section in the
|
||||
ffmpeg-utils manual for the list of supported options.
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
<a name="AVOptions"></a>
|
||||
<h3 class="section">3.3 AVOptions<span class="pull-right"><a class="anchor hidden-xs" href="#AVOptions" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-AVOptions" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>These options are provided directly by the libavformat, libavdevice and
|
||||
libavcodec libraries. To see the list of available AVOptions, use the
|
||||
<samp>-help</samp> option. They are separated into two categories:
|
||||
</p><dl compact="compact">
|
||||
<dt><samp>generic</samp></dt>
|
||||
<dd><p>These options can be set for any container, codec or device. Generic options
|
||||
are listed under AVFormatContext options for containers/devices and under
|
||||
AVCodecContext options for codecs.
|
||||
</p></dd>
|
||||
<dt><samp>private</samp></dt>
|
||||
<dd><p>These options are specific to the given container, device or codec. Private
|
||||
options are listed under their corresponding containers/devices/codecs.
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
<p>For example, to write an ID3v2.3 header instead of a default ID3v2.4 to
|
||||
an MP3 file, use the <samp>id3v2_version</samp> private option of the MP3
|
||||
muxer:
|
||||
</p><div class="example">
|
||||
<pre class="example">ffmpeg -i input.flac -id3v2_version 3 out.mp3
|
||||
</pre></div>
|
||||
|
||||
<p>All codec AVOptions are per-stream, and thus a stream specifier
|
||||
should be attached to them.
|
||||
</p>
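<p>For instance (an illustrative command; the file names are hypothetical), the
bitrate AVOption <samp>b</samp> can be set separately for video and audio by
attaching a stream specifier:
</p><div class="example">
<pre class="example"># input.mov / output.mp4 are placeholder file names
ffmpeg -i input.mov -b:v 2M -b:a 128k output.mp4
</pre></div>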
|
||||
<p>Note: the <samp>-nooption</samp> syntax cannot be used for boolean
|
||||
AVOptions, use <samp>-option 0</samp>/<samp>-option 1</samp>.
|
||||
</p>
|
||||
<p>Note: the old undocumented way of specifying per-stream AVOptions by
|
||||
prepending v/a/s to the option name is now obsolete and will be
|
||||
removed soon.
|
||||
</p>
|
||||
<a name="Main-options"></a>
|
||||
<h3 class="section">3.4 Main options<span class="pull-right"><a class="anchor hidden-xs" href="#Main-options" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Main-options" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<dl compact="compact">
|
||||
<dt><samp>-x <var>width</var></samp></dt>
|
||||
<dd><p>Force displayed width.
|
||||
</p></dd>
|
||||
<dt><samp>-y <var>height</var></samp></dt>
|
||||
<dd><p>Force displayed height.
|
||||
</p></dd>
|
||||
<dt><samp>-s <var>size</var></samp></dt>
|
||||
<dd><p>Set frame size (WxH or abbreviation), needed for videos which do
not contain a header with the frame size, such as raw YUV. This option
has been deprecated in favor of private options; try -video_size.
|
||||
</p></dd>
|
||||
<dt><samp>-fs</samp></dt>
|
||||
<dd><p>Start in fullscreen mode.
|
||||
</p></dd>
|
||||
<dt><samp>-an</samp></dt>
|
||||
<dd><p>Disable audio.
|
||||
</p></dd>
|
||||
<dt><samp>-vn</samp></dt>
|
||||
<dd><p>Disable video.
|
||||
</p></dd>
|
||||
<dt><samp>-sn</samp></dt>
|
||||
<dd><p>Disable subtitles.
|
||||
</p></dd>
|
||||
<dt><samp>-ss <var>pos</var></samp></dt>
|
||||
<dd><p>Seek to a given position in seconds.
|
||||
</p></dd>
|
||||
<dt><samp>-t <var>duration</var></samp></dt>
|
||||
<dd><p>Play <var>duration</var> seconds of audio/video.
|
||||
</p></dd>
|
||||
<dt><samp>-bytes</samp></dt>
|
||||
<dd><p>Seek by bytes.
|
||||
</p></dd>
|
||||
<dt><samp>-nodisp</samp></dt>
|
||||
<dd><p>Disable graphical display.
|
||||
</p></dd>
|
||||
<dt><samp>-f <var>fmt</var></samp></dt>
|
||||
<dd><p>Force format.
|
||||
</p></dd>
|
||||
<dt><samp>-window_title <var>title</var></samp></dt>
|
||||
<dd><p>Set window title (default is the input filename).
|
||||
</p></dd>
|
||||
<dt><samp>-loop <var>number</var></samp></dt>
|
||||
<dd><p>Loop movie playback <var>number</var> times. 0 means forever.
|
||||
</p></dd>
|
||||
<dt><samp>-showmode <var>mode</var></samp></dt>
|
||||
<dd><p>Set the show mode to use.
|
||||
Available values for <var>mode</var> are:
|
||||
</p><dl compact="compact">
|
||||
<dt>‘<samp>0, video</samp>’</dt>
|
||||
<dd><p>show video
|
||||
</p></dd>
|
||||
<dt>‘<samp>1, waves</samp>’</dt>
|
||||
<dd><p>show audio waves
|
||||
</p></dd>
|
||||
<dt>‘<samp>2, rdft</samp>’</dt>
|
||||
<dd><p>show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform)
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
<p>Default value is "video", if video is not present or cannot be played
|
||||
"rdft" is automatically selected.
|
||||
</p>
|
||||
<p>You can interactively cycle through the available show modes by
|
||||
pressing the key <tt class="key">w</tt>.
|
||||
</p>
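<p>For example, to start directly in the RDFT visualization for an audio-only
file (the file name is hypothetical):
</p><div class="example">
<pre class="example">ffplay -showmode rdft input.mp3
</pre></div>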
|
||||
</dd>
|
||||
<dt><samp>-vf <var>filtergraph</var></samp></dt>
|
||||
<dd><p>Create the filtergraph specified by <var>filtergraph</var> and use it to
|
||||
filter the video stream.
|
||||
</p>
|
||||
<p><var>filtergraph</var> is a description of the filtergraph to apply to
|
||||
the stream, and must have a single video input and a single video
|
||||
output. In the filtergraph, the input is associated to the label
|
||||
<code>in</code>, and the output to the label <code>out</code>. See the
|
||||
ffmpeg-filters manual for more information about the filtergraph
|
||||
syntax.
|
||||
</p>
|
||||
<p>You can specify this parameter multiple times and cycle through the specified
|
||||
filtergraphs along with the show modes by pressing the key <tt class="key">w</tt>.
|
||||
</p>
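<p>A small illustration (the input file is hypothetical), scaling the video to a
width of 640 pixels and mirroring it horizontally with the libavfilter
<code>scale</code> and <code>hflip</code> filters:
</p><div class="example">
<pre class="example">ffplay -vf "scale=640:-1,hflip" input.mp4
</pre></div>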
|
||||
</dd>
|
||||
<dt><samp>-af <var>filtergraph</var></samp></dt>
|
||||
<dd><p><var>filtergraph</var> is a description of the filtergraph to apply to
|
||||
the input audio.
|
||||
Use the option "-filters" to show all the available filters (including
|
||||
sources and sinks).
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-i <var>input_file</var></samp></dt>
|
||||
<dd><p>Read <var>input_file</var>.
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
<a name="Advanced-options"></a>
|
||||
<h3 class="section">3.5 Advanced options<span class="pull-right"><a class="anchor hidden-xs" href="#Advanced-options" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Advanced-options" aria-hidden="true">TOC</a></span></h3>
|
||||
<dl compact="compact">
|
||||
<dt><samp>-pix_fmt <var>format</var></samp></dt>
|
||||
<dd><p>Set pixel format.
|
||||
This option has been deprecated in favor of private options; try -pixel_format.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-stats</samp></dt>
|
||||
<dd><p>Print several playback statistics; in particular, show the stream
duration, the codec parameters, the current position in the stream and
the audio/video synchronisation drift. It is on by default; to
explicitly disable it you need to specify <code>-nostats</code>.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-fast</samp></dt>
|
||||
<dd><p>Non-spec-compliant optimizations.
|
||||
</p></dd>
|
||||
<dt><samp>-genpts</samp></dt>
|
||||
<dd><p>Generate pts.
|
||||
</p></dd>
|
||||
<dt><samp>-sync <var>type</var></samp></dt>
|
||||
<dd><p>Set the master clock to audio (<code>type=audio</code>), video
|
||||
(<code>type=video</code>) or external (<code>type=ext</code>). Default is audio. The
|
||||
master clock is used to control audio-video synchronization. Most media
|
||||
players use audio as master clock, but in some cases (streaming or high
|
||||
quality broadcast) it is necessary to change that. This option is mainly
|
||||
used for debugging purposes.
|
||||
</p></dd>
|
||||
<dt><samp>-ast <var>audio_stream_specifier</var></samp></dt>
|
||||
<dd><p>Select the desired audio stream using the given stream specifier. The stream
|
||||
specifiers are described in the <a href="#Stream-specifiers">Stream specifiers</a> chapter. If this option
|
||||
is not specified, the "best" audio stream is selected in the program of the
|
||||
already selected video stream.
|
||||
</p></dd>
|
||||
<dt><samp>-vst <var>video_stream_specifier</var></samp></dt>
|
||||
<dd><p>Select the desired video stream using the given stream specifier. The stream
|
||||
specifiers are described in the <a href="#Stream-specifiers">Stream specifiers</a> chapter. If this option
|
||||
is not specified, the "best" video stream is selected.
|
||||
</p></dd>
|
||||
<dt><samp>-sst <var>subtitle_stream_specifier</var></samp></dt>
|
||||
<dd><p>Select the desired subtitle stream using the given stream specifier. The stream
|
||||
specifiers are described in the <a href="#Stream-specifiers">Stream specifiers</a> chapter. If this option
|
||||
is not specified, the "best" subtitle stream is selected in the program of the
|
||||
already selected video or audio stream.
|
||||
</p></dd>
|
||||
<dt><samp>-autoexit</samp></dt>
|
||||
<dd><p>Exit when video is done playing.
|
||||
</p></dd>
|
||||
<dt><samp>-exitonkeydown</samp></dt>
|
||||
<dd><p>Exit if any key is pressed.
|
||||
</p></dd>
|
||||
<dt><samp>-exitonmousedown</samp></dt>
|
||||
<dd><p>Exit if any mouse button is pressed.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-codec:<var>media_specifier</var> <var>codec_name</var></samp></dt>
|
||||
<dd><p>Force a specific decoder implementation for the stream identified by
|
||||
<var>media_specifier</var>, which can assume the values <code>a</code> (audio),
|
||||
<code>v</code> (video), and <code>s</code> (subtitle).
|
||||
</p>
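<p>For example (the file name is hypothetical), to force the native MPEG-4
video decoder:
</p><div class="example">
<pre class="example">ffplay -codec:v mpeg4 input.avi
</pre></div>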
|
||||
</dd>
|
||||
<dt><samp>-acodec <var>codec_name</var></samp></dt>
|
||||
<dd><p>Force a specific audio decoder.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-vcodec <var>codec_name</var></samp></dt>
|
||||
<dd><p>Force a specific video decoder.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-scodec <var>codec_name</var></samp></dt>
|
||||
<dd><p>Force a specific subtitle decoder.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-autorotate</samp></dt>
|
||||
<dd><p>Automatically rotate the video according to presentation metadata. Enabled by
|
||||
default, use <samp>-noautorotate</samp> to disable it.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-framedrop</samp></dt>
|
||||
<dd><p>Drop video frames if video is out of sync. Enabled by default if the master
|
||||
clock is not set to video. Use this option to enable frame dropping for all
|
||||
master clock sources; use <samp>-noframedrop</samp> to disable it.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><samp>-infbuf</samp></dt>
|
||||
<dd><p>Do not limit the input buffer size: read as much data as possible from the
input as soon as possible. Enabled by default for realtime streams, where data
may be dropped if not read in time. Use this option to enable infinite buffers
for all inputs; use <samp>-noinfbuf</samp> to disable it.
|
||||
</p>
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<a name="While-playing"></a>
|
||||
<h3 class="section">3.6 While playing<span class="pull-right"><a class="anchor hidden-xs" href="#While-playing" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-While-playing" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<dl compact="compact">
|
||||
<dt><tt class="key">q, ESC</tt></dt>
|
||||
<dd><p>Quit.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><tt class="key">f</tt></dt>
|
||||
<dd><p>Toggle full screen.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><tt class="key">p, SPC</tt></dt>
|
||||
<dd><p>Pause.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><tt class="key">a</tt></dt>
|
||||
<dd><p>Cycle audio channel in the current program.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><tt class="key">v</tt></dt>
|
||||
<dd><p>Cycle video channel.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><tt class="key">t</tt></dt>
|
||||
<dd><p>Cycle subtitle channel in the current program.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><tt class="key">c</tt></dt>
|
||||
<dd><p>Cycle program.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><tt class="key">w</tt></dt>
|
||||
<dd><p>Cycle video filters or show modes.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><tt class="key">s</tt></dt>
|
||||
<dd><p>Step to the next frame.
|
||||
</p>
|
||||
<p>Pause if the stream is not already paused, step to the next video
|
||||
frame, and pause.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><tt class="key">left/right</tt></dt>
|
||||
<dd><p>Seek backward/forward 10 seconds.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><tt class="key">down/up</tt></dt>
|
||||
<dd><p>Seek backward/forward 1 minute.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><tt class="key">page down/page up</tt></dt>
|
||||
<dd><p>Seek to the previous/next chapter,
or, if there are no chapters,
seek backward/forward 10 minutes.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><tt class="key">mouse click</tt></dt>
|
||||
<dd><p>Seek to the percentage of the file corresponding to the fraction of the width at which the click occurred.
|
||||
</p>
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
|
||||
|
||||
<a name="See-Also"></a>
|
||||
<h2 class="chapter">4 See Also<span class="pull-right"><a class="anchor hidden-xs" href="#See-Also" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-See-Also" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p><a href="ffplay-all.html">ffmpeg-all</a>,
|
||||
<a href="ffmpeg.html">ffmpeg</a>, <a href="ffprobe.html">ffprobe</a>, <a href="ffserver.html">ffserver</a>,
|
||||
<a href="ffmpeg-utils.html">ffmpeg-utils</a>,
|
||||
<a href="ffmpeg-scaler.html">ffmpeg-scaler</a>,
|
||||
<a href="ffmpeg-resampler.html">ffmpeg-resampler</a>,
|
||||
<a href="ffmpeg-codecs.html">ffmpeg-codecs</a>,
|
||||
<a href="ffmpeg-bitstream-filters.html">ffmpeg-bitstream-filters</a>,
|
||||
<a href="ffmpeg-formats.html">ffmpeg-formats</a>,
|
||||
<a href="ffmpeg-devices.html">ffmpeg-devices</a>,
|
||||
<a href="ffmpeg-protocols.html">ffmpeg-protocols</a>,
|
||||
<a href="ffmpeg-filters.html">ffmpeg-filters</a>
|
||||
</p>
|
||||
|
||||
<a name="Authors"></a>
|
||||
<h2 class="chapter">5 Authors<span class="pull-right"><a class="anchor hidden-xs" href="#Authors" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Authors" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg developers.
|
||||
</p>
|
||||
<p>For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
<code>git log</code> in the FFmpeg source directory, or browsing the
|
||||
online repository at <a href="http://source.ffmpeg.org">http://source.ffmpeg.org</a>.
|
||||
</p>
|
||||
<p>Maintainers for the specific components are listed in the file
|
||||
<samp>MAINTAINERS</samp> in the source code tree.
|
||||
</p>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
21676
Externals/ffmpeg/dev/doc/ffprobe-all.html
vendored
21676
Externals/ffmpeg/dev/doc/ffprobe-all.html
vendored
File diff suppressed because it is too large
1113
Externals/ffmpeg/dev/doc/ffprobe.html
vendored
1113
Externals/ffmpeg/dev/doc/ffprobe.html
vendored
File diff suppressed because it is too large
986
Externals/ffmpeg/dev/doc/general.html
vendored
986
Externals/ffmpeg/dev/doc/general.html
vendored
@ -1,986 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
General Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
General Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-External-libraries" href="#External-libraries">1 External libraries</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-OpenJPEG" href="#OpenJPEG">1.1 OpenJPEG</a></li>
|
||||
<li><a name="toc-OpenCORE_002c-VisualOn_002c-and-Fraunhofer-libraries" href="#OpenCORE_002c-VisualOn_002c-and-Fraunhofer-libraries">1.2 OpenCORE, VisualOn, and Fraunhofer libraries</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-OpenCORE-AMR" href="#OpenCORE-AMR">1.2.1 OpenCORE AMR</a></li>
|
||||
<li><a name="toc-VisualOn-AAC-encoder-library" href="#VisualOn-AAC-encoder-library">1.2.2 VisualOn AAC encoder library</a></li>
|
||||
<li><a name="toc-VisualOn-AMR_002dWB-encoder-library" href="#VisualOn-AMR_002dWB-encoder-library">1.2.3 VisualOn AMR-WB encoder library</a></li>
|
||||
<li><a name="toc-Fraunhofer-AAC-library" href="#Fraunhofer-AAC-library">1.2.4 Fraunhofer AAC library</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-LAME" href="#LAME">1.3 LAME</a></li>
|
||||
<li><a name="toc-TwoLAME" href="#TwoLAME">1.4 TwoLAME</a></li>
|
||||
<li><a name="toc-libvpx" href="#libvpx">1.5 libvpx</a></li>
|
||||
<li><a name="toc-libwavpack" href="#libwavpack">1.6 libwavpack</a></li>
|
||||
<li><a name="toc-OpenH264" href="#OpenH264">1.7 OpenH264</a></li>
|
||||
<li><a name="toc-x264" href="#x264">1.8 x264</a></li>
|
||||
<li><a name="toc-x265" href="#x265">1.9 x265</a></li>
|
||||
<li><a name="toc-libilbc" href="#libilbc">1.10 libilbc</a></li>
|
||||
<li><a name="toc-libzvbi" href="#libzvbi">1.11 libzvbi</a></li>
|
||||
<li><a name="toc-AviSynth" href="#AviSynth">1.12 AviSynth</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Supported-File-Formats_002c-Codecs-or-Features" href="#Supported-File-Formats_002c-Codecs-or-Features">2 Supported File Formats, Codecs or Features</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-File-Formats" href="#File-Formats">2.1 File Formats</a></li>
|
||||
<li><a name="toc-Image-Formats" href="#Image-Formats">2.2 Image Formats</a></li>
|
||||
<li><a name="toc-Video-Codecs" href="#Video-Codecs">2.3 Video Codecs</a></li>
|
||||
<li><a name="toc-Audio-Codecs" href="#Audio-Codecs">2.4 Audio Codecs</a></li>
|
||||
<li><a name="toc-Subtitle-Formats" href="#Subtitle-Formats">2.5 Subtitle Formats</a></li>
|
||||
<li><a name="toc-Network-Protocols" href="#Network-Protocols">2.6 Network Protocols</a></li>
|
||||
<li><a name="toc-Input_002fOutput-Devices" href="#Input_002fOutput-Devices">2.7 Input/Output Devices</a></li>
|
||||
<li><a name="toc-Timecode" href="#Timecode">2.8 Timecode</a></li>
|
||||
</ul></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="External-libraries"></a>
|
||||
<h2 class="chapter">1 External libraries<span class="pull-right"><a class="anchor hidden-xs" href="#External-libraries" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-External-libraries" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>FFmpeg can be hooked up with a number of external libraries to add support
for more formats. None of them are used by default; their use has to be
explicitly requested by passing the appropriate flags to
<code>./configure</code>.
|
||||
</p>
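<p>As a rough illustration (which flags make sense depends on the libraries
installed on the build machine and on the licensing notes below; <code>--enable-gpl</code>
is included here because of x264's license), a configure invocation enabling a
few of them could look like:
</p><div class="example">
<pre class="example"># illustrative selection of external libraries
./configure --enable-gpl --enable-libx264 --enable-libmp3lame --enable-libvpx
</pre></div>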
|
||||
<a name="OpenJPEG"></a>
|
||||
<h3 class="section">1.1 OpenJPEG<span class="pull-right"><a class="anchor hidden-xs" href="#OpenJPEG" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-OpenJPEG" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg can use the OpenJPEG libraries for encoding/decoding J2K videos. Go to
|
||||
<a href="http://www.openjpeg.org/">http://www.openjpeg.org/</a> to get the libraries and follow the installation
|
||||
instructions. To enable using OpenJPEG in FFmpeg, pass <code>--enable-libopenjpeg</code> to
|
||||
<samp>./configure</samp>.
|
||||
</p>
|
||||
|
||||
<a name="OpenCORE_002c-VisualOn_002c-and-Fraunhofer-libraries"></a>
|
||||
<h3 class="section">1.2 OpenCORE, VisualOn, and Fraunhofer libraries<span class="pull-right"><a class="anchor hidden-xs" href="#OpenCORE_002c-VisualOn_002c-and-Fraunhofer-libraries" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-OpenCORE_002c-VisualOn_002c-and-Fraunhofer-libraries" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Spun off from the Google Android sources, the OpenCORE, VisualOn and Fraunhofer
libraries provide encoders for a number of audio codecs.
|
||||
</p>
|
||||
<div class="info">
|
||||
<p>OpenCORE and VisualOn libraries are under the Apache License 2.0
|
||||
(see <a href="http://www.apache.org/licenses/LICENSE-2.0">http://www.apache.org/licenses/LICENSE-2.0</a> for details), which is
|
||||
incompatible with the LGPL version 2.1 and GPL version 2. You have to
|
||||
upgrade FFmpeg’s license to LGPL version 3 (or if you have enabled
|
||||
GPL components, GPL version 3) by passing <code>--enable-version3</code> to configure in
|
||||
order to use it.
|
||||
</p>
|
||||
<p>The Fraunhofer AAC library is licensed under a license incompatible with the GPL
and is not known to be compatible with the LGPL. Therefore, you have to pass
|
||||
<code>--enable-nonfree</code> to configure to use it.
|
||||
</p></div>
|
||||
<a name="OpenCORE-AMR"></a>
|
||||
<h4 class="subsection">1.2.1 OpenCORE AMR<span class="pull-right"><a class="anchor hidden-xs" href="#OpenCORE-AMR" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-OpenCORE-AMR" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>FFmpeg can make use of the OpenCORE libraries for AMR-NB
|
||||
decoding/encoding and AMR-WB decoding.
|
||||
</p>
|
||||
<p>Go to <a href="http://sourceforge.net/projects/opencore-amr/">http://sourceforge.net/projects/opencore-amr/</a> and follow the
|
||||
instructions for installing the libraries.
|
||||
Then pass <code>--enable-libopencore-amrnb</code> and/or
|
||||
<code>--enable-libopencore-amrwb</code> to configure to enable them.
|
||||
</p>
|
||||
<a name="VisualOn-AAC-encoder-library"></a>
|
||||
<h4 class="subsection">1.2.2 VisualOn AAC encoder library<span class="pull-right"><a class="anchor hidden-xs" href="#VisualOn-AAC-encoder-library" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-VisualOn-AAC-encoder-library" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>FFmpeg can make use of the VisualOn AACenc library for AAC encoding.
|
||||
</p>
|
||||
<p>Go to <a href="http://sourceforge.net/projects/opencore-amr/">http://sourceforge.net/projects/opencore-amr/</a> and follow the
|
||||
instructions for installing the library.
|
||||
Then pass <code>--enable-libvo-aacenc</code> to configure to enable it.
|
||||
</p>
|
||||
<a name="VisualOn-AMR_002dWB-encoder-library"></a>
|
||||
<h4 class="subsection">1.2.3 VisualOn AMR-WB encoder library<span class="pull-right"><a class="anchor hidden-xs" href="#VisualOn-AMR_002dWB-encoder-library" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-VisualOn-AMR_002dWB-encoder-library" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>FFmpeg can make use of the VisualOn AMR-WBenc library for AMR-WB encoding.
|
||||
</p>
|
||||
<p>Go to <a href="http://sourceforge.net/projects/opencore-amr/">http://sourceforge.net/projects/opencore-amr/</a> and follow the
|
||||
instructions for installing the library.
|
||||
Then pass <code>--enable-libvo-amrwbenc</code> to configure to enable it.
|
||||
</p>
|
||||
<a name="Fraunhofer-AAC-library"></a>
|
||||
<h4 class="subsection">1.2.4 Fraunhofer AAC library<span class="pull-right"><a class="anchor hidden-xs" href="#Fraunhofer-AAC-library" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Fraunhofer-AAC-library" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>FFmpeg can make use of the Fraunhofer AAC library for AAC encoding.
|
||||
</p>
|
||||
<p>Go to <a href="http://sourceforge.net/projects/opencore-amr/">http://sourceforge.net/projects/opencore-amr/</a> and follow the
|
||||
instructions for installing the library.
|
||||
Then pass <code>--enable-libfdk-aac</code> to configure to enable it.
|
||||
</p>
|
||||
<a name="LAME"></a>
|
||||
<h3 class="section">1.3 LAME<span class="pull-right"><a class="anchor hidden-xs" href="#LAME" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-LAME" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg can make use of the LAME library for MP3 encoding.
|
||||
</p>
|
||||
<p>Go to <a href="http://lame.sourceforge.net/">http://lame.sourceforge.net/</a> and follow the
|
||||
instructions for installing the library.
|
||||
Then pass <code>--enable-libmp3lame</code> to configure to enable it.
|
||||
</p>
|
||||
<a name="TwoLAME"></a>
|
||||
<h3 class="section">1.4 TwoLAME<span class="pull-right"><a class="anchor hidden-xs" href="#TwoLAME" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-TwoLAME" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg can make use of the TwoLAME library for MP2 encoding.
|
||||
</p>
|
||||
<p>Go to <a href="http://www.twolame.org/">http://www.twolame.org/</a> and follow the
|
||||
instructions for installing the library.
|
||||
Then pass <code>--enable-libtwolame</code> to configure to enable it.
|
||||
</p>
|
||||
<a name="libvpx"></a>
|
||||
<h3 class="section">1.5 libvpx<span class="pull-right"><a class="anchor hidden-xs" href="#libvpx" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-libvpx" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg can make use of the libvpx library for VP8/VP9 encoding.
|
||||
</p>
|
||||
<p>Go to <a href="http://www.webmproject.org/">http://www.webmproject.org/</a> and follow the instructions for
|
||||
installing the library. Then pass <code>--enable-libvpx</code> to configure to
|
||||
enable it.
|
||||
</p>
|
||||
<a name="libwavpack"></a>
|
||||
<h3 class="section">1.6 libwavpack<span class="pull-right"><a class="anchor hidden-xs" href="#libwavpack" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-libwavpack" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg can make use of the libwavpack library for WavPack encoding.
|
||||
</p>
|
||||
<p>Go to <a href="http://www.wavpack.com/">http://www.wavpack.com/</a> and follow the instructions for
|
||||
installing the library. Then pass <code>--enable-libwavpack</code> to configure to
|
||||
enable it.
|
||||
</p>
|
||||
<a name="OpenH264"></a>
|
||||
<h3 class="section">1.7 OpenH264<span class="pull-right"><a class="anchor hidden-xs" href="#OpenH264" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-OpenH264" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg can make use of the OpenH264 library for H.264 encoding.
|
||||
</p>
|
||||
<p>Go to <a href="http://www.openh264.org/">http://www.openh264.org/</a> and follow the instructions for
|
||||
installing the library. Then pass <code>--enable-libopenh264</code> to configure to
|
||||
enable it.
|
||||
</p>
|
||||
<a name="x264"></a>
|
||||
<h3 class="section">1.8 x264<span class="pull-right"><a class="anchor hidden-xs" href="#x264" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-x264" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg can make use of the x264 library for H.264 encoding.
|
||||
</p>
|
||||
<p>Go to <a href="http://www.videolan.org/developers/x264.html">http://www.videolan.org/developers/x264.html</a> and follow the
|
||||
instructions for installing the library. Then pass <code>--enable-libx264</code> to
|
||||
configure to enable it.
|
||||
</p>
|
||||
<div class="info">
|
||||
<p>x264 is under the GNU General Public License Version 2 or later
(see <a href="http://www.gnu.org/licenses/old-licenses/gpl-2.0.html">http://www.gnu.org/licenses/old-licenses/gpl-2.0.html</a> for
details); you must upgrade FFmpeg’s license to GPL in order to use it.
|
||||
</p></div>
|
||||
<a name="x265"></a>
|
||||
<h3 class="section">1.9 x265<span class="pull-right"><a class="anchor hidden-xs" href="#x265" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-x265" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg can make use of the x265 library for HEVC encoding.
|
||||
</p>
|
||||
<p>Go to <a href="http://x265.org/developers.html">http://x265.org/developers.html</a> and follow the instructions
|
||||
for installing the library. Then pass <code>--enable-libx265</code> to configure
|
||||
to enable it.
|
||||
</p>
|
||||
<div class="info">
|
||||
<p>x265 is under the GNU General Public License Version 2 or later
(see <a href="http://www.gnu.org/licenses/old-licenses/gpl-2.0.html">http://www.gnu.org/licenses/old-licenses/gpl-2.0.html</a> for
details); you must upgrade FFmpeg’s license to GPL in order to use it.
|
||||
</p></div>
|
||||
<a name="libilbc"></a>
|
||||
<h3 class="section">1.10 libilbc<span class="pull-right"><a class="anchor hidden-xs" href="#libilbc" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-libilbc" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>iLBC is a narrowband speech codec that has been made freely available
|
||||
by Google as part of the WebRTC project. libilbc is a packaging-friendly
|
||||
copy of the iLBC codec. FFmpeg can make use of the libilbc library for
|
||||
iLBC encoding and decoding.
|
||||
</p>
|
||||
<p>Go to <a href="https://github.com/TimothyGu/libilbc">https://github.com/TimothyGu/libilbc</a> and follow the instructions for
|
||||
installing the library. Then pass <code>--enable-libilbc</code> to configure to
|
||||
enable it.
|
||||
</p>
|
||||
<a name="libzvbi"></a>
|
||||
<h3 class="section">1.11 libzvbi<span class="pull-right"><a class="anchor hidden-xs" href="#libzvbi" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-libzvbi" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>libzvbi is a VBI decoding library which can be used by FFmpeg to decode DVB
|
||||
teletext pages and DVB teletext subtitles.
|
||||
</p>
|
||||
<p>Go to <a href="http://sourceforge.net/projects/zapping/">http://sourceforge.net/projects/zapping/</a> and follow the instructions for
|
||||
installing the library. Then pass <code>--enable-libzvbi</code> to configure to
|
||||
enable it.
|
||||
</p>
|
||||
<div class="info">
|
||||
<p>libzvbi is licensed under the GNU General Public License Version 2 or later
|
||||
(see <a href="http://www.gnu.org/licenses/old-licenses/gpl-2.0.html">http://www.gnu.org/licenses/old-licenses/gpl-2.0.html</a> for details),
|
||||
you must upgrade FFmpeg’s license to GPL in order to use it.
|
||||
</p></div>
|
||||
<a name="AviSynth"></a>
|
||||
<h3 class="section">1.12 AviSynth<span class="pull-right"><a class="anchor hidden-xs" href="#AviSynth" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-AviSynth" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg can read AviSynth scripts as input. To enable support, pass
|
||||
<code>--enable-avisynth</code> to configure. The correct headers are
|
||||
included in compat/avisynth/, which allows the user to enable support
|
||||
without needing to search for these headers themselves.
|
||||
</p>
|
||||
<p>For Windows, supported AviSynth variants are
|
||||
<a href="http://avisynth.nl">AviSynth 2.5 or 2.6</a> for 32-bit builds and
|
||||
<a href="http://avs-plus.net">AviSynth+ 0.1</a> for 32-bit and 64-bit builds.
|
||||
</p>
|
||||
<p>For Linux and OS X, the supported AviSynth variant is
|
||||
<a href="https://github.com/avxsynth/avxsynth">AvxSynth</a>.
|
||||
</p>
|
||||
<div class="info">
|
||||
<p>AviSynth and AvxSynth are loaded dynamically. Distributors can build FFmpeg
|
||||
with <code>--enable-avisynth</code>, and the binaries will work regardless of whether the
end user has AviSynth or AvxSynth installed; these libraries only need to be
installed in order to use AviSynth scripts.
|
||||
</p></div>
|
||||
|
||||
<a name="Supported-File-Formats_002c-Codecs-or-Features"></a>
|
||||
<h2 class="chapter">2 Supported File Formats, Codecs or Features<span class="pull-right"><a class="anchor hidden-xs" href="#Supported-File-Formats_002c-Codecs-or-Features" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Supported-File-Formats_002c-Codecs-or-Features" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>You can use the <code>-formats</code> and <code>-codecs</code> options to have an exhaustive list.
|
||||
</p>
|
||||
<a name="File-Formats"></a>
|
||||
<h3 class="section">2.1 File Formats<span class="pull-right"><a class="anchor hidden-xs" href="#File-Formats" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-File-Formats" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg supports the following file formats through the <code>libavformat</code>
|
||||
library:
|
||||
</p>
|
||||
<table>
|
||||
<tr><td width="40%">Name</td><td width="10%">Encoding</td><td width="10%">Decoding</td><td width="40%">Comments</td></tr>
|
||||
<tr><td width="40%">4xm</td><td width="10%"></td><td width="10%">X</td><td width="40%">4X Technologies format, used in some games.</td></tr>
|
||||
<tr><td width="40%">8088flex TMV</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ACT Voice</td><td width="10%"></td><td width="10%">X</td><td width="40%">contains G.729 audio</td></tr>
|
||||
<tr><td width="40%">Adobe Filmstrip</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Audio IFF (AIFF)</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">American Laser Games MM</td><td width="10%"></td><td width="10%">X</td><td width="40%">Multimedia format used in games like Mad Dog McCree.</td></tr>
|
||||
<tr><td width="40%">3GPP AMR</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Amazing Studio Packed Animation File</td><td width="10%"></td><td width="10%">X</td><td width="40%">Multimedia format used in game Heart Of Darkness.</td></tr>
|
||||
<tr><td width="40%">Apple HTTP Live Streaming</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Artworx Data Format</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADP</td><td width="10%"></td><td width="10%">X</td><td width="40%">Audio format used on the Nintendo Gamecube.</td></tr>
|
||||
<tr><td width="40%">AFC</td><td width="10%"></td><td width="10%">X</td><td width="40%">Audio format used on the Nintendo Gamecube.</td></tr>
|
||||
<tr><td width="40%">ASF</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">AST</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Audio format used on the Nintendo Wii.</td></tr>
|
||||
<tr><td width="40%">AVI</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">AviSynth</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">AVR</td><td width="10%"></td><td width="10%">X</td><td width="40%">Audio format used on Mac.</td></tr>
|
||||
<tr><td width="40%">AVS</td><td width="10%"></td><td width="10%">X</td><td width="40%">Multimedia format used by the Creature Shock game.</td></tr>
|
||||
<tr><td width="40%">Beam Software SIFF</td><td width="10%"></td><td width="10%">X</td><td width="40%">Audio and video format used in some games by Beam Software.</td></tr>
|
||||
<tr><td width="40%">Bethesda Softworks VID</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in some games from Bethesda Softworks.</td></tr>
|
||||
<tr><td width="40%">Binary text</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Bink</td><td width="10%"></td><td width="10%">X</td><td width="40%">Multimedia format used by many games.</td></tr>
|
||||
<tr><td width="40%">Bitmap Brothers JV</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Z and Z95 games.</td></tr>
|
||||
<tr><td width="40%">Brute Force & Ignorance</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in the game Flash Traffic: City of Angels.</td></tr>
|
||||
<tr><td width="40%">BRSTM</td><td width="10%"></td><td width="10%">X</td><td width="40%">Audio format used on the Nintendo Wii.</td></tr>
|
||||
<tr><td width="40%">BWF</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">CRI ADX</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Audio-only format used in console video games.</td></tr>
|
||||
<tr><td width="40%">Discworld II BMV</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Interplay C93</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in the game Cyberia from Interplay.</td></tr>
|
||||
<tr><td width="40%">Delphine Software International CIN</td><td width="10%"></td><td width="10%">X</td><td width="40%">Multimedia format used by Delphine Software games.</td></tr>
|
||||
<tr><td width="40%">CD+G</td><td width="10%"></td><td width="10%">X</td><td width="40%">Video format used by CD+G karaoke disks</td></tr>
|
||||
<tr><td width="40%">Phantom Cine</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Commodore CDXL</td><td width="10%"></td><td width="10%">X</td><td width="40%">Amiga CD video format</td></tr>
|
||||
<tr><td width="40%">Core Audio Format</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Apple Core Audio Format</td></tr>
|
||||
<tr><td width="40%">CRC testing format</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">Creative Voice</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Created for the Sound Blaster Pro.</td></tr>
|
||||
<tr><td width="40%">CRYO APC</td><td width="10%"></td><td width="10%">X</td><td width="40%">Audio format used in some games by CRYO Interactive Entertainment.</td></tr>
|
||||
<tr><td width="40%">D-Cinema audio</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Deluxe Paint Animation</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">DFA</td><td width="10%"></td><td width="10%">X</td><td width="40%">This format is used in Chronomaster game</td></tr>
|
||||
<tr><td width="40%">DSD Stream File (DSF)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">DV video</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">DXA</td><td width="10%"></td><td width="10%">X</td><td width="40%">This format is used in the non-Windows version of the Feeble Files
|
||||
game and different game cutscenes repacked for use with ScummVM.</td></tr>
|
||||
<tr><td width="40%">Electronic Arts cdata</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Electronic Arts Multimedia</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in various EA games; files have extensions like WVE and UV2.</td></tr>
|
||||
<tr><td width="40%">Ensoniq Paris Audio File</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">FFM (FFserver live feed)</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Flash (SWF)</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Flash 9 (AVM2)</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Only embedded audio is decoded.</td></tr>
|
||||
<tr><td width="40%">FLI/FLC/FLX animation</td><td width="10%"></td><td width="10%">X</td><td width="40%">.fli/.flc files</td></tr>
|
||||
<tr><td width="40%">Flash Video (FLV)</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Macromedia Flash video files</td></tr>
|
||||
<tr><td width="40%">framecrc testing format</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">FunCom ISS</td><td width="10%"></td><td width="10%">X</td><td width="40%">Audio format used in various games from FunCom like The Longest Journey.</td></tr>
|
||||
<tr><td width="40%">G.723.1</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">G.729 BIT</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">G.729 raw</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">GIF Animation</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">GXF</td><td width="10%">X</td><td width="10%">X</td><td width="40%">General eXchange Format SMPTE 360M, used by Thomson Grass Valley
|
||||
playout servers.</td></tr>
|
||||
<tr><td width="40%">HNM</td><td width="10%"></td><td width="10%">X</td><td width="40%">Only version 4 supported, used in some games from Cryo Interactive</td></tr>
|
||||
<tr><td width="40%">iCEDraw File</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ICO</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Microsoft Windows ICO</td></tr>
|
||||
<tr><td width="40%">id Quake II CIN video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">id RoQ</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Used in Quake III, Jedi Knight 2 and other computer games.</td></tr>
|
||||
<tr><td width="40%">IEC61937 encapsulation</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">IFF</td><td width="10%"></td><td width="10%">X</td><td width="40%">Interchange File Format</td></tr>
|
||||
<tr><td width="40%">iLBC</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Interplay MVE</td><td width="10%"></td><td width="10%">X</td><td width="40%">Format used in various Interplay computer games.</td></tr>
|
||||
<tr><td width="40%">IV8</td><td width="10%"></td><td width="10%">X</td><td width="40%">A format generated by IndigoVision 8000 video server.</td></tr>
|
||||
<tr><td width="40%">IVF (On2)</td><td width="10%">X</td><td width="10%">X</td><td width="40%">A format used by libvpx</td></tr>
|
||||
<tr><td width="40%">IRCAM</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">LATM</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">LMLM4</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used by Linux Media Labs MPEG-4 PCI boards</td></tr>
|
||||
<tr><td width="40%">LOAS</td><td width="10%"></td><td width="10%">X</td><td width="40%">contains LATM multiplexed AAC audio</td></tr>
|
||||
<tr><td width="40%">LRC</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">LVF</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">LXF</td><td width="10%"></td><td width="10%">X</td><td width="40%">VR native stream format, used by Leitch/Harris’ video servers.</td></tr>
|
||||
<tr><td width="40%">Magic Lantern Video (MLV)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Matroska</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Matroska audio</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">FFmpeg metadata</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Metadata in text format.</td></tr>
|
||||
<tr><td width="40%">MAXIS XA</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Sim City 3000; file extension .xa.</td></tr>
|
||||
<tr><td width="40%">MD Studio</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Metal Gear Solid: The Twin Snakes</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Megalux Frame</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used by Megalux Ultimate Paint</td></tr>
|
||||
<tr><td width="40%">Mobotix .mxg</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Monkey’s Audio</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Motion Pixels MVI</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MOV/QuickTime/MP4</td><td width="10%">X</td><td width="10%">X</td><td width="40%">3GP, 3GP2, PSP, iPod variants supported</td></tr>
|
||||
<tr><td width="40%">MP2</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MP3</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MPEG-1 System</td><td width="10%">X</td><td width="10%">X</td><td width="40%">muxed audio and video, VCD format supported</td></tr>
|
||||
<tr><td width="40%">MPEG-PS (program stream)</td><td width="10%">X</td><td width="10%">X</td><td width="40%">also known as <code>VOB</code> file, SVCD and DVD format supported</td></tr>
|
||||
<tr><td width="40%">MPEG-TS (transport stream)</td><td width="10%">X</td><td width="10%">X</td><td width="40%">also known as DVB Transport Stream</td></tr>
|
||||
<tr><td width="40%">MPEG-4</td><td width="10%">X</td><td width="10%">X</td><td width="40%">MPEG-4 is a variant of QuickTime.</td></tr>
|
||||
<tr><td width="40%">Mirillis FIC video</td><td width="10%"></td><td width="10%">X</td><td width="40%">No cursor rendering.</td></tr>
|
||||
<tr><td width="40%">MIME multipart JPEG</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">MSN TCP webcam</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used by MSN Messenger webcam streams.</td></tr>
|
||||
<tr><td width="40%">MTV</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Musepack</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Musepack SV8</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Material eXchange Format (MXF)</td><td width="10%">X</td><td width="10%">X</td><td width="40%">SMPTE 377M, used by D-Cinema, broadcast industry.</td></tr>
|
||||
<tr><td width="40%">Material eXchange Format (MXF), D-10 Mapping</td><td width="10%">X</td><td width="10%">X</td><td width="40%">SMPTE 386M, D-10/IMX Mapping.</td></tr>
|
||||
<tr><td width="40%">NC camera feed</td><td width="10%"></td><td width="10%">X</td><td width="40%">NC (AVIP NC4600) camera streams</td></tr>
|
||||
<tr><td width="40%">NIST SPeech HEader REsources</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">NTT TwinVQ (VQF)</td><td width="10%"></td><td width="10%">X</td><td width="40%">Nippon Telegraph and Telephone Corporation TwinVQ.</td></tr>
|
||||
<tr><td width="40%">Nullsoft Streaming Video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">NuppelVideo</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">NUT</td><td width="10%">X</td><td width="10%">X</td><td width="40%">NUT Open Container Format</td></tr>
|
||||
<tr><td width="40%">Ogg</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Playstation Portable PMP</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Portable Voice Format</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">TechnoTrend PVA</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used by TechnoTrend DVB PCI boards.</td></tr>
|
||||
<tr><td width="40%">QCP</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw ADTS (AAC)</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw AC-3</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw Chinese AVS video</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw CRI ADX</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw Dirac</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw DNxHD</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw DTS</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw DTS-HD</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw E-AC-3</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw FLAC</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw GSM</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw H.261</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw H.263</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw H.264</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw HEVC</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw Ingenient MJPEG</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw MJPEG</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw MLP</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw MPEG</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw MPEG-1</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw MPEG-2</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw MPEG-4</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw NULL</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">raw video</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw id RoQ</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">raw Shorten</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw TAK</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw TrueHD</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw VC-1</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM A-law</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM mu-law</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM signed 8 bit</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM signed 16 bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM signed 16 bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM signed 24 bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM signed 24 bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM signed 32 bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM signed 32 bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM unsigned 8 bit</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM unsigned 16 bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM unsigned 16 bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM unsigned 24 bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM unsigned 24 bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM unsigned 32 bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM unsigned 32 bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM floating-point 32 bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM floating-point 32 bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM floating-point 64 bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">raw PCM floating-point 64 bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RDT</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">REDCODE R3D</td><td width="10%"></td><td width="10%">X</td><td width="40%">File format used by RED Digital cameras, contains JPEG 2000 frames and PCM audio.</td></tr>
|
||||
<tr><td width="40%">RealMedia</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Redirector</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RedSpark</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Renderware TeXture Dictionary</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RL2</td><td width="10%"></td><td width="10%">X</td><td width="40%">Audio and video format used in some games by Entertainment Software Partners.</td></tr>
|
||||
<tr><td width="40%">RPL/ARMovie</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Lego Mindstorms RSO</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RSD</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RTMP</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Output is performed by publishing stream to RTMP server</td></tr>
|
||||
<tr><td width="40%">RTP</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RTSP</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">SAP</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">SBG</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">SDP</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Sega FILM/CPK</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in many Sega Saturn console games.</td></tr>
|
||||
<tr><td width="40%">Silicon Graphics Movie</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Sierra SOL</td><td width="10%"></td><td width="10%">X</td><td width="40%">.sol files used in Sierra Online games.</td></tr>
|
||||
<tr><td width="40%">Sierra VMD</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Sierra CD-ROM games.</td></tr>
|
||||
<tr><td width="40%">Smacker</td><td width="10%"></td><td width="10%">X</td><td width="40%">Multimedia format used by many games.</td></tr>
|
||||
<tr><td width="40%">SMJPEG</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Used in certain Loki game ports.</td></tr>
|
||||
<tr><td width="40%">Smush</td><td width="10%"></td><td width="10%">X</td><td width="40%">Multimedia format used in some LucasArts games.</td></tr>
|
||||
<tr><td width="40%">Sony OpenMG (OMA)</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Audio format used in Sony Sonic Stage and Sony Vegas.</td></tr>
|
||||
<tr><td width="40%">Sony PlayStation STR</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Sony Wave64 (W64)</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">SoX native format</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">SUN AU format</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">SUP raw PGS subtitles</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Text files</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">THP</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used on the Nintendo GameCube.</td></tr>
|
||||
<tr><td width="40%">Tiertex Limited SEQ</td><td width="10%"></td><td width="10%">X</td><td width="40%">Tiertex .seq files used in the DOS CD-ROM version of the game Flashback.</td></tr>
|
||||
<tr><td width="40%">True Audio</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">VC-1 test bitstream</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Vivo</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">WAV</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">WavPack</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">WebM</td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">Windows Television (WTV)</td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">Wing Commander III movie</td><td width="10%"></td><td width="10%">X</td><td width="40%">Multimedia format used in Origin’s Wing Commander III computer game.</td></tr>
|
||||
<tr><td width="40%">Westwood Studios audio</td><td width="10%"></td><td width="10%">X</td><td width="40%">Multimedia format used in Westwood Studios games.</td></tr>
|
||||
<tr><td width="40%">Westwood Studios VQA</td><td width="10%"></td><td width="10%">X</td><td width="40%">Multimedia format used in Westwood Studios games.</td></tr>
|
||||
<tr><td width="40%">XMV</td><td width="10%"></td><td width="10%">X</td><td width="40%">Microsoft video container used in Xbox games.</td></tr>
|
||||
<tr><td width="40%">xWMA</td><td width="10%"></td><td width="10%">X</td><td width="40%">Microsoft audio container used by XAudio 2.</td></tr>
|
||||
<tr><td width="40%">eXtended BINary text (XBIN)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">YUV4MPEG pipe</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Psygnosis YOP</td><td width="10%"></td><td width="10%">X</td></tr>
</table>

<p><code>X</code> means that encoding (resp. decoding) is supported.
</p>
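<p>Which of these formats are actually available depends on how the FFmpeg
build was configured. A quick way to check a particular binary, shown here
only as an illustration, is to query it directly:
</p>
<pre>
ffmpeg -formats            # list all muxers and demuxers in this build
ffmpeg -muxers             # muxing support only
ffmpeg -demuxers           # demuxing support only
ffmpeg -h demuxer=mov      # show the private options of one demuxer
</pre>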
<a name="Image-Formats"></a>
<h3 class="section">2.2 Image Formats<span class="pull-right"><a class="anchor hidden-xs" href="#Image-Formats" aria-hidden="true">#</a> <a class="anchor hidden-xs" href="#toc-Image-Formats" aria-hidden="true">TOC</a></span></h3>

<p>FFmpeg can read and write images for each frame of a video sequence. The
following image formats are supported:
</p>

<table>
<tr><td width="40%">Name</td><td width="10%">Encoding</td><td width="10%">Decoding</td><td width="40%">Comments</td></tr>
<tr><td width="40%">.Y.U.V</td><td width="10%">X</td><td width="10%">X</td><td width="40%">one raw file per component</td></tr>
<tr><td width="40%">Alias PIX</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Alias/Wavefront PIX image format</td></tr>
<tr><td width="40%">animated GIF</td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">BMP</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Microsoft BMP image</td></tr>
<tr><td width="40%">BRender PIX</td><td width="10%"></td><td width="10%">X</td><td width="40%">Argonaut BRender 3D engine image format.</td></tr>
<tr><td width="40%">DPX</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Digital Picture Exchange</td></tr>
<tr><td width="40%">EXR</td><td width="10%"></td><td width="10%">X</td><td width="40%">OpenEXR</td></tr>
<tr><td width="40%">JPEG</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Progressive JPEG is not supported.</td></tr>
<tr><td width="40%">JPEG 2000</td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">JPEG-LS</td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">LJPEG</td><td width="10%">X</td><td width="10%"></td><td width="40%">Lossless JPEG</td></tr>
<tr><td width="40%">PAM</td><td width="10%">X</td><td width="10%">X</td><td width="40%">PAM is a PNM extension with alpha support.</td></tr>
<tr><td width="40%">PBM</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Portable BitMap image</td></tr>
<tr><td width="40%">PCX</td><td width="10%">X</td><td width="10%">X</td><td width="40%">PC Paintbrush</td></tr>
<tr><td width="40%">PGM</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Portable GrayMap image</td></tr>
<tr><td width="40%">PGMYUV</td><td width="10%">X</td><td width="10%">X</td><td width="40%">PGM with U and V components in YUV 4:2:0</td></tr>
<tr><td width="40%">PIC</td><td width="10%"></td><td width="10%">X</td><td width="40%">Pictor/PC Paint</td></tr>
<tr><td width="40%">PNG</td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">PPM</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Portable PixelMap image</td></tr>
<tr><td width="40%">PTX</td><td width="10%"></td><td width="10%">X</td><td width="40%">V.Flash PTX format</td></tr>
<tr><td width="40%">SGI</td><td width="10%">X</td><td width="10%">X</td><td width="40%">SGI RGB image format</td></tr>
<tr><td width="40%">Sun Rasterfile</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Sun RAS image format</td></tr>
<tr><td width="40%">TIFF</td><td width="10%">X</td><td width="10%">X</td><td width="40%">YUV, JPEG and some extensions are not supported yet.</td></tr>
<tr><td width="40%">Truevision Targa</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Targa (.TGA) image format</td></tr>
<tr><td width="40%">WebP</td><td width="10%">E</td><td width="10%">X</td><td width="40%">WebP image format, encoding supported through external library libwebp</td></tr>
<tr><td width="40%">XBM</td><td width="10%">X</td><td width="10%">X</td><td width="40%">X BitMap image format</td></tr>
<tr><td width="40%">XFace</td><td width="10%">X</td><td width="10%">X</td><td width="40%">X-Face image format</td></tr>
<tr><td width="40%">XWD</td><td width="10%">X</td><td width="10%">X</td><td width="40%">X Window Dump image format</td></tr>
</table>

<p><code>X</code> means that encoding (resp. decoding) is supported.
</p>
<p><code>E</code> means that support is provided through an external library.
</p>
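<p>Reading and writing sequences of such images is typically handled by the
<code>image2</code> demuxer and muxer. As an illustration (file names are
placeholders), a video can be dumped to one image per frame, or rebuilt from
such a sequence, with commands along these lines:
</p>
<pre>
ffmpeg -i input.avi frame%04d.png                 # write one PNG per frame
ffmpeg -framerate 25 -i frame%04d.png output.avi  # read the sequence back as video
</pre>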
<a name="Video-Codecs"></a>
<h3 class="section">2.3 Video Codecs<span class="pull-right"><a class="anchor hidden-xs" href="#Video-Codecs" aria-hidden="true">#</a> <a class="anchor hidden-xs" href="#toc-Video-Codecs" aria-hidden="true">TOC</a></span></h3>

<table>
<tr><td width="40%">Name</td><td width="10%">Encoding</td><td width="10%">Decoding</td><td width="40%">Comments</td></tr>
<tr><td width="40%">4X Movie</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in certain computer games.</td></tr>
|
||||
<tr><td width="40%">8088flex TMV</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">A64 multicolor</td><td width="10%">X</td><td width="10%"></td><td width="40%">Creates video suitable to be played on a Commodore 64 (multicolor mode).</td></tr>
<tr><td width="40%">Amazing Studio PAF Video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">American Laser Games MM</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in games like Mad Dog McCree.</td></tr>
|
||||
<tr><td width="40%">AMV Video</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Used in Chinese MP3 players.</td></tr>
|
||||
<tr><td width="40%">ANSI/ASCII art</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Apple Intermediate Codec</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Apple MJPEG-B</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Apple ProRes</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Apple QuickDraw</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: qdrw</td></tr>
|
||||
<tr><td width="40%">Asus v1</td><td width="10%">X</td><td width="10%">X</td><td width="40%">fourcc: ASV1</td></tr>
|
||||
<tr><td width="40%">Asus v2</td><td width="10%">X</td><td width="10%">X</td><td width="40%">fourcc: ASV2</td></tr>
|
||||
<tr><td width="40%">ATI VCR1</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: VCR1</td></tr>
|
||||
<tr><td width="40%">ATI VCR2</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: VCR2</td></tr>
|
||||
<tr><td width="40%">Auravision Aura</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Auravision Aura 2</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Autodesk Animator Flic video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Autodesk RLE</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: AASC</td></tr>
|
||||
<tr><td width="40%">Avid 1:1 10-bit RGB Packer</td><td width="10%">X</td><td width="10%">X</td><td width="40%">fourcc: AVrp</td></tr>
|
||||
<tr><td width="40%">AVS (Audio Video Standard) video</td><td width="10%"></td><td width="10%">X</td><td width="40%">Video encoding used by the Creature Shock game.</td></tr>
|
||||
<tr><td width="40%">AYUV</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Microsoft uncompressed packed 4:4:4:4</td></tr>
|
||||
<tr><td width="40%">Beam Software VB</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Bethesda VID video</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in some games from Bethesda Softworks.</td></tr>
|
||||
<tr><td width="40%">Bink Video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Bitmap Brothers JV video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">y41p Brooktree uncompressed 4:1:1 12-bit</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Brute Force & Ignorance</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in the game Flash Traffic: City of Angels.</td></tr>
|
||||
<tr><td width="40%">C93 video</td><td width="10%"></td><td width="10%">X</td><td width="40%">Codec used in Cyberia game.</td></tr>
|
||||
<tr><td width="40%">CamStudio</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: CSCD</td></tr>
|
||||
<tr><td width="40%">CD+G</td><td width="10%"></td><td width="10%">X</td><td width="40%">Video codec for CD+G karaoke disks</td></tr>
|
||||
<tr><td width="40%">CDXL</td><td width="10%"></td><td width="10%">X</td><td width="40%">Amiga CD video codec</td></tr>
|
||||
<tr><td width="40%">Chinese AVS video</td><td width="10%">E</td><td width="10%">X</td><td width="40%">AVS1-P2, JiZhun profile, encoding through external library libxavs</td></tr>
|
||||
<tr><td width="40%">Delphine Software International CIN video</td><td width="10%"></td><td width="10%">X</td><td width="40%">Codec used in Delphine Software International games.</td></tr>
|
||||
<tr><td width="40%">Discworld II BMV Video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Canopus Lossless Codec</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Cinepak</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Cirrus Logic AccuPak</td><td width="10%">X</td><td width="10%">X</td><td width="40%">fourcc: CLJR</td></tr>
|
||||
<tr><td width="40%">CPiA Video Format</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Creative YUV (CYUV)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">DFA</td><td width="10%"></td><td width="10%">X</td><td width="40%">Codec used in Chronomaster game.</td></tr>
|
||||
<tr><td width="40%">Dirac</td><td width="10%">E</td><td width="10%">X</td><td width="40%">supported through external library libschroedinger</td></tr>
|
||||
<tr><td width="40%">Deluxe Paint Animation</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">DNxHD</td><td width="10%">X</td><td width="10%">X</td><td width="40%">aka SMPTE VC3</td></tr>
|
||||
<tr><td width="40%">Duck TrueMotion 1.0</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: DUCK</td></tr>
|
||||
<tr><td width="40%">Duck TrueMotion 2.0</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: TM20</td></tr>
|
||||
<tr><td width="40%">DV (Digital Video)</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Dxtory capture format</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Feeble Files/ScummVM DXA</td><td width="10%"></td><td width="10%">X</td><td width="40%">Codec originally used in Feeble Files game.</td></tr>
|
||||
<tr><td width="40%">Electronic Arts CMV video</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in NHL 95 game.</td></tr>
|
||||
<tr><td width="40%">Electronic Arts Madcow video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Electronic Arts TGV video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Electronic Arts TGQ video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Electronic Arts TQI video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Escape 124</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Escape 130</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">FFmpeg video codec #1</td><td width="10%">X</td><td width="10%">X</td><td width="40%">lossless codec (fourcc: FFV1)</td></tr>
|
||||
<tr><td width="40%">Flash Screen Video v1</td><td width="10%">X</td><td width="10%">X</td><td width="40%">fourcc: FSV1</td></tr>
|
||||
<tr><td width="40%">Flash Screen Video v2</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Flash Video (FLV)</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Sorenson H.263 used in Flash</td></tr>
|
||||
<tr><td width="40%">Forward Uncompressed</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Fraps</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Go2Webinar</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: G2M4</td></tr>
|
||||
<tr><td width="40%">H.261</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">H.263 / H.263-1996</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">H.263+ / H.263-1998 / H.263 version 2</td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10</td><td width="10%">E</td><td width="10%">X</td><td width="40%">encoding supported through external libraries libx264 and OpenH264</td></tr>
<tr><td width="40%">HEVC</td><td width="10%">X</td><td width="10%">X</td><td width="40%">encoding supported through the external library libx265</td></tr>
|
||||
<tr><td width="40%">HNM version 4</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">HuffYUV</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">HuffYUV FFmpeg variant</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">IBM Ultimotion</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: ULTI</td></tr>
|
||||
<tr><td width="40%">id Cinematic video</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Quake II.</td></tr>
|
||||
<tr><td width="40%">id RoQ video</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Used in Quake III, Jedi Knight 2, other computer games.</td></tr>
|
||||
<tr><td width="40%">IFF ILBM</td><td width="10%"></td><td width="10%">X</td><td width="40%">IFF interleaved bitmap</td></tr>
|
||||
<tr><td width="40%">IFF ByteRun1</td><td width="10%"></td><td width="10%">X</td><td width="40%">IFF run length encoded bitmap</td></tr>
|
||||
<tr><td width="40%">Intel H.263</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Intel Indeo 2</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Intel Indeo 3</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Intel Indeo 4</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Intel Indeo 5</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Interplay C93</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in the game Cyberia from Interplay.</td></tr>
|
||||
<tr><td width="40%">Interplay MVE video</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Interplay .MVE files.</td></tr>
|
||||
<tr><td width="40%">J2K</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Karl Morton’s video codec</td><td width="10%"></td><td width="10%">X</td><td width="40%">Codec used in Worms games.</td></tr>
|
||||
<tr><td width="40%">Kega Game Video (KGV1)</td><td width="10%"></td><td width="10%">X</td><td width="40%">Kega emulator screen capture codec.</td></tr>
|
||||
<tr><td width="40%">Lagarith</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">LCL (LossLess Codec Library) MSZH</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">LCL (LossLess Codec Library) ZLIB</td><td width="10%">E</td><td width="10%">E</td></tr>
|
||||
<tr><td width="40%">LOCO</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">LucasArts SANM/Smush</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in LucasArts games / SMUSH animations.</td></tr>
|
||||
<tr><td width="40%">lossless MJPEG</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Microsoft ATC Screen</td><td width="10%"></td><td width="10%">X</td><td width="40%">Also known as Microsoft Screen 3.</td></tr>
|
||||
<tr><td width="40%">Microsoft Expression Encoder Screen</td><td width="10%"></td><td width="10%">X</td><td width="40%">Also known as Microsoft Titanium Screen 2.</td></tr>
|
||||
<tr><td width="40%">Microsoft RLE</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Microsoft Screen 1</td><td width="10%"></td><td width="10%">X</td><td width="40%">Also known as Windows Media Video V7 Screen.</td></tr>
|
||||
<tr><td width="40%">Microsoft Screen 2</td><td width="10%"></td><td width="10%">X</td><td width="40%">Also known as Windows Media Video V9 Screen.</td></tr>
|
||||
<tr><td width="40%">Microsoft Video 1</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Mimic</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in MSN Messenger Webcam streams.</td></tr>
|
||||
<tr><td width="40%">Miro VideoXL</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: VIXL</td></tr>
|
||||
<tr><td width="40%">MJPEG (Motion JPEG)</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Mobotix MxPEG video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Motion Pixels video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MPEG-1 video</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MPEG-2 video</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MPEG-4 part 2</td><td width="10%">X</td><td width="10%">X</td><td width="40%">libxvidcore can be used alternatively for encoding.</td></tr>
|
||||
<tr><td width="40%">MPEG-4 part 2 Microsoft variant version 1</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MPEG-4 part 2 Microsoft variant version 2</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MPEG-4 part 2 Microsoft variant version 3</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Nintendo Gamecube THP video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">NuppelVideo/RTjpeg</td><td width="10%"></td><td width="10%">X</td><td width="40%">Video encoding used in NuppelVideo files.</td></tr>
|
||||
<tr><td width="40%">On2 VP3</td><td width="10%"></td><td width="10%">X</td><td width="40%">still experimental</td></tr>
|
||||
<tr><td width="40%">On2 VP5</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: VP50</td></tr>
|
||||
<tr><td width="40%">On2 VP6</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: VP60,VP61,VP62</td></tr>
|
||||
<tr><td width="40%">On2 VP7</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: VP70,VP71</td></tr>
|
||||
<tr><td width="40%">VP8</td><td width="10%">E</td><td width="10%">X</td><td width="40%">fourcc: VP80, encoding supported through external library libvpx</td></tr>
|
||||
<tr><td width="40%">VP9</td><td width="10%">E</td><td width="10%">X</td><td width="40%">encoding supported through external library libvpx</td></tr>
|
||||
<tr><td width="40%">Pinnacle TARGA CineWave YUV16</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: Y216</td></tr>
|
||||
<tr><td width="40%">Prores</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: apch,apcn,apcs,apco</td></tr>
|
||||
<tr><td width="40%">Q-team QPEG</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourccs: QPEG, Q1.0, Q1.1</td></tr>
|
||||
<tr><td width="40%">QuickTime 8BPS video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">QuickTime Animation (RLE) video</td><td width="10%">X</td><td width="10%">X</td><td width="40%">fourcc: ’rle ’</td></tr>
|
||||
<tr><td width="40%">QuickTime Graphics (SMC)</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: ’smc ’</td></tr>
|
||||
<tr><td width="40%">QuickTime video (RPZA)</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: rpza</td></tr>
|
||||
<tr><td width="40%">R10K AJA Kona 10-bit RGB Codec</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">R210 Quicktime Uncompressed RGB 10-bit</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Raw Video</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RealVideo 1.0</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RealVideo 2.0</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RealVideo 3.0</td><td width="10%"></td><td width="10%">X</td><td width="40%">still far from ideal</td></tr>
|
||||
<tr><td width="40%">RealVideo 4.0</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Renderware TXD (TeXture Dictionary)</td><td width="10%"></td><td width="10%">X</td><td width="40%">Texture dictionaries used by the Renderware Engine.</td></tr>
|
||||
<tr><td width="40%">RL2 video</td><td width="10%"></td><td width="10%">X</td><td width="40%">used in some games by Entertainment Software Partners</td></tr>
|
||||
<tr><td width="40%">Sierra VMD video</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Sierra VMD files.</td></tr>
|
||||
<tr><td width="40%">Silicon Graphics Motion Video Compressor 1 (MVC1)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Silicon Graphics Motion Video Compressor 2 (MVC2)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Silicon Graphics RLE 8-bit video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Smacker video</td><td width="10%"></td><td width="10%">X</td><td width="40%">Video encoding used in Smacker.</td></tr>
|
||||
<tr><td width="40%">SMPTE VC-1</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Snow</td><td width="10%">X</td><td width="10%">X</td><td width="40%">experimental wavelet codec (fourcc: SNOW)</td></tr>
|
||||
<tr><td width="40%">Sony PlayStation MDEC (Motion DECoder)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Sorenson Vector Quantizer 1</td><td width="10%">X</td><td width="10%">X</td><td width="40%">fourcc: SVQ1</td></tr>
|
||||
<tr><td width="40%">Sorenson Vector Quantizer 3</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: SVQ3</td></tr>
|
||||
<tr><td width="40%">Sunplus JPEG (SP5X)</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: SP5X</td></tr>
|
||||
<tr><td width="40%">TechSmith Screen Capture Codec</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: TSCC</td></tr>
|
||||
<tr><td width="40%">TechSmith Screen Capture Codec 2</td><td width="10%"></td><td width="10%">X</td><td width="40%">fourcc: TSC2</td></tr>
|
||||
<tr><td width="40%">Theora</td><td width="10%">E</td><td width="10%">X</td><td width="40%">encoding supported through external library libtheora</td></tr>
|
||||
<tr><td width="40%">Tiertex Limited SEQ video</td><td width="10%"></td><td width="10%">X</td><td width="40%">Codec used in DOS CD-ROM FlashBack game.</td></tr>
|
||||
<tr><td width="40%">Ut Video</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">v210 QuickTime uncompressed 4:2:2 10-bit</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">v308 QuickTime uncompressed 4:4:4</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">v408 QuickTime uncompressed 4:4:4:4</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">v410 QuickTime uncompressed 4:4:4 10-bit</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">VBLE Lossless Codec</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">VMware Screen Codec / VMware Video</td><td width="10%"></td><td width="10%">X</td><td width="40%">Codec used in videos captured by VMware.</td></tr>
|
||||
<tr><td width="40%">Westwood Studios VQA (Vector Quantized Animation) video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Windows Media Image</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Windows Media Video 7</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Windows Media Video 8</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Windows Media Video 9</td><td width="10%"></td><td width="10%">X</td><td width="40%">not completely working</td></tr>
|
||||
<tr><td width="40%">Wing Commander III / Xan</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Wing Commander III .MVE files.</td></tr>
|
||||
<tr><td width="40%">Wing Commander IV / Xan</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Wing Commander IV.</td></tr>
|
||||
<tr><td width="40%">Winnov WNV1</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">WMV7</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">YAMAHA SMAF</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Psygnosis YOP Video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">yuv4</td><td width="10%">X</td><td width="10%">X</td><td width="40%">libquicktime uncompressed packed 4:2:0</td></tr>
|
||||
<tr><td width="40%">ZeroCodec Lossless Video</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ZLIB</td><td width="10%">X</td><td width="10%">X</td><td width="40%">part of LCL, encoder experimental</td></tr>
|
||||
<tr><td width="40%">Zip Motion Blocks Video</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Encoder works only in PAL8.</td></tr>
</table>

<p><code>X</code> means that encoding (resp. decoding) is supported.
</p>
<p><code>E</code> means that support is provided through an external library.
</p>
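<p>The names above do not always match the encoder and decoder names used on
the command line, and external-library encoders such as <code>libx264</code>
are only present when they were enabled at configure time. A simple way to
see what a given build offers, and which options an encoder takes, is for
example:
</p>
<pre>
ffmpeg -decoders             # list all video, audio and subtitle decoders
ffmpeg -encoders             # list all encoders
ffmpeg -h encoder=libx264    # show the options of a specific encoder
</pre>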
<a name="Audio-Codecs"></a>
<h3 class="section">2.4 Audio Codecs<span class="pull-right"><a class="anchor hidden-xs" href="#Audio-Codecs" aria-hidden="true">#</a> <a class="anchor hidden-xs" href="#toc-Audio-Codecs" aria-hidden="true">TOC</a></span></h3>

<table>
<tr><td width="40%">Name</td><td width="10%">Encoding</td><td width="10%">Decoding</td><td width="40%">Comments</td></tr>
<tr><td width="40%">8SVX exponential</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">8SVX fibonacci</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">AAC+</td><td width="10%">E</td><td width="10%">X</td><td width="40%">encoding supported through external library libaacplus</td></tr>
|
||||
<tr><td width="40%">AAC</td><td width="10%">E</td><td width="10%">X</td><td width="40%">encoding supported through external library libfaac and libvo-aacenc</td></tr>
|
||||
<tr><td width="40%">AC-3</td><td width="10%">IX</td><td width="10%">IX</td></tr>
|
||||
<tr><td width="40%">ADPCM 4X Movie</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM CDROM XA</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM Creative Technology</td><td width="10%"></td><td width="10%">X</td><td width="40%">16 -> 4, 8 -> 4, 8 -> 3, 8 -> 2</td></tr>
|
||||
<tr><td width="40%">ADPCM Electronic Arts</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in various EA titles.</td></tr>
|
||||
<tr><td width="40%">ADPCM Electronic Arts Maxis CDROM XS</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Sim City 3000.</td></tr>
|
||||
<tr><td width="40%">ADPCM Electronic Arts R1</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM Electronic Arts R2</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM Electronic Arts R3</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM Electronic Arts XAS</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM G.722</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM G.726</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM IMA AMV</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in AMV files</td></tr>
|
||||
<tr><td width="40%">ADPCM IMA Electronic Arts EACS</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM IMA Electronic Arts SEAD</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM IMA Funcom</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM IMA QuickTime</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM IMA Loki SDL MJPEG</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM IMA WAV</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM IMA Westwood</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM ISS IMA</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in FunCom games.</td></tr>
|
||||
<tr><td width="40%">ADPCM IMA Dialogic</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM IMA Duck DK3</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in some Sega Saturn console games.</td></tr>
|
||||
<tr><td width="40%">ADPCM IMA Duck DK4</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in some Sega Saturn console games.</td></tr>
|
||||
<tr><td width="40%">ADPCM IMA Radical</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM Microsoft</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM MS IMA</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM Nintendo Gamecube AFC</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM Nintendo Gamecube DTK</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM Nintendo Gamecube THP</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM QT IMA</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM SEGA CRI ADX</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Used in Sega Dreamcast games.</td></tr>
|
||||
<tr><td width="40%">ADPCM Shockwave Flash</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM Sound Blaster Pro 2-bit</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM Sound Blaster Pro 2.6-bit</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ADPCM Sound Blaster Pro 4-bit</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">ADPCM VIMA</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in LucasArts SMUSH animations.</td></tr>
<tr><td width="40%">ADPCM Westwood Studios IMA</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Westwood Studios games like Command and Conquer.</td></tr>
|
||||
<tr><td width="40%">ADPCM Yamaha</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">AMR-NB</td><td width="10%">E</td><td width="10%">X</td><td width="40%">encoding supported through external library libopencore-amrnb</td></tr>
|
||||
<tr><td width="40%">AMR-WB</td><td width="10%">E</td><td width="10%">X</td><td width="40%">encoding supported through external library libvo-amrwbenc</td></tr>
|
||||
<tr><td width="40%">Amazing Studio PAF Audio</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Apple lossless audio</td><td width="10%">X</td><td width="10%">X</td><td width="40%">QuickTime fourcc ’alac’</td></tr>
|
||||
<tr><td width="40%">ATRAC1</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ATRAC3</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">ATRAC3+</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Bink Audio</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Bink and Smacker files in many games.</td></tr>
|
||||
<tr><td width="40%">CELT</td><td width="10%"></td><td width="10%">E</td><td width="40%">decoding supported through external library libcelt</td></tr>
|
||||
<tr><td width="40%">Delphine Software International CIN audio</td><td width="10%"></td><td width="10%">X</td><td width="40%">Codec used in Delphine Software International games.</td></tr>
|
||||
<tr><td width="40%">Discworld II BMV Audio</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">COOK</td><td width="10%"></td><td width="10%">X</td><td width="40%">All versions except 5.1 are supported.</td></tr>
|
||||
<tr><td width="40%">DCA (DTS Coherent Acoustics)</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">DPCM id RoQ</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Used in Quake III, Jedi Knight 2 and other computer games.</td></tr>
|
||||
<tr><td width="40%">DPCM Interplay</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in various Interplay computer games.</td></tr>
|
||||
<tr><td width="40%">DPCM Sierra Online</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Sierra Online game audio files.</td></tr>
|
||||
<tr><td width="40%">DPCM Sol</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">DPCM Xan</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Origin’s Wing Commander IV AVI files.</td></tr>
<tr><td width="40%">DSD (Direct Stream Digital), least significant bit first</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">DSD (Direct Stream Digital), most significant bit first</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">DSD (Direct Stream Digital), least significant bit first, planar</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">DSD (Direct Stream Digital), most significant bit first, planar</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">DSP Group TrueSpeech</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">DV audio</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Enhanced AC-3</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">EVRC (Enhanced Variable Rate Codec)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">FLAC (Free Lossless Audio Codec)</td><td width="10%">X</td><td width="10%">IX</td></tr>
|
||||
<tr><td width="40%">G.723.1</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">G.729</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">GSM</td><td width="10%">E</td><td width="10%">X</td><td width="40%">encoding supported through external library libgsm</td></tr>
|
||||
<tr><td width="40%">GSM Microsoft variant</td><td width="10%">E</td><td width="10%">X</td><td width="40%">encoding supported through external library libgsm</td></tr>
|
||||
<tr><td width="40%">IAC (Indeo Audio Coder)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">iLBC (Internet Low Bitrate Codec)</td><td width="10%">E</td><td width="10%">E</td><td width="40%">encoding and decoding supported through external library libilbc</td></tr>
|
||||
<tr><td width="40%">IMC (Intel Music Coder)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MACE (Macintosh Audio Compression/Expansion) 3:1</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MACE (Macintosh Audio Compression/Expansion) 6:1</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MLP (Meridian Lossless Packing)</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in DVD-Audio discs.</td></tr>
|
||||
<tr><td width="40%">Monkey’s Audio</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MP1 (MPEG audio layer 1)</td><td width="10%"></td><td width="10%">IX</td></tr>
|
||||
<tr><td width="40%">MP2 (MPEG audio layer 2)</td><td width="10%">IX</td><td width="10%">IX</td><td width="40%">encoding supported also through external library TwoLAME</td></tr>
|
||||
<tr><td width="40%">MP3 (MPEG audio layer 3)</td><td width="10%">E</td><td width="10%">IX</td><td width="40%">encoding supported through external library LAME, ADU MP3 and MP3onMP4 also supported</td></tr>
|
||||
<tr><td width="40%">MPEG-4 Audio Lossless Coding (ALS)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Musepack SV7</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Musepack SV8</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Nellymoser Asao</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">On2 AVC (Audio for Video Codec)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Opus</td><td width="10%">E</td><td width="10%">E</td><td width="40%">supported through external library libopus</td></tr>
|
||||
<tr><td width="40%">PCM A-law</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM mu-law</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 8-bit planar</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 16-bit big-endian planar</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 16-bit little-endian planar</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 24-bit little-endian planar</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 32-bit little-endian planar</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM 32-bit floating point big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM 32-bit floating point little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM 64-bit floating point big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM 64-bit floating point little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM D-Cinema audio signed 24-bit</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 8-bit</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 16-bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 16-bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 24-bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 24-bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 32-bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 32-bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM signed 16/20/24-bit big-endian in MPEG-TS</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM unsigned 8-bit</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM unsigned 16-bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM unsigned 16-bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM unsigned 24-bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM unsigned 24-bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM unsigned 32-bit big-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM unsigned 32-bit little-endian</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PCM Zork</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">QCELP / PureVoice</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">QDesign Music Codec 2</td><td width="10%"></td><td width="10%">X</td><td width="40%">There are still some distortions.</td></tr>
|
||||
<tr><td width="40%">RealAudio 1.0 (14.4K)</td><td width="10%">X</td><td width="10%">X</td><td width="40%">Real 14400 bit/s codec</td></tr>
|
||||
<tr><td width="40%">RealAudio 2.0 (28.8K)</td><td width="10%"></td><td width="10%">X</td><td width="40%">Real 28800 bit/s codec</td></tr>
|
||||
<tr><td width="40%">RealAudio 3.0 (dnet)</td><td width="10%">IX</td><td width="10%">X</td><td width="40%">Real low bitrate AC-3 codec</td></tr>
|
||||
<tr><td width="40%">RealAudio Lossless</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RealAudio SIPR / ACELP.NET</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Shorten</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Sierra VMD audio</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in Sierra VMD files.</td></tr>
|
||||
<tr><td width="40%">Smacker audio</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">SMPTE 302M AES3 audio</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Sonic</td><td width="10%">X</td><td width="10%">X</td><td width="40%">experimental codec</td></tr>
|
||||
<tr><td width="40%">Sonic lossless</td><td width="10%">X</td><td width="10%">X</td><td width="40%">experimental codec</td></tr>
|
||||
<tr><td width="40%">Speex</td><td width="10%">E</td><td width="10%">E</td><td width="40%">supported through external library libspeex</td></tr>
|
||||
<tr><td width="40%">TAK (Tom’s lossless Audio Kompressor)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">True Audio (TTA)</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">TrueHD</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in HD-DVD and Blu-Ray discs.</td></tr>
|
||||
<tr><td width="40%">TwinVQ (VQF flavor)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">VIMA</td><td width="10%"></td><td width="10%">X</td><td width="40%">Used in LucasArts SMUSH animations.</td></tr>
|
||||
<tr><td width="40%">Vorbis</td><td width="10%">E</td><td width="10%">X</td><td width="40%">A native but very primitive encoder exists.</td></tr>
|
||||
<tr><td width="40%">Voxware MetaSound</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">WavPack</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Westwood Audio (SND1)</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Windows Media Audio 1</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Windows Media Audio 2</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Windows Media Audio Lossless</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Windows Media Audio Pro</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Windows Media Audio Voice</td><td width="10%"></td><td width="10%">X</td></tr>
</table>

<p><code>X</code> means that encoding (resp. decoding) is supported.
</p>
<p><code>E</code> means that support is provided through an external library.
</p>
<p><code>I</code> means that an integer-only version is available, too (ensures high
performance on systems without hardware floating point support).
</p>
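<p>As an example of the <code>E</code> and <code>I</code> flags in practice
(input and output file names are placeholders): MP3 encoding goes through the
external LAME library, exposed as the <code>libmp3lame</code> encoder, while
AC-3 also has an integer-only encoder, <code>ac3_fixed</code>, for machines
without fast floating point:
</p>
<pre>
ffmpeg -i input.wav -c:a libmp3lame -q:a 2 output.mp3   # needs --enable-libmp3lame
ffmpeg -i input.wav -c:a ac3_fixed output.ac3           # fixed-point AC-3 encoder
</pre>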
<a name="Subtitle-Formats"></a>
<h3 class="section">2.5 Subtitle Formats<span class="pull-right"><a class="anchor hidden-xs" href="#Subtitle-Formats" aria-hidden="true">#</a> <a class="anchor hidden-xs" href="#toc-Subtitle-Formats" aria-hidden="true">TOC</a></span></h3>

<table>
<tr><td width="40%">Name</td><td width="10%">Muxing</td><td width="10%">Demuxing</td><td width="10%">Encoding</td><td width="10%">Decoding</td></tr>
<tr><td width="40%">3GPP Timed Text</td><td width="10%"></td><td width="10%"></td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">AQTitle</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">DVB</td><td width="10%">X</td><td width="10%">X</td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">DVB teletext</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">E</td></tr>
<tr><td width="40%">DVD</td><td width="10%">X</td><td width="10%">X</td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">JACOsub</td><td width="10%">X</td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">MicroDVD</td><td width="10%">X</td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">MPL2</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">MPsub (MPlayer)</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">PGS</td><td width="10%"></td><td width="10%"></td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">PJS (Phoenix)</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">RealText</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">SAMI</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">Spruce format (STL)</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">SSA/ASS</td><td width="10%">X</td><td width="10%">X</td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">SubRip (SRT)</td><td width="10%">X</td><td width="10%">X</td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">SubViewer v1</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">SubViewer</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">TED Talks captions</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">VobSub (IDX+SUB)</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">VPlayer</td><td width="10%"></td><td width="10%">X</td><td width="10%"></td><td width="10%">X</td></tr>
<tr><td width="40%">WebVTT</td><td width="10%">X</td><td width="10%">X</td><td width="10%">X</td><td width="10%">X</td></tr>
<tr><td width="40%">XSUB</td><td width="10%"></td><td width="10%"></td><td width="10%">X</td><td width="10%">X</td></tr>
</table>
|
||||
|
||||
<p><code>X</code> means that the feature is supported.
|
||||
</p>
|
||||
<p><code>E</code> means that support is provided through an external library.
|
||||
</p>
|
||||
<a name="Network-Protocols"></a>
|
||||
<h3 class="section">2.6 Network Protocols<span class="pull-right"><a class="anchor hidden-xs" href="#Network-Protocols" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Network-Protocols" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<table>
|
||||
<tr><td width="40%">Name</td><td width="10%">Support</td></tr>
|
||||
<tr><td width="40%">file</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">FTP</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Gopher</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">HLS</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">HTTP</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">HTTPS</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Icecast</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MMSH</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MMST</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">pipe</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RTMP</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RTMPE</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RTMPS</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RTMPT</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RTMPTE</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RTMPTS</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">RTP</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">SAMBA</td><td width="10%">E</td></tr>
|
||||
<tr><td width="40%">SCTP</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">SFTP</td><td width="10%">E</td></tr>
|
||||
<tr><td width="40%">TCP</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">TLS</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">UDP</td><td width="10%">X</td></tr>
|
||||
</table>
|
||||
|
||||
<p><code>X</code> means that the protocol is supported.
|
||||
</p>
|
||||
<p><code>E</code> means that support is provided through an external library.
|
||||
</p>
|
||||
|
||||
<a name="Input_002fOutput-Devices"></a>
|
||||
<h3 class="section">2.7 Input/Output Devices<span class="pull-right"><a class="anchor hidden-xs" href="#Input_002fOutput-Devices" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Input_002fOutput-Devices" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<table>
|
||||
<tr><td width="40%">Name</td><td width="10%">Input</td><td width="10%">Output</td></tr>
|
||||
<tr><td width="40%">ALSA</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">BKTR</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">caca</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">DV1394</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">Lavfi virtual device</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">Linux framebuffer</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">JACK</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">LIBCDIO</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">LIBDC1394</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">OpenAL</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">OpenGL</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">OSS</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">PulseAudio</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">SDL</td><td width="10%"></td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">Video4Linux2</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">VfW capture</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">X11 grabbing</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
<tr><td width="40%">Win32 grabbing</td><td width="10%">X</td><td width="10%"></td></tr>
|
||||
</table>
|
||||
|
||||
<p><code>X</code> means that input/output is supported.
|
||||
</p>
|
||||
<a name="Timecode"></a>
|
||||
<h3 class="section">2.8 Timecode<span class="pull-right"><a class="anchor hidden-xs" href="#Timecode" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Timecode" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<table>
|
||||
<tr><td width="40%">Codec/format</td><td width="10%">Read</td><td width="10%">Write</td></tr>
|
||||
<tr><td width="40%">AVI</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">DV</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">GXF</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MOV</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MPEG1/2</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
<tr><td width="40%">MXF</td><td width="10%">X</td><td width="10%">X</td></tr>
|
||||
</table>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
493
Externals/ffmpeg/dev/doc/git-howto.html
vendored
@ -1,493 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
Using git to develop FFmpeg
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
Using git to develop FFmpeg
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Introduction" href="#Introduction">1 Introduction</a></li>
|
||||
<li><a name="toc-Basics-Usage" href="#Basics-Usage">2 Basics Usage</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Get-GIT" href="#Get-GIT">2.1 Get GIT</a></li>
|
||||
<li><a name="toc-Cloning-the-source-tree" href="#Cloning-the-source-tree">2.2 Cloning the source tree</a></li>
|
||||
<li><a name="toc-Updating-the-source-tree-to-the-latest-revision" href="#Updating-the-source-tree-to-the-latest-revision">2.3 Updating the source tree to the latest revision</a></li>
|
||||
<li><a name="toc-Rebasing-your-local-branches" href="#Rebasing-your-local-branches">2.4 Rebasing your local branches</a></li>
|
||||
<li><a name="toc-Adding_002fremoving-files_002fdirectories" href="#Adding_002fremoving-files_002fdirectories">2.5 Adding/removing files/directories</a></li>
|
||||
<li><a name="toc-Showing-modifications" href="#Showing-modifications">2.6 Showing modifications</a></li>
|
||||
<li><a name="toc-Inspecting-the-changelog" href="#Inspecting-the-changelog">2.7 Inspecting the changelog</a></li>
|
||||
<li><a name="toc-Checking-source-tree-status" href="#Checking-source-tree-status">2.8 Checking source tree status</a></li>
|
||||
<li><a name="toc-Committing" href="#Committing">2.9 Committing</a></li>
|
||||
<li><a name="toc-Preparing-a-patchset" href="#Preparing-a-patchset">2.10 Preparing a patchset</a></li>
|
||||
<li><a name="toc-Sending-patches-for-review" href="#Sending-patches-for-review">2.11 Sending patches for review</a></li>
|
||||
<li><a name="toc-Renaming_002fmoving_002fcopying-files-or-contents-of-files" href="#Renaming_002fmoving_002fcopying-files-or-contents-of-files">2.12 Renaming/moving/copying files or contents of files</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Git-configuration" href="#Git-configuration">3 Git configuration</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Personal-Git-installation" href="#Personal-Git-installation">3.1 Personal Git installation</a></li>
|
||||
<li><a name="toc-Repository-configuration" href="#Repository-configuration">3.2 Repository configuration</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-FFmpeg-specific" href="#FFmpeg-specific">4 FFmpeg specific</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Reverting-broken-commits" href="#Reverting-broken-commits">4.1 Reverting broken commits</a></li>
|
||||
<li><a name="toc-Pushing-changes-to-remote-trees" href="#Pushing-changes-to-remote-trees">4.2 Pushing changes to remote trees</a></li>
|
||||
<li><a name="toc-Finding-a-specific-svn-revision" href="#Finding-a-specific-svn-revision">4.3 Finding a specific svn revision</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-pre_002dpush-checklist" href="#pre_002dpush-checklist">5 pre-push checklist</a></li>
|
||||
<li><a name="toc-Server-Issues" href="#Server-Issues">6 Server Issues</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Introduction"></a>
|
||||
<h2 class="chapter">1 Introduction<span class="pull-right"><a class="anchor hidden-xs" href="#Introduction" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Introduction" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>This document aims to give a quick reference for a set of useful Git
commands. You should always consult the extensive and detailed documentation
provided directly by Git:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git --help
|
||||
man git
|
||||
</pre></div>
|
||||
|
||||
<p>shows you the available subcommands,
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git <command> --help
|
||||
man git-<command>
|
||||
</pre></div>
|
||||
|
||||
<p>shows information about the subcommand <command>.
|
||||
</p>
|
||||
<p>Additional information can be found on the
<a href="http://gitref.org">Git Reference</a> website.
|
||||
</p>
|
||||
<p>For more information about the Git project, visit the
<a href="http://git-scm.com/">Git website</a>.
</p>
|
||||
<p>Consult these resources whenever you have problems; they are quite exhaustive.
|
||||
</p>
|
||||
<p>What follows is a basic introduction to Git and some FFmpeg-specific
guidelines to ease contribution to the project.
|
||||
</p>
|
||||
<a name="Basics-Usage"></a>
|
||||
<h2 class="chapter">2 Basics Usage<span class="pull-right"><a class="anchor hidden-xs" href="#Basics-Usage" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Basics-Usage" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<a name="Get-GIT"></a>
|
||||
<h3 class="section">2.1 Get GIT<span class="pull-right"><a class="anchor hidden-xs" href="#Get-GIT" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Get-GIT" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>You can get Git from <a href="http://git-scm.com/">http://git-scm.com/</a>.
Most distributions and operating systems provide a package for it.
|
||||
</p>
|
||||
|
||||
<a name="Cloning-the-source-tree"></a>
|
||||
<h3 class="section">2.2 Cloning the source tree<span class="pull-right"><a class="anchor hidden-xs" href="#Cloning-the-source-tree" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Cloning-the-source-tree" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git clone git://source.ffmpeg.org/ffmpeg <target>
|
||||
</pre></div>
|
||||
|
||||
<p>This will put the FFmpeg sources into the directory <var><target></var>.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git clone git@source.ffmpeg.org:ffmpeg <target>
|
||||
</pre></div>
|
||||
|
||||
<p>This will put the FFmpeg sources into the directory <var><target></var> and let
|
||||
you push back your changes to the remote repository.
|
||||
</p>
|
||||
<p>Make sure that you do not have Windows line endings in your checkouts,
|
||||
otherwise you may experience spurious compilation failures. One way to
|
||||
achieve this is to run
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git config --global core.autocrlf false
|
||||
</pre></div>
|
||||
|
||||
|
||||
<a name="Updating-the-source-tree-to-the-latest-revision"></a>
|
||||
<h3 class="section">2.3 Updating the source tree to the latest revision<span class="pull-right"><a class="anchor hidden-xs" href="#Updating-the-source-tree-to-the-latest-revision" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Updating-the-source-tree-to-the-latest-revision" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git pull (--rebase)
|
||||
</pre></div>
|
||||
|
||||
<p>pulls in the latest changes from the tracked branch. The tracked branch
|
||||
can be remote. By default the master branch tracks the branch master in
|
||||
the remote origin.
|
||||
</p>
|
||||
<div class="warning">
|
||||
<p><code>--rebase</code> (see below) is recommended.
|
||||
</p></div>
|
||||
<a name="Rebasing-your-local-branches"></a>
|
||||
<h3 class="section">2.4 Rebasing your local branches<span class="pull-right"><a class="anchor hidden-xs" href="#Rebasing-your-local-branches" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Rebasing-your-local-branches" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git pull --rebase
|
||||
</pre></div>
|
||||
|
||||
<p>fetches the changes from the main repository and replays your local commits
|
||||
over it. This is required to keep all your local changes at the top of
|
||||
FFmpeg’s master tree. The master tree will reject pushes with merge commits.
|
||||
</p>
|
||||
|
||||
<a name="Adding_002fremoving-files_002fdirectories"></a>
|
||||
<h3 class="section">2.5 Adding/removing files/directories<span class="pull-right"><a class="anchor hidden-xs" href="#Adding_002fremoving-files_002fdirectories" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Adding_002fremoving-files_002fdirectories" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git add [-A] <filename/dirname>
|
||||
git rm [-r] <filename/dirname>
|
||||
</pre></div>
|
||||
|
||||
<p>Git needs to be notified of all changes you make to your working
directory that make files appear or disappear.
Line moves across files are tracked automatically.
|
||||
</p>
|
||||
|
||||
<a name="Showing-modifications"></a>
|
||||
<h3 class="section">2.6 Showing modifications<span class="pull-right"><a class="anchor hidden-xs" href="#Showing-modifications" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Showing-modifications" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git diff <filename(s)>
|
||||
</pre></div>
|
||||
|
||||
<p>will show all local modifications in your working directory as a unified diff.
|
||||
</p>
|
||||
|
||||
<a name="Inspecting-the-changelog"></a>
|
||||
<h3 class="section">2.7 Inspecting the changelog<span class="pull-right"><a class="anchor hidden-xs" href="#Inspecting-the-changelog" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Inspecting-the-changelog" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git log <filename(s)>
|
||||
</pre></div>
|
||||
|
||||
<p>You may also use graphical tools like gitview or gitk, or the web
interface available at <a href="http://source.ffmpeg.org/">http://source.ffmpeg.org/</a>.
|
||||
</p>
|
||||
<a name="Checking-source-tree-status"></a>
|
||||
<h3 class="section">2.8 Checking source tree status<span class="pull-right"><a class="anchor hidden-xs" href="#Checking-source-tree-status" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Checking-source-tree-status" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git status
|
||||
</pre></div>
|
||||
|
||||
<p>detects all the changes you made and lists what actions will be taken in case
|
||||
of a commit (additions, modifications, deletions, etc.).
|
||||
</p>
|
||||
|
||||
<a name="Committing"></a>
|
||||
<h3 class="section">2.9 Committing<span class="pull-right"><a class="anchor hidden-xs" href="#Committing" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Committing" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git diff --check
|
||||
</pre></div>
|
||||
|
||||
<p>to double-check your changes before committing them, to avoid trouble later
on. All experienced developers do this on each and every commit, no matter
how small.
Every one of them has been saved from looking like a fool by this many times.
It’s very easy for stray debug output or cosmetic modifications to slip in;
please avoid such problems through this extra level of scrutiny.
|
||||
</p>
|
||||
<p>For cosmetics-only commits you should get (almost) empty output from
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git diff -w -b <filename(s)>
|
||||
</pre></div>
|
||||
|
||||
<p>Also check the output of
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git status
|
||||
</pre></div>
|
||||
|
||||
<p>to make sure you don’t have untracked files or deletions.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git add [-i|-p|-A] <filenames/dirnames>
|
||||
</pre></div>
|
||||
|
||||
<p>Make sure you have told git your name and email address
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git config --global user.name "My Name"
|
||||
git config --global user.email my@email.invalid
|
||||
</pre></div>
|
||||
|
||||
<p>Use <var>--global</var> to set the global configuration for all your git checkouts.
|
||||
</p>
|
||||
<p>Git will select the changes to the files for commit. Optionally you can use
|
||||
the interactive or the patch mode to select hunk by hunk what should be
|
||||
added to the commit.
|
||||
</p>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git commit
|
||||
</pre></div>
|
||||
|
||||
<p>Git will commit the selected changes to your current local branch.
|
||||
</p>
|
||||
<p>You will be prompted for a log message in an editor, which is either
|
||||
set in your personal configuration file through
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git config --global core.editor
|
||||
</pre></div>
|
||||
|
||||
<p>or set by one of the following environment variables:
|
||||
<var>GIT_EDITOR</var>, <var>VISUAL</var> or <var>EDITOR</var>.
|
||||
</p>
|
||||
<p>Log messages should be concise but descriptive. Explain why you made a change;
what you did will be obvious from the changes themselves most of the time.
Saying just "bug fix" or "10l" is bad. Remember that people of varying skill
levels look at your code and educate themselves while reading through it. Don’t
include filenames in log messages; Git provides that information.
|
||||
</p>
|
||||
<p>Preferably, give the commit message a terse, descriptive first line, an
empty line, and then a full description. The first line will be used by
<code>git format-patch</code> to name the patch file.
|
||||
</p>
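<p>As a purely illustrative sketch (the component, file and wording below are
made up for this example), such a message might look like:
</p>
<div class="example">
<pre class="example">avcodec/foodec: fix off-by-one error in the frame size check

The previous check rejected the largest valid frame size because it
used '>' instead of '>='. Also add a FATE test covering the boundary
case.
</pre></div>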
|
||||
<a name="Preparing-a-patchset"></a>
|
||||
<h3 class="section">2.10 Preparing a patchset<span class="pull-right"><a class="anchor hidden-xs" href="#Preparing-a-patchset" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Preparing-a-patchset" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git format-patch <commit> [-o directory]
|
||||
</pre></div>
|
||||
|
||||
<p>will generate a set of patches, one for each commit between <var><commit></var> and
the current <var>HEAD</var>. E.g.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git format-patch origin/master
|
||||
</pre></div>
|
||||
|
||||
<p>will generate patches for all commits on the current branch which are not
present upstream.
Another useful shortcut is
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git format-patch -n
|
||||
</pre></div>
|
||||
|
||||
<p>which will generate patches from the last <var>n</var> commits.
|
||||
By default the patches are created in the current directory.
|
||||
</p>
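<p>For a longer series it can be convenient to write the patches into a separate
directory and, optionally, generate a cover letter; the directory name below is
only an example:
</p>
<div class="example">
<pre class="example">git format-patch --cover-letter -o outgoing/ origin/master
</pre></div>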
|
||||
<a name="Sending-patches-for-review"></a>
|
||||
<h3 class="section">2.11 Sending patches for review<span class="pull-right"><a class="anchor hidden-xs" href="#Sending-patches-for-review" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Sending-patches-for-review" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git send-email <commit list|directory>
|
||||
</pre></div>
|
||||
|
||||
<p>will send the patches created by <code>git format-patch</code>, or generate
them directly. All the email fields can be configured in the global/local
configuration or overridden on the command line.
|
||||
Note that this tool must often be installed separately (e.g. <var>git-email</var>
|
||||
package on Debian-based distros).
|
||||
</p>
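<p>Assuming the patches were written to a directory as in the example above, a
typical invocation might look like the following; the explicit <code>--to</code>
can be omitted if the address is set in the configuration shown later in this
document:
</p>
<div class="example">
<pre class="example">git send-email --to ffmpeg-devel@ffmpeg.org outgoing/*.patch
</pre></div>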
|
||||
|
||||
<a name="Renaming_002fmoving_002fcopying-files-or-contents-of-files"></a>
|
||||
<h3 class="section">2.12 Renaming/moving/copying files or contents of files<span class="pull-right"><a class="anchor hidden-xs" href="#Renaming_002fmoving_002fcopying-files-or-contents-of-files" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Renaming_002fmoving_002fcopying-files-or-contents-of-files" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Git automatically tracks such changes, making those normal commits.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">mv/cp path/file otherpath/otherfile
|
||||
git add [-A] .
|
||||
git commit
|
||||
</pre></div>
|
||||
|
||||
|
||||
<a name="Git-configuration"></a>
|
||||
<h2 class="chapter">3 Git configuration<span class="pull-right"><a class="anchor hidden-xs" href="#Git-configuration" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Git-configuration" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>In order to simplify a few workflows, it is advisable to configure both
|
||||
your personal Git installation and your local FFmpeg repository.
|
||||
</p>
|
||||
<a name="Personal-Git-installation"></a>
|
||||
<h3 class="section">3.1 Personal Git installation<span class="pull-right"><a class="anchor hidden-xs" href="#Personal-Git-installation" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Personal-Git-installation" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Add the following to your <samp>~/.gitconfig</samp> to help <code>git send-email</code>
|
||||
and <code>git format-patch</code> detect renames:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">[diff]
|
||||
renames = copy
|
||||
</pre></div>
|
||||
|
||||
<a name="Repository-configuration"></a>
|
||||
<h3 class="section">3.2 Repository configuration<span class="pull-right"><a class="anchor hidden-xs" href="#Repository-configuration" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Repository-configuration" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>In order to have <code>git send-email</code> automatically send patches
|
||||
to the ffmpeg-devel mailing list, add the following stanza
|
||||
to <samp>/path/to/ffmpeg/repository/.git/config</samp>:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">[sendemail]
|
||||
to = ffmpeg-devel@ffmpeg.org
|
||||
</pre></div>
|
||||
|
||||
<a name="FFmpeg-specific"></a>
|
||||
<h2 class="chapter">4 FFmpeg specific<span class="pull-right"><a class="anchor hidden-xs" href="#FFmpeg-specific" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-FFmpeg-specific" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<a name="Reverting-broken-commits"></a>
|
||||
<h3 class="section">4.1 Reverting broken commits<span class="pull-right"><a class="anchor hidden-xs" href="#Reverting-broken-commits" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Reverting-broken-commits" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git reset <commit>
|
||||
</pre></div>
|
||||
|
||||
<p><code>git reset</code> will uncommit the changes up to <var><commit></var>, rewriting
the current branch history.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git commit --amend
|
||||
</pre></div>
|
||||
|
||||
<p>allows one to amend the last commit details quickly.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git rebase -i origin/master
|
||||
</pre></div>
|
||||
|
||||
<p>will replay local commits on top of the main repository, allowing you to edit,
merge or remove some of them in the process.
|
||||
</p>
|
||||
<div class="info">
|
||||
<p><code>git reset</code>, <code>git commit --amend</code> and <code>git rebase</code>
|
||||
rewrite history, so you should use them ONLY on your local or topic branches.
|
||||
The main repository will reject those changes.
|
||||
</p></div>
|
||||
<div class="example">
|
||||
<pre class="example">git revert <commit>
|
||||
</pre></div>
|
||||
|
||||
<p><code>git revert</code> will generate a revert commit. This will not make the
|
||||
faulty commit disappear from the history.
|
||||
</p>
|
||||
<a name="Pushing-changes-to-remote-trees"></a>
|
||||
<h3 class="section">4.2 Pushing changes to remote trees<span class="pull-right"><a class="anchor hidden-xs" href="#Pushing-changes-to-remote-trees" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Pushing-changes-to-remote-trees" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">git push
|
||||
</pre></div>
|
||||
|
||||
<p>Will push the changes to the default remote (<var>origin</var>).
Git will prevent you from pushing changes if the local and remote trees are
out of sync. Refer to <a href="#Updating-the-source-tree-to-the-latest-revision">Updating the source tree to the latest revision</a>
and <a href="#Rebasing-your-local-branches">Rebasing your local branches</a> to sync the local tree.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git remote add <name> <url>
|
||||
</pre></div>
|
||||
|
||||
<p>Will add an additional remote under the given name; this is useful if you want
to push your local branch to a remote host for review.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git push <remote> <refspec>
|
||||
</pre></div>
|
||||
|
||||
<p>Will push the changes to the <var><remote></var> repository.
|
||||
Omitting <var><refspec></var> makes <code>git push</code> update all the remote
|
||||
branches matching the local ones.
|
||||
</p>
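<p>As a sketch, publishing a local branch on a personal clone for review could
look like this; the remote name, URL and branch name are placeholders:
</p>
<div class="example">
<pre class="example">git remote add myclone git@example.com:me/ffmpeg.git
git push myclone my-feature
</pre></div>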
|
||||
<a name="Finding-a-specific-svn-revision"></a>
|
||||
<h3 class="section">4.3 Finding a specific svn revision<span class="pull-right"><a class="anchor hidden-xs" href="#Finding-a-specific-svn-revision" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Finding-a-specific-svn-revision" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Since version 1.7.1, Git supports the <var>:/foo</var> syntax for specifying commits
based on a regular expression; see <code>man gitrevisions</code>.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git show :/'as revision 23456'
|
||||
</pre></div>
|
||||
|
||||
<p>will show the svn changeset <var>r23456</var>. With older git versions searching in
|
||||
the <code>git log</code> output is the easiest option (especially if a pager with
|
||||
search capabilities is used).
|
||||
This commit can be checked out with
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git checkout -b svn_23456 :/'as revision 23456'
|
||||
</pre></div>
|
||||
|
||||
<p>or for git < 1.7.1 with
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">git checkout -b svn_23456 $SHA1
|
||||
</pre></div>
|
||||
|
||||
<p>where <var>$SHA1</var> is the commit hash from the <code>git log</code> output.
|
||||
</p>
|
||||
|
||||
<a name="pre_002dpush-checklist"></a>
|
||||
<h2 class="chapter">5 pre-push checklist<span class="pull-right"><a class="anchor hidden-xs" href="#pre_002dpush-checklist" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-pre_002dpush-checklist" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>Once you have a set of commits that you feel are ready for pushing,
|
||||
work through the following checklist to double-check that everything is in
proper order. This list tries to be exhaustive. If you are just
pushing a typo fix in a comment, some of the steps may be unnecessary.
|
||||
Apply your common sense, but if in doubt, err on the side of caution.
|
||||
</p>
|
||||
<p>First, make sure that the commits and branches you are going to push
|
||||
match what you want pushed and that nothing is missing, extraneous or
|
||||
wrong. You can see what will be pushed by running the git push command
with <code>--dry-run</code> first, and then inspecting the commits listed with
<code>git log -p 1234567..987654</code>. The <code>git status</code> command
may help in finding local changes that you have forgotten to add.
|
||||
</p>
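<p>For example, a quick way to review what would go out to the default remote is
the following; the range assumes that <var>origin/master</var> is the branch you
are pushing to:
</p>
<div class="example">
<pre class="example">git push --dry-run
git log -p origin/master..HEAD
</pre></div>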
|
||||
<p>Next let the code pass through a full run of our testsuite.
|
||||
</p>
|
||||
<ul>
|
||||
<li> <code>make distclean</code>
|
||||
</li><li> <code>/path/to/ffmpeg/configure</code>
|
||||
</li><li> <code>make check</code>
|
||||
</li><li> if FATE fails due to missing samples, run <code>make fate-rsync</code> and retry
|
||||
</li></ul>
|
||||
|
||||
<p>Make sure all your changes have been checked before pushing them; the
testsuite only checks against regressions, and even that only to some extent. It
obviously does not check that newly added features/code work unless you have
added a test for them (which is recommended).
|
||||
</p>
|
||||
<p>Also note that every single commit should pass the test suite, not just
|
||||
the result of a series of patches.
|
||||
</p>
|
||||
<p>Once everything has passed, push the changes to your public FFmpeg clone and post a
|
||||
merge request to ffmpeg-devel. You can also push them directly but this is not
|
||||
recommended.
|
||||
</p>
|
||||
<a name="Server-Issues"></a>
|
||||
<h2 class="chapter">6 Server Issues<span class="pull-right"><a class="anchor hidden-xs" href="#Server-Issues" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Server-Issues" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>Contact the project admins at <a href="mailto:root@ffmpeg.org">root@ffmpeg.org</a> if you have technical
problems with the Git server.
|
||||
</p>
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
76
Externals/ffmpeg/dev/doc/libavcodec.html
vendored
@ -1,76 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
Libavcodec Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
Libavcodec Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Description" href="#Description">1 Description</a></li>
|
||||
<li><a name="toc-See-Also" href="#See-Also">2 See Also</a></li>
|
||||
<li><a name="toc-Authors" href="#Authors">3 Authors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Description"></a>
|
||||
<h2 class="chapter">1 Description<span class="pull-right"><a class="anchor hidden-xs" href="#Description" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Description" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The libavcodec library provides a generic encoding/decoding framework
|
||||
and contains multiple decoders and encoders for audio, video and
|
||||
subtitle streams, and several bitstream filters.
|
||||
</p>
|
||||
<p>The shared architecture provides various services ranging from bit
|
||||
stream I/O to DSP optimizations, and makes it suitable for
|
||||
implementing robust and fast codecs as well as for experimentation.
|
||||
</p>
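<p>As a rough illustration only: the <code>ffmpeg</code> command-line tool uses
libavcodec for its decoders and encoders, so a transcode such as the following
exercises the framework (file names are placeholders and the chosen codecs must
be enabled in your build):
</p>
<div class="example">
<pre class="example">ffmpeg -i input.mkv -c:v mpeg4 -c:a aac output.mp4
</pre></div>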
|
||||
|
||||
<a name="See-Also"></a>
|
||||
<h2 class="chapter">2 See Also<span class="pull-right"><a class="anchor hidden-xs" href="#See-Also" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-See-Also" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p><a href="ffmpeg.html">ffmpeg</a>, <a href="ffplay.html">ffplay</a>, <a href="ffprobe.html">ffprobe</a>, <a href="ffserver.html">ffserver</a>,
|
||||
<a href="ffmpeg-codecs.html">ffmpeg-codecs</a>, <a href="ffmpeg-bitstream-filters.html">bitstream-filters</a>,
|
||||
<a href="libavutil.html">libavutil</a>
|
||||
</p>
|
||||
|
||||
<a name="Authors"></a>
|
||||
<h2 class="chapter">3 Authors<span class="pull-right"><a class="anchor hidden-xs" href="#Authors" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Authors" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg developers.
|
||||
</p>
|
||||
<p>For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
<code>git log</code> in the FFmpeg source directory, or browsing the
|
||||
online repository at <a href="http://source.ffmpeg.org">http://source.ffmpeg.org</a>.
|
||||
</p>
|
||||
<p>Maintainers for the specific components are listed in the file
|
||||
<samp>MAINTAINERS</samp> in the source code tree.
|
||||
</p>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
73
Externals/ffmpeg/dev/doc/libavdevice.html
vendored
@ -1,73 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
Libavdevice Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
Libavdevice Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Description" href="#Description">1 Description</a></li>
|
||||
<li><a name="toc-See-Also" href="#See-Also">2 See Also</a></li>
|
||||
<li><a name="toc-Authors" href="#Authors">3 Authors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Description"></a>
|
||||
<h2 class="chapter">1 Description<span class="pull-right"><a class="anchor hidden-xs" href="#Description" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Description" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The libavdevice library provides a generic framework for grabbing from
|
||||
and rendering to many common multimedia input/output devices, and
|
||||
supports several input and output devices, including Video4Linux2,
|
||||
VfW, DShow, and ALSA.
|
||||
</p>
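<p>For instance, the <code>ffmpeg</code> tool goes through libavdevice when a
device is used as input; the following sketch grabs ten seconds from a
Video4Linux2 camera (the device path is a placeholder and the v4l2 input must be
enabled in your build):
</p>
<div class="example">
<pre class="example">ffmpeg -f v4l2 -i /dev/video0 -t 10 capture.mkv
</pre></div>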
|
||||
|
||||
<a name="See-Also"></a>
|
||||
<h2 class="chapter">2 See Also<span class="pull-right"><a class="anchor hidden-xs" href="#See-Also" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-See-Also" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p><a href="ffmpeg.html">ffmpeg</a>, <a href="ffplay.html">ffplay</a>, <a href="ffprobe.html">ffprobe</a>, <a href="ffserver.html">ffserver</a>,
|
||||
<a href="ffmpeg-devices.html">ffmpeg-devices</a>,
|
||||
<a href="libavutil.html">libavutil</a>, <a href="libavcodec.html">libavcodec</a>, <a href="libavformat.html">libavformat</a>
|
||||
</p>
|
||||
|
||||
<a name="Authors"></a>
|
||||
<h2 class="chapter">3 Authors<span class="pull-right"><a class="anchor hidden-xs" href="#Authors" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Authors" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg developers.
|
||||
</p>
|
||||
<p>For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
<code>git log</code> in the FFmpeg source directory, or browsing the
|
||||
online repository at <a href="http://source.ffmpeg.org">http://source.ffmpeg.org</a>.
|
||||
</p>
|
||||
<p>Maintainers for the specific components are listed in the file
|
||||
<samp>MAINTAINERS</samp> in the source code tree.
|
||||
</p>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
72
Externals/ffmpeg/dev/doc/libavfilter.html
vendored
@ -1,72 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
Libavfilter Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
Libavfilter Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Description" href="#Description">1 Description</a></li>
|
||||
<li><a name="toc-See-Also" href="#See-Also">2 See Also</a></li>
|
||||
<li><a name="toc-Authors" href="#Authors">3 Authors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Description"></a>
|
||||
<h2 class="chapter">1 Description<span class="pull-right"><a class="anchor hidden-xs" href="#Description" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Description" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The libavfilter library provides a generic audio/video filtering
|
||||
framework containing several filters, sources and sinks.
|
||||
</p>
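<p>As an illustration, the <code>ffmpeg</code> tool exposes this framework
through its filtergraph options; the following sketch scales and horizontally
flips the video (file names are placeholders):
</p>
<div class="example">
<pre class="example">ffmpeg -i input.mp4 -vf "scale=640:360,hflip" output.mp4
</pre></div>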
|
||||
|
||||
<a name="See-Also"></a>
|
||||
<h2 class="chapter">2 See Also<span class="pull-right"><a class="anchor hidden-xs" href="#See-Also" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-See-Also" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p><a href="ffmpeg.html">ffmpeg</a>, <a href="ffplay.html">ffplay</a>, <a href="ffprobe.html">ffprobe</a>, <a href="ffserver.html">ffserver</a>,
|
||||
<a href="ffmpeg-filters.html">ffmpeg-filters</a>,
|
||||
<a href="libavutil.html">libavutil</a>, <a href="libswscale.html">libswscale</a>, <a href="libswresample.html">libswresample</a>,
|
||||
<a href="libavcodec.html">libavcodec</a>, <a href="libavformat.html">libavformat</a>, <a href="libavdevice.html">libavdevice</a>
|
||||
</p>
|
||||
|
||||
<a name="Authors"></a>
|
||||
<h2 class="chapter">3 Authors<span class="pull-right"><a class="anchor hidden-xs" href="#Authors" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Authors" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg developers.
|
||||
</p>
|
||||
<p>For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
<code>git log</code> in the FFmpeg source directory, or browsing the
|
||||
online repository at <a href="http://source.ffmpeg.org">http://source.ffmpeg.org</a>.
|
||||
</p>
|
||||
<p>Maintainers for the specific components are listed in the file
|
||||
<samp>MAINTAINERS</samp> in the source code tree.
|
||||
</p>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
76
Externals/ffmpeg/dev/doc/libavformat.html
vendored
@ -1,76 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
Libavformat Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
Libavformat Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Description" href="#Description">1 Description</a></li>
|
||||
<li><a name="toc-See-Also" href="#See-Also">2 See Also</a></li>
|
||||
<li><a name="toc-Authors" href="#Authors">3 Authors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Description"></a>
|
||||
<h2 class="chapter">1 Description<span class="pull-right"><a class="anchor hidden-xs" href="#Description" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Description" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The libavformat library provides a generic framework for multiplexing
|
||||
and demultiplexing (muxing and demuxing) audio, video and subtitle
|
||||
streams. It encompasses multiple muxers and demuxers for multimedia
|
||||
container formats.
|
||||
</p>
|
||||
<p>It also supports several input and output protocols to access a media
|
||||
resource.
|
||||
</p>
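<p>For example, remuxing a file from one container to another exercises only the
demuxing and muxing layers, without touching the codecs (file names are
placeholders):
</p>
<div class="example">
<pre class="example">ffmpeg -i input.mkv -c copy output.mp4
</pre></div>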
|
||||
|
||||
<a name="See-Also"></a>
|
||||
<h2 class="chapter">2 See Also<span class="pull-right"><a class="anchor hidden-xs" href="#See-Also" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-See-Also" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p><a href="ffmpeg.html">ffmpeg</a>, <a href="ffplay.html">ffplay</a>, <a href="ffprobe.html">ffprobe</a>, <a href="ffserver.html">ffserver</a>,
|
||||
<a href="ffmpeg-formats.html">ffmpeg-formats</a>, <a href="ffmpeg-protocols.html">ffmpeg-protocols</a>,
|
||||
<a href="libavutil.html">libavutil</a>, <a href="libavcodec.html">libavcodec</a>
|
||||
</p>
|
||||
|
||||
<a name="Authors"></a>
|
||||
<h2 class="chapter">3 Authors<span class="pull-right"><a class="anchor hidden-xs" href="#Authors" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Authors" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg developers.
|
||||
</p>
|
||||
<p>For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
<code>git log</code> in the FFmpeg source directory, or browsing the
|
||||
online repository at <a href="http://source.ffmpeg.org">http://source.ffmpeg.org</a>.
|
||||
</p>
|
||||
<p>Maintainers for the specific components are listed in the file
|
||||
<samp>MAINTAINERS</samp> in the source code tree.
|
||||
</p>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
95
Externals/ffmpeg/dev/doc/libavutil.html
vendored
@ -1,95 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
Libavutil Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
Libavutil Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Description" href="#Description">1 Description</a></li>
|
||||
<li><a name="toc-See-Also" href="#See-Also">2 See Also</a></li>
|
||||
<li><a name="toc-Authors" href="#Authors">3 Authors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Description"></a>
|
||||
<h2 class="chapter">1 Description<span class="pull-right"><a class="anchor hidden-xs" href="#Description" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Description" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The libavutil library is a utility library to aid portable
|
||||
multimedia programming. It contains safe portable string functions,
|
||||
random number generators, data structures, additional mathematics
|
||||
functions, cryptography and multimedia related functionality (like
|
||||
enumerations for pixel and sample formats). It is not a library for
|
||||
code needed by both libavcodec and libavformat.
|
||||
</p>
|
||||
<p>The goals for this library are to be:
|
||||
</p>
|
||||
<dl compact="compact">
|
||||
<dt><strong>Modular</strong></dt>
|
||||
<dd><p>It should have few interdependencies and the possibility of disabling individual
|
||||
parts during <code>./configure</code>.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><strong>Small</strong></dt>
|
||||
<dd><p>Both sources and objects should be small.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><strong>Efficient</strong></dt>
|
||||
<dd><p>It should have low CPU and memory usage.
|
||||
</p>
|
||||
</dd>
|
||||
<dt><strong>Useful</strong></dt>
|
||||
<dd><p>It should avoid useless features that almost no one needs.
|
||||
</p></dd>
|
||||
</dl>
|
||||
|
||||
|
||||
<a name="See-Also"></a>
|
||||
<h2 class="chapter">2 See Also<span class="pull-right"><a class="anchor hidden-xs" href="#See-Also" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-See-Also" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p><a href="ffmpeg.html">ffmpeg</a>, <a href="ffplay.html">ffplay</a>, <a href="ffprobe.html">ffprobe</a>, <a href="ffserver.html">ffserver</a>,
|
||||
<a href="ffmpeg-utils.html">ffmpeg-utils</a>
|
||||
</p>
|
||||
|
||||
<a name="Authors"></a>
|
||||
<h2 class="chapter">3 Authors<span class="pull-right"><a class="anchor hidden-xs" href="#Authors" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Authors" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg developers.
|
||||
</p>
|
||||
<p>For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
<code>git log</code> in the FFmpeg source directory, or browsing the
|
||||
online repository at <a href="http://source.ffmpeg.org">http://source.ffmpeg.org</a>.
|
||||
</p>
|
||||
<p>Maintainers for the specific components are listed in the file
|
||||
<samp>MAINTAINERS</samp> in the source code tree.
|
||||
</p>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
95
Externals/ffmpeg/dev/doc/libswresample.html
vendored
@ -1,95 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
Libswresample Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
Libswresample Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Description" href="#Description">1 Description</a></li>
|
||||
<li><a name="toc-See-Also" href="#See-Also">2 See Also</a></li>
|
||||
<li><a name="toc-Authors" href="#Authors">3 Authors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Description"></a>
|
||||
<h2 class="chapter">1 Description<span class="pull-right"><a class="anchor hidden-xs" href="#Description" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Description" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The libswresample library performs highly optimized audio resampling,
|
||||
rematrixing and sample format conversion operations.
|
||||
</p>
|
||||
<p>Specifically, this library performs the following conversions:
|
||||
</p>
|
||||
<ul>
|
||||
<li> <em>Resampling</em>: is the process of changing the audio rate, for
|
||||
example from a high sample rate of 44100Hz to 8000Hz. Audio
|
||||
conversion from high to low sample rate is a lossy process. Several
|
||||
resampling options and algorithms are available.
|
||||
|
||||
</li><li> <em>Format conversion</em>: is the process of converting the type of
|
||||
samples, for example from 16-bit signed samples to unsigned 8-bit or
|
||||
float samples. It also handles packing conversion, when passing from
|
||||
packed layout (all samples belonging to distinct channels interleaved
|
||||
in the same buffer), to planar layout (all samples belonging to the
|
||||
same channel stored in a dedicated buffer or "plane").
|
||||
|
||||
</li><li> <em>Rematrixing</em>: is the process of changing the channel layout, for
|
||||
example from stereo to mono. When the input channels cannot be mapped
|
||||
to the output streams, the process is lossy, since it involves
|
||||
different gain factors and mixing.
|
||||
</li></ul>
|
||||
|
||||
<p>Various other audio conversions (e.g. stretching and padding) are
|
||||
enabled through dedicated options.
|
||||
</p>
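<p>As an illustration, the <code>ffmpeg</code> tool typically performs these
conversions through libswresample when the requested output parameters differ
from the input; the following sketch resamples to 8000 Hz, downmixes to mono and
converts to signed 16-bit samples (file names are placeholders):
</p>
<div class="example">
<pre class="example">ffmpeg -i input.wav -ar 8000 -ac 1 -sample_fmt s16 output.wav
</pre></div>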
|
||||
|
||||
<a name="See-Also"></a>
|
||||
<h2 class="chapter">2 See Also<span class="pull-right"><a class="anchor hidden-xs" href="#See-Also" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-See-Also" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p><a href="ffmpeg.html">ffmpeg</a>, <a href="ffplay.html">ffplay</a>, <a href="ffprobe.html">ffprobe</a>, <a href="ffserver.html">ffserver</a>,
|
||||
<a href="ffmpeg-resampler.html">ffmpeg-resampler</a>,
|
||||
<a href="libavutil.html">libavutil</a>
|
||||
</p>
|
||||
|
||||
<a name="Authors"></a>
|
||||
<h2 class="chapter">3 Authors<span class="pull-right"><a class="anchor hidden-xs" href="#Authors" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Authors" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg developers.
|
||||
</p>
|
||||
<p>For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
<code>git log</code> in the FFmpeg source directory, or browsing the
|
||||
online repository at <a href="http://source.ffmpeg.org">http://source.ffmpeg.org</a>.
|
||||
</p>
|
||||
<p>Maintainers for the specific components are listed in the file
|
||||
<samp>MAINTAINERS</samp> in the source code tree.
|
||||
</p>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
89
Externals/ffmpeg/dev/doc/libswscale.html
vendored
@ -1,89 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
Libswscale Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
Libswscale Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Description" href="#Description">1 Description</a></li>
|
||||
<li><a name="toc-See-Also" href="#See-Also">2 See Also</a></li>
|
||||
<li><a name="toc-Authors" href="#Authors">3 Authors</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Description"></a>
|
||||
<h2 class="chapter">1 Description<span class="pull-right"><a class="anchor hidden-xs" href="#Description" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Description" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The libswscale library performs highly optimized image scaling and
|
||||
colorspace and pixel format conversion operations.
|
||||
</p>
|
||||
<p>Specifically, this library performs the following conversions:
|
||||
</p>
|
||||
<ul>
|
||||
<li> <em>Rescaling</em>: is the process of changing the video size. Several
|
||||
rescaling options and algorithms are available. This is usually a
|
||||
lossy process.
|
||||
|
||||
</li><li> <em>Pixel format conversion</em>: is the process of converting the image
|
||||
format and colorspace of the image, for example from planar YUV420P to
|
||||
RGB24 packed. It also handles packing conversion, that is, converting
from a packed layout (all pixel components belonging to distinct planes
interleaved in the same buffer) to a planar layout (all samples
belonging to the same plane stored in a dedicated buffer or "plane").
|
||||
|
||||
<p>This is usually a lossy process when the source and destination
colorspaces differ; see the example after this list.
|
||||
</p></li></ul>
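<p>As a minimal sketch (assuming the standard <code>ffmpeg</code> command-line
tool and hypothetical file names), rescaling and pixel format conversion can be
requested together:
</p>
<div class="example">
<pre class="example">ffmpeg -i input.mp4 -vf scale=640:360 -pix_fmt yuv420p output.mp4
</pre></div>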
|
||||
|
||||
|
||||
<a name="See-Also"></a>
|
||||
<h2 class="chapter">2 See Also<span class="pull-right"><a class="anchor hidden-xs" href="#See-Also" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-See-Also" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p><a href="ffmpeg.html">ffmpeg</a>, <a href="ffplay.html">ffplay</a>, <a href="ffprobe.html">ffprobe</a>, <a href="ffserver.html">ffserver</a>,
|
||||
<a href="ffmpeg-scaler.html">ffmpeg-scaler</a>,
|
||||
<a href="libavutil.html">libavutil</a>
|
||||
</p>
|
||||
|
||||
<a name="Authors"></a>
|
||||
<h2 class="chapter">3 Authors<span class="pull-right"><a class="anchor hidden-xs" href="#Authors" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Authors" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The FFmpeg developers.
|
||||
</p>
|
||||
<p>For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
<code>git log</code> in the FFmpeg source directory, or browsing the
|
||||
online repository at <a href="http://source.ffmpeg.org">http://source.ffmpeg.org</a>.
|
||||
</p>
|
||||
<p>Maintainers for the specific components are listed in the file
|
||||
<samp>MAINTAINERS</samp> in the source code tree.
|
||||
</p>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
211 Externals/ffmpeg/dev/doc/nut.html vendored
@@ -1,211 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
NUT
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
NUT
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Description" href="#Description">1 Description</a></li>
|
||||
<li><a name="toc-Modes" href="#Modes">2 Modes</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-BROADCAST" href="#BROADCAST">2.1 BROADCAST</a></li>
|
||||
<li><a name="toc-PIPE" href="#PIPE">2.2 PIPE</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Container_002dspecific-codec-tags" href="#Container_002dspecific-codec-tags">3 Container-specific codec tags</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Generic-raw-YUVA-formats" href="#Generic-raw-YUVA-formats">3.1 Generic raw YUVA formats</a></li>
|
||||
<li><a name="toc-Raw-Audio" href="#Raw-Audio">3.2 Raw Audio</a></li>
|
||||
<li><a name="toc-Subtitles" href="#Subtitles">3.3 Subtitles</a></li>
|
||||
<li><a name="toc-Raw-Data" href="#Raw-Data">3.4 Raw Data</a></li>
|
||||
<li><a name="toc-Codecs" href="#Codecs">3.5 Codecs</a></li>
|
||||
</ul></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Description"></a>
|
||||
<h2 class="chapter">1 Description<span class="pull-right"><a class="anchor hidden-xs" href="#Description" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Description" aria-hidden="true">TOC</a></span></h2>
|
||||
<p>NUT is a low overhead generic container format. It stores audio, video,
|
||||
subtitle and user-defined streams in a simple, yet efficient, way.
|
||||
</p>
|
||||
<p>It was created by a group of FFmpeg and MPlayer developers in 2003
|
||||
and was finalized in 2008.
|
||||
</p>
|
||||
<p>The official NUT specification is at svn://svn.mplayerhq.hu/nut.
|
||||
In case of any differences between this text and the official specification,
|
||||
the official specification shall prevail.
|
||||
</p>
|
||||
<a name="Modes"></a>
|
||||
<h2 class="chapter">2 Modes<span class="pull-right"><a class="anchor hidden-xs" href="#Modes" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Modes" aria-hidden="true">TOC</a></span></h2>
|
||||
<p>NUT has some variants signaled by using the flags field in its main header.
|
||||
</p>
|
||||
<table>
|
||||
<tr><td width="40%">BROADCAST</td><td width="40%">Extend the syncpoint to report the sender wallclock</td></tr>
|
||||
<tr><td width="40%">PIPE</td><td width="40%">Omit completely the syncpoint</td></tr>
|
||||
</table>
|
||||
|
||||
<a name="BROADCAST"></a>
|
||||
<h3 class="section">2.1 BROADCAST<span class="pull-right"><a class="anchor hidden-xs" href="#BROADCAST" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-BROADCAST" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>The BROADCAST variant provides a secondary time reference to facilitate
detecting endpoint latency and network delays.
It assumes that all the endpoint clocks are synchronized.
It is intended for real-time scenarios.
</p>
|
||||
<a name="PIPE"></a>
|
||||
<h3 class="section">2.2 PIPE<span class="pull-right"><a class="anchor hidden-xs" href="#PIPE" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-PIPE" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>The PIPE variant assumes NUT is used as a non-seekable intermediate container.
Omitting syncpoints removes unneeded overhead and reduces the overall
memory usage.
</p>
|
||||
<a name="Container_002dspecific-codec-tags"></a>
|
||||
<h2 class="chapter">3 Container-specific codec tags<span class="pull-right"><a class="anchor hidden-xs" href="#Container_002dspecific-codec-tags" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Container_002dspecific-codec-tags" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<a name="Generic-raw-YUVA-formats"></a>
|
||||
<h3 class="section">3.1 Generic raw YUVA formats<span class="pull-right"><a class="anchor hidden-xs" href="#Generic-raw-YUVA-formats" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Generic-raw-YUVA-formats" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Since many exotic planar YUVA pixel formats are not considered by
|
||||
the AVI/QuickTime FourCC lists, the following scheme is adopted for
|
||||
representing them.
|
||||
</p>
|
||||
<p>The first two bytes can contain the values:
</p>
<table>
<tr><td width="40%">Y1</td><td width="40%">only Y</td></tr>
<tr><td width="40%">Y2</td><td width="40%">Y+A</td></tr>
<tr><td width="40%">Y3</td><td width="40%">YUV</td></tr>
<tr><td width="40%">Y4</td><td width="40%">YUVA</td></tr>
</table>
|
||||
<p>The third byte represents the width and height chroma subsampling
|
||||
values for the UV planes, that is the amount to shift the luma
|
||||
width/height right to find the chroma width/height.
|
||||
</p>
|
||||
<p>The fourth byte is the number of bits used (8, 16, ...).
|
||||
</p>
|
||||
<p>If the order of bytes is inverted, that means that each component has
|
||||
to be read big-endian.
|
||||
</p>
|
||||
<a name="Raw-Audio"></a>
|
||||
<h3 class="section">3.2 Raw Audio<span class="pull-right"><a class="anchor hidden-xs" href="#Raw-Audio" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Raw-Audio" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<table>
|
||||
<tr><td width="40%">ALAW</td><td width="40%">A-LAW</td></tr>
|
||||
<tr><td width="40%">ULAW</td><td width="40%">MU-LAW</td></tr>
|
||||
<tr><td width="40%">P<type><interleaving><bits></td><td width="40%">little-endian PCM</td></tr>
|
||||
<tr><td width="40%"><bits><interleaving><type>P</td><td width="40%">big-endian PCM</td></tr>
|
||||
</table>
|
||||
|
||||
<p><type> is S for signed integer, U for unsigned integer, F for IEEE float.
<interleaving> is D for default, P for planar.
<bits> is 8, 16, 24 or 32.
</p>
|
||||
<div class="example">
|
||||
<pre class="example">PFD[32] would for example be signed 32 bit little-endian IEEE float
|
||||
</pre></div>
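<p>Following the same scheme, a couple of further illustrative tags (derived
from the table above, not taken from the specification text) would be:
</p>
<div class="example">
<pre class="example">PSD[16] would be signed 16 bit little-endian PCM with default interleaving
[16]DSP would be the same samples stored big-endian
</pre></div>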
|
||||
|
||||
<a name="Subtitles"></a>
|
||||
<h3 class="section">3.3 Subtitles<span class="pull-right"><a class="anchor hidden-xs" href="#Subtitles" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Subtitles" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<table>
|
||||
<tr><td width="40%">UTF8</td><td width="40%">Raw UTF-8</td></tr>
|
||||
<tr><td width="40%">SSA[0]</td><td width="40%">SubStation Alpha</td></tr>
|
||||
<tr><td width="40%">DVDS</td><td width="40%">DVD subtitles</td></tr>
|
||||
<tr><td width="40%">DVBS</td><td width="40%">DVB subtitles</td></tr>
|
||||
</table>
|
||||
|
||||
<a name="Raw-Data"></a>
|
||||
<h3 class="section">3.4 Raw Data<span class="pull-right"><a class="anchor hidden-xs" href="#Raw-Data" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Raw-Data" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<table>
|
||||
<tr><td width="40%">UTF8</td><td width="40%">Raw UTF-8</td></tr>
|
||||
</table>
|
||||
|
||||
<a name="Codecs"></a>
|
||||
<h3 class="section">3.5 Codecs<span class="pull-right"><a class="anchor hidden-xs" href="#Codecs" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Codecs" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<table>
|
||||
<tr><td width="40%">3IV1</td><td width="40%">non-compliant MPEG-4 generated by old 3ivx</td></tr>
|
||||
<tr><td width="40%">ASV1</td><td width="40%">Asus Video</td></tr>
|
||||
<tr><td width="40%">ASV2</td><td width="40%">Asus Video 2</td></tr>
|
||||
<tr><td width="40%">CVID</td><td width="40%">Cinepak</td></tr>
|
||||
<tr><td width="40%">CYUV</td><td width="40%">Creative YUV</td></tr>
|
||||
<tr><td width="40%">DIVX</td><td width="40%">non-compliant MPEG-4 generated by old DivX</td></tr>
|
||||
<tr><td width="40%">DUCK</td><td width="40%">Truemotion 1</td></tr>
|
||||
<tr><td width="40%">FFV1</td><td width="40%">FFmpeg video 1</td></tr>
|
||||
<tr><td width="40%">FFVH</td><td width="40%">FFmpeg Huffyuv</td></tr>
|
||||
<tr><td width="40%">H261</td><td width="40%">ITU H.261</td></tr>
|
||||
<tr><td width="40%">H262</td><td width="40%">ITU H.262</td></tr>
|
||||
<tr><td width="40%">H263</td><td width="40%">ITU H.263</td></tr>
|
||||
<tr><td width="40%">H264</td><td width="40%">ITU H.264</td></tr>
|
||||
<tr><td width="40%">HFYU</td><td width="40%">Huffyuv</td></tr>
|
||||
<tr><td width="40%">I263</td><td width="40%">Intel H.263</td></tr>
|
||||
<tr><td width="40%">IV31</td><td width="40%">Indeo 3.1</td></tr>
|
||||
<tr><td width="40%">IV32</td><td width="40%">Indeo 3.2</td></tr>
|
||||
<tr><td width="40%">IV50</td><td width="40%">Indeo 5.0</td></tr>
|
||||
<tr><td width="40%">LJPG</td><td width="40%">ITU JPEG (lossless)</td></tr>
|
||||
<tr><td width="40%">MJLS</td><td width="40%">ITU JPEG-LS</td></tr>
|
||||
<tr><td width="40%">MJPG</td><td width="40%">ITU JPEG</td></tr>
|
||||
<tr><td width="40%">MPG4</td><td width="40%">MS MPEG-4v1 (not ISO MPEG-4)</td></tr>
|
||||
<tr><td width="40%">MP42</td><td width="40%">MS MPEG-4v2</td></tr>
|
||||
<tr><td width="40%">MP43</td><td width="40%">MS MPEG-4v3</td></tr>
|
||||
<tr><td width="40%">MP4V</td><td width="40%">ISO MPEG-4 Part 2 Video (from old encoders)</td></tr>
|
||||
<tr><td width="40%">mpg1</td><td width="40%">ISO MPEG-1 Video</td></tr>
|
||||
<tr><td width="40%">mpg2</td><td width="40%">ISO MPEG-2 Video</td></tr>
|
||||
<tr><td width="40%">MRLE</td><td width="40%">MS RLE</td></tr>
|
||||
<tr><td width="40%">MSVC</td><td width="40%">MS Video 1</td></tr>
|
||||
<tr><td width="40%">RT21</td><td width="40%">Indeo 2.1</td></tr>
|
||||
<tr><td width="40%">RV10</td><td width="40%">RealVideo 1.0</td></tr>
|
||||
<tr><td width="40%">RV20</td><td width="40%">RealVideo 2.0</td></tr>
|
||||
<tr><td width="40%">RV30</td><td width="40%">RealVideo 3.0</td></tr>
|
||||
<tr><td width="40%">RV40</td><td width="40%">RealVideo 4.0</td></tr>
|
||||
<tr><td width="40%">SNOW</td><td width="40%">FFmpeg Snow</td></tr>
|
||||
<tr><td width="40%">SVQ1</td><td width="40%">Sorenson Video 1</td></tr>
|
||||
<tr><td width="40%">SVQ3</td><td width="40%">Sorenson Video 3</td></tr>
|
||||
<tr><td width="40%">theo</td><td width="40%">Xiph Theora</td></tr>
|
||||
<tr><td width="40%">TM20</td><td width="40%">Truemotion 2.0</td></tr>
|
||||
<tr><td width="40%">UMP4</td><td width="40%">non-compliant MPEG-4 generated by UB Video MPEG-4</td></tr>
|
||||
<tr><td width="40%">VCR1</td><td width="40%">ATI VCR1</td></tr>
|
||||
<tr><td width="40%">VP30</td><td width="40%">VP 3.0</td></tr>
|
||||
<tr><td width="40%">VP31</td><td width="40%">VP 3.1</td></tr>
|
||||
<tr><td width="40%">VP50</td><td width="40%">VP 5.0</td></tr>
|
||||
<tr><td width="40%">VP60</td><td width="40%">VP 6.0</td></tr>
|
||||
<tr><td width="40%">VP61</td><td width="40%">VP 6.1</td></tr>
|
||||
<tr><td width="40%">VP62</td><td width="40%">VP 6.2</td></tr>
|
||||
<tr><td width="40%">VP70</td><td width="40%">VP 7.0</td></tr>
|
||||
<tr><td width="40%">WMV1</td><td width="40%">MS WMV7</td></tr>
|
||||
<tr><td width="40%">WMV2</td><td width="40%">MS WMV8</td></tr>
|
||||
<tr><td width="40%">WMV3</td><td width="40%">MS WMV9</td></tr>
|
||||
<tr><td width="40%">WV1F</td><td width="40%">non-compliant MPEG-4 generated by ?</td></tr>
|
||||
<tr><td width="40%">WVC1</td><td width="40%">VC-1</td></tr>
|
||||
<tr><td width="40%">XVID</td><td width="40%">non-compliant MPEG-4 generated by old Xvid</td></tr>
|
||||
<tr><td width="40%">XVIX</td><td width="40%">non-compliant MPEG-4 generated by old Xvid with interlacing bug</td></tr>
|
||||
</table>
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
447 Externals/ffmpeg/dev/doc/platform.html vendored
@@ -1,447 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
Platform Specific Information
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
Platform Specific Information
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Unix_002dlike" href="#Unix_002dlike">1 Unix-like</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Advanced-linking-configuration" href="#Advanced-linking-configuration">1.1 Advanced linking configuration</a></li>
|
||||
<li><a name="toc-BSD" href="#BSD">1.2 BSD</a></li>
|
||||
<li><a name="toc-_0028Open_0029Solaris" href="#g_t_0028Open_0029Solaris">1.3 (Open)Solaris</a></li>
|
||||
<li><a name="toc-Darwin-_0028Mac-OS-X_002c-iPhone_0029" href="#Darwin-_0028Mac-OS-X_002c-iPhone_0029">1.4 Darwin (Mac OS X, iPhone)</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-DOS" href="#DOS">2 DOS</a></li>
|
||||
<li><a name="toc-OS_002f2" href="#OS_002f2">3 OS/2</a></li>
|
||||
<li><a name="toc-Windows" href="#Windows">4 Windows</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Native-Windows-compilation-using-MinGW-or-MinGW_002dw64" href="#Native-Windows-compilation-using-MinGW-or-MinGW_002dw64">4.1 Native Windows compilation using MinGW or MinGW-w64</a></li>
|
||||
<li><a name="toc-Microsoft-Visual-C_002b_002b-or-Intel-C_002b_002b-Compiler-for-Windows" href="#Microsoft-Visual-C_002b_002b-or-Intel-C_002b_002b-Compiler-for-Windows">4.2 Microsoft Visual C++ or Intel C++ Compiler for Windows</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Linking-to-FFmpeg-with-Microsoft-Visual-C_002b_002b" href="#Linking-to-FFmpeg-with-Microsoft-Visual-C_002b_002b">4.2.1 Linking to FFmpeg with Microsoft Visual C++</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Cross-compilation-for-Windows-with-Linux-1" href="#Cross-compilation-for-Windows-with-Linux-1">4.3 Cross compilation for Windows with Linux</a></li>
|
||||
<li><a name="toc-Compilation-under-Cygwin" href="#Compilation-under-Cygwin">4.4 Compilation under Cygwin</a></li>
|
||||
<li><a name="toc-Crosscompilation-for-Windows-under-Cygwin" href="#Crosscompilation-for-Windows-under-Cygwin">4.5 Crosscompilation for Windows under Cygwin</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Plan-9" href="#Plan-9">5 Plan 9</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Unix_002dlike"></a>
|
||||
<h2 class="chapter">1 Unix-like<span class="pull-right"><a class="anchor hidden-xs" href="#Unix_002dlike" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Unix_002dlike" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>Some parts of FFmpeg cannot be built with version 2.15 of the GNU
|
||||
assembler which is still provided by a few AMD64 distributions. To
|
||||
make sure your compiler really uses the required version of gas
|
||||
after a binutils upgrade, run:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">$(gcc -print-prog-name=as) --version
|
||||
</pre></div>
|
||||
|
||||
<p>If not, then you should install a different compiler that has no
|
||||
hard-coded path to gas. In the worst case pass <code>--disable-asm</code>
|
||||
to configure.
|
||||
</p>
|
||||
<a name="Advanced-linking-configuration"></a>
|
||||
<h3 class="section">1.1 Advanced linking configuration<span class="pull-right"><a class="anchor hidden-xs" href="#Advanced-linking-configuration" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Advanced-linking-configuration" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>If you compiled FFmpeg libraries statically and you want to use them to
|
||||
build your own shared library, you may need to force PIC support (with
|
||||
<code>--enable-pic</code> during FFmpeg configure) and add the following option
|
||||
to your project LDFLAGS:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">-Wl,-Bsymbolic
|
||||
</pre></div>
|
||||
|
||||
<p>If your target platform requires position independent binaries, you should
|
||||
pass the correct linking flag (e.g. <code>-pie</code>) to <code>--extra-ldexeflags</code>.
|
||||
</p>
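<p>A possible configure invocation combining both settings (a sketch; adjust to
your platform) would be:
</p>
<div class="example">
<pre class="example">./configure --enable-pic --extra-ldexeflags=-pie
</pre></div>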
|
||||
<a name="BSD"></a>
|
||||
<h3 class="section">1.2 BSD<span class="pull-right"><a class="anchor hidden-xs" href="#BSD" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-BSD" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>BSD make will not build FFmpeg, you need to install and use GNU Make
|
||||
(<code>gmake</code>).
|
||||
</p>
|
||||
<a name="g_t_0028Open_0029Solaris"></a>
|
||||
<h3 class="section">1.3 (Open)Solaris<span class="pull-right"><a class="anchor hidden-xs" href="#_0028Open_0029Solaris" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-_0028Open_0029Solaris" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>GNU Make is required to build FFmpeg, so you have to invoke <code>gmake</code>;
standard Solaris Make will not work. When building with a non-c99 front-end
|
||||
(gcc, generic suncc) add either <code>--extra-libs=/usr/lib/values-xpg6.o</code>
|
||||
or <code>--extra-libs=/usr/lib/64/values-xpg6.o</code> to the configure options
|
||||
since the libc is not c99-compliant by default. The probes performed by
|
||||
configure may raise an exception leading to the death of configure itself
|
||||
due to a bug in the system shell. Simply invoke a different shell such as
|
||||
bash directly to work around this:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">bash ./configure
|
||||
</pre></div>
|
||||
|
||||
<a name="Darwin"></a><a name="Darwin-_0028Mac-OS-X_002c-iPhone_0029"></a>
|
||||
<h3 class="section">1.4 Darwin (Mac OS X, iPhone)<span class="pull-right"><a class="anchor hidden-xs" href="#Darwin-_0028Mac-OS-X_002c-iPhone_0029" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Darwin-_0028Mac-OS-X_002c-iPhone_0029" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>The toolchain provided with Xcode is sufficient to build the basic
|
||||
unaccelerated code.
|
||||
</p>
|
||||
<p>Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
|
||||
<a href="https://github.com/FFmpeg/gas-preprocessor">https://github.com/FFmpeg/gas-preprocessor</a> or
|
||||
<a href="https://github.com/yuvi/gas-preprocessor">https://github.com/yuvi/gas-preprocessor</a>(currently outdated) to build the optimized
|
||||
assembly functions. Put the Perl script somewhere
|
||||
in your PATH, FFmpeg’s configure will pick it up automatically.
|
||||
</p>
|
||||
<p>Mac OS X on amd64 and x86 requires <code>yasm</code> to build most of the
|
||||
optimized assembly functions. <a href="http://www.finkproject.org/">Fink</a>,
|
||||
<a href="http://www.gentoo.org/proj/en/gentoo-alt/prefix/bootstrap-macos.xml">Gentoo Prefix</a>,
|
||||
<a href="https://mxcl.github.com/homebrew/">Homebrew</a>
|
||||
or <a href="http://www.macports.org">MacPorts</a> can easily provide it.
|
||||
</p>
|
||||
|
||||
<a name="DOS"></a>
|
||||
<h2 class="chapter">2 DOS<span class="pull-right"><a class="anchor hidden-xs" href="#DOS" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-DOS" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>Using a cross-compiler is preferred for various reasons.
|
||||
<a href="http://www.delorie.com/howto/djgpp/linux-x-djgpp.html">http://www.delorie.com/howto/djgpp/linux-x-djgpp.html</a>
|
||||
</p>
|
||||
|
||||
<a name="OS_002f2"></a>
|
||||
<h2 class="chapter">3 OS/2<span class="pull-right"><a class="anchor hidden-xs" href="#OS_002f2" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-OS_002f2" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>For information about compiling FFmpeg on OS/2 see
|
||||
<a href="http://www.edm2.com/index.php/FFmpeg">http://www.edm2.com/index.php/FFmpeg</a>.
|
||||
</p>
|
||||
|
||||
<a name="Windows"></a>
|
||||
<h2 class="chapter">4 Windows<span class="pull-right"><a class="anchor hidden-xs" href="#Windows" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Windows" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>To get help and instructions for building FFmpeg under Windows, check out
|
||||
the FFmpeg Windows Help Forum at <a href="http://ffmpeg.zeranoe.com/forum/">http://ffmpeg.zeranoe.com/forum/</a>.
|
||||
</p>
|
||||
<a name="Native-Windows-compilation-using-MinGW-or-MinGW_002dw64"></a>
|
||||
<h3 class="section">4.1 Native Windows compilation using MinGW or MinGW-w64<span class="pull-right"><a class="anchor hidden-xs" href="#Native-Windows-compilation-using-MinGW-or-MinGW_002dw64" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Native-Windows-compilation-using-MinGW-or-MinGW_002dw64" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg can be built to run natively on Windows using the MinGW or MinGW-w64
|
||||
toolchains. Install the latest versions of MSYS and MinGW or MinGW-w64 from
|
||||
<a href="http://www.mingw.org/">http://www.mingw.org/</a> or <a href="http://mingw-w64.sourceforge.net/">http://mingw-w64.sourceforge.net/</a>.
|
||||
You can find detailed installation instructions in the download section and
|
||||
the FAQ.
|
||||
</p>
|
||||
<p>Notes:
|
||||
</p>
|
||||
<ul>
|
||||
<li> Building natively using MSYS can be sped up by disabling implicit rules
|
||||
in the Makefile by calling <code>make -r</code> instead of plain <code>make</code>. This
|
||||
speed up is close to non-existent for normal one-off builds and is only
|
||||
noticeable when running make for a second time (for example during
|
||||
<code>make install</code>).
|
||||
|
||||
</li><li> In order to compile FFplay, you must have the MinGW development library
|
||||
of <a href="http://www.libsdl.org/">SDL</a> and <code>pkg-config</code> installed.
|
||||
|
||||
</li><li> By using <code>./configure --enable-shared</code> when configuring FFmpeg,
|
||||
you can build the FFmpeg libraries (e.g. libavutil, libavcodec,
|
||||
libavformat) as DLLs.
|
||||
|
||||
</li></ul>
|
||||
|
||||
<a name="Microsoft-Visual-C_002b_002b-or-Intel-C_002b_002b-Compiler-for-Windows"></a>
|
||||
<h3 class="section">4.2 Microsoft Visual C++ or Intel C++ Compiler for Windows<span class="pull-right"><a class="anchor hidden-xs" href="#Microsoft-Visual-C_002b_002b-or-Intel-C_002b_002b-Compiler-for-Windows" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Microsoft-Visual-C_002b_002b-or-Intel-C_002b_002b-Compiler-for-Windows" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg can be built with MSVC 2012 or earlier using a C99-to-C89 conversion utility
|
||||
and wrapper, or with MSVC 2013 and ICL natively.
|
||||
</p>
|
||||
<p>You will need the following prerequisites:
|
||||
</p>
|
||||
<ul>
|
||||
<li> <a href="https://github.com/libav/c99-to-c89/">C99-to-C89 Converter & Wrapper</a>
|
||||
(if using MSVC 2012 or earlier)
|
||||
</li><li> <a href="http://code.google.com/p/msinttypes/">msinttypes</a>
|
||||
(if using MSVC 2012 or earlier)
|
||||
</li><li> <a href="http://www.mingw.org/">MSYS</a>
|
||||
</li><li> <a href="http://yasm.tortall.net/">YASM</a>
|
||||
</li><li> <a href="http://gnuwin32.sourceforge.net/packages/bc.htm">bc for Windows</a> if
|
||||
you want to run <a href="fate.html">FATE</a>.
|
||||
</li></ul>
|
||||
|
||||
<p>To set up a proper environment in MSYS, you need to run <code>msys.bat</code> from
|
||||
the Visual Studio or Intel Compiler command prompt.
|
||||
</p>
|
||||
<p>Place <code>yasm.exe</code> somewhere in your <code>PATH</code>. If using MSVC 2012 or
|
||||
earlier, place <code>c99wrap.exe</code> and <code>c99conv.exe</code> somewhere in your
|
||||
<code>PATH</code> as well.
|
||||
</p>
|
||||
<p>Next, make sure any other headers and libs you want to use, such as zlib, are
|
||||
located in a spot that the compiler can see. Do so by modifying the <code>LIB</code>
|
||||
and <code>INCLUDE</code> environment variables to include the <strong>Windows-style</strong>
|
||||
paths to these directories. Alternatively, you can try and use the
|
||||
<code>--extra-cflags</code>/<code>--extra-ldflags</code> configure options. If using MSVC
|
||||
2012 or earlier, place <code>inttypes.h</code> somewhere the compiler can see too.
|
||||
</p>
|
||||
<p>Finally, run:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">For MSVC:
|
||||
./configure --toolchain=msvc
|
||||
|
||||
For ICL:
|
||||
./configure --toolchain=icl
|
||||
|
||||
make
|
||||
make install
|
||||
</pre></div>
|
||||
|
||||
<p>If you wish to compile shared libraries, add <code>--enable-shared</code> to your
|
||||
configure options. Note that due to the way MSVC and ICL handle DLL imports and
|
||||
exports, you cannot compile static and shared libraries at the same time, and
|
||||
enabling shared libraries will automatically disable the static ones.
|
||||
</p>
|
||||
<p>Notes:
|
||||
</p>
|
||||
<ul>
|
||||
<li> It is possible that coreutils’ <code>link.exe</code> conflicts with MSVC’s linker.
|
||||
You can find out by running <code>which link</code> to see which <code>link.exe</code> you
|
||||
are using. If it is located at <code>/bin/link.exe</code>, then you have the wrong one
|
||||
in your <code>PATH</code>. Either move or remove that copy, or make sure MSVC’s
|
||||
<code>link.exe</code> takes precedence in your <code>PATH</code> over coreutils’.
|
||||
|
||||
</li><li> If you wish to build with zlib support, you will have to grab a compatible
|
||||
zlib binary from somewhere, with an MSVC import lib, or if you wish to link
|
||||
statically, you can follow the instructions below to build a compatible
|
||||
<code>zlib.lib</code> with MSVC. Regardless of which method you use, you must still
|
||||
follow step 3, or compilation will fail.
|
||||
<ol>
|
||||
<li> Grab the <a href="http://zlib.net/">zlib sources</a>.
|
||||
</li><li> Edit <code>win32/Makefile.msc</code> so that it uses -MT instead of -MD, since
|
||||
this is how FFmpeg is built as well.
|
||||
</li><li> Edit <code>zconf.h</code> and remove its inclusion of <code>unistd.h</code>. This gets
|
||||
erroneously included when building FFmpeg.
|
||||
</li><li> Run <code>nmake -f win32/Makefile.msc</code>.
|
||||
</li><li> Move <code>zlib.lib</code>, <code>zconf.h</code>, and <code>zlib.h</code> to somewhere MSVC
|
||||
can see.
|
||||
</li></ol>
|
||||
|
||||
</li><li> FFmpeg has been tested with the following on i686 and x86_64:
|
||||
<ul>
|
||||
<li> Visual Studio 2010 Pro and Express
|
||||
</li><li> Visual Studio 2012 Pro and Express
|
||||
</li><li> Visual Studio 2013 Pro and Express
|
||||
</li><li> Intel Composer XE 2013
|
||||
</li><li> Intel Composer XE 2013 SP1
|
||||
</li></ul>
|
||||
<p>Anything else is not officially supported.
|
||||
</p>
|
||||
</li></ul>
|
||||
|
||||
<a name="Linking-to-FFmpeg-with-Microsoft-Visual-C_002b_002b"></a>
|
||||
<h4 class="subsection">4.2.1 Linking to FFmpeg with Microsoft Visual C++<span class="pull-right"><a class="anchor hidden-xs" href="#Linking-to-FFmpeg-with-Microsoft-Visual-C_002b_002b" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Linking-to-FFmpeg-with-Microsoft-Visual-C_002b_002b" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>If you plan to link with MSVC-built static libraries, you will need
|
||||
to make sure you have <code>Runtime Library</code> set to
|
||||
<code>Multi-threaded (/MT)</code> in your project’s settings.
|
||||
</p>
|
||||
<p>You will need to define <code>inline</code> to something MSVC understands:
|
||||
</p><div class="example">
|
||||
<pre class="example">#define inline __inline
|
||||
</pre></div>
|
||||
|
||||
<p>Also note, that as stated in <strong>Microsoft Visual C++</strong>, you will need
|
||||
an MSVC-compatible <a href="http://code.google.com/p/msinttypes/">inttypes.h</a>.
|
||||
</p>
|
||||
<p>If you plan on using import libraries created by dlltool, you must
|
||||
set <code>References</code> to <code>No (/OPT:NOREF)</code> under the linker optimization
|
||||
settings, otherwise the resulting binaries will fail during runtime.
|
||||
This is not required when using import libraries generated by <code>lib.exe</code>.
|
||||
This issue is reported upstream at
|
||||
<a href="http://sourceware.org/bugzilla/show_bug.cgi?id=12633">http://sourceware.org/bugzilla/show_bug.cgi?id=12633</a>.
|
||||
</p>
|
||||
<p>To create import libraries that work with the <code>/OPT:REF</code> option
|
||||
(which is enabled by default in Release mode), follow these steps:
|
||||
</p>
|
||||
<ol>
|
||||
<li> Open the <em>Visual Studio Command Prompt</em>.
|
||||
|
||||
<p>Alternatively, in a normal command line prompt, call <samp>vcvars32.bat</samp>
|
||||
which sets up the environment variables for the Visual C++ tools
|
||||
(the standard location for this file is something like
|
||||
<samp>C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin\vcvars32.bat</samp>).
|
||||
</p>
|
||||
</li><li> Enter the <samp>bin</samp> directory where the created LIB and DLL files
|
||||
are stored.
|
||||
|
||||
</li><li> Generate new import libraries with <code>lib.exe</code>:
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">lib /machine:i386 /def:..\lib\foo-version.def /out:foo.lib
|
||||
</pre></div>
|
||||
|
||||
<p>Replace <code>foo-version</code> and <code>foo</code> with the respective library names.
|
||||
</p>
|
||||
</li></ol>
|
||||
|
||||
<a name="Cross-compilation-for-Windows-with-Linux"></a><a name="Cross-compilation-for-Windows-with-Linux-1"></a>
|
||||
<h3 class="section">4.3 Cross compilation for Windows with Linux<span class="pull-right"><a class="anchor hidden-xs" href="#Cross-compilation-for-Windows-with-Linux-1" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Cross-compilation-for-Windows-with-Linux-1" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>You must use the MinGW cross compilation tools available at
|
||||
<a href="http://www.mingw.org/">http://www.mingw.org/</a>.
|
||||
</p>
|
||||
<p>Then configure FFmpeg with the following options:
|
||||
</p><div class="example">
|
||||
<pre class="example">./configure --target-os=mingw32 --cross-prefix=i386-mingw32msvc-
|
||||
</pre></div>
|
||||
<p>(you can change the cross-prefix according to the prefix chosen for the
|
||||
MinGW tools).
|
||||
</p>
|
||||
<p>Then you can easily test FFmpeg with <a href="http://www.winehq.com/">Wine</a>.
|
||||
</p>
|
||||
<a name="Compilation-under-Cygwin"></a>
|
||||
<h3 class="section">4.4 Compilation under Cygwin<span class="pull-right"><a class="anchor hidden-xs" href="#Compilation-under-Cygwin" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Compilation-under-Cygwin" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Please use Cygwin 1.7.x, as the obsolete 1.5.x Cygwin versions lack
llrint() in their C library.
|
||||
</p>
|
||||
<p>Install your Cygwin with all the "Base" packages, plus the
|
||||
following "Devel" ones:
|
||||
</p><div class="example">
|
||||
<pre class="example">binutils, gcc4-core, make, git, mingw-runtime, texinfo
|
||||
</pre></div>
|
||||
|
||||
<p>In order to run FATE you will also need the following "Utils" packages:
|
||||
</p><div class="example">
|
||||
<pre class="example">bc, diffutils
|
||||
</pre></div>
|
||||
|
||||
<p>If you want to build FFmpeg with additional libraries, download Cygwin
|
||||
"Devel" packages for Ogg and Vorbis from any Cygwin packages repository:
|
||||
</p><div class="example">
|
||||
<pre class="example">libogg-devel, libvorbis-devel
|
||||
</pre></div>
|
||||
|
||||
<p>These library packages are only available from
|
||||
<a href="http://sourceware.org/cygwinports/">Cygwin Ports</a>:
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">yasm, libSDL-devel, libfaac-devel, libaacplus-devel, libgsm-devel, libmp3lame-devel,
|
||||
libschroedinger1.0-devel, speex-devel, libtheora-devel, libxvidcore-devel
|
||||
</pre></div>
|
||||
|
||||
<p>The recommendation for x264 is to build it from source, as it evolves too
|
||||
quickly for Cygwin Ports to be up to date.
|
||||
</p>
|
||||
<a name="Crosscompilation-for-Windows-under-Cygwin"></a>
|
||||
<h3 class="section">4.5 Crosscompilation for Windows under Cygwin<span class="pull-right"><a class="anchor hidden-xs" href="#Crosscompilation-for-Windows-under-Cygwin" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Crosscompilation-for-Windows-under-Cygwin" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>With Cygwin you can create Windows binaries that do not need the cygwin1.dll.
|
||||
</p>
|
||||
<p>Just install your Cygwin as explained before, plus these additional
|
||||
"Devel" packages:
|
||||
</p><div class="example">
|
||||
<pre class="example">gcc-mingw-core, mingw-runtime, mingw-zlib
|
||||
</pre></div>
|
||||
|
||||
<p>and add some special flags to your configure invocation.
|
||||
</p>
|
||||
<p>For a static build run
|
||||
</p><div class="example">
|
||||
<pre class="example">./configure --target-os=mingw32 --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
|
||||
</pre></div>
|
||||
|
||||
<p>and for a build with shared libraries
|
||||
</p><div class="example">
|
||||
<pre class="example">./configure --target-os=mingw32 --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
|
||||
</pre></div>
|
||||
|
||||
<a name="Plan-9"></a>
|
||||
<h2 class="chapter">5 Plan 9<span class="pull-right"><a class="anchor hidden-xs" href="#Plan-9" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Plan-9" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<p>The native <a href="http://plan9.bell-labs.com/plan9/">Plan 9</a> compiler
|
||||
does not implement all the C99 features needed by FFmpeg, so the gcc
|
||||
port must be used. Furthermore, a few items missing from the C
|
||||
library and shell environment need to be fixed.
|
||||
</p>
|
||||
<ul>
|
||||
<li> GNU awk, grep, make, and sed
|
||||
|
||||
<p>Working packages of these tools can be found at
|
||||
<a href="http://code.google.com/p/ports2plan9/downloads/list">ports2plan9</a>.
|
||||
They can be installed with <a href="http://9front.org/">9front’s</a> <code>pkg</code>
|
||||
utility by setting <code>pkgpath</code> to
|
||||
<code>http://ports2plan9.googlecode.com/files/</code>.
|
||||
</p>
|
||||
</li><li> Missing/broken <code>head</code> and <code>printf</code> commands
|
||||
|
||||
<p>Replacements adequate for building FFmpeg can be found in the
|
||||
<code>compat/plan9</code> directory. Place these somewhere they will be
|
||||
found by the shell. These are not full implementations of the
|
||||
commands and are <em>not</em> suitable for general use.
|
||||
</p>
|
||||
</li><li> Missing C99 <code>stdint.h</code> and <code>inttypes.h</code>
|
||||
|
||||
<p>Replacement headers are available from
|
||||
<a href="http://code.google.com/p/plan9front/issues/detail?id=152">http://code.google.com/p/plan9front/issues/detail?id=152</a>.
|
||||
</p>
|
||||
</li><li> Missing or non-standard library functions
|
||||
|
||||
<p>Some functions in the C library are missing or incomplete. The
|
||||
<code><a href="http://ports2plan9.googlecode.com/files/gcc-apelibs-1207.tbz">gcc-apelibs-1207</a></code> package from
|
||||
<a href="http://code.google.com/p/ports2plan9/downloads/list">ports2plan9</a>
|
||||
includes an updated C library, but installing the full package gives
|
||||
unusable executables. Instead, keep the files from <code>gccbin.tgz</code>
|
||||
under <code>/386/lib/gnu</code>. From the <code>libc.a</code> archive in the
|
||||
<code>gcc-apelibs-1207</code> package, extract the following object files and
|
||||
turn them into a library:
|
||||
</p>
|
||||
<ul>
|
||||
<li> <code>strerror.o</code>
|
||||
</li><li> <code>strtoll.o</code>
|
||||
</li><li> <code>snprintf.o</code>
|
||||
</li><li> <code>vsnprintf.o</code>
|
||||
</li><li> <code>vfprintf.o</code>
|
||||
</li><li> <code>_IO_getc.o</code>
|
||||
</li><li> <code>_IO_putc.o</code>
|
||||
</li></ul>
|
||||
|
||||
<p>Use the <code>--extra-libs</code> option of <code>configure</code> to inform the
build system of this library (see the sketch after this list).
|
||||
</p>
|
||||
</li><li> FPU exceptions enabled by default
|
||||
|
||||
<p>Unlike most other systems, Plan 9 enables FPU exceptions by default.
|
||||
These must be disabled before calling any FFmpeg functions. While the
|
||||
included tools will do this automatically, other users of the
|
||||
libraries must do it themselves.
|
||||
</p>
|
||||
</li></ul>
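<p>A possible sequence for the object-file extraction mentioned above (a sketch;
the library name <samp>libcompat.a</samp> and its path are arbitrary):
</p>
<div class="example">
<pre class="example">ar x libc.a strerror.o strtoll.o snprintf.o vsnprintf.o vfprintf.o _IO_getc.o _IO_putc.o
ar rcs libcompat.a strerror.o strtoll.o snprintf.o vsnprintf.o vfprintf.o _IO_getc.o _IO_putc.o
./configure --extra-libs=/path/to/libcompat.a
</pre></div>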
|
||||
|
||||
|
||||
<p style="font-size: small;">
|
||||
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
|
||||
</p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
509 Externals/ffmpeg/dev/include/libavdevice/avdevice.h vendored
@@ -1,509 +0,0 @@
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVDEVICE_AVDEVICE_H
|
||||
#define AVDEVICE_AVDEVICE_H
|
||||
|
||||
#include "version.h"
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @ingroup lavd
|
||||
* Main libavdevice API header
|
||||
*/
|
||||
|
||||
/**
|
||||
* @defgroup lavd Special devices muxing/demuxing library
|
||||
* @{
|
||||
* Libavdevice is a complementary library to @ref libavf "libavformat". It
|
||||
* provides various "special" platform-specific muxers and demuxers, e.g. for
|
||||
* grabbing devices, audio capture and playback etc. As a consequence, the
|
||||
* (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own
|
||||
* I/O functions). The filename passed to avformat_open_input() often does not
|
||||
* refer to an actually existing file, but has some special device-specific
|
||||
* meaning - e.g. for x11grab it is the display name.
|
||||
*
|
||||
* To use libavdevice, simply call avdevice_register_all() to register all
|
||||
* compiled muxers and demuxers. They all use standard libavformat API.
|
||||
* @}
|
||||
*/
|
||||
|
||||
#include "libavutil/log.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavformat/avformat.h"
|
||||
|
||||
/**
|
||||
* Return the LIBAVDEVICE_VERSION_INT constant.
|
||||
*/
|
||||
unsigned avdevice_version(void);
|
||||
|
||||
/**
|
||||
* Return the libavdevice build-time configuration.
|
||||
*/
|
||||
const char *avdevice_configuration(void);
|
||||
|
||||
/**
|
||||
* Return the libavdevice license.
|
||||
*/
|
||||
const char *avdevice_license(void);
|
||||
|
||||
/**
|
||||
* Initialize libavdevice and register all the input and output devices.
|
||||
* @warning This function is not thread safe.
|
||||
*/
|
||||
void avdevice_register_all(void);
|
||||
|
||||
/**
|
||||
* Audio input devices iterator.
|
||||
*
|
||||
* If d is NULL, returns the first registered input audio/video device,
|
||||
* if d is non-NULL, returns the next registered input audio/video device after d
|
||||
* or NULL if d is the last one.
|
||||
*/
|
||||
AVInputFormat *av_input_audio_device_next(AVInputFormat *d);
|
||||
|
||||
/**
|
||||
* Video input devices iterator.
|
||||
*
|
||||
* If d is NULL, returns the first registered input audio/video device,
|
||||
* if d is non-NULL, returns the next registered input audio/video device after d
|
||||
* or NULL if d is the last one.
|
||||
*/
|
||||
AVInputFormat *av_input_video_device_next(AVInputFormat *d);
|
||||
|
||||
/**
|
||||
* Audio output devices iterator.
|
||||
*
|
||||
* If d is NULL, returns the first registered output audio/video device,
|
||||
* if d is non-NULL, returns the next registered output audio/video device after d
|
||||
* or NULL if d is the last one.
|
||||
*/
|
||||
AVOutputFormat *av_output_audio_device_next(AVOutputFormat *d);
|
||||
|
||||
/**
|
||||
* Video output devices iterator.
|
||||
*
|
||||
* If d is NULL, returns the first registered output audio/video device,
|
||||
* if d is non-NULL, returns the next registered output audio/video device after d
|
||||
* or NULL if d is the last one.
|
||||
*/
|
||||
AVOutputFormat *av_output_video_device_next(AVOutputFormat *d);
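
/*
 * Illustrative sketch (not part of the original header): enumerating the
 * registered video input devices with the iterators declared above.
 *
 *   AVInputFormat *fmt = NULL;
 *   avdevice_register_all();
 *   while ((fmt = av_input_video_device_next(fmt)))
 *       printf("%s: %s\n", fmt->name, fmt->long_name);  // fields provided by libavformat
 */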
|
||||
|
||||
typedef struct AVDeviceRect {
|
||||
int x; /**< x coordinate of top left corner */
|
||||
int y; /**< y coordinate of top left corner */
|
||||
int width; /**< width */
|
||||
int height; /**< height */
|
||||
} AVDeviceRect;
|
||||
|
||||
/**
|
||||
* Message types used by avdevice_app_to_dev_control_message().
|
||||
*/
|
||||
enum AVAppToDevMessageType {
|
||||
/**
|
||||
* Dummy message.
|
||||
*/
|
||||
AV_APP_TO_DEV_NONE = MKBETAG('N','O','N','E'),
|
||||
|
||||
/**
|
||||
* Window size change message.
|
||||
*
|
||||
* Message is sent to the device every time the application changes the size
|
||||
* of the window device renders to.
|
||||
* Message should also be sent right after window is created.
|
||||
*
|
||||
* data: AVDeviceRect: new window size.
|
||||
*/
|
||||
AV_APP_TO_DEV_WINDOW_SIZE = MKBETAG('G','E','O','M'),
|
||||
|
||||
/**
|
||||
* Repaint request message.
|
||||
*
|
||||
* Message is sent to the device when window has to be repainted.
|
||||
*
|
||||
* data: AVDeviceRect: area required to be repainted.
|
||||
* NULL: whole area is required to be repainted.
|
||||
*/
|
||||
AV_APP_TO_DEV_WINDOW_REPAINT = MKBETAG('R','E','P','A'),
|
||||
|
||||
/**
|
||||
* Request pause/play.
|
||||
*
|
||||
* Application requests pause/unpause playback.
|
||||
* Mostly usable with devices that have internal buffer.
|
||||
* By default devices are not paused.
|
||||
*
|
||||
* data: NULL
|
||||
*/
|
||||
AV_APP_TO_DEV_PAUSE = MKBETAG('P', 'A', 'U', ' '),
|
||||
AV_APP_TO_DEV_PLAY = MKBETAG('P', 'L', 'A', 'Y'),
|
||||
AV_APP_TO_DEV_TOGGLE_PAUSE = MKBETAG('P', 'A', 'U', 'T'),
|
||||
|
||||
/**
|
||||
* Volume control message.
|
||||
*
|
||||
* Set volume level. It may be device-dependent if volume
|
||||
* is changed per stream or system wide. Per stream volume
|
||||
* change is expected when possible.
|
||||
*
|
||||
* data: double: new volume with range of 0.0 - 1.0.
|
||||
*/
|
||||
AV_APP_TO_DEV_SET_VOLUME = MKBETAG('S', 'V', 'O', 'L'),
|
||||
|
||||
/**
|
||||
* Mute control messages.
|
||||
*
|
||||
* Change mute state. It may be device-dependent if mute status
|
||||
* is changed per stream or system wide. Per stream mute status
|
||||
* change is expected when possible.
|
||||
*
|
||||
* data: NULL.
|
||||
*/
|
||||
AV_APP_TO_DEV_MUTE = MKBETAG(' ', 'M', 'U', 'T'),
|
||||
AV_APP_TO_DEV_UNMUTE = MKBETAG('U', 'M', 'U', 'T'),
|
||||
AV_APP_TO_DEV_TOGGLE_MUTE = MKBETAG('T', 'M', 'U', 'T'),
|
||||
|
||||
/**
|
||||
* Get volume/mute messages.
|
||||
*
|
||||
* Force the device to send AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED or
|
||||
* AV_DEV_TO_APP_MUTE_STATE_CHANGED command respectively.
|
||||
*
|
||||
* data: NULL.
|
||||
*/
|
||||
AV_APP_TO_DEV_GET_VOLUME = MKBETAG('G', 'V', 'O', 'L'),
|
||||
AV_APP_TO_DEV_GET_MUTE = MKBETAG('G', 'M', 'U', 'T'),
|
||||
};
|
||||
|
||||
/**
|
||||
* Message types used by avdevice_dev_to_app_control_message().
|
||||
*/
|
||||
enum AVDevToAppMessageType {
|
||||
/**
|
||||
* Dummy message.
|
||||
*/
|
||||
AV_DEV_TO_APP_NONE = MKBETAG('N','O','N','E'),
|
||||
|
||||
/**
|
||||
* Create window buffer message.
|
||||
*
|
||||
* Device requests to create a window buffer. Exact meaning is device-
|
||||
* and application-dependent. Message is sent before rendering first
|
||||
* frame and all one-shot initializations should be done here.
|
||||
* Application is allowed to ignore preferred window buffer size.
|
||||
*
|
||||
* @note: Application is obligated to inform about window buffer size
|
||||
* with AV_APP_TO_DEV_WINDOW_SIZE message.
|
||||
*
|
||||
* data: AVDeviceRect: preferred size of the window buffer.
|
||||
* NULL: no preferred size of the window buffer.
|
||||
*/
|
||||
AV_DEV_TO_APP_CREATE_WINDOW_BUFFER = MKBETAG('B','C','R','E'),
|
||||
|
||||
/**
|
||||
* Prepare window buffer message.
|
||||
*
|
||||
* Device requests to prepare a window buffer for rendering.
|
||||
* Exact meaning is device- and application-dependent.
|
||||
* Message is sent before rendering of each frame.
|
||||
*
|
||||
* data: NULL.
|
||||
*/
|
||||
AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER = MKBETAG('B','P','R','E'),
|
||||
|
||||
/**
|
||||
* Display window buffer message.
|
||||
*
|
||||
* Device requests to display a window buffer.
|
||||
* Message is sent when new frame is ready to be displayed.
|
||||
* Usually buffers need to be swapped in handler of this message.
|
||||
*
|
||||
* data: NULL.
|
||||
*/
|
||||
AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER = MKBETAG('B','D','I','S'),
|
||||
|
||||
/**
|
||||
* Destroy window buffer message.
|
||||
*
|
||||
* Device requests to destroy a window buffer.
|
||||
* Message is sent when device is about to be destroyed and window
|
||||
* buffer is not required anymore.
|
||||
*
|
||||
* data: NULL.
|
||||
*/
|
||||
AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER = MKBETAG('B','D','E','S'),
|
||||
|
||||
/**
|
||||
* Buffer fullness status messages.
|
||||
*
|
||||
* Device signals buffer overflow/underflow.
|
||||
*
|
||||
* data: NULL.
|
||||
*/
|
||||
AV_DEV_TO_APP_BUFFER_OVERFLOW = MKBETAG('B','O','F','L'),
|
||||
AV_DEV_TO_APP_BUFFER_UNDERFLOW = MKBETAG('B','U','F','L'),
|
||||
|
||||
/**
|
||||
* Buffer readable/writable.
|
||||
*
|
||||
* Device informs that buffer is readable/writable.
|
||||
* When possible, the device informs how many bytes can be read/written.
*
* @warning The device may not send a message when the number of bytes that can be read/written changes.
|
||||
*
|
||||
* data: int64_t: amount of bytes available to read/write.
|
||||
* NULL: amount of bytes available to read/write is not known.
|
||||
*/
|
||||
AV_DEV_TO_APP_BUFFER_READABLE = MKBETAG('B','R','D',' '),
|
||||
AV_DEV_TO_APP_BUFFER_WRITABLE = MKBETAG('B','W','R',' '),
|
||||
|
||||
/**
|
||||
* Mute state change message.
|
||||
*
|
||||
* Device informs that mute state has changed.
|
||||
*
|
||||
* data: int: 0 for not muted state, non-zero for muted state.
|
||||
*/
|
||||
AV_DEV_TO_APP_MUTE_STATE_CHANGED = MKBETAG('C','M','U','T'),
|
||||
|
||||
/**
|
||||
* Volume level change message.
|
||||
*
|
||||
* Device informs that volume level has changed.
|
||||
*
|
||||
* data: double: new volume with range of 0.0 - 1.0.
|
||||
*/
|
||||
AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED = MKBETAG('C','V','O','L'),
|
||||
};
|
||||
|
||||
/**
|
||||
* Send control message from application to device.
|
||||
*
|
||||
* @param s device context.
|
||||
* @param type message type.
|
||||
* @param data message data. Exact type depends on message type.
|
||||
* @param data_size size of message data.
|
||||
* @return >= 0 on success, negative on error.
|
||||
* AVERROR(ENOSYS) when device doesn't implement handler of the message.
|
||||
*/
|
||||
int avdevice_app_to_dev_control_message(struct AVFormatContext *s,
|
||||
enum AVAppToDevMessageType type,
|
||||
void *data, size_t data_size);
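
/*
 * Illustrative sketch (not part of the original header): "oc" is assumed to be
 * an AVFormatContext opened on a device that implements these handlers.
 *
 *   double vol = 0.5;
 *   avdevice_app_to_dev_control_message(oc, AV_APP_TO_DEV_TOGGLE_PAUSE, NULL, 0);
 *   avdevice_app_to_dev_control_message(oc, AV_APP_TO_DEV_SET_VOLUME, &vol, sizeof(vol));
 */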
|
||||
|
||||
/**
|
||||
* Send control message from device to application.
|
||||
*
|
||||
* @param s device context.
|
||||
* @param type message type.
|
||||
* @param data message data. Can be NULL.
|
||||
* @param data_size size of message data.
|
||||
* @return >= 0 on success, negative on error.
|
||||
* AVERROR(ENOSYS) when application doesn't implement handler of the message.
|
||||
*/
|
||||
int avdevice_dev_to_app_control_message(struct AVFormatContext *s,
|
||||
enum AVDevToAppMessageType type,
|
||||
void *data, size_t data_size);
|
||||
|
||||
/**
|
||||
* The following API allows the user to probe device capabilities (supported codecs,
* pixel formats, sample formats, resolutions, channel counts, etc).
* It is built on top of the AVOption API.
* The queried capabilities allow setting up converters of video or audio
* parameters that fit the device.
|
||||
*
|
||||
* List of capabilities that can be queried:
|
||||
* - Capabilities valid for both audio and video devices:
|
||||
* - codec: supported audio/video codecs.
|
||||
* type: AV_OPT_TYPE_INT (AVCodecID value)
|
||||
* - Capabilities valid for audio devices:
|
||||
* - sample_format: supported sample formats.
|
||||
* type: AV_OPT_TYPE_INT (AVSampleFormat value)
|
||||
* - sample_rate: supported sample rates.
|
||||
* type: AV_OPT_TYPE_INT
|
||||
* - channels: supported number of channels.
|
||||
* type: AV_OPT_TYPE_INT
|
||||
* - channel_layout: supported channel layouts.
|
||||
* type: AV_OPT_TYPE_INT64
|
||||
* - Capabilities valid for video devices:
|
||||
* - pixel_format: supported pixel formats.
|
||||
* type: AV_OPT_TYPE_INT (AVPixelFormat value)
|
||||
* - window_size: supported window sizes (describes size of the window size presented to the user).
|
||||
* type: AV_OPT_TYPE_IMAGE_SIZE
|
||||
* - frame_size: supported frame sizes (describes size of provided video frames).
|
||||
* type: AV_OPT_TYPE_IMAGE_SIZE
|
||||
* - fps: supported fps values
|
||||
* type: AV_OPT_TYPE_RATIONAL
|
||||
*
|
||||
* The value of a capability may be set by the user with the av_opt_set() function
* and the AVDeviceCapabilitiesQuery object. Subsequent queries will
* limit results to the values matching the already set capabilities.
* For example, setting a codec may impact the number of formats or fps values
* returned during the next query. Setting an invalid value may limit results to zero.
|
||||
*
|
||||
* Example usage based on the opengl output device:
|
||||
*
|
||||
* @code
|
||||
* AVFormatContext *oc = NULL;
|
||||
* AVDeviceCapabilitiesQuery *caps = NULL;
|
||||
* AVOptionRanges *ranges;
|
||||
* int ret;
|
||||
*
|
||||
* if ((ret = avformat_alloc_output_context2(&oc, NULL, "opengl", NULL)) < 0)
|
||||
* goto fail;
|
||||
* if (avdevice_capabilities_create(&caps, oc, NULL) < 0)
|
||||
* goto fail;
|
||||
*
|
||||
* //query codecs
|
||||
* if (av_opt_query_ranges(&ranges, caps, "codec", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
|
||||
* goto fail;
|
||||
* //pick codec here and set it
|
||||
* av_opt_set(caps, "codec", AV_CODEC_ID_RAWVIDEO, 0);
|
||||
*
|
||||
* //query format
|
||||
* if (av_opt_query_ranges(&ranges, caps, "pixel_format", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
|
||||
* goto fail;
|
||||
* //pick format here and set it
|
||||
* av_opt_set(caps, "pixel_format", AV_PIX_FMT_YUV420P, 0);
|
||||
*
|
||||
* //query and set more capabilities
|
||||
*
|
||||
* fail:
|
||||
* //clean up code
|
||||
* avdevice_capabilities_free(&caps, oc);
|
||||
* avformat_free_context(oc);
|
||||
* @endcode
|
||||
*/
|
||||
|
||||
/**
|
||||
* Structure describes device capabilities.
|
||||
*
|
||||
* It is used by devices in conjunction with av_device_capabilities AVOption table
|
||||
* to implement capabilities probing API based on AVOption API. Should not be used directly.
|
||||
*/
|
||||
typedef struct AVDeviceCapabilitiesQuery {
|
||||
const AVClass *av_class;
|
||||
AVFormatContext *device_context;
|
||||
enum AVCodecID codec;
|
||||
enum AVSampleFormat sample_format;
|
||||
enum AVPixelFormat pixel_format;
|
||||
int sample_rate;
|
||||
int channels;
|
||||
int64_t channel_layout;
|
||||
int window_width;
|
||||
int window_height;
|
||||
int frame_width;
|
||||
int frame_height;
|
||||
AVRational fps;
|
||||
} AVDeviceCapabilitiesQuery;
|
||||
|
||||
/**
|
||||
* AVOption table used by devices to implement device capabilities API. Should not be used by a user.
|
||||
*/
|
||||
extern const AVOption av_device_capabilities[];
|

/**
 * Initialize the capabilities probing API based on the AVOption API.
 *
 * avdevice_capabilities_free() must be called when the query capabilities API
 * is no longer used.
 *
 * @param[out] caps      Device capabilities data. A pointer to a NULL pointer must be passed.
 * @param s              Context of the device.
 * @param device_options An AVDictionary filled with device-private options.
 *                       On return this parameter will be destroyed and replaced with
 *                       a dict containing options that were not found. May be NULL.
 *                       The same options must be passed later to avformat_write_header()
 *                       for output devices or avformat_open_input() for input devices,
 *                       or at any other place that affects device-private options.
 *
 * @return >= 0 on success, negative otherwise.
 */
int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
                                 AVDictionary **device_options);

/**
 * Free resources created by avdevice_capabilities_create().
 *
 * @param caps Device capabilities data to be freed.
 * @param s    Context of the device.
 */
void avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s);

/**
 * Structure that describes the basic parameters of a device.
 */
typedef struct AVDeviceInfo {
    char *device_name;         /**< device name, format depends on device */
    char *device_description;  /**< human friendly name */
} AVDeviceInfo;

/**
 * List of devices.
 */
typedef struct AVDeviceInfoList {
    AVDeviceInfo **devices;    /**< list of autodetected devices */
    int nb_devices;            /**< number of autodetected devices */
    int default_device;        /**< index of default device or -1 if no default */
} AVDeviceInfoList;

/**
 * List devices.
 *
 * Returns available device names and their parameters.
 *
 * @note Some devices may accept system-dependent device names that cannot be
 *       autodetected. The list returned by this function cannot be assumed to
 *       be always complete.
 *
 * @param s                device context.
 * @param[out] device_list list of autodetected devices.
 * @return count of autodetected devices, negative on error.
 */
int avdevice_list_devices(struct AVFormatContext *s, AVDeviceInfoList **device_list);

/**
 * Convenience function to free the result of avdevice_list_devices().
 *
 * @param device_list device list to be freed.
 */
void avdevice_free_list_devices(AVDeviceInfoList **device_list);
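/**
 * Illustrative usage sketch (not part of the original header): enumerating the
 * devices of an already-allocated device AVFormatContext `fmt_ctx` and printing
 * them, with the default device marked.
 *
 * @code
 * AVDeviceInfoList *list = NULL;
 * if (avdevice_list_devices(fmt_ctx, &list) >= 0) {
 *     for (int i = 0; i < list->nb_devices; i++)
 *         printf("%c %s (%s)\n", i == list->default_device ? '*' : ' ',
 *                list->devices[i]->device_name,
 *                list->devices[i]->device_description);
 * }
 * avdevice_free_list_devices(&list);
 * @endcode
 */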

/**
 * List devices.
 *
 * Returns available device names and their parameters.
 * These are convenience wrappers for avdevice_list_devices().
 * The device context is allocated and deallocated internally.
 *
 * @param device         device format. May be NULL if device name is set.
 * @param device_name    device name. May be NULL if device format is set.
 * @param device_options An AVDictionary filled with device-private options. May be NULL.
 *                       The same options must be passed later to avformat_write_header()
 *                       for output devices or avformat_open_input() for input devices,
 *                       or at any other place that affects device-private options.
 * @param[out] device_list list of autodetected devices
 * @return count of autodetected devices, negative on error.
 * @note The device argument takes precedence over device_name when both are set.
 */
int avdevice_list_input_sources(struct AVInputFormat *device, const char *device_name,
                                AVDictionary *device_options, AVDeviceInfoList **device_list);
int avdevice_list_output_sinks(struct AVOutputFormat *device, const char *device_name,
                               AVDictionary *device_options, AVDeviceInfoList **device_list);
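/**
 * Illustrative usage sketch (not part of the original header): listing the
 * sources of a named input device format. The "dshow" format name is only an
 * example and assumes a build with that device enabled.
 *
 * @code
 * AVDeviceInfoList *sources = NULL;
 * AVInputFormat *fmt = av_find_input_format("dshow");
 * int n = fmt ? avdevice_list_input_sources(fmt, NULL, NULL, &sources) : AVERROR(EINVAL);
 *
 * for (int i = 0; i < (n < 0 ? 0 : sources->nb_devices); i++)
 *     printf("%s: %s\n", sources->devices[i]->device_name,
 *                        sources->devices[i]->device_description);
 * avdevice_free_list_devices(&sources);
 * @endcode
 */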

#endif /* AVDEVICE_AVDEVICE_H */
@ -1,50 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVDEVICE_VERSION_H
|
||||
#define AVDEVICE_VERSION_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @ingroup lavd
|
||||
* Libavdevice version macros
|
||||
*/
|
||||
|
||||
#include "libavutil/version.h"
|
||||
|
||||
#define LIBAVDEVICE_VERSION_MAJOR 56
|
||||
#define LIBAVDEVICE_VERSION_MINOR 4
|
||||
#define LIBAVDEVICE_VERSION_MICRO 100
|
||||
|
||||
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
|
||||
LIBAVDEVICE_VERSION_MINOR, \
|
||||
LIBAVDEVICE_VERSION_MICRO)
|
||||
#define LIBAVDEVICE_VERSION AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \
|
||||
LIBAVDEVICE_VERSION_MINOR, \
|
||||
LIBAVDEVICE_VERSION_MICRO)
|
||||
#define LIBAVDEVICE_BUILD LIBAVDEVICE_VERSION_INT
|
||||
|
||||
#define LIBAVDEVICE_IDENT "Lavd" AV_STRINGIFY(LIBAVDEVICE_VERSION)
|
||||
|
||||
/**
|
||||
* FF_API_* defines may be placed below to indicate public API that will be
|
||||
* dropped at a future version bump. The defines themselves are not part of
|
||||
* the public API and may change, break or disappear at any time.
|
||||
*/
|
||||
|
||||
#endif /* AVDEVICE_VERSION_H */
|
@ -1,91 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVFILTER_ASRC_ABUFFER_H
|
||||
#define AVFILTER_ASRC_ABUFFER_H
|
||||
|
||||
#include "avfilter.h"
|
||||
|
||||
/**
|
||||
* @file
|
||||
* memory buffer source for audio
|
||||
*
|
||||
* @deprecated use buffersrc.h instead.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Queue an audio buffer to the audio buffer source.
|
||||
*
|
||||
* @param abuffersrc audio source buffer context
|
||||
* @param data pointers to the samples planes
|
||||
* @param linesize linesizes of each audio buffer plane
|
||||
* @param nb_samples number of samples per channel
|
||||
* @param sample_fmt sample format of the audio data
|
||||
* @param ch_layout channel layout of the audio data
|
||||
* @param planar flag to indicate if audio data is planar or packed
|
||||
* @param pts presentation timestamp of the audio buffer
|
||||
* @param flags unused
|
||||
*
|
||||
* @deprecated use av_buffersrc_add_ref() instead.
|
||||
*/
|
||||
attribute_deprecated
|
||||
int av_asrc_buffer_add_samples(AVFilterContext *abuffersrc,
|
||||
uint8_t *data[8], int linesize[8],
|
||||
int nb_samples, int sample_rate,
|
||||
int sample_fmt, int64_t ch_layout, int planar,
|
||||
int64_t pts, int av_unused flags);
|
||||
|
||||
/**
|
||||
* Queue an audio buffer to the audio buffer source.
|
||||
*
|
||||
* This is similar to av_asrc_buffer_add_samples(), but the samples
|
||||
* are stored in a buffer with known size.
|
||||
*
|
||||
* @param abuffersrc audio source buffer context
|
||||
* @param buf pointer to the samples data, packed is assumed
|
||||
* @param size the size in bytes of the buffer, it must contain an
|
||||
* integer number of samples
|
||||
* @param sample_fmt sample format of the audio data
|
||||
* @param ch_layout channel layout of the audio data
|
||||
* @param pts presentation timestamp of the audio buffer
|
||||
* @param flags unused
|
||||
*
|
||||
* @deprecated use av_buffersrc_add_ref() instead.
|
||||
*/
|
||||
attribute_deprecated
|
||||
int av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc,
|
||||
uint8_t *buf, int buf_size,
|
||||
int sample_rate,
|
||||
int sample_fmt, int64_t ch_layout, int planar,
|
||||
int64_t pts, int av_unused flags);
|
||||
|
||||
/**
|
||||
* Queue an audio buffer to the audio buffer source.
|
||||
*
|
||||
* @param abuffersrc audio source buffer context
|
||||
* @param samplesref buffer ref to queue
|
||||
* @param flags unused
|
||||
*
|
||||
* @deprecated use av_buffersrc_add_ref() instead.
|
||||
*/
|
||||
attribute_deprecated
|
||||
int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc,
|
||||
AVFilterBufferRef *samplesref,
|
||||
int av_unused flags);
|
||||
|
||||
#endif /* AVFILTER_ASRC_ABUFFER_H */
|
@ -1,69 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVFILTER_AVCODEC_H
|
||||
#define AVFILTER_AVCODEC_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavcodec/libavfilter gluing utilities
|
||||
*
|
||||
* This should be included in an application ONLY if the installed
|
||||
* libavfilter has been compiled with libavcodec support, otherwise
|
||||
* symbols defined below will not be available.
|
||||
*/
|
||||
|
||||
#include "avfilter.h"
|
||||
|
||||
#if FF_API_AVFILTERBUFFER
|
||||
/**
|
||||
* Create and return a picref reference from the data and properties
|
||||
* contained in frame.
|
||||
*
|
||||
* @param perms permissions to assign to the new buffer reference
|
||||
* @deprecated avfilter APIs work natively with AVFrame instead.
|
||||
*/
|
||||
attribute_deprecated
|
||||
AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms);
|
||||
|
||||
|
||||
/**
|
||||
* Create and return an audio buffer reference from the data and properties
|
||||
* contained in frame.
|
||||
*
|
||||
* @param perms permissions to assign to the new buffer reference
|
||||
* @deprecated avfilter APIs work natively with AVFrame instead.
|
||||
*/
|
||||
attribute_deprecated
|
||||
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame,
|
||||
int perms);
|
||||
|
||||
/**
|
||||
* Create and return a buffer reference from the data and properties
|
||||
* contained in frame.
|
||||
*
|
||||
* @param perms permissions to assign to the new buffer reference
|
||||
* @deprecated avfilter APIs work natively with AVFrame instead.
|
||||
*/
|
||||
attribute_deprecated
|
||||
AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type,
|
||||
const AVFrame *frame,
|
||||
int perms);
|
||||
#endif
|
||||
|
||||
#endif /* AVFILTER_AVCODEC_H */
|
1531
Externals/ffmpeg/dev/include/libavfilter/avfilter.h
vendored
File diff suppressed because it is too large
@ -1,28 +0,0 @@
|
||||
/*
|
||||
* Filter graphs
|
||||
* copyright (c) 2007 Bobby Bingham
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVFILTER_AVFILTERGRAPH_H
|
||||
#define AVFILTER_AVFILTERGRAPH_H
|
||||
|
||||
#include "avfilter.h"
|
||||
#include "libavutil/log.h"
|
||||
|
||||
#endif /* AVFILTER_AVFILTERGRAPH_H */
|
@ -1,204 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVFILTER_BUFFERSINK_H
|
||||
#define AVFILTER_BUFFERSINK_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @ingroup lavfi_buffersink
|
||||
* memory buffer sink API for audio and video
|
||||
*/
|
||||
|
||||
#include "avfilter.h"
|
||||
|
||||
/**
|
||||
* @defgroup lavfi_buffersink Buffer sink API
|
||||
* @ingroup lavfi
|
||||
* @{
|
||||
*/
|
||||
|
||||
#if FF_API_AVFILTERBUFFER
|
||||
/**
|
||||
* Get an audio/video buffer data from buffer_sink and put it in bufref.
|
||||
*
|
||||
* This function works with both audio and video buffer sinks.
|
||||
*
|
||||
* @param buffer_sink pointer to a buffersink or abuffersink context
|
||||
* @param flags a combination of AV_BUFFERSINK_FLAG_* flags
|
||||
* @return >= 0 in case of success, a negative AVERROR code in case of
|
||||
* failure
|
||||
*/
|
||||
attribute_deprecated
|
||||
int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink,
|
||||
AVFilterBufferRef **bufref, int flags);
|
||||
|
||||
/**
|
||||
* Get the number of immediately available frames.
|
||||
*/
|
||||
attribute_deprecated
|
||||
int av_buffersink_poll_frame(AVFilterContext *ctx);
|
||||
|
||||
/**
|
||||
* Get a buffer with filtered data from sink and put it in buf.
|
||||
*
|
||||
* @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
|
||||
* @param buf pointer to the buffer will be written here if buf is non-NULL. buf
|
||||
* must be freed by the caller using avfilter_unref_buffer().
|
||||
* Buf may also be NULL to query whether a buffer is ready to be
|
||||
* output.
|
||||
*
|
||||
* @return >= 0 in case of success, a negative AVERROR code in case of
|
||||
* failure.
|
||||
*/
|
||||
attribute_deprecated
|
||||
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);
|
||||
|
||||
/**
|
||||
* Same as av_buffersink_read, but with the ability to specify the number of
|
||||
* samples read. This function is less efficient than av_buffersink_read(),
|
||||
* because it copies the data around.
|
||||
*
|
||||
* @param ctx pointer to a context of the abuffersink AVFilter.
|
||||
* @param buf pointer to the buffer will be written here if buf is non-NULL. buf
|
||||
* must be freed by the caller using avfilter_unref_buffer(). buf
|
||||
* will contain exactly nb_samples audio samples, except at the end
|
||||
* of stream, when it can contain less than nb_samples.
|
||||
* Buf may also be NULL to query whether a buffer is ready to be
|
||||
* output.
|
||||
*
|
||||
* @warning do not mix this function with av_buffersink_read(). Use only one or
|
||||
* the other with a single sink, not both.
|
||||
*/
|
||||
attribute_deprecated
|
||||
int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
|
||||
int nb_samples);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Get a frame with filtered data from sink and put it in frame.
|
||||
*
|
||||
* @param ctx pointer to a buffersink or abuffersink filter context.
|
||||
* @param frame pointer to an allocated frame that will be filled with data.
|
||||
* The data must be freed using av_frame_unref() / av_frame_free()
|
||||
* @param flags a combination of AV_BUFFERSINK_FLAG_* flags
|
||||
*
|
||||
* @return >= 0 in for success, a negative AVERROR code for failure.
|
||||
*/
|
||||
int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags);
|
||||
|
||||
/**
|
||||
 * Tell av_buffersink_get_buffer_ref() to read the video/samples buffer
 * reference, but not to remove it from the buffer. This is useful when you
 * only need to inspect a video/samples buffer without consuming it.
|
||||
*/
|
||||
#define AV_BUFFERSINK_FLAG_PEEK 1
|
||||
|
||||
/**
|
||||
* Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
|
||||
* If a frame is already buffered, it is read (and removed from the buffer),
|
||||
* but if no frame is present, return AVERROR(EAGAIN).
|
||||
*/
|
||||
#define AV_BUFFERSINK_FLAG_NO_REQUEST 2
|
||||
|
||||
/**
|
||||
* Struct to use for initializing a buffersink context.
|
||||
*/
|
||||
typedef struct {
|
||||
const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE
|
||||
} AVBufferSinkParams;
|
||||
|
||||
/**
|
||||
* Create an AVBufferSinkParams structure.
|
||||
*
|
||||
* Must be freed with av_free().
|
||||
*/
|
||||
AVBufferSinkParams *av_buffersink_params_alloc(void);
|
||||
|
||||
/**
|
||||
* Struct to use for initializing an abuffersink context.
|
||||
*/
|
||||
typedef struct {
|
||||
const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE
|
||||
const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1
|
||||
const int *channel_counts; ///< list of allowed channel counts, terminated by -1
|
||||
int all_channel_counts; ///< if not 0, accept any channel count or layout
|
||||
int *sample_rates; ///< list of allowed sample rates, terminated by -1
|
||||
} AVABufferSinkParams;
|
||||
|
||||
/**
|
||||
* Create an AVABufferSinkParams structure.
|
||||
*
|
||||
* Must be freed with av_free().
|
||||
*/
|
||||
AVABufferSinkParams *av_abuffersink_params_alloc(void);
|
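/**
 * Illustrative usage sketch (not part of the original header): constraining an
 * abuffersink to signed 16-bit output when creating it with
 * avfilter_graph_create_filter(); `graph` is assumed to be an existing
 * AVFilterGraph.
 *
 * @code
 * static const enum AVSampleFormat fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
 * AVABufferSinkParams *params = av_abuffersink_params_alloc();
 * AVFilterContext *sink_ctx = NULL;
 *
 * params->sample_fmts = fmts;
 * avfilter_graph_create_filter(&sink_ctx, avfilter_get_by_name("abuffersink"),
 *                              "out", NULL, params, graph);
 * av_free(params);
 * @endcode
 */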
||||
|
||||
/**
|
||||
* Set the frame size for an audio buffer sink.
|
||||
*
|
||||
* All calls to av_buffersink_get_buffer_ref will return a buffer with
|
||||
* exactly the specified number of samples, or AVERROR(EAGAIN) if there is
|
||||
* not enough. The last buffer at EOF will be padded with 0.
|
||||
*/
|
||||
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size);
|
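/**
 * Illustrative usage sketch (not part of the original header): requesting
 * fixed-size audio frames from an abuffersink instance `sink_ctx`, e.g. to
 * match an encoder's frame_size; the value 1024 is only an example.
 *
 * @code
 * av_buffersink_set_frame_size(sink_ctx, 1024);
 * // subsequent reads now return exactly 1024 samples per channel,
 * // except for the final (zero-padded) frame at EOF.
 * @endcode
 */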
||||
|
||||
/**
|
||||
* Get the frame rate of the input.
|
||||
*/
|
||||
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx);
|
||||
|
||||
/**
|
||||
* Get a frame with filtered data from sink and put it in frame.
|
||||
*
|
||||
* @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
|
||||
* @param frame pointer to an allocated frame that will be filled with data.
|
||||
* The data must be freed using av_frame_unref() / av_frame_free()
|
||||
*
|
||||
* @return
|
||||
* - >= 0 if a frame was successfully returned.
|
||||
* - AVERROR(EAGAIN) if no frames are available at this point; more
|
||||
* input frames must be added to the filtergraph to get more output.
|
||||
* - AVERROR_EOF if there will be no more output frames on this sink.
|
||||
* - A different negative AVERROR code in other failure cases.
|
||||
*/
|
||||
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);
|
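/**
 * Illustrative usage sketch (not part of the original header): a typical drain
 * loop on a configured buffersink instance `sink_ctx`, assuming the rest of the
 * filtergraph is fed elsewhere.
 *
 * @code
 * AVFrame *frame = av_frame_alloc();
 * int ret;
 *
 * while ((ret = av_buffersink_get_frame(sink_ctx, frame)) >= 0) {
 *     // ... use the filtered frame ...
 *     av_frame_unref(frame);
 * }
 * if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
 *     fprintf(stderr, "error while pulling from the buffersink\n");
 * av_frame_free(&frame);
 * @endcode
 */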
||||
|
||||
/**
|
||||
* Same as av_buffersink_get_frame(), but with the ability to specify the number
|
||||
* of samples read. This function is less efficient than
|
||||
* av_buffersink_get_frame(), because it copies the data around.
|
||||
*
|
||||
* @param ctx pointer to a context of the abuffersink AVFilter.
|
||||
* @param frame pointer to an allocated frame that will be filled with data.
|
||||
* The data must be freed using av_frame_unref() / av_frame_free()
|
||||
* frame will contain exactly nb_samples audio samples, except at
|
||||
* the end of stream, when it can contain less than nb_samples.
|
||||
*
|
||||
 * @return The return codes have the same meaning as for
 *         av_buffersink_get_frame().
|
||||
*
|
||||
* @warning do not mix this function with av_buffersink_get_frame(). Use only one or
|
||||
* the other with a single sink, not both.
|
||||
*/
|
||||
int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples);
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#endif /* AVFILTER_BUFFERSINK_H */
|
160
Externals/ffmpeg/dev/include/libavfilter/buffersrc.h
vendored
@ -1,160 +0,0 @@
|
||||
/*
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVFILTER_BUFFERSRC_H
|
||||
#define AVFILTER_BUFFERSRC_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @ingroup lavfi_buffersrc
|
||||
* Memory buffer source API.
|
||||
*/
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "avfilter.h"
|
||||
|
||||
/**
|
||||
* @defgroup lavfi_buffersrc Buffer source API
|
||||
* @ingroup lavfi
|
||||
* @{
|
||||
*/
|
||||
|
||||
enum {
|
||||
|
||||
/**
|
||||
* Do not check for format changes.
|
||||
*/
|
||||
AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1,
|
||||
|
||||
#if FF_API_AVFILTERBUFFER
|
||||
/**
|
||||
* Ignored
|
||||
*/
|
||||
AV_BUFFERSRC_FLAG_NO_COPY = 2,
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Immediately push the frame to the output.
|
||||
*/
|
||||
AV_BUFFERSRC_FLAG_PUSH = 4,
|
||||
|
||||
/**
|
||||
* Keep a reference to the frame.
|
||||
* If the frame is reference-counted, create a new reference; otherwise
|
||||
* copy the frame data.
|
||||
*/
|
||||
AV_BUFFERSRC_FLAG_KEEP_REF = 8,
|
||||
|
||||
};
|
||||
|
||||
/**
|
||||
* Add buffer data in picref to buffer_src.
|
||||
*
|
||||
* @param buffer_src pointer to a buffer source context
|
||||
* @param picref a buffer reference, or NULL to mark EOF
|
||||
* @param flags a combination of AV_BUFFERSRC_FLAG_*
|
||||
* @return >= 0 in case of success, a negative AVERROR code
|
||||
* in case of failure
|
||||
*/
|
||||
int av_buffersrc_add_ref(AVFilterContext *buffer_src,
|
||||
AVFilterBufferRef *picref, int flags);
|
||||
|
||||
/**
|
||||
* Get the number of failed requests.
|
||||
*
|
||||
* A failed request is when the request_frame method is called while no
|
||||
* frame is present in the buffer.
|
||||
* The number is reset when a frame is added.
|
||||
*/
|
||||
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src);
|
||||
|
||||
#if FF_API_AVFILTERBUFFER
|
||||
/**
|
||||
* Add a buffer to a filtergraph.
|
||||
*
|
||||
* @param ctx an instance of the buffersrc filter
|
||||
* @param buf buffer containing frame data to be passed down the filtergraph.
|
||||
* This function will take ownership of buf, the user must not free it.
|
||||
* A NULL buf signals EOF -- i.e. no more frames will be sent to this filter.
|
||||
*
|
||||
* @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame()
|
||||
*/
|
||||
attribute_deprecated
|
||||
int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Add a frame to the buffer source.
|
||||
*
|
||||
* @param ctx an instance of the buffersrc filter
|
||||
* @param frame frame to be added. If the frame is reference counted, this
|
||||
* function will make a new reference to it. Otherwise the frame data will be
|
||||
* copied.
|
||||
*
|
||||
* @return 0 on success, a negative AVERROR on error
|
||||
*
|
||||
* This function is equivalent to av_buffersrc_add_frame_flags() with the
|
||||
* AV_BUFFERSRC_FLAG_KEEP_REF flag.
|
||||
*/
|
||||
int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame);
|
||||
|
||||
/**
|
||||
* Add a frame to the buffer source.
|
||||
*
|
||||
* @param ctx an instance of the buffersrc filter
|
||||
* @param frame frame to be added. If the frame is reference counted, this
|
||||
* function will take ownership of the reference(s) and reset the frame.
|
||||
* Otherwise the frame data will be copied. If this function returns an error,
|
||||
* the input frame is not touched.
|
||||
*
|
||||
* @return 0 on success, a negative AVERROR on error.
|
||||
*
|
||||
* @note the difference between this function and av_buffersrc_write_frame() is
|
||||
* that av_buffersrc_write_frame() creates a new reference to the input frame,
|
||||
* while this function takes ownership of the reference passed to it.
|
||||
*
|
||||
* This function is equivalent to av_buffersrc_add_frame_flags() without the
|
||||
* AV_BUFFERSRC_FLAG_KEEP_REF flag.
|
||||
*/
|
||||
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
|
||||
|
||||
/**
|
||||
* Add a frame to the buffer source.
|
||||
*
|
||||
* By default, if the frame is reference-counted, this function will take
|
||||
* ownership of the reference(s) and reset the frame. This can be controlled
|
||||
* using the flags.
|
||||
*
|
||||
* If this function returns an error, the input frame is not touched.
|
||||
*
|
||||
* @param buffer_src pointer to a buffer source context
|
||||
* @param frame a frame, or NULL to mark EOF
|
||||
* @param flags a combination of AV_BUFFERSRC_FLAG_*
|
||||
* @return >= 0 in case of success, a negative AVERROR code
|
||||
* in case of failure
|
||||
*/
|
||||
int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src,
|
||||
AVFrame *frame, int flags);
|
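/**
 * Illustrative usage sketch (not part of the original header): feeding decoded
 * frames into a buffersrc instance `src_ctx` while keeping ownership of the
 * frame, then flushing the source at end of stream.
 *
 * @code
 * // for every decoded AVFrame *frame:
 * if (av_buffersrc_add_frame_flags(src_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
 *     fprintf(stderr, "error while feeding the filtergraph\n");
 *
 * // after the last frame, signal EOF to the source:
 * av_buffersrc_add_frame_flags(src_ctx, NULL, 0);
 * @endcode
 */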
||||
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#endif /* AVFILTER_BUFFERSRC_H */
|
@ -1,80 +0,0 @@
|
||||
/*
|
||||
* Version macros.
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVFILTER_VERSION_H
|
||||
#define AVFILTER_VERSION_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @ingroup lavfi
|
||||
* Libavfilter version macros
|
||||
*/
|
||||
|
||||
#include "libavutil/version.h"
|
||||
|
||||
#define LIBAVFILTER_VERSION_MAJOR 5
|
||||
#define LIBAVFILTER_VERSION_MINOR 7
|
||||
#define LIBAVFILTER_VERSION_MICRO 101
|
||||
|
||||
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
|
||||
LIBAVFILTER_VERSION_MINOR, \
|
||||
LIBAVFILTER_VERSION_MICRO)
|
||||
#define LIBAVFILTER_VERSION AV_VERSION(LIBAVFILTER_VERSION_MAJOR, \
|
||||
LIBAVFILTER_VERSION_MINOR, \
|
||||
LIBAVFILTER_VERSION_MICRO)
|
||||
#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT
|
||||
|
||||
#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION)
|
||||
|
||||
/**
|
||||
* FF_API_* defines may be placed below to indicate public API that will be
|
||||
* dropped at a future version bump. The defines themselves are not part of
|
||||
* the public API and may change, break or disappear at any time.
|
||||
*/
|
||||
|
||||
#ifndef FF_API_AVFILTERPAD_PUBLIC
|
||||
#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 6)
|
||||
#endif
|
||||
#ifndef FF_API_FOO_COUNT
|
||||
#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 6)
|
||||
#endif
|
||||
#ifndef FF_API_AVFILTERBUFFER
|
||||
#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 6)
|
||||
#endif
|
||||
#ifndef FF_API_OLD_FILTER_OPTS
|
||||
#define FF_API_OLD_FILTER_OPTS (LIBAVFILTER_VERSION_MAJOR < 6)
|
||||
#endif
|
||||
#ifndef FF_API_AVFILTER_OPEN
|
||||
#define FF_API_AVFILTER_OPEN (LIBAVFILTER_VERSION_MAJOR < 6)
|
||||
#endif
|
||||
#ifndef FF_API_AVFILTER_INIT_FILTER
|
||||
#define FF_API_AVFILTER_INIT_FILTER (LIBAVFILTER_VERSION_MAJOR < 6)
|
||||
#endif
|
||||
#ifndef FF_API_OLD_FILTER_REGISTER
|
||||
#define FF_API_OLD_FILTER_REGISTER (LIBAVFILTER_VERSION_MAJOR < 6)
|
||||
#endif
|
||||
#ifndef FF_API_OLD_GRAPH_PARSE
|
||||
#define FF_API_OLD_GRAPH_PARSE (LIBAVFILTER_VERSION_MAJOR < 5)
|
||||
#endif
|
||||
#ifndef FF_API_NOCONST_GET_NAME
|
||||
#define FF_API_NOCONST_GET_NAME (LIBAVFILTER_VERSION_MAJOR < 6)
|
||||
#endif
|
||||
|
||||
#endif /* AVFILTER_VERSION_H */
|
@ -1,107 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef POSTPROC_POSTPROCESS_H
|
||||
#define POSTPROC_POSTPROCESS_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @ingroup lpp
|
||||
* external API header
|
||||
*/
|
||||
|
||||
/**
|
||||
* @defgroup lpp Libpostproc
|
||||
* @{
|
||||
*/
|
||||
|
||||
#include "libpostproc/version.h"
|
||||
|
||||
/**
|
||||
* Return the LIBPOSTPROC_VERSION_INT constant.
|
||||
*/
|
||||
unsigned postproc_version(void);
|
||||
|
||||
/**
|
||||
* Return the libpostproc build-time configuration.
|
||||
*/
|
||||
const char *postproc_configuration(void);
|
||||
|
||||
/**
|
||||
* Return the libpostproc license.
|
||||
*/
|
||||
const char *postproc_license(void);
|
||||
|
||||
#define PP_QUALITY_MAX 6
|
||||
|
||||
#define QP_STORE_T int8_t
|
||||
|
||||
#include <inttypes.h>
|
||||
|
||||
typedef void pp_context;
|
||||
typedef void pp_mode;
|
||||
|
||||
#if LIBPOSTPROC_VERSION_INT < (52<<16)
|
||||
typedef pp_context pp_context_t;
|
||||
typedef pp_mode pp_mode_t;
|
||||
extern const char *const pp_help; ///< a simple help text
|
||||
#else
|
||||
extern const char pp_help[]; ///< a simple help text
|
||||
#endif
|
||||
|
||||
void pp_postprocess(const uint8_t * src[3], const int srcStride[3],
|
||||
uint8_t * dst[3], const int dstStride[3],
|
||||
int horizontalSize, int verticalSize,
|
||||
const QP_STORE_T *QP_store, int QP_stride,
|
||||
pp_mode *mode, pp_context *ppContext, int pict_type);
|
||||
|
||||
|
||||
/**
|
||||
* Return a pp_mode or NULL if an error occurred.
|
||||
*
|
||||
* @param name the string after "-pp" on the command line
|
||||
* @param quality a number from 0 to PP_QUALITY_MAX
|
||||
*/
|
||||
pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality);
|
||||
void pp_free_mode(pp_mode *mode);
|
||||
|
||||
pp_context *pp_get_context(int width, int height, int flags);
|
||||
void pp_free_context(pp_context *ppContext);
|
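/**
 * Illustrative usage sketch (not part of the original header): applying the
 * "default" postprocessing chain to one YUV420P frame; `src`, `dst`, the
 * strides and the frame size are assumed to come from the caller.
 *
 * @code
 * pp_mode    *mode = pp_get_mode_by_name_and_quality("default", PP_QUALITY_MAX);
 * pp_context *ctx  = pp_get_context(width, height, PP_FORMAT_420 | PP_CPU_CAPS_AUTO);
 *
 * if (mode && ctx)
 *     pp_postprocess(src, src_stride, dst, dst_stride, width, height,
 *                    NULL, 0, mode, ctx, 0); // NULL QP table: use internal default
 *
 * pp_free_mode(mode);
 * pp_free_context(ctx);
 * @endcode
 */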
||||
|
||||
#define PP_CPU_CAPS_MMX 0x80000000
|
||||
#define PP_CPU_CAPS_MMX2 0x20000000
|
||||
#define PP_CPU_CAPS_3DNOW 0x40000000
|
||||
#define PP_CPU_CAPS_ALTIVEC 0x10000000
|
||||
#define PP_CPU_CAPS_AUTO 0x00080000
|
||||
|
||||
#define PP_FORMAT 0x00000008
|
||||
#define PP_FORMAT_420 (0x00000011|PP_FORMAT)
|
||||
#define PP_FORMAT_422 (0x00000001|PP_FORMAT)
|
||||
#define PP_FORMAT_411 (0x00000002|PP_FORMAT)
|
||||
#define PP_FORMAT_444 (0x00000000|PP_FORMAT)
|
||||
#define PP_FORMAT_440 (0x00000010|PP_FORMAT)
|
||||
|
||||
#define PP_PICT_TYPE_QP2 0x00000010 ///< MPEG2 style QScale
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#endif /* POSTPROC_POSTPROCESS_H */
|
@ -1,45 +0,0 @@
|
||||
/*
|
||||
* Version macros.
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef POSTPROC_POSTPROCESS_VERSION_H
|
||||
#define POSTPROC_POSTPROCESS_VERSION_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Libpostproc version macros
|
||||
*/
|
||||
|
||||
#include "libavutil/avutil.h"
|
||||
|
||||
#define LIBPOSTPROC_VERSION_MAJOR 53
|
||||
#define LIBPOSTPROC_VERSION_MINOR 3
|
||||
#define LIBPOSTPROC_VERSION_MICRO 100
|
||||
|
||||
#define LIBPOSTPROC_VERSION_INT AV_VERSION_INT(LIBPOSTPROC_VERSION_MAJOR, \
|
||||
LIBPOSTPROC_VERSION_MINOR, \
|
||||
LIBPOSTPROC_VERSION_MICRO)
|
||||
#define LIBPOSTPROC_VERSION AV_VERSION(LIBPOSTPROC_VERSION_MAJOR, \
|
||||
LIBPOSTPROC_VERSION_MINOR, \
|
||||
LIBPOSTPROC_VERSION_MICRO)
|
||||
#define LIBPOSTPROC_BUILD LIBPOSTPROC_VERSION_INT
|
||||
|
||||
#define LIBPOSTPROC_IDENT "postproc" AV_STRINGIFY(LIBPOSTPROC_VERSION)
|
||||
|
||||
#endif /* POSTPROC_POSTPROCESS_VERSION_H */
|
@ -1,534 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at)
|
||||
*
|
||||
* This file is part of libswresample
|
||||
*
|
||||
* libswresample is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* libswresample is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with libswresample; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef SWRESAMPLE_SWRESAMPLE_H
|
||||
#define SWRESAMPLE_SWRESAMPLE_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @ingroup lswr
|
||||
* libswresample public header
|
||||
*/
|
||||
|
||||
/**
|
||||
* @defgroup lswr Libswresample
|
||||
* @{
|
||||
*
|
||||
* Libswresample (lswr) is a library that handles audio resampling, sample
|
||||
* format conversion and mixing.
|
||||
*
|
||||
* Interaction with lswr is done through SwrContext, which is
|
||||
* allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters
|
||||
* must be set with the @ref avoptions API.
|
||||
*
|
||||
* The first thing you will need to do in order to use lswr is to allocate
|
||||
* SwrContext. This can be done with swr_alloc() or swr_alloc_set_opts(). If you
|
||||
* are using the former, you must set options through the @ref avoptions API.
|
||||
* The latter function provides the same feature, but it allows you to set some
|
||||
* common options in the same statement.
|
||||
*
|
||||
* For example the following code will setup conversion from planar float sample
|
||||
* format to interleaved signed 16-bit integer, downsampling from 48kHz to
|
||||
* 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing
|
||||
* matrix). This is using the swr_alloc() function.
|
||||
* @code
|
||||
* SwrContext *swr = swr_alloc();
|
||||
* av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0);
|
||||
* av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
|
||||
* av_opt_set_int(swr, "in_sample_rate", 48000, 0);
|
||||
* av_opt_set_int(swr, "out_sample_rate", 44100, 0);
|
||||
* av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
|
||||
* av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
* @endcode
|
||||
*
|
||||
* The same job can be done using swr_alloc_set_opts() as well:
|
||||
* @code
|
||||
* SwrContext *swr = swr_alloc_set_opts(NULL, // we're allocating a new context
|
||||
* AV_CH_LAYOUT_STEREO, // out_ch_layout
|
||||
* AV_SAMPLE_FMT_S16, // out_sample_fmt
|
||||
* 44100, // out_sample_rate
|
||||
* AV_CH_LAYOUT_5POINT1, // in_ch_layout
|
||||
* AV_SAMPLE_FMT_FLTP, // in_sample_fmt
|
||||
* 48000, // in_sample_rate
|
||||
* 0, // log_offset
|
||||
* NULL); // log_ctx
|
||||
* @endcode
|
||||
*
|
||||
* Once all values have been set, it must be initialized with swr_init(). If
|
||||
* you need to change the conversion parameters, you can change the parameters
|
||||
* using @ref AVOptions, as described above in the first example; or by using
|
||||
* swr_alloc_set_opts(), but with the first argument the allocated context.
|
||||
* You must then call swr_init() again.
|
||||
*
|
||||
* The conversion itself is done by repeatedly calling swr_convert().
|
||||
* Note that the samples may get buffered in swr if you provide insufficient
|
||||
* output space or if sample rate conversion is done, which requires "future"
|
||||
* samples. Samples that do not require future input can be retrieved at any
|
||||
* time by using swr_convert() (in_count can be set to 0).
|
||||
* At the end of conversion the resampling buffer can be flushed by calling
|
||||
* swr_convert() with NULL in and 0 in_count.
|
||||
*
|
||||
* The samples used in the conversion process can be managed with the libavutil
|
||||
* @ref lavu_sampmanip "samples manipulation" API, including av_samples_alloc()
|
||||
* function used in the following example.
|
||||
*
|
||||
* The delay between input and output can at any time be found by using
|
||||
* swr_get_delay().
|
||||
*
|
||||
* The following code demonstrates the conversion loop assuming the parameters
|
||||
* from above and caller-defined functions get_input() and handle_output():
|
||||
* @code
|
||||
* uint8_t **input;
|
||||
* int in_samples;
|
||||
*
|
||||
* while (get_input(&input, &in_samples)) {
|
||||
* uint8_t *output;
|
||||
* int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) +
|
||||
* in_samples, 44100, 48000, AV_ROUND_UP);
|
||||
* av_samples_alloc(&output, NULL, 2, out_samples,
|
||||
* AV_SAMPLE_FMT_S16, 0);
|
||||
* out_samples = swr_convert(swr, &output, out_samples,
|
||||
* input, in_samples);
|
||||
* handle_output(output, out_samples);
|
||||
* av_freep(&output);
|
||||
* }
|
||||
* @endcode
|
||||
*
|
||||
* When the conversion is finished, the conversion
|
||||
* context and everything associated with it must be freed with swr_free().
|
||||
* A swr_close() function is also available, but it exists mainly for
|
||||
* compatibility with libavresample, and is not required to be called.
|
||||
*
|
||||
* There will be no memory leak if the data is not completely flushed before
|
||||
* swr_free().
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
#include "libavutil/frame.h"
|
||||
#include "libavutil/samplefmt.h"
|
||||
|
||||
#include "libswresample/version.h"
|
||||
|
||||
#if LIBSWRESAMPLE_VERSION_MAJOR < 1
|
||||
#define SWR_CH_MAX 32 ///< Maximum number of channels
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @name Option constants
|
||||
* These constants are used for the @ref avoptions interface for lswr.
|
||||
* @{
|
||||
*
|
||||
*/
|
||||
|
||||
#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate
|
||||
//TODO use int resample ?
|
||||
//long term TODO can we enable this dynamically?
|
||||
|
||||
/** Dithering algorithms */
|
||||
enum SwrDitherType {
|
||||
SWR_DITHER_NONE = 0,
|
||||
SWR_DITHER_RECTANGULAR,
|
||||
SWR_DITHER_TRIANGULAR,
|
||||
SWR_DITHER_TRIANGULAR_HIGHPASS,
|
||||
|
||||
SWR_DITHER_NS = 64, ///< not part of API/ABI
|
||||
SWR_DITHER_NS_LIPSHITZ,
|
||||
SWR_DITHER_NS_F_WEIGHTED,
|
||||
SWR_DITHER_NS_MODIFIED_E_WEIGHTED,
|
||||
SWR_DITHER_NS_IMPROVED_E_WEIGHTED,
|
||||
SWR_DITHER_NS_SHIBATA,
|
||||
SWR_DITHER_NS_LOW_SHIBATA,
|
||||
SWR_DITHER_NS_HIGH_SHIBATA,
|
||||
SWR_DITHER_NB, ///< not part of API/ABI
|
||||
};
|
||||
|
||||
/** Resampling Engines */
|
||||
enum SwrEngine {
|
||||
SWR_ENGINE_SWR, /**< SW Resampler */
|
||||
SWR_ENGINE_SOXR, /**< SoX Resampler */
|
||||
SWR_ENGINE_NB, ///< not part of API/ABI
|
||||
};
|
||||
|
||||
/** Resampling Filter Types */
|
||||
enum SwrFilterType {
|
||||
SWR_FILTER_TYPE_CUBIC, /**< Cubic */
|
||||
SWR_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall Windowed Sinc */
|
||||
SWR_FILTER_TYPE_KAISER, /**< Kaiser Windowed Sinc */
|
||||
};
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
/**
|
||||
* The libswresample context. Unlike libavcodec and libavformat, this structure
|
||||
* is opaque. This means that if you would like to set options, you must use
|
||||
* the @ref avoptions API and cannot directly set values to members of the
|
||||
* structure.
|
||||
*/
|
||||
typedef struct SwrContext SwrContext;
|
||||
|
||||
/**
|
||||
* Get the AVClass for SwrContext. It can be used in combination with
|
||||
* AV_OPT_SEARCH_FAKE_OBJ for examining options.
|
||||
*
|
||||
* @see av_opt_find().
|
||||
* @return the AVClass of SwrContext
|
||||
*/
|
||||
const AVClass *swr_get_class(void);
|
||||
|
||||
/**
|
||||
* @name SwrContext constructor functions
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* Allocate SwrContext.
|
||||
*
|
||||
* If you use this function you will need to set the parameters (manually or
|
||||
* with swr_alloc_set_opts()) before calling swr_init().
|
||||
*
|
||||
* @see swr_alloc_set_opts(), swr_init(), swr_free()
|
||||
* @return NULL on error, allocated context otherwise
|
||||
*/
|
||||
struct SwrContext *swr_alloc(void);
|
||||
|
||||
/**
|
||||
* Initialize context after user parameters have been set.
|
||||
* @note The context must be configured using the AVOption API.
|
||||
*
|
||||
* @see av_opt_set_int()
|
||||
* @see av_opt_set_dict()
|
||||
*
|
||||
* @param[in,out] s Swr context to initialize
|
||||
* @return AVERROR error code in case of failure.
|
||||
*/
|
||||
int swr_init(struct SwrContext *s);
|
||||
|
||||
/**
|
||||
* Check whether an swr context has been initialized or not.
|
||||
*
|
||||
* @param[in] s Swr context to check
|
||||
* @see swr_init()
|
||||
* @return positive if it has been initialized, 0 if not initialized
|
||||
*/
|
||||
int swr_is_initialized(struct SwrContext *s);
|
||||
|
||||
/**
|
||||
* Allocate SwrContext if needed and set/reset common parameters.
|
||||
*
|
||||
* This function does not require s to be allocated with swr_alloc(). On the
|
||||
* other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters
|
||||
* on the allocated context.
|
||||
*
|
||||
* @param s existing Swr context if available, or NULL if not
|
||||
* @param out_ch_layout output channel layout (AV_CH_LAYOUT_*)
|
||||
* @param out_sample_fmt output sample format (AV_SAMPLE_FMT_*).
|
||||
* @param out_sample_rate output sample rate (frequency in Hz)
|
||||
* @param in_ch_layout input channel layout (AV_CH_LAYOUT_*)
|
||||
* @param in_sample_fmt input sample format (AV_SAMPLE_FMT_*).
|
||||
* @param in_sample_rate input sample rate (frequency in Hz)
|
||||
* @param log_offset logging level offset
|
||||
* @param log_ctx parent logging context, can be NULL
|
||||
*
|
||||
* @see swr_init(), swr_free()
|
||||
* @return NULL on error, allocated context otherwise
|
||||
*/
|
||||
struct SwrContext *swr_alloc_set_opts(struct SwrContext *s,
|
||||
int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate,
|
||||
int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate,
|
||||
int log_offset, void *log_ctx);
|
||||
|
||||
/**
|
||||
* @}
|
||||
*
|
||||
* @name SwrContext destructor functions
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* Free the given SwrContext and set the pointer to NULL.
|
||||
*
|
||||
* @param[in] s a pointer to a pointer to Swr context
|
||||
*/
|
||||
void swr_free(struct SwrContext **s);
|
||||
|
||||
/**
|
||||
* Closes the context so that swr_is_initialized() returns 0.
|
||||
*
|
||||
* The context can be brought back to life by running swr_init(),
|
||||
* swr_init() can also be used without swr_close().
|
||||
* This function is mainly provided to simplify the use case
|
||||
* where one tries to support libavresample and libswresample.
|
||||
*
|
||||
* @param[in,out] s Swr context to be closed
|
||||
*/
|
||||
void swr_close(struct SwrContext *s);
|
||||
|
||||
/**
|
||||
* @}
|
||||
*
|
||||
* @name Core conversion functions
|
||||
* @{
|
||||
*/
|
||||
|
||||
/** Convert audio.
|
||||
*
|
||||
* in and in_count can be set to 0 to flush the last few samples out at the
|
||||
* end.
|
||||
*
|
||||
* If more input is provided than output space then the input will be buffered.
|
||||
* You can avoid this buffering by providing more output space than input.
|
||||
* Conversion will run directly without copying whenever possible.
|
||||
*
|
||||
* @param s allocated Swr context, with parameters set
|
||||
* @param out output buffers, only the first one need be set in case of packed audio
|
||||
* @param out_count amount of space available for output in samples per channel
|
||||
* @param in input buffers, only the first one need to be set in case of packed audio
|
||||
* @param in_count number of input samples available in one channel
|
||||
*
|
||||
* @return number of samples output per channel, negative value on error
|
||||
*/
|
||||
int swr_convert(struct SwrContext *s, uint8_t **out, int out_count,
|
||||
const uint8_t **in , int in_count);
|
||||
|
||||
/**
|
||||
 * Convert the next timestamp from input to output;
 * timestamps are in 1/(in_sample_rate * out_sample_rate) units.
|
||||
*
|
||||
* @note There are 2 slightly differently behaving modes.
|
||||
* @li When automatic timestamp compensation is not used, (min_compensation >= FLT_MAX)
|
||||
* in this case timestamps will be passed through with delays compensated
|
||||
* @li When automatic timestamp compensation is used, (min_compensation < FLT_MAX)
|
||||
* in this case the output timestamps will match output sample numbers.
|
||||
* See ffmpeg-resampler(1) for the two modes of compensation.
|
||||
*
|
||||
* @param s[in] initialized Swr context
|
||||
* @param pts[in] timestamp for the next input sample, INT64_MIN if unknown
|
||||
 * @see swr_set_compensation(), swr_drop_output(), and swr_inject_silence(),
 *      which are used internally for timestamp compensation.
|
||||
* @return the output timestamp for the next output sample
|
||||
*/
|
||||
int64_t swr_next_pts(struct SwrContext *s, int64_t pts);
|
||||
|
||||
/**
|
||||
* @}
|
||||
*
|
||||
* @name Low-level option setting functions
|
||||
* These functions provide a means to set low-level options that is not possible
|
||||
* with the AVOption API.
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* Activate resampling compensation ("soft" compensation). This function is
|
||||
* internally called when needed in swr_next_pts().
|
||||
*
|
||||
* @param[in,out] s allocated Swr context. If it is not initialized,
|
||||
* or SWR_FLAG_RESAMPLE is not set, swr_init() is
|
||||
* called with the flag set.
|
||||
* @param[in] sample_delta delta in PTS per sample
|
||||
* @param[in] compensation_distance number of samples to compensate for
|
||||
* @return >= 0 on success, AVERROR error codes if:
|
||||
* @li @c s is NULL,
|
||||
* @li @c compensation_distance is less than 0,
|
||||
* @li @c compensation_distance is 0 but sample_delta is not,
|
||||
* @li compensation unsupported by resampler, or
|
||||
* @li swr_init() fails when called.
|
||||
*/
|
||||
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance);
|
||||
|
||||
/**
|
||||
* Set a customized input channel mapping.
|
||||
*
|
||||
* @param[in,out] s allocated Swr context, not yet initialized
|
||||
* @param[in] channel_map customized input channel mapping (array of channel
|
||||
* indexes, -1 for a muted channel)
|
||||
* @return >= 0 on success, or AVERROR error code in case of failure.
|
||||
*/
|
||||
int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map);
|
||||
|
||||
/**
|
||||
* Set a customized remix matrix.
|
||||
*
|
||||
* @param s allocated Swr context, not yet initialized
|
||||
* @param matrix remix coefficients; matrix[i + stride * o] is
|
||||
* the weight of input channel i in output channel o
|
||||
* @param stride offset between lines of the matrix
|
||||
* @return >= 0 on success, or AVERROR error code in case of failure.
|
||||
*/
|
||||
int swr_set_matrix(struct SwrContext *s, const double *matrix, int stride);
|
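/**
 * Illustrative usage sketch (not part of the original header): a simple
 * stereo-to-mono downmix matrix installed before swr_init(); the 0.5 weights
 * are only an example.
 *
 * @code
 * // one output channel, two input channels: out[0] = 0.5*in[0] + 0.5*in[1]
 * static const double matrix[] = { 0.5, 0.5 };
 * if (swr_set_matrix(swr, matrix, 2) < 0)
 *     fprintf(stderr, "failed to set the remix matrix\n");
 * @endcode
 */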
||||
|
||||
/**
|
||||
* @}
|
||||
*
|
||||
* @name Sample handling functions
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* Drops the specified number of output samples.
|
||||
*
|
||||
* This function, along with swr_inject_silence(), is called by swr_next_pts()
|
||||
* if needed for "hard" compensation.
|
||||
*
|
||||
* @param s allocated Swr context
|
||||
* @param count number of samples to be dropped
|
||||
*
|
||||
* @return >= 0 on success, or a negative AVERROR code on failure
|
||||
*/
|
||||
int swr_drop_output(struct SwrContext *s, int count);
|
||||
|
||||
/**
|
||||
* Injects the specified number of silence samples.
|
||||
*
|
||||
* This function, along with swr_drop_output(), is called by swr_next_pts()
|
||||
* if needed for "hard" compensation.
|
||||
*
|
||||
* @param s allocated Swr context
|
||||
* @param count number of silence samples to be injected
|
||||
*
|
||||
* @return >= 0 on success, or a negative AVERROR code on failure
|
||||
*/
|
||||
int swr_inject_silence(struct SwrContext *s, int count);
|
||||
|
||||
/**
|
||||
* Gets the delay the next input sample will experience relative to the next output sample.
|
||||
*
|
||||
* Swresample can buffer data if more input has been provided than available
|
||||
* output space, also converting between sample rates needs a delay.
|
||||
* This function returns the sum of all such delays.
|
||||
* The exact delay is not necessarily an integer value in either input or
|
||||
* output sample rate. Especially when downsampling by a large value, the
|
||||
* output sample rate may be a poor choice to represent the delay, similarly
|
||||
* for upsampling and the input sample rate.
|
||||
*
|
||||
* @param s swr context
|
||||
* @param base timebase in which the returned delay will be:
|
||||
* @li if it's set to 1 the returned delay is in seconds
|
||||
* @li if it's set to 1000 the returned delay is in milliseconds
|
||||
* @li if it's set to the input sample rate then the returned
|
||||
* delay is in input samples
|
||||
* @li if it's set to the output sample rate then the returned
|
||||
* delay is in output samples
|
||||
* @li if it's the least common multiple of in_sample_rate and
|
||||
* out_sample_rate then an exact rounding-free delay will be
|
||||
* returned
|
||||
* @returns the delay in 1 / @c base units.
|
||||
*/
|
||||
int64_t swr_get_delay(struct SwrContext *s, int64_t base);
|
||||
|
||||
/**
|
||||
* @}
|
||||
*
|
||||
* @name Configuration accessors
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* Return the @ref LIBSWRESAMPLE_VERSION_INT constant.
|
||||
*
|
||||
* This is useful to check if the build-time libswresample has the same version
|
||||
* as the run-time one.
|
||||
*
|
||||
* @returns the unsigned int-typed version
|
||||
*/
|
||||
unsigned swresample_version(void);
|
||||
|
||||
/**
|
||||
* Return the swr build-time configuration.
|
||||
*
|
||||
* @returns the build-time @c ./configure flags
|
||||
*/
|
||||
const char *swresample_configuration(void);
|
||||
|
||||
/**
|
||||
* Return the swr license.
|
||||
*
|
||||
* @returns the license of libswresample, determined at build-time
|
||||
*/
|
||||
const char *swresample_license(void);
|
||||
|
||||
/**
|
||||
* @}
|
||||
*
|
||||
* @name AVFrame based API
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* Convert the samples in the input AVFrame and write them to the output AVFrame.
|
||||
*
|
||||
* Input and output AVFrames must have channel_layout, sample_rate and format set.
|
||||
*
|
||||
 * If the output AVFrame does not have its data pointers allocated, the
 * nb_samples field will be set by this function and av_frame_get_buffer()
 * is called to allocate the frame.
|
||||
*
|
||||
* The output AVFrame can be NULL or have fewer allocated samples than required.
|
||||
* In this case, any remaining samples not written to the output will be added
|
||||
* to an internal FIFO buffer, to be returned at the next call to this function
|
||||
* or to swr_convert().
|
||||
*
|
||||
* If converting sample rate, there may be data remaining in the internal
|
||||
* resampling delay buffer. swr_get_delay() tells the number of
|
||||
* remaining samples. To get this data as output, call this function or
|
||||
* swr_convert() with NULL input.
|
||||
*
|
||||
* If the SwrContext configuration does not match the output and
|
||||
* input AVFrame settings the conversion does not take place and depending on
|
||||
* which AVFrame is not matching AVERROR_OUTPUT_CHANGED, AVERROR_INPUT_CHANGED
|
||||
* or the result of a bitwise-OR of them is returned.
|
||||
*
|
||||
* @see swr_delay()
|
||||
* @see swr_convert()
|
||||
* @see swr_get_delay()
|
||||
*
|
||||
* @param swr audio resample context
|
||||
* @param output output AVFrame
|
||||
* @param input input AVFrame
|
||||
* @return 0 on success, AVERROR on failure or nonmatching
|
||||
* configuration.
|
||||
*/
|
||||
int swr_convert_frame(SwrContext *swr,
|
||||
AVFrame *output, const AVFrame *input);
|
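/**
 * Illustrative usage sketch (not part of the original header): converting one
 * decoded frame `in` with the AVFrame-based API, reconfiguring the context once
 * if the frame parameters changed.
 *
 * @code
 * AVFrame *out = av_frame_alloc();
 * out->channel_layout = AV_CH_LAYOUT_STEREO;
 * out->sample_rate    = 44100;
 * out->format         = AV_SAMPLE_FMT_S16;
 *
 * int ret = swr_convert_frame(swr, out, in);
 * if (ret < 0) {
 *     // may be AVERROR_INPUT_CHANGED / AVERROR_OUTPUT_CHANGED:
 *     // reconfigure from the frames and retry once
 *     if (swr_config_frame(swr, out, in) >= 0)
 *         ret = swr_convert_frame(swr, out, in);
 * }
 * @endcode
 */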
||||
|
||||
/**
 * Configure or reconfigure the SwrContext using the information
 * provided by the AVFrames.
 *
 * The original resampling context is reset even on failure.
 * The function calls swr_close() internally if the context is open.
 *
 * @see swr_close()
 *
 * @param swr audio resample context
 * @param out output AVFrame
 * @param in  input AVFrame
 * @return    0 on success, AVERROR on failure.
 */
int swr_config_frame(SwrContext *swr, const AVFrame *out, const AVFrame *in);

/**
 * @}
 * @}
 */

#endif /* SWRESAMPLE_SWRESAMPLE_H */
@ -1,45 +0,0 @@
/*
 * Version macros.
 *
 * This file is part of libswresample
 *
 * libswresample is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * libswresample is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with libswresample; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef SWR_VERSION_H
#define SWR_VERSION_H

/**
 * @file
 * Libswresample version macros
 */

#include "libavutil/avutil.h"

#define LIBSWRESAMPLE_VERSION_MAJOR   1
#define LIBSWRESAMPLE_VERSION_MINOR   1
#define LIBSWRESAMPLE_VERSION_MICRO 100

#define LIBSWRESAMPLE_VERSION_INT  AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \
                                                  LIBSWRESAMPLE_VERSION_MINOR, \
                                                  LIBSWRESAMPLE_VERSION_MICRO)
#define LIBSWRESAMPLE_VERSION      AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \
                                              LIBSWRESAMPLE_VERSION_MINOR, \
                                              LIBSWRESAMPLE_VERSION_MICRO)
#define LIBSWRESAMPLE_BUILD        LIBSWRESAMPLE_VERSION_INT

#define LIBSWRESAMPLE_IDENT        "SwR" AV_STRINGIFY(LIBSWRESAMPLE_VERSION)

#endif /* SWR_VERSION_H */
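/*
 * Illustrative sketch, not part of the original header: comparing the version
 * the program was compiled against with the version of the library it runs
 * against, using the macros above together with swresample_version().
 * swr_abi_matches() is a hypothetical helper name.
 */
#include <stdio.h>
#include <libswresample/swresample.h>
#include <libswresample/version.h>

static int swr_abi_matches(void)
{
    unsigned runtime = swresample_version();

    if (runtime != LIBSWRESAMPLE_VERSION_INT)
        fprintf(stderr, "libswresample: built against 0x%x, running 0x%x\n",
                (unsigned)LIBSWRESAMPLE_VERSION_INT, runtime);

    /* AV_VERSION_INT() packs the major version into the top 16 bits. */
    return (runtime >> 16) == LIBSWRESAMPLE_VERSION_MAJOR;
}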
219
Externals/ffmpeg/dev/lib/avcodec-56.def
vendored
@ -1,219 +0,0 @@
|
||||
EXPORTS
|
||||
audio_resample
|
||||
audio_resample_close
|
||||
av_audio_convert
|
||||
av_audio_convert_alloc
|
||||
av_audio_convert_free
|
||||
av_audio_resample_init
|
||||
av_bitstream_filter_close
|
||||
av_bitstream_filter_filter
|
||||
av_bitstream_filter_init
|
||||
av_bitstream_filter_next
|
||||
av_codec_ffversion DATA
|
||||
av_codec_get_chroma_intra_matrix
|
||||
av_codec_get_codec_descriptor
|
||||
av_codec_get_lowres
|
||||
av_codec_get_max_lowres
|
||||
av_codec_get_pkt_timebase
|
||||
av_codec_get_seek_preroll
|
||||
av_codec_is_decoder
|
||||
av_codec_is_encoder
|
||||
av_codec_next
|
||||
av_codec_set_chroma_intra_matrix
|
||||
av_codec_set_codec_descriptor
|
||||
av_codec_set_lowres
|
||||
av_codec_set_pkt_timebase
|
||||
av_codec_set_seek_preroll
|
||||
av_copy_packet
|
||||
av_copy_packet_side_data
|
||||
av_dct_calc
|
||||
av_dct_end
|
||||
av_dct_init
|
||||
av_destruct_packet
|
||||
av_dup_packet
|
||||
av_dv_codec_profile
|
||||
av_dv_codec_profile2
|
||||
av_dv_frame_profile
|
||||
av_fast_padded_malloc
|
||||
av_fast_padded_mallocz
|
||||
av_fft_calc
|
||||
av_fft_end
|
||||
av_fft_init
|
||||
av_fft_permute
|
||||
av_free_packet
|
||||
av_get_audio_frame_duration
|
||||
av_get_bits_per_sample
|
||||
av_get_codec_tag_string
|
||||
av_get_exact_bits_per_sample
|
||||
av_get_pcm_codec
|
||||
av_get_profile_name
|
||||
av_grow_packet
|
||||
av_hwaccel_next
|
||||
av_imdct_calc
|
||||
av_imdct_half
|
||||
av_init_packet
|
||||
av_lockmgr_register
|
||||
av_log_ask_for_sample
|
||||
av_log_missing_feature
|
||||
av_mdct_calc
|
||||
av_mdct_end
|
||||
av_mdct_init
|
||||
av_new_packet
|
||||
av_packet_copy_props
|
||||
av_packet_free_side_data
|
||||
av_packet_from_data
|
||||
av_packet_get_side_data
|
||||
av_packet_merge_side_data
|
||||
av_packet_move_ref
|
||||
av_packet_new_side_data
|
||||
av_packet_pack_dictionary
|
||||
av_packet_ref
|
||||
av_packet_rescale_ts
|
||||
av_packet_shrink_side_data
|
||||
av_packet_split_side_data
|
||||
av_packet_unpack_dictionary
|
||||
av_packet_unref
|
||||
av_parser_change
|
||||
av_parser_close
|
||||
av_parser_init
|
||||
av_parser_next
|
||||
av_parser_parse2
|
||||
av_picture_copy
|
||||
av_picture_crop
|
||||
av_picture_pad
|
||||
av_rdft_calc
|
||||
av_rdft_end
|
||||
av_rdft_init
|
||||
av_register_bitstream_filter
|
||||
av_register_codec_parser
|
||||
av_register_hwaccel
|
||||
av_resample
|
||||
av_resample_close
|
||||
av_resample_compensate
|
||||
av_resample_init
|
||||
av_shrink_packet
|
||||
av_vorbis_parse_frame
|
||||
av_vorbis_parse_frame_flags
|
||||
av_vorbis_parse_free
|
||||
av_vorbis_parse_init
|
||||
av_vorbis_parse_reset
|
||||
av_xiphlacing
|
||||
available_bits
|
||||
avcodec_align_dimensions
|
||||
avcodec_align_dimensions2
|
||||
avcodec_alloc_context3
|
||||
avcodec_alloc_frame
|
||||
avcodec_chroma_pos_to_enum
|
||||
avcodec_close
|
||||
avcodec_configuration
|
||||
avcodec_copy_context
|
||||
avcodec_dct_alloc
|
||||
avcodec_dct_get_class
|
||||
avcodec_dct_init
|
||||
avcodec_decode_audio3
|
||||
avcodec_decode_audio4
|
||||
avcodec_decode_subtitle2
|
||||
avcodec_decode_video2
|
||||
avcodec_default_execute
|
||||
avcodec_default_execute2
|
||||
avcodec_default_get_buffer
|
||||
avcodec_default_get_buffer2
|
||||
avcodec_default_get_format
|
||||
avcodec_default_reget_buffer
|
||||
avcodec_default_release_buffer
|
||||
avcodec_descriptor_get
|
||||
avcodec_descriptor_get_by_name
|
||||
avcodec_descriptor_next
|
||||
avcodec_encode_audio
|
||||
avcodec_encode_audio2
|
||||
avcodec_encode_subtitle
|
||||
avcodec_encode_video
|
||||
avcodec_encode_video2
|
||||
avcodec_enum_to_chroma_pos
|
||||
avcodec_fill_audio_frame
|
||||
avcodec_find_best_pix_fmt2
|
||||
avcodec_find_best_pix_fmt_of_2
|
||||
avcodec_find_best_pix_fmt_of_list
|
||||
avcodec_find_decoder
|
||||
avcodec_find_decoder_by_name
|
||||
avcodec_find_encoder
|
||||
avcodec_find_encoder_by_name
|
||||
avcodec_flush_buffers
|
||||
avcodec_free_context
|
||||
avcodec_free_frame
|
||||
avcodec_get_chroma_sub_sample
|
||||
avcodec_get_class
|
||||
avcodec_get_context_defaults3
|
||||
avcodec_get_edge_width
|
||||
avcodec_get_frame_class
|
||||
avcodec_get_frame_defaults
|
||||
avcodec_get_name
|
||||
avcodec_get_pix_fmt_loss
|
||||
avcodec_get_subtitle_rect_class
|
||||
avcodec_get_type
|
||||
avcodec_is_open
|
||||
avcodec_license
|
||||
avcodec_open2
|
||||
avcodec_pix_fmt_to_codec_tag
|
||||
avcodec_register
|
||||
avcodec_register_all
|
||||
avcodec_set_dimensions
|
||||
avcodec_string
|
||||
avcodec_version
|
||||
aver_isf_history
|
||||
avpicture_alloc
|
||||
avpicture_deinterlace
|
||||
avpicture_fill
|
||||
avpicture_free
|
||||
avpicture_get_size
|
||||
avpicture_layout
|
||||
avpriv_aac_parse_header
|
||||
avpriv_ac3_channel_layout_tab DATA
|
||||
avpriv_ac3_parse_header
|
||||
avpriv_ac3_parse_header2
|
||||
avpriv_align_put_bits
|
||||
avpriv_bprint_to_extradata
|
||||
avpriv_color_frame
|
||||
avpriv_copy_bits
|
||||
avpriv_copy_pce_data
|
||||
avpriv_dca_convert_bitstream
|
||||
avpriv_dca_sample_rates DATA
|
||||
avpriv_dirac_parse_sequence_header
|
||||
avpriv_dnxhd_get_frame_size
|
||||
avpriv_do_elbg
|
||||
avpriv_dv_frame_profile2
|
||||
avpriv_exif_decode_ifd
|
||||
avpriv_find_pix_fmt
|
||||
avpriv_find_start_code
|
||||
avpriv_flac_is_extradata_valid
|
||||
avpriv_flac_parse_streaminfo
|
||||
avpriv_get_raw_pix_fmt_tags
|
||||
avpriv_h264_has_num_reorder_frames
|
||||
avpriv_init_elbg
|
||||
avpriv_lock_avformat
|
||||
avpriv_mjpeg_bits_ac_chrominance DATA
|
||||
avpriv_mjpeg_bits_ac_luminance DATA
|
||||
avpriv_mjpeg_bits_dc_chrominance DATA
|
||||
avpriv_mjpeg_bits_dc_luminance DATA
|
||||
avpriv_mjpeg_val_ac_chrominance DATA
|
||||
avpriv_mjpeg_val_ac_luminance DATA
|
||||
avpriv_mjpeg_val_dc DATA
|
||||
avpriv_mpa_bitrate_tab DATA
|
||||
avpriv_mpa_decode_header
|
||||
avpriv_mpa_decode_header2
|
||||
avpriv_mpa_freq_tab DATA
|
||||
avpriv_mpeg4audio_get_config
|
||||
avpriv_mpeg4audio_sample_rates DATA
|
||||
avpriv_mpegaudio_decode_header
|
||||
avpriv_pix_fmt_bps_avi DATA
|
||||
avpriv_pix_fmt_bps_mov DATA
|
||||
avpriv_put_string
|
||||
avpriv_split_xiph_headers
|
||||
avpriv_tak_parse_streaminfo
|
||||
avpriv_toupper4
|
||||
avpriv_unlock_avformat
|
||||
avpriv_vorbis_parse_extradata
|
||||
avpriv_vorbis_parse_frame
|
||||
avpriv_vorbis_parse_frame_flags
|
||||
avpriv_vorbis_parse_reset
|
||||
avsubtitle_free
|
19
Externals/ffmpeg/dev/lib/avdevice-56.def
vendored
@ -1,19 +0,0 @@
EXPORTS
av_device_capabilities DATA
av_device_ffversion DATA
av_input_audio_device_next
av_input_video_device_next
av_output_audio_device_next
av_output_video_device_next
avdevice_app_to_dev_control_message
avdevice_capabilities_create
avdevice_capabilities_free
avdevice_configuration
avdevice_dev_to_app_control_message
avdevice_free_list_devices
avdevice_license
avdevice_list_devices
avdevice_list_input_sources
avdevice_list_output_sinks
avdevice_register_all
avdevice_version
BIN
Externals/ffmpeg/dev/lib/avdevice.lib
vendored
Binary file not shown.
81
Externals/ffmpeg/dev/lib/avfilter-5.def
vendored
@ -1,81 +0,0 @@
|
||||
EXPORTS
|
||||
av_abuffersink_params_alloc
|
||||
av_buffersink_get_buffer_ref
|
||||
av_buffersink_get_frame
|
||||
av_buffersink_get_frame_flags
|
||||
av_buffersink_get_frame_rate
|
||||
av_buffersink_get_samples
|
||||
av_buffersink_params_alloc
|
||||
av_buffersink_poll_frame
|
||||
av_buffersink_read
|
||||
av_buffersink_read_samples
|
||||
av_buffersink_set_frame_size
|
||||
av_buffersrc_add_frame
|
||||
av_buffersrc_add_frame_flags
|
||||
av_buffersrc_add_ref
|
||||
av_buffersrc_buffer
|
||||
av_buffersrc_get_nb_failed_requests
|
||||
av_buffersrc_write_frame
|
||||
av_filter_ffversion DATA
|
||||
av_filter_next
|
||||
avfilter_add_matrix
|
||||
avfilter_all_channel_layouts DATA
|
||||
avfilter_config_links
|
||||
avfilter_configuration
|
||||
avfilter_copy_buf_props
|
||||
avfilter_copy_buffer_ref_props
|
||||
avfilter_copy_frame_props
|
||||
avfilter_free
|
||||
avfilter_get_audio_buffer_ref_from_arrays
|
||||
avfilter_get_audio_buffer_ref_from_arrays_channels
|
||||
avfilter_get_audio_buffer_ref_from_frame
|
||||
avfilter_get_buffer_ref_from_frame
|
||||
avfilter_get_by_name
|
||||
avfilter_get_class
|
||||
avfilter_get_matrix
|
||||
avfilter_get_video_buffer_ref_from_arrays
|
||||
avfilter_get_video_buffer_ref_from_frame
|
||||
avfilter_graph_add_filter
|
||||
avfilter_graph_alloc
|
||||
avfilter_graph_alloc_filter
|
||||
avfilter_graph_config
|
||||
avfilter_graph_create_filter
|
||||
avfilter_graph_dump
|
||||
avfilter_graph_free
|
||||
avfilter_graph_get_filter
|
||||
avfilter_graph_parse
|
||||
avfilter_graph_parse2
|
||||
avfilter_graph_parse_ptr
|
||||
avfilter_graph_queue_command
|
||||
avfilter_graph_request_oldest
|
||||
avfilter_graph_send_command
|
||||
avfilter_graph_set_auto_convert
|
||||
avfilter_init_dict
|
||||
avfilter_init_filter
|
||||
avfilter_init_str
|
||||
avfilter_inout_alloc
|
||||
avfilter_inout_free
|
||||
avfilter_insert_filter
|
||||
avfilter_license
|
||||
avfilter_link
|
||||
avfilter_link_free
|
||||
avfilter_link_get_channels
|
||||
avfilter_link_set_closed
|
||||
avfilter_make_format64_list
|
||||
avfilter_mul_matrix
|
||||
avfilter_next
|
||||
avfilter_open
|
||||
avfilter_pad_count
|
||||
avfilter_pad_get_name
|
||||
avfilter_pad_get_type
|
||||
avfilter_process_command
|
||||
avfilter_ref_buffer
|
||||
avfilter_ref_get_channels
|
||||
avfilter_register
|
||||
avfilter_register_all
|
||||
avfilter_sub_matrix
|
||||
avfilter_transform
|
||||
avfilter_uninit
|
||||
avfilter_unref_buffer
|
||||
avfilter_unref_bufferp
|
||||
avfilter_version
|
BIN
Externals/ffmpeg/dev/lib/avfilter.lib
vendored
Binary file not shown.
161
Externals/ffmpeg/dev/lib/avformat-56.def
vendored
@ -1,161 +0,0 @@
|
||||
EXPORTS
|
||||
av_add_index_entry
|
||||
av_append_packet
|
||||
av_codec_get_id
|
||||
av_codec_get_tag
|
||||
av_codec_get_tag2
|
||||
av_convert_lang_to
|
||||
av_demuxer_open
|
||||
av_dump_format
|
||||
av_filename_number_test
|
||||
av_find_best_stream
|
||||
av_find_default_stream_index
|
||||
av_find_input_format
|
||||
av_find_program_from_stream
|
||||
av_fmt_ctx_get_duration_estimation_method
|
||||
av_format_ffversion DATA
|
||||
av_format_get_audio_codec
|
||||
av_format_get_control_message_cb
|
||||
av_format_get_metadata_header_padding
|
||||
av_format_get_opaque
|
||||
av_format_get_probe_score
|
||||
av_format_get_subtitle_codec
|
||||
av_format_get_video_codec
|
||||
av_format_inject_global_side_data
|
||||
av_format_set_audio_codec
|
||||
av_format_set_control_message_cb
|
||||
av_format_set_metadata_header_padding
|
||||
av_format_set_opaque
|
||||
av_format_set_subtitle_codec
|
||||
av_format_set_video_codec
|
||||
av_get_frame_filename
|
||||
av_get_output_timestamp
|
||||
av_get_packet
|
||||
av_guess_codec
|
||||
av_guess_format
|
||||
av_guess_frame_rate
|
||||
av_guess_sample_aspect_ratio
|
||||
av_hex_dump
|
||||
av_hex_dump_log
|
||||
av_iformat_next
|
||||
av_index_search_timestamp
|
||||
av_interleaved_write_frame
|
||||
av_interleaved_write_uncoded_frame
|
||||
av_match_ext
|
||||
av_new_program
|
||||
av_oformat_next
|
||||
av_pkt_dump2
|
||||
av_pkt_dump_log2
|
||||
av_probe_input_buffer
|
||||
av_probe_input_buffer2
|
||||
av_probe_input_format
|
||||
av_probe_input_format2
|
||||
av_probe_input_format3
|
||||
av_read_frame
|
||||
av_read_pause
|
||||
av_read_play
|
||||
av_register_all
|
||||
av_register_input_format
|
||||
av_register_output_format
|
||||
av_sdp_create
|
||||
av_seek_frame
|
||||
av_stream_get_end_pts
|
||||
av_stream_get_parser
|
||||
av_stream_get_r_frame_rate
|
||||
av_stream_get_recommended_encoder_configuration
|
||||
av_stream_get_side_data
|
||||
av_stream_set_r_frame_rate
|
||||
av_stream_set_recommended_encoder_configuration
|
||||
av_url_split
|
||||
av_write_frame
|
||||
av_write_trailer
|
||||
av_write_uncoded_frame
|
||||
av_write_uncoded_frame_query
|
||||
avformat_alloc_context
|
||||
avformat_alloc_output_context2
|
||||
avformat_close_input
|
||||
avformat_configuration
|
||||
avformat_find_stream_info
|
||||
avformat_free_context
|
||||
avformat_get_class
|
||||
avformat_get_mov_audio_tags
|
||||
avformat_get_mov_video_tags
|
||||
avformat_get_riff_audio_tags
|
||||
avformat_get_riff_video_tags
|
||||
avformat_license
|
||||
avformat_match_stream_specifier
|
||||
avformat_network_deinit
|
||||
avformat_network_init
|
||||
avformat_new_stream
|
||||
avformat_open_input
|
||||
avformat_query_codec
|
||||
avformat_queue_attached_pictures
|
||||
avformat_seek_file
|
||||
avformat_version
|
||||
avformat_write_header
|
||||
avio_alloc_context
|
||||
avio_check
|
||||
avio_close
|
||||
avio_close_dyn_buf
|
||||
avio_closep
|
||||
avio_enum_protocols
|
||||
avio_feof
|
||||
avio_find_protocol_name
|
||||
avio_flush
|
||||
avio_get_str
|
||||
avio_get_str16be
|
||||
avio_get_str16le
|
||||
avio_open
|
||||
avio_open2
|
||||
avio_open_dyn_buf
|
||||
avio_pause
|
||||
avio_printf
|
||||
avio_put_str
|
||||
avio_put_str16le
|
||||
avio_r8
|
||||
avio_rb16
|
||||
avio_rb24
|
||||
avio_rb32
|
||||
avio_rb64
|
||||
avio_read
|
||||
avio_read_to_bprint
|
||||
avio_rl16
|
||||
avio_rl24
|
||||
avio_rl32
|
||||
avio_rl64
|
||||
avio_seek
|
||||
avio_seek_time
|
||||
avio_size
|
||||
avio_skip
|
||||
avio_w8
|
||||
avio_wb16
|
||||
avio_wb24
|
||||
avio_wb32
|
||||
avio_wb64
|
||||
avio_wl16
|
||||
avio_wl24
|
||||
avio_wl32
|
||||
avio_wl64
|
||||
avio_write
|
||||
avpriv_dv_get_packet
|
||||
avpriv_dv_init_demux
|
||||
avpriv_dv_produce_packet
|
||||
avpriv_mpegts_parse_close
|
||||
avpriv_mpegts_parse_open
|
||||
avpriv_mpegts_parse_packet
|
||||
avpriv_new_chapter
|
||||
avpriv_set_pts_info
|
||||
ff_inet_aton
|
||||
ff_rtp_get_local_rtcp_port
|
||||
ff_rtp_get_local_rtp_port
|
||||
ff_rtsp_parse_line
|
||||
ff_socket_nonblock
|
||||
ffio_open_dyn_packet_buf
|
||||
ffio_set_buf_size
|
||||
ffurl_close
|
||||
ffurl_open
|
||||
ffurl_read_complete
|
||||
ffurl_seek
|
||||
ffurl_size
|
||||
ffurl_write
|
||||
url_feof
|
441
Externals/ffmpeg/dev/lib/avutil-54.def
vendored
@ -1,441 +0,0 @@
|
||||
EXPORTS
|
||||
av_add_q
|
||||
av_add_stable
|
||||
av_adler32_update
|
||||
av_aes_alloc
|
||||
av_aes_crypt
|
||||
av_aes_init
|
||||
av_aes_size DATA
|
||||
av_asprintf
|
||||
av_audio_fifo_alloc
|
||||
av_audio_fifo_drain
|
||||
av_audio_fifo_free
|
||||
av_audio_fifo_read
|
||||
av_audio_fifo_realloc
|
||||
av_audio_fifo_reset
|
||||
av_audio_fifo_size
|
||||
av_audio_fifo_space
|
||||
av_audio_fifo_write
|
||||
av_base64_decode
|
||||
av_base64_encode
|
||||
av_basename
|
||||
av_blowfish_crypt
|
||||
av_blowfish_crypt_ecb
|
||||
av_blowfish_init
|
||||
av_bmg_get
|
||||
av_bprint_append_data
|
||||
av_bprint_channel_layout
|
||||
av_bprint_chars
|
||||
av_bprint_clear
|
||||
av_bprint_escape
|
||||
av_bprint_finalize
|
||||
av_bprint_get_buffer
|
||||
av_bprint_init
|
||||
av_bprint_init_for_buffer
|
||||
av_bprint_strftime
|
||||
av_bprintf
|
||||
av_buffer_alloc
|
||||
av_buffer_allocz
|
||||
av_buffer_create
|
||||
av_buffer_default_free
|
||||
av_buffer_get_opaque
|
||||
av_buffer_get_ref_count
|
||||
av_buffer_is_writable
|
||||
av_buffer_make_writable
|
||||
av_buffer_pool_get
|
||||
av_buffer_pool_init
|
||||
av_buffer_pool_uninit
|
||||
av_buffer_realloc
|
||||
av_buffer_ref
|
||||
av_buffer_unref
|
||||
av_calloc
|
||||
av_camellia_alloc
|
||||
av_camellia_crypt
|
||||
av_camellia_init
|
||||
av_camellia_size DATA
|
||||
av_cast5_alloc
|
||||
av_cast5_crypt
|
||||
av_cast5_crypt2
|
||||
av_cast5_init
|
||||
av_cast5_size DATA
|
||||
av_channel_layout_extract_channel
|
||||
av_chroma_location_name
|
||||
av_color_primaries_name
|
||||
av_color_range_name
|
||||
av_color_space_name
|
||||
av_color_transfer_name
|
||||
av_compare_mod
|
||||
av_compare_ts
|
||||
av_cpu_count
|
||||
av_crc
|
||||
av_crc_get_table
|
||||
av_crc_init
|
||||
av_ctz
|
||||
av_d2q
|
||||
av_d2str
|
||||
av_default_get_category
|
||||
av_default_item_name
|
||||
av_des_crypt
|
||||
av_des_init
|
||||
av_des_mac
|
||||
av_dict_copy
|
||||
av_dict_count
|
||||
av_dict_free
|
||||
av_dict_get
|
||||
av_dict_get_string
|
||||
av_dict_parse_string
|
||||
av_dict_set
|
||||
av_dict_set_int
|
||||
av_dirname
|
||||
av_display_matrix_flip
|
||||
av_display_rotation_get
|
||||
av_display_rotation_set
|
||||
av_div_q
|
||||
av_downmix_info_update_side_data
|
||||
av_dynarray2_add
|
||||
av_dynarray_add
|
||||
av_dynarray_add_nofree
|
||||
av_escape
|
||||
av_expr_eval
|
||||
av_expr_free
|
||||
av_expr_parse
|
||||
av_expr_parse_and_eval
|
||||
av_fast_malloc
|
||||
av_fast_realloc
|
||||
av_fifo_alloc
|
||||
av_fifo_alloc_array
|
||||
av_fifo_drain
|
||||
av_fifo_free
|
||||
av_fifo_freep
|
||||
av_fifo_generic_read
|
||||
av_fifo_generic_write
|
||||
av_fifo_grow
|
||||
av_fifo_realloc2
|
||||
av_fifo_reset
|
||||
av_fifo_size
|
||||
av_fifo_space
|
||||
av_file_map
|
||||
av_file_unmap
|
||||
av_find_best_pix_fmt_of_2
|
||||
av_find_info_tag
|
||||
av_find_nearest_q_idx
|
||||
av_fopen_utf8
|
||||
av_force_cpu_flags
|
||||
av_frame_alloc
|
||||
av_frame_clone
|
||||
av_frame_copy
|
||||
av_frame_copy_props
|
||||
av_frame_free
|
||||
av_frame_get_best_effort_timestamp
|
||||
av_frame_get_buffer
|
||||
av_frame_get_channel_layout
|
||||
av_frame_get_channels
|
||||
av_frame_get_color_range
|
||||
av_frame_get_colorspace
|
||||
av_frame_get_decode_error_flags
|
||||
av_frame_get_metadata
|
||||
av_frame_get_pkt_duration
|
||||
av_frame_get_pkt_pos
|
||||
av_frame_get_pkt_size
|
||||
av_frame_get_plane_buffer
|
||||
av_frame_get_qp_table
|
||||
av_frame_get_sample_rate
|
||||
av_frame_get_side_data
|
||||
av_frame_is_writable
|
||||
av_frame_make_writable
|
||||
av_frame_move_ref
|
||||
av_frame_new_side_data
|
||||
av_frame_ref
|
||||
av_frame_remove_side_data
|
||||
av_frame_set_best_effort_timestamp
|
||||
av_frame_set_channel_layout
|
||||
av_frame_set_channels
|
||||
av_frame_set_color_range
|
||||
av_frame_set_colorspace
|
||||
av_frame_set_decode_error_flags
|
||||
av_frame_set_metadata
|
||||
av_frame_set_pkt_duration
|
||||
av_frame_set_pkt_pos
|
||||
av_frame_set_pkt_size
|
||||
av_frame_set_qp_table
|
||||
av_frame_set_sample_rate
|
||||
av_frame_side_data_name
|
||||
av_frame_unref
|
||||
av_free
|
||||
av_freep
|
||||
av_gcd
|
||||
av_get_alt_sample_fmt
|
||||
av_get_bits_per_pixel
|
||||
av_get_bytes_per_sample
|
||||
av_get_channel_description
|
||||
av_get_channel_layout
|
||||
av_get_channel_layout_channel_index
|
||||
av_get_channel_layout_nb_channels
|
||||
av_get_channel_layout_string
|
||||
av_get_channel_name
|
||||
av_get_colorspace_name
|
||||
av_get_cpu_flags
|
||||
av_get_default_channel_layout
|
||||
av_get_double
|
||||
av_get_int
|
||||
av_get_known_color_name
|
||||
av_get_media_type_string
|
||||
av_get_packed_sample_fmt
|
||||
av_get_padded_bits_per_pixel
|
||||
av_get_picture_type_char
|
||||
av_get_pix_fmt
|
||||
av_get_pix_fmt_loss
|
||||
av_get_pix_fmt_name
|
||||
av_get_pix_fmt_string
|
||||
av_get_planar_sample_fmt
|
||||
av_get_q
|
||||
av_get_random_seed
|
||||
av_get_sample_fmt
|
||||
av_get_sample_fmt_name
|
||||
av_get_sample_fmt_string
|
||||
av_get_standard_channel_layout
|
||||
av_get_string
|
||||
av_get_time_base_q
|
||||
av_get_token
|
||||
av_gettime
|
||||
av_gettime_relative
|
||||
av_gettime_relative_is_monotonic
|
||||
av_hash_alloc
|
||||
av_hash_final
|
||||
av_hash_final_b64
|
||||
av_hash_final_bin
|
||||
av_hash_final_hex
|
||||
av_hash_freep
|
||||
av_hash_get_name
|
||||
av_hash_get_size
|
||||
av_hash_init
|
||||
av_hash_names
|
||||
av_hash_update
|
||||
av_hmac_alloc
|
||||
av_hmac_calc
|
||||
av_hmac_final
|
||||
av_hmac_free
|
||||
av_hmac_init
|
||||
av_hmac_update
|
||||
av_image_alloc
|
||||
av_image_check_sar
|
||||
av_image_check_size
|
||||
av_image_copy
|
||||
av_image_copy_plane
|
||||
av_image_copy_to_buffer
|
||||
av_image_fill_arrays
|
||||
av_image_fill_linesizes
|
||||
av_image_fill_max_pixsteps
|
||||
av_image_fill_pointers
|
||||
av_image_get_buffer_size
|
||||
av_image_get_linesize
|
||||
av_int_list_length_for_size
|
||||
av_isdigit
|
||||
av_isgraph
|
||||
av_isspace
|
||||
av_isxdigit
|
||||
av_lfg_init
|
||||
av_log
|
||||
av_log2
|
||||
av_log2_16bit
|
||||
av_log_default_callback
|
||||
av_log_format_line
|
||||
av_log_get_flags
|
||||
av_log_get_level
|
||||
av_log_set_callback
|
||||
av_log_set_flags
|
||||
av_log_set_level
|
||||
av_lzo1x_decode
|
||||
av_malloc
|
||||
av_mallocz
|
||||
av_match_list
|
||||
av_match_name
|
||||
av_max_alloc
|
||||
av_md5_alloc
|
||||
av_md5_final
|
||||
av_md5_init
|
||||
av_md5_size DATA
|
||||
av_md5_sum
|
||||
av_md5_update
|
||||
av_memcpy_backptr
|
||||
av_memdup
|
||||
av_mul_q
|
||||
av_murmur3_alloc
|
||||
av_murmur3_final
|
||||
av_murmur3_init
|
||||
av_murmur3_init_seeded
|
||||
av_murmur3_update
|
||||
av_nearer_q
|
||||
av_next_option
|
||||
av_opt_child_class_next
|
||||
av_opt_child_next
|
||||
av_opt_copy
|
||||
av_opt_eval_double
|
||||
av_opt_eval_flags
|
||||
av_opt_eval_float
|
||||
av_opt_eval_int
|
||||
av_opt_eval_int64
|
||||
av_opt_eval_q
|
||||
av_opt_find
|
||||
av_opt_find2
|
||||
av_opt_flag_is_set
|
||||
av_opt_free
|
||||
av_opt_freep_ranges
|
||||
av_opt_get
|
||||
av_opt_get_channel_layout
|
||||
av_opt_get_dict_val
|
||||
av_opt_get_double
|
||||
av_opt_get_image_size
|
||||
av_opt_get_int
|
||||
av_opt_get_key_value
|
||||
av_opt_get_pixel_fmt
|
||||
av_opt_get_q
|
||||
av_opt_get_sample_fmt
|
||||
av_opt_get_video_rate
|
||||
av_opt_is_set_to_default
|
||||
av_opt_is_set_to_default_by_name
|
||||
av_opt_next
|
||||
av_opt_ptr
|
||||
av_opt_query_ranges
|
||||
av_opt_query_ranges_default
|
||||
av_opt_serialize
|
||||
av_opt_set
|
||||
av_opt_set_bin
|
||||
av_opt_set_channel_layout
|
||||
av_opt_set_defaults
|
||||
av_opt_set_defaults2
|
||||
av_opt_set_dict
|
||||
av_opt_set_dict2
|
||||
av_opt_set_dict_val
|
||||
av_opt_set_double
|
||||
av_opt_set_from_string
|
||||
av_opt_set_image_size
|
||||
av_opt_set_int
|
||||
av_opt_set_pixel_fmt
|
||||
av_opt_set_q
|
||||
av_opt_set_sample_fmt
|
||||
av_opt_set_video_rate
|
||||
av_opt_show2
|
||||
av_parse_color
|
||||
av_parse_cpu_caps
|
||||
av_parse_cpu_flags
|
||||
av_parse_ratio
|
||||
av_parse_time
|
||||
av_parse_video_rate
|
||||
av_parse_video_size
|
||||
av_pix_fmt_count_planes
|
||||
av_pix_fmt_desc_get
|
||||
av_pix_fmt_desc_get_id
|
||||
av_pix_fmt_desc_next
|
||||
av_pix_fmt_descriptors DATA
|
||||
av_pix_fmt_get_chroma_sub_sample
|
||||
av_pix_fmt_swap_endianness
|
||||
av_pixelutils_get_sad_fn
|
||||
av_rc4_crypt
|
||||
av_rc4_init
|
||||
av_read_image_line
|
||||
av_realloc
|
||||
av_realloc_array
|
||||
av_realloc_f
|
||||
av_reallocp
|
||||
av_reallocp_array
|
||||
av_reduce
|
||||
av_rescale
|
||||
av_rescale_delta
|
||||
av_rescale_q
|
||||
av_rescale_q_rnd
|
||||
av_rescale_rnd
|
||||
av_reverse DATA
|
||||
av_ripemd_alloc
|
||||
av_ripemd_final
|
||||
av_ripemd_init
|
||||
av_ripemd_size DATA
|
||||
av_ripemd_update
|
||||
av_sample_fmt_is_planar
|
||||
av_samples_alloc
|
||||
av_samples_alloc_array_and_samples
|
||||
av_samples_copy
|
||||
av_samples_fill_arrays
|
||||
av_samples_get_buffer_size
|
||||
av_samples_set_silence
|
||||
av_set_cpu_flags_mask
|
||||
av_set_double
|
||||
av_set_int
|
||||
av_set_options_string
|
||||
av_set_q
|
||||
av_set_string3
|
||||
av_sha512_alloc
|
||||
av_sha512_final
|
||||
av_sha512_init
|
||||
av_sha512_size DATA
|
||||
av_sha512_update
|
||||
av_sha_alloc
|
||||
av_sha_final
|
||||
av_sha_init
|
||||
av_sha_size DATA
|
||||
av_sha_update
|
||||
av_small_strptime
|
||||
av_stereo3d_alloc
|
||||
av_stereo3d_create_side_data
|
||||
av_strcasecmp
|
||||
av_strdup
|
||||
av_strerror
|
||||
av_stristart
|
||||
av_stristr
|
||||
av_strlcat
|
||||
av_strlcatf
|
||||
av_strlcpy
|
||||
av_strncasecmp
|
||||
av_strndup
|
||||
av_strnstr
|
||||
av_strstart
|
||||
av_strtod
|
||||
av_strtok
|
||||
av_sub_q
|
||||
av_tempfile
|
||||
av_thread_message_queue_alloc
|
||||
av_thread_message_queue_free
|
||||
av_thread_message_queue_recv
|
||||
av_thread_message_queue_send
|
||||
av_thread_message_queue_set_err_recv
|
||||
av_thread_message_queue_set_err_send
|
||||
av_timecode_adjust_ntsc_framenum2
|
||||
av_timecode_check_frame_rate
|
||||
av_timecode_get_smpte_from_framenum
|
||||
av_timecode_init
|
||||
av_timecode_init_from_string
|
||||
av_timecode_make_mpeg_tc_string
|
||||
av_timecode_make_smpte_tc_string
|
||||
av_timecode_make_string
|
||||
av_timegm
|
||||
av_tree_destroy
|
||||
av_tree_enumerate
|
||||
av_tree_find
|
||||
av_tree_insert
|
||||
av_tree_node_alloc
|
||||
av_tree_node_size DATA
|
||||
av_usleep
|
||||
av_utf8_decode
|
||||
av_util_ffversion DATA
|
||||
av_vbprintf
|
||||
av_vlog
|
||||
av_write_image_line
|
||||
av_xtea_crypt
|
||||
av_xtea_init
|
||||
avpriv_alloc_fixed_dsp
|
||||
avpriv_cga_font DATA
|
||||
avpriv_emms_yasm DATA
|
||||
avpriv_float_dsp_alloc
|
||||
avpriv_float_dsp_init
|
||||
avpriv_frame_get_metadatap
|
||||
avpriv_init_lls
|
||||
avpriv_open
|
||||
avpriv_report_missing_feature
|
||||
avpriv_request_sample
|
||||
avpriv_scalarproduct_float_c
|
||||
avpriv_set_systematic_pal2
|
||||
avpriv_solve_lls
|
||||
avpriv_vga16_font DATA
|
||||
avutil_configuration
|
||||
avutil_license
|
||||
avutil_version
|
BIN
Externals/ffmpeg/dev/lib/libavcodec.dll.a
vendored
Binary file not shown.
BIN
Externals/ffmpeg/dev/lib/libavdevice.dll.a
vendored
Binary file not shown.
BIN
Externals/ffmpeg/dev/lib/libavfilter.dll.a
vendored
Binary file not shown.
BIN
Externals/ffmpeg/dev/lib/libavformat.dll.a
vendored
Binary file not shown.
BIN
Externals/ffmpeg/dev/lib/libavutil.dll.a
vendored
Binary file not shown.
BIN
Externals/ffmpeg/dev/lib/libpostproc.dll.a
vendored
Binary file not shown.
BIN
Externals/ffmpeg/dev/lib/libswresample.dll.a
vendored
Binary file not shown.
BIN
Externals/ffmpeg/dev/lib/libswscale.dll.a
vendored
Binary file not shown.
11
Externals/ffmpeg/dev/lib/postproc-53.def
vendored
@ -1,11 +0,0 @@
EXPORTS
postproc_configuration
postproc_ffversion DATA
postproc_license
postproc_version
pp_free_context
pp_free_mode
pp_get_context
pp_get_mode_by_name_and_quality
pp_help DATA
pp_postprocess
BIN
Externals/ffmpeg/dev/lib/postproc.lib
vendored
Binary file not shown.
22
Externals/ffmpeg/dev/lib/swresample-1.def
vendored
@ -1,22 +0,0 @@
EXPORTS
swr_alloc
swr_alloc_set_opts
swr_close
swr_config_frame
swr_convert
swr_convert_frame
swr_drop_output
swr_ffversion DATA
swr_free
swr_get_class
swr_get_delay
swr_init
swr_inject_silence
swr_is_initialized
swr_next_pts
swr_set_channel_mapping
swr_set_compensation
swr_set_matrix
swresample_configuration
swresample_license
swresample_version
BIN
Externals/ffmpeg/dev/lib/swresample.lib
vendored
Binary file not shown.
36
Externals/ffmpeg/dev/lib/swscale-3.def
vendored
@ -1,36 +0,0 @@
EXPORTS
sws_addVec
sws_allocVec
sws_alloc_context
sws_cloneVec
sws_context_class DATA
sws_convVec
sws_convertPalette8ToPacked24
sws_convertPalette8ToPacked32
sws_freeContext
sws_freeFilter
sws_freeVec
sws_getCachedContext
sws_getCoefficients
sws_getColorspaceDetails
sws_getConstVec
sws_getContext
sws_getDefaultFilter
sws_getGaussianVec
sws_getIdentityVec
sws_get_class
sws_init_context
sws_isSupportedEndiannessConversion
sws_isSupportedInput
sws_isSupportedOutput
sws_normalizeVec
sws_printVec2
sws_rgb2rgb_init
sws_scale
sws_scaleVec
sws_setColorspaceDetails
sws_shiftVec
sws_subVec
swscale_configuration
swscale_license
swscale_version
BIN
Externals/ffmpeg/shared/bin/avdevice-56.dll
vendored
Binary file not shown.
BIN
Externals/ffmpeg/shared/bin/ffmpeg.exe
vendored
Binary file not shown.
BIN
Externals/ffmpeg/shared/bin/ffplay.exe
vendored
Binary file not shown.
BIN
Externals/ffmpeg/shared/bin/ffprobe.exe
vendored
Binary file not shown.
BIN
Externals/ffmpeg/shared/bin/postproc-53.dll
vendored
Binary file not shown.
777
Externals/ffmpeg/shared/doc/developer.html
vendored
@ -1,777 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by GNU Texinfo 5.2, http://www.gnu.org/software/texinfo/ -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
Developer Documentation
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
Developer Documentation
|
||||
</h1>
|
||||
<div align="center">
|
||||
</div>
|
||||
|
||||
<a name="SEC_Top"></a>
|
||||
|
||||
<a name="SEC_Contents"></a>
|
||||
<h2 class="contents-heading">Table of Contents</h2>
|
||||
|
||||
<div class="contents">
|
||||
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Developers-Guide" href="#Developers-Guide">1 Developers Guide</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Notes-for-external-developers" href="#Notes-for-external-developers">1.1 Notes for external developers</a></li>
|
||||
<li><a name="toc-Contributing" href="#Contributing">1.2 Contributing</a></li>
|
||||
<li><a name="toc-Coding-Rules-1" href="#Coding-Rules-1">1.3 Coding Rules</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Code-formatting-conventions" href="#Code-formatting-conventions">1.3.1 Code formatting conventions</a></li>
|
||||
<li><a name="toc-Comments" href="#Comments">1.3.2 Comments</a></li>
|
||||
<li><a name="toc-C-language-features" href="#C-language-features">1.3.3 C language features</a></li>
|
||||
<li><a name="toc-Naming-conventions" href="#Naming-conventions">1.3.4 Naming conventions</a></li>
|
||||
<li><a name="toc-Miscellaneous-conventions" href="#Miscellaneous-conventions">1.3.5 Miscellaneous conventions</a></li>
|
||||
<li><a name="toc-Editor-configuration" href="#Editor-configuration">1.3.6 Editor configuration</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Development-Policy" href="#Development-Policy">1.4 Development Policy</a></li>
|
||||
<li><a name="toc-Submitting-patches-1" href="#Submitting-patches-1">1.5 Submitting patches</a></li>
|
||||
<li><a name="toc-New-codecs-or-formats-checklist" href="#New-codecs-or-formats-checklist">1.6 New codecs or formats checklist</a></li>
|
||||
<li><a name="toc-patch-submission-checklist" href="#patch-submission-checklist">1.7 patch submission checklist</a></li>
|
||||
<li><a name="toc-Patch-review-process" href="#Patch-review-process">1.8 Patch review process</a></li>
|
||||
<li><a name="toc-Regression-tests-1" href="#Regression-tests-1">1.9 Regression tests</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Adding-files-to-the-fate_002dsuite-dataset" href="#Adding-files-to-the-fate_002dsuite-dataset">1.9.1 Adding files to the fate-suite dataset</a></li>
|
||||
<li><a name="toc-Visualizing-Test-Coverage" href="#Visualizing-Test-Coverage">1.9.2 Visualizing Test Coverage</a></li>
|
||||
<li><a name="toc-Using-Valgrind" href="#Using-Valgrind">1.9.3 Using Valgrind</a></li>
|
||||
</ul></li>
|
||||
<li><a name="toc-Release-process-1" href="#Release-process-1">1.10 Release process</a>
|
||||
<ul class="no-bullet">
|
||||
<li><a name="toc-Criteria-for-Point-Releases-1" href="#Criteria-for-Point-Releases-1">1.10.1 Criteria for Point Releases</a></li>
|
||||
<li><a name="toc-Release-Checklist" href="#Release-Checklist">1.10.2 Release Checklist</a></li>
|
||||
</ul></li>
|
||||
</ul></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
<a name="Developers-Guide"></a>
|
||||
<h2 class="chapter">1 Developers Guide<span class="pull-right"><a class="anchor hidden-xs" href="#Developers-Guide" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Developers-Guide" aria-hidden="true">TOC</a></span></h2>
|
||||
|
||||
<a name="Notes-for-external-developers"></a>
|
||||
<h3 class="section">1.1 Notes for external developers<span class="pull-right"><a class="anchor hidden-xs" href="#Notes-for-external-developers" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Notes-for-external-developers" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>This document is mostly useful for internal FFmpeg developers.
|
||||
External developers who need to use the API in their application should
|
||||
refer to the API doxygen documentation in the public headers, and
|
||||
check the examples in <samp>doc/examples</samp> and in the source code to
|
||||
see how the public API is employed.
|
||||
</p>
|
||||
<p>You can use the FFmpeg libraries in your commercial program, but you
|
||||
are encouraged to <em>publish any patch you make</em>. In this case the
|
||||
best way to proceed is to send your patches to the ffmpeg-devel
|
||||
mailing list following the guidelines illustrated in the remainder of
|
||||
this document.
|
||||
</p>
|
||||
<p>For more detailed legal information about the use of FFmpeg in
|
||||
external programs read the <samp>LICENSE</samp> file in the source tree and
|
||||
consult <a href="http://ffmpeg.org/legal.html">http://ffmpeg.org/legal.html</a>.
|
||||
</p>
|
||||
<a name="Contributing"></a>
|
||||
<h3 class="section">1.2 Contributing<span class="pull-right"><a class="anchor hidden-xs" href="#Contributing" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Contributing" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>There are 3 ways by which code gets into ffmpeg.
|
||||
</p><ul>
|
||||
<li> Submitting Patches to the main developer mailing list
|
||||
see <a href="#Submitting-patches">Submitting patches</a> for details.
|
||||
</li><li> Directly committing changes to the main tree.
|
||||
</li><li> Committing changes to a git clone, for example on github.com or
|
||||
gitorious.org. And asking us to merge these changes.
|
||||
</li></ul>
|
||||
|
||||
<p>Whichever way, changes should be reviewed by the maintainer of the code
|
||||
before they are committed. And they should follow the <a href="#Coding-Rules">Coding Rules</a>.
|
||||
The developer making the commit and the author are responsible for their changes
|
||||
and should try to fix issues their commit causes.
|
||||
</p>
|
||||
<a name="Coding-Rules"></a><a name="Coding-Rules-1"></a>
|
||||
<h3 class="section">1.3 Coding Rules<span class="pull-right"><a class="anchor hidden-xs" href="#Coding-Rules-1" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Coding-Rules-1" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<a name="Code-formatting-conventions"></a>
|
||||
<h4 class="subsection">1.3.1 Code formatting conventions<span class="pull-right"><a class="anchor hidden-xs" href="#Code-formatting-conventions" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Code-formatting-conventions" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>There are the following guidelines regarding the indentation in files:
|
||||
</p>
|
||||
<ul>
|
||||
<li> Indent size is 4.
|
||||
|
||||
</li><li> The TAB character is forbidden outside of Makefiles as is any
|
||||
form of trailing whitespace. Commits containing either will be
|
||||
rejected by the git repository.
|
||||
|
||||
</li><li> You should try to limit your code lines to 80 characters; however, do so if
|
||||
and only if this improves readability.
|
||||
</li></ul>
|
||||
<p>The presentation is one inspired by ’indent -i4 -kr -nut’.
|
||||
</p>
|
||||
<p>The main priority in FFmpeg is simplicity and small code size in order to
|
||||
minimize the bug count.
|
||||
</p>
|
||||
<a name="Comments"></a>
|
||||
<h4 class="subsection">1.3.2 Comments<span class="pull-right"><a class="anchor hidden-xs" href="#Comments" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Comments" aria-hidden="true">TOC</a></span></h4>
|
||||
<p>Use the JavaDoc/Doxygen format (see examples below) so that code documentation
|
||||
can be generated automatically. All nontrivial functions should have a comment
|
||||
above them explaining what the function does, even if it is just one sentence.
|
||||
All structures and their member variables should be documented, too.
|
||||
</p>
|
||||
<p>Avoid Qt-style and similar Doxygen syntax with <code>!</code> in it, i.e. replace
|
||||
<code>//!</code> with <code>///</code> and similar. Also @ syntax should be employed
|
||||
for markup commands, i.e. use <code>@param</code> and not <code>\param</code>.
|
||||
</p>
|
||||
<div class="example">
|
||||
<pre class="example">/**
|
||||
* @file
|
||||
* MPEG codec.
|
||||
* @author ...
|
||||
*/
|
||||
|
||||
/**
|
||||
* Summary sentence.
|
||||
* more text ...
|
||||
* ...
|
||||
*/
|
||||
typedef struct Foobar {
|
||||
int var1; /**< var1 description */
|
||||
int var2; ///< var2 description
|
||||
/** var3 description */
|
||||
int var3;
|
||||
} Foobar;
|
||||
|
||||
/**
|
||||
* Summary sentence.
|
||||
* more text ...
|
||||
* ...
|
||||
* @param my_parameter description of my_parameter
|
||||
* @return return value description
|
||||
*/
|
||||
int myfunc(int my_parameter)
|
||||
...
|
||||
</pre></div>
|
||||
|
||||
<a name="C-language-features"></a>
|
||||
<h4 class="subsection">1.3.3 C language features<span class="pull-right"><a class="anchor hidden-xs" href="#C-language-features" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-C-language-features" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>FFmpeg is programmed in the ISO C90 language with a few additional
|
||||
features from ISO C99, namely:
|
||||
</p>
|
||||
<ul>
|
||||
<li> the ‘<samp>inline</samp>’ keyword;
|
||||
|
||||
</li><li> ‘<samp>//</samp>’ comments;
|
||||
|
||||
</li><li> designated struct initializers (‘<samp>struct s x = { .i = 17 };</samp>’)
|
||||
|
||||
</li><li> compound literals (‘<samp>x = (struct s) { 17, 23 };</samp>’)
|
||||
</li></ul>
|
||||
|
||||
<p>These features are supported by all compilers we care about, so we will not
|
||||
accept patches to remove their use unless they absolutely do not impair
|
||||
clarity and performance.
|
||||
</p>
|
||||
<p>All code must compile with recent versions of GCC and a number of other
|
||||
currently supported compilers. To ensure compatibility, please do not use
|
||||
additional C99 features or GCC extensions. Especially watch out for:
|
||||
</p>
|
||||
<ul>
|
||||
<li> mixing statements and declarations;
|
||||
|
||||
</li><li> ‘<samp>long long</samp>’ (use ‘<samp>int64_t</samp>’ instead);
|
||||
|
||||
</li><li> ‘<samp>__attribute__</samp>’ not protected by ‘<samp>#ifdef __GNUC__</samp>’ or similar;
|
||||
|
||||
</li><li> GCC statement expressions (‘<samp>(x = ({ int y = 4; y; })</samp>’).
|
||||
</li></ul>
|
||||
|
||||
<a name="Naming-conventions"></a>
|
||||
<h4 class="subsection">1.3.4 Naming conventions<span class="pull-right"><a class="anchor hidden-xs" href="#Naming-conventions" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Naming-conventions" aria-hidden="true">TOC</a></span></h4>
|
||||
<p>All names should be composed with underscores (_), not CamelCase. For example,
|
||||
‘<samp>avfilter_get_video_buffer</samp>’ is an acceptable function name and
|
||||
‘<samp>AVFilterGetVideo</samp>’ is not. The exception from this are type names, like
|
||||
for example structs and enums; they should always be in the CamelCase
|
||||
</p>
|
||||
<p>There are the following conventions for naming variables and functions:
|
||||
</p>
|
||||
<ul>
|
||||
<li> For local variables no prefix is required.
|
||||
|
||||
</li><li> For file-scope variables and functions declared as <code>static</code>, no prefix
|
||||
is required.
|
||||
|
||||
</li><li> For variables and functions visible outside of file scope, but only used
|
||||
internally by a library, an <code>ff_</code> prefix should be used,
|
||||
e.g. ‘<samp>ff_w64_demuxer</samp>’.
|
||||
|
||||
</li><li> For variables and functions visible outside of file scope, used internally
|
||||
across multiple libraries, use <code>avpriv_</code> as prefix, for example,
|
||||
‘<samp>avpriv_aac_parse_header</samp>’.
|
||||
|
||||
</li><li> Each library has its own prefix for public symbols, in addition to the
|
||||
commonly used <code>av_</code> (<code>avformat_</code> for libavformat,
|
||||
<code>avcodec_</code> for libavcodec, <code>swr_</code> for libswresample, etc).
|
||||
Check the existing code and choose names accordingly.
|
||||
Note that some symbols without these prefixes are also exported for
|
||||
retro-compatibility reasons. These exceptions are declared in the
|
||||
<code>lib<name>/lib<name>.v</code> files.
|
||||
</li></ul>
|
||||
|
||||
<p>Furthermore, name space reserved for the system should not be invaded.
|
||||
Identifiers ending in <code>_t</code> are reserved by
|
||||
<a href="http://pubs.opengroup.org/onlinepubs/007904975/functions/xsh_chap02_02.html#tag_02_02_02">POSIX</a>.
|
||||
Also avoid names starting with <code>__</code> or <code>_</code> followed by an uppercase
|
||||
letter as they are reserved by the C standard. Names starting with <code>_</code>
|
||||
are reserved at the file level and may not be used for externally visible
|
||||
symbols. If in doubt, just avoid names starting with <code>_</code> altogether.
|
||||
</p>
|
||||
<a name="Miscellaneous-conventions"></a>
|
||||
<h4 class="subsection">1.3.5 Miscellaneous conventions<span class="pull-right"><a class="anchor hidden-xs" href="#Miscellaneous-conventions" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Miscellaneous-conventions" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<ul>
|
||||
<li> fprintf and printf are forbidden in libavformat and libavcodec,
|
||||
please use av_log() instead.
|
||||
|
||||
</li><li> Casts should be used only when necessary. Unneeded parentheses
|
||||
should also be avoided if they don’t make the code easier to understand.
|
||||
</li></ul>
|
||||
|
||||
<a name="Editor-configuration"></a>
|
||||
<h4 class="subsection">1.3.6 Editor configuration<span class="pull-right"><a class="anchor hidden-xs" href="#Editor-configuration" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Editor-configuration" aria-hidden="true">TOC</a></span></h4>
|
||||
<p>In order to configure Vim to follow FFmpeg formatting conventions, paste
|
||||
the following snippet into your <samp>.vimrc</samp>:
|
||||
</p><div class="example">
|
||||
<pre class="example">" indentation rules for FFmpeg: 4 spaces, no tabs
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
" Do not highlight spaces at the end of line while typing on that line.
|
||||
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@<!$/
|
||||
</pre></div>
|
||||
|
||||
<p>For Emacs, add these roughly equivalent lines to your <samp>.emacs.d/init.el</samp>:
|
||||
</p><div class="example">
|
||||
<pre class="example">(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
</pre></div>
|
||||
|
||||
<a name="Development-Policy"></a>
|
||||
<h3 class="section">1.4 Development Policy<span class="pull-right"><a class="anchor hidden-xs" href="#Development-Policy" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Development-Policy" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<ol>
|
||||
<li> Contributions should be licensed under the
|
||||
<a href="http://www.gnu.org/licenses/lgpl-2.1.html">LGPL 2.1</a>,
|
||||
including an "or any later version" clause, or, if you prefer
|
||||
a gift-style license, the
|
||||
<a href="http://opensource.org/licenses/isc-license.txt">ISC</a> or
|
||||
<a href="http://mit-license.org/">MIT</a> license.
|
||||
<a href="http://www.gnu.org/licenses/gpl-2.0.html">GPL 2</a> including
|
||||
an "or any later version" clause is also acceptable, but LGPL is
|
||||
preferred.
|
||||
If you add a new file, give it a proper license header. Do not copy and
|
||||
paste it from a random place, use an existing file as template.
|
||||
|
||||
</li><li> You must not commit code which breaks FFmpeg! (Meaning unfinished but
|
||||
enabled code which breaks compilation or compiles but does not work or
|
||||
breaks the regression tests)
|
||||
You can commit unfinished stuff (for testing etc), but it must be disabled
|
||||
(#ifdef etc) by default so it does not interfere with other developers’
|
||||
work.
|
||||
|
||||
</li><li> The commit message should have a short first line in the form of
|
||||
a ‘<samp>topic: short description</samp>’ as a header, separated by a newline
|
||||
from the body consisting of an explanation of why the change is necessary.
|
||||
If the commit fixes a known bug on the bug tracker, the commit message
|
||||
should include its bug ID. Referring to the issue on the bug tracker does
|
||||
not exempt you from writing an excerpt of the bug in the commit message.
|
||||
|
||||
</li><li> You do not have to over-test things. If it works for you, and you think it
|
||||
should work for others, then commit. If your code has problems
|
||||
(portability, triggers compiler bugs, unusual environment etc) they will be
|
||||
reported and eventually fixed.
|
||||
|
||||
</li><li> Do not commit unrelated changes together, split them into self-contained
|
||||
pieces. Also do not forget that if part B depends on part A, but A does not
|
||||
depend on B, then A can and should be committed first and separate from B.
|
||||
Keeping changes well split into self-contained parts makes reviewing and
|
||||
understanding them on the commit log mailing list easier. This also helps
|
||||
in case of debugging later on.
|
||||
Also if you have doubts about splitting or not splitting, do not hesitate to
|
||||
ask/discuss it on the developer mailing list.
|
||||
|
||||
</li><li> Do not change behavior of the programs (renaming options etc) or public
|
||||
API or ABI without first discussing it on the ffmpeg-devel mailing list.
|
||||
Do not remove functionality from the code. Just improve!
|
||||
|
||||
<p>Note: Redundant code can be removed.
|
||||
</p>
|
||||
</li><li> Do not commit changes to the build system (Makefiles, configure script)
|
||||
which change behavior, defaults etc, without asking first. The same
|
||||
applies to compiler warning fixes, trivial looking fixes and to code
|
||||
maintained by other developers. We usually have a reason for doing things
|
||||
the way we do. Send your changes as patches to the ffmpeg-devel mailing
|
||||
list, and if the code maintainers say OK, you may commit. This does not
|
||||
apply to files you wrote and/or maintain.
|
||||
|
||||
</li><li> We refuse source indentation and other cosmetic changes if they are mixed
|
||||
with functional changes, such commits will be rejected and removed. Every
|
||||
developer has his own indentation style, you should not change it. Of course
|
||||
if you (re)write something, you can use your own style, even though we would
|
||||
prefer if the indentation throughout FFmpeg was consistent (Many projects
|
||||
force a given indentation style - we do not.). If you really need to make
|
||||
indentation changes (try to avoid this), separate them strictly from real
|
||||
changes.
|
||||
|
||||
<p>NOTE: If you had to put if(){ .. } over a large (> 5 lines) chunk of code,
|
||||
then either do NOT change the indentation of the inner part within (do not
|
||||
move it to the right)! or do so in a separate commit
|
||||
</p>
|
||||
</li><li> Always fill out the commit log message. Describe in a few lines what you
|
||||
changed and why. You can refer to mailing list postings if you fix a
|
||||
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
|
||||
Recommended format:
|
||||
|
||||
<div class="example">
|
||||
<pre class="example">area changed: Short 1 line description
|
||||
|
||||
details describing what and why and giving references.
|
||||
</pre></div>
|
||||
|
||||
</li><li> Make sure the author of the commit is set correctly. (see git commit –author)
|
||||
If you apply a patch, send an
|
||||
answer to ffmpeg-devel (or wherever you got the patch from) saying that
|
||||
you applied the patch.
|
||||
|
||||
</li><li> When applying patches that have been discussed (at length) on the mailing
|
||||
list, reference the thread in the log message.
|
||||
|
||||
</li><li> Do NOT commit to code actively maintained by others without permission.
|
||||
Send a patch to ffmpeg-devel instead. If no one answers within a reasonable
|
||||
timeframe (12h for build failures and security fixes, 3 days small changes,
|
||||
1 week for big patches) then commit your patch if you think it is OK.
|
||||
Also note, the maintainer can simply ask for more time to review!
|
||||
|
||||
</li><li> Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits
|
||||
are sent there and reviewed by all the other developers. Bugs and possible
|
||||
improvements or general questions regarding commits are discussed there. We
|
||||
expect you to react if problems with your code are uncovered.
|
||||
|
||||
</li><li> Update the documentation if you change behavior or add features. If you are
|
||||
unsure how best to do this, send a patch to ffmpeg-devel, the documentation
|
||||
maintainer(s) will review and commit your stuff.
|
||||
|
||||
</li><li> Try to keep important discussions and requests (also) on the public
|
||||
developer mailing list, so that all developers can benefit from them.
|
||||
|
||||
</li><li> Never write to unallocated memory, never write over the end of arrays,
|
||||
always check values read from some untrusted source before using them
|
||||
as array index or other risky things.
|
||||
|
||||
</li><li> Remember to check if you need to bump versions for the specific libav*
|
||||
parts (libavutil, libavcodec, libavformat) you are changing. You need
|
||||
to change the version integer.
|
||||
Incrementing the first component means no backward compatibility to
|
||||
previous versions (e.g. removal of a function from the public API).
|
||||
Incrementing the second component means backward compatible change
|
||||
(e.g. addition of a function to the public API or extension of an
|
||||
existing data structure).
|
||||
Incrementing the third component means a noteworthy binary compatible
|
||||
change (e.g. encoder bug fix that matters for the decoder). The third
|
||||
component always starts at 100 to distinguish FFmpeg from Libav.
|
||||
|
||||
</li><li> Compiler warnings indicate potential bugs or code with bad style. If a type of
|
||||
warning always points to correct and clean code, that warning should
|
||||
be disabled, not the code changed.
|
||||
Thus the remaining warnings can either be bugs or correct code.
|
||||
If it is a bug, the bug has to be fixed. If it is not, the code should
|
||||
be changed to not generate a warning unless that causes a slowdown
|
||||
or obfuscates the code.
|
||||
|
||||
</li><li> Make sure that no parts of the codebase that you maintain are missing from the
|
||||
<samp>MAINTAINERS</samp> file. If something that you want to maintain is missing add it with
|
||||
your name after it.
|
||||
If at some point you no longer want to maintain some code, then please help
|
||||
finding a new maintainer and also don’t forget updating the <samp>MAINTAINERS</samp> file.
|
||||
</li></ol>
|
||||
|
||||
<p>We think our rules are not too hard. If you have comments, contact us.
|
||||
</p>
|
||||
<a name="Submitting-patches"></a><a name="Submitting-patches-1"></a>
|
||||
<h3 class="section">1.5 Submitting patches<span class="pull-right"><a class="anchor hidden-xs" href="#Submitting-patches-1" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Submitting-patches-1" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>First, read the <a href="#Coding-Rules">Coding Rules</a> above if you did not yet, in particular
|
||||
the rules regarding patch submission.
|
||||
</p>
|
||||
<p>When you submit your patch, please use <code>git format-patch</code> or
|
||||
<code>git send-email</code>. We cannot read other diffs :-)
|
||||
</p>
|
||||
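<p>For illustration only, a typical submission from the tip of a local branch
could look like the following; the revision range, patch file name and list
address are examples, not requirements.
</p>
<pre>
# one mail per commit that is not yet in master
git format-patch origin/master

# or let git mail the patches to the list directly
git send-email --to=ffmpeg-devel@ffmpeg.org 0001-*.patch
</pre>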
<p>Also please do not submit a patch which contains several unrelated changes.
Split it into separate, self-contained pieces. This does not mean splitting
file by file. Instead, make the patch as small as possible while still
keeping it as a logical unit that contains an individual change, even
if it spans multiple files. This makes reviewing your patches much easier
for us and greatly increases your chances of getting your patch applied.
</p>
<p>Use the patcheck tool of FFmpeg to check your patch.
The tool is located in the tools directory.
</p>
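<p>A minimal sketch of a patcheck run from the top of the source tree; the patch
file name is only a placeholder.
</p>
<pre>
# run the common-mistake checker on your patch before sending it
tools/patcheck 0001-lavf-fix-foo.patch
</pre>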
<p>Run the <a href="#Regression-tests">Regression tests</a> before submitting a patch in order to verify
it does not cause unexpected problems.
</p>
<p>It also helps quite a bit if you tell us what the patch does (for example
’replaces lrint by lrintf’), and why (for example ’*BSD isn’t C99 compliant
and has no lrint()’).
</p>
<p>Also, if you send several patches, please send each patch as a separate mail;
do not attach several unrelated patches to the same mail.
</p>
<p>Patches should be posted to the
<a href="http://lists.ffmpeg.org/mailman/listinfo/ffmpeg-devel">ffmpeg-devel</a>
mailing list. Use <code>git send-email</code> when possible since it will properly
send patches without requiring extra care. If you cannot, then send patches
as base64-encoded attachments, so your patch is not trashed during
transmission.
</p>
<p>Your patch will be reviewed on the mailing list. You will likely be asked
to make some changes and are expected to send in an improved version that
incorporates the requests from the review. This process may go through
several iterations. Once your patch is deemed good enough, some developer
will pick it up and commit it to the official FFmpeg tree.
</p>
<p>Give us a few days to react. But if some time passes without reaction,
send a reminder by email. Your patch should eventually be dealt with.
</p>
<a name="New-codecs-or-formats-checklist"></a>
|
||||
<h3 class="section">1.6 New codecs or formats checklist<span class="pull-right"><a class="anchor hidden-xs" href="#New-codecs-or-formats-checklist" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-New-codecs-or-formats-checklist" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<ol>
|
||||
<li> Did you use av_cold for codec initialization and close functions?
|
||||
|
||||
</li><li> Did you add a long_name under NULL_IF_CONFIG_SMALL to the AVCodec or
|
||||
AVInputFormat/AVOutputFormat struct?
|
||||
|
||||
</li><li> Did you bump the minor version number (and reset the micro version
|
||||
number) in <samp>libavcodec/version.h</samp> or <samp>libavformat/version.h</samp>?
|
||||
|
||||
</li><li> Did you register it in <samp>allcodecs.c</samp> or <samp>allformats.c</samp>?
|
||||
|
||||
</li><li> Did you add the AVCodecID to <samp>avcodec.h</samp>?
|
||||
When adding new codec IDs, also add an entry to the codec descriptor
|
||||
list in <samp>libavcodec/codec_desc.c</samp>.
|
||||
|
||||
</li><li> If it has a FourCC, did you add it to <samp>libavformat/riff.c</samp>,
|
||||
even if it is only a decoder?
|
||||
|
||||
</li><li> Did you add a rule to compile the appropriate files in the Makefile?
|
||||
Remember to do this even if you’re just adding a format to a file that is
|
||||
already being compiled by some other rule, like a raw demuxer.
|
||||
|
||||
</li><li> Did you add an entry to the table of supported formats or codecs in
|
||||
<samp>doc/general.texi</samp>?
|
||||
|
||||
</li><li> Did you add an entry in the Changelog?
|
||||
|
||||
</li><li> If it depends on a parser or a library, did you add that dependency in
|
||||
configure?
|
||||
|
||||
</li><li> Did you <code>git add</code> the appropriate files before committing?
|
||||
|
||||
</li><li> Did you make sure it compiles standalone, i.e. with
|
||||
<code>configure --disable-everything --enable-decoder=foo</code>
|
||||
(or <code>--enable-demuxer</code> or whatever your component is)?
|
||||
</li></ol>
|
||||
|
||||
|
||||
<a name="patch-submission-checklist"></a>
|
||||
<h3 class="section">1.7 patch submission checklist<span class="pull-right"><a class="anchor hidden-xs" href="#patch-submission-checklist" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-patch-submission-checklist" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<ol>
|
||||
<li> Does <code>make fate</code> pass with the patch applied?
|
||||
|
||||
</li><li> Was the patch generated with git format-patch or send-email?
|
||||
|
||||
</li><li> Did you sign off your patch? (git commit -s)
|
||||
See <a href="http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches">http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches</a> for the meaning
|
||||
of sign off.
|
||||
|
||||
</li><li> Did you provide a clear git commit log message?
|
||||
|
||||
</li><li> Is the patch against latest FFmpeg git master branch?
|
||||
|
||||
</li><li> Are you subscribed to ffmpeg-devel?
|
||||
(the list is subscribers only due to spam)
|
||||
|
||||
</li><li> Have you checked that the changes are minimal, so that the same cannot be
|
||||
achieved with a smaller patch and/or simpler final code?
|
||||
|
||||
</li><li> If the change is to speed critical code, did you benchmark it?
|
||||
|
||||
</li><li> If you did any benchmarks, did you provide them in the mail?
|
||||
|
||||
</li><li> Have you checked that the patch does not introduce buffer overflows or
|
||||
other security issues?
|
||||
|
||||
</li><li> Did you test your decoder or demuxer against damaged data? If no, see
|
||||
tools/trasher, the noise bitstream filter, and
|
||||
<a href="http://caca.zoy.org/wiki/zzuf">zzuf</a>. Your decoder or demuxer
|
||||
should not crash, end in a (near) infinite loop, or allocate ridiculous
|
||||
amounts of memory when fed damaged data.
|
||||
|
||||
</li><li> Does the patch not mix functional and cosmetic changes?
|
||||
|
||||
</li><li> Did you add tabs or trailing whitespace to the code? Both are forbidden.
|
||||
|
||||
</li><li> Is the patch attached to the email you send?
|
||||
|
||||
</li><li> Is the mime type of the patch correct? It should be text/x-diff or
|
||||
text/x-patch or at least text/plain and not application/octet-stream.
|
||||
|
||||
</li><li> If the patch fixes a bug, did you provide a verbose analysis of the bug?
|
||||
|
||||
</li><li> If the patch fixes a bug, did you provide enough information, including
|
||||
a sample, so the bug can be reproduced and the fix can be verified?
|
||||
Note please do not attach samples >100k to mails but rather provide a
|
||||
URL, you can upload to ftp://upload.ffmpeg.org
|
||||
|
||||
</li><li> Did you provide a verbose summary about what the patch does change?
|
||||
|
||||
</li><li> Did you provide a verbose explanation why it changes things like it does?
|
||||
|
||||
</li><li> Did you provide a verbose summary of the user visible advantages and
|
||||
disadvantages if the patch is applied?
|
||||
|
||||
</li><li> Did you provide an example so we can verify the new feature added by the
|
||||
patch easily?
|
||||
|
||||
</li><li> If you added a new file, did you insert a license header? It should be
|
||||
taken from FFmpeg, not randomly copied and pasted from somewhere else.
|
||||
|
||||
</li><li> You should maintain alphabetical order in alphabetically ordered lists as
|
||||
long as doing so does not break API/ABI compatibility.
|
||||
|
||||
</li><li> Lines with similar content should be aligned vertically when doing so
|
||||
improves readability.
|
||||
|
||||
</li><li> Consider to add a regression test for your code.
|
||||
|
||||
</li><li> If you added YASM code please check that things still work with –disable-yasm
|
||||
|
||||
</li><li> Make sure you check the return values of function and return appropriate
|
||||
error codes. Especially memory allocation functions like <code>av_malloc()</code>
|
||||
are notoriously left unchecked, which is a serious problem.
|
||||
|
||||
</li><li> Test your code with valgrind and or Address Sanitizer to ensure it’s free
|
||||
of leaks, out of array accesses, etc.
|
||||
</li></ol>
|
||||
|
||||
<a name="Patch-review-process"></a>
|
||||
<h3 class="section">1.8 Patch review process<span class="pull-right"><a class="anchor hidden-xs" href="#Patch-review-process" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Patch-review-process" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>All patches posted to ffmpeg-devel will be reviewed, unless they contain a
|
||||
clear note that the patch is not for the git master branch.
|
||||
Reviews and comments will be posted as replies to the patch on the
|
||||
mailing list. The patch submitter then has to take care of every comment,
|
||||
that can be by resubmitting a changed patch or by discussion. Resubmitted
|
||||
patches will themselves be reviewed like any other patch. If at some point
|
||||
a patch passes review with no comments then it is approved, that can for
|
||||
simple and small patches happen immediately while large patches will generally
|
||||
have to be changed and reviewed many times before they are approved.
|
||||
After a patch is approved it will be committed to the repository.
|
||||
</p>
|
||||
<p>We will review all submitted patches, but sometimes we are quite busy so
|
||||
especially for large patches this can take several weeks.
|
||||
</p>
|
||||
<p>If you feel that the review process is too slow and you are willing to try to
|
||||
take over maintainership of the area of code you change then just clone
|
||||
git master and maintain the area of code there. We will merge each area from
|
||||
where its best maintained.
|
||||
</p>
|
||||
<p>When resubmitting patches, please do not make any significant changes
|
||||
not related to the comments received during review. Such patches will
|
||||
be rejected. Instead, submit significant changes or new features as
|
||||
separate patches.
|
||||
</p>
|
||||
<a name="Regression-tests"></a><a name="Regression-tests-1"></a>
|
||||
<h3 class="section">1.9 Regression tests<span class="pull-right"><a class="anchor hidden-xs" href="#Regression-tests-1" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Regression-tests-1" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>Before submitting a patch (or committing to the repository), you should at least
|
||||
test that you did not break anything.
|
||||
</p>
|
||||
<p>Running ’make fate’ accomplishes this, please see <a href="fate.html">fate.html</a> for details.
|
||||
</p>
|
||||
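<p>As a rough sketch only (<a href="fate.html">fate.html</a> is authoritative for
the exact targets and variables; the samples path below is just an example):
</p>
<pre>
# fetch or update the FATE sample files, then run the full suite against them
make fate-rsync SAMPLES=fate-suite/
make fate       SAMPLES=fate-suite/
</pre>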
<p>[Of course, some patches may change the results of the regression tests. In
this case, the reference results of the regression tests shall be modified
accordingly.]
</p>
<a name="Adding-files-to-the-fate_002dsuite-dataset"></a>
|
||||
<h4 class="subsection">1.9.1 Adding files to the fate-suite dataset<span class="pull-right"><a class="anchor hidden-xs" href="#Adding-files-to-the-fate_002dsuite-dataset" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Adding-files-to-the-fate_002dsuite-dataset" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>When there is no muxer or encoder available to generate test media for a
|
||||
specific test then the media has to be included in the fate-suite.
|
||||
First please make sure that the sample file is as small as possible to test the
|
||||
respective decoder or demuxer sufficiently. Large files increase network
|
||||
bandwidth and disk space requirements.
|
||||
Once you have a working fate test and fate sample, provide in the commit
|
||||
message or introductory message for the patch series that you post to
|
||||
the ffmpeg-devel mailing list, a direct link to download the sample media.
|
||||
</p>
|
||||
|
||||
<a name="Visualizing-Test-Coverage"></a>
|
||||
<h4 class="subsection">1.9.2 Visualizing Test Coverage<span class="pull-right"><a class="anchor hidden-xs" href="#Visualizing-Test-Coverage" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Visualizing-Test-Coverage" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>The FFmpeg build system allows visualizing the test coverage in an easy
|
||||
manner with the coverage tools <code>gcov</code>/<code>lcov</code>. This involves
|
||||
the following steps:
|
||||
</p>
|
||||
<ol>
|
||||
<li> Configure to compile with instrumentation enabled:
|
||||
<code>configure --toolchain=gcov</code>.
|
||||
|
||||
</li><li> Run your test case, either manually or via FATE. This can be either
|
||||
the full FATE regression suite, or any arbitrary invocation of any
|
||||
front-end tool provided by FFmpeg, in any combination.
|
||||
|
||||
</li><li> Run <code>make lcov</code> to generate coverage data in HTML format.
|
||||
|
||||
</li><li> View <code>lcov/index.html</code> in your preferred HTML viewer.
|
||||
</li></ol>
|
||||
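<p>The steps above, condensed into one example session that uses the full FATE
suite as the test case:
</p>
<pre>
./configure --toolchain=gcov
make
make fate          # or any other ffmpeg/ffprobe invocation you want to measure
make lcov          # generates the HTML coverage report
# open lcov/index.html in a browser; use "make lcov-reset" to start over
</pre>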
<p>You can use the command <code>make lcov-reset</code> to reset the coverage
measurements. You will need to rerun <code>make lcov</code> after running a
new test.
</p>
<a name="Using-Valgrind"></a>
|
||||
<h4 class="subsection">1.9.3 Using Valgrind<span class="pull-right"><a class="anchor hidden-xs" href="#Using-Valgrind" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Using-Valgrind" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>The configure script provides a shortcut for using valgrind to spot bugs
|
||||
related to memory handling. Just add the option
|
||||
<code>--toolchain=valgrind-memcheck</code> or <code>--toolchain=valgrind-massif</code>
|
||||
to your configure line, and reasonable defaults will be set for running
|
||||
FATE under the supervision of either the <strong>memcheck</strong> or the
|
||||
<strong>massif</strong> tool of the valgrind suite.
|
||||
</p>
|
||||
<p>In case you need finer control over how valgrind is invoked, use the
|
||||
<code>--target-exec='valgrind <your_custom_valgrind_options></code> option in
|
||||
your configure line instead.
|
||||
</p>
|
||||
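<p>For example (the custom valgrind options on the second configure line are only
an illustration of the finer-grained variant):
</p>
<pre>
# reasonable defaults for running FATE under memcheck
./configure --toolchain=valgrind-memcheck

# or control the valgrind invocation yourself
./configure --target-exec='valgrind --leak-check=full'
</pre>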
<a name="Release-process"></a><a name="Release-process-1"></a>
|
||||
<h3 class="section">1.10 Release process<span class="pull-right"><a class="anchor hidden-xs" href="#Release-process-1" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Release-process-1" aria-hidden="true">TOC</a></span></h3>
|
||||
|
||||
<p>FFmpeg maintains a set of <strong>release branches</strong>, which are the
|
||||
recommended deliverable for system integrators and distributors (such as
|
||||
Linux distributions, etc.). At regular times, a <strong>release
|
||||
manager</strong> prepares, tests and publishes tarballs on the
|
||||
<a href="http://ffmpeg.org">http://ffmpeg.org</a> website.
|
||||
</p>
|
||||
<p>There are two kinds of releases:
|
||||
</p>
|
||||
<ol>
|
||||
<li> <strong>Major releases</strong> always include the latest and greatest
|
||||
features and functionality.
|
||||
|
||||
</li><li> <strong>Point releases</strong> are cut from <strong>release</strong> branches,
|
||||
which are named <code>release/X</code>, with <code>X</code> being the release
|
||||
version number.
|
||||
</li></ol>
|
||||
|
||||
<p>Note that we promise to our users that shared libraries from any FFmpeg
|
||||
release never break programs that have been <strong>compiled</strong> against
|
||||
previous versions of <strong>the same release series</strong> in any case!
|
||||
</p>
|
||||
<p>However, from time to time, we do make API changes that require adaptations
|
||||
in applications. Such changes are only allowed in (new) major releases and
|
||||
require further steps such as bumping library version numbers and/or
|
||||
adjustments to the symbol versioning file. Please discuss such changes
|
||||
on the <strong>ffmpeg-devel</strong> mailing list in time to allow forward planning.
|
||||
</p>
|
||||
<a name="Criteria-for-Point-Releases"></a><a name="Criteria-for-Point-Releases-1"></a>
|
||||
<h4 class="subsection">1.10.1 Criteria for Point Releases<span class="pull-right"><a class="anchor hidden-xs" href="#Criteria-for-Point-Releases-1" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Criteria-for-Point-Releases-1" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>Changes that match the following criteria are valid candidates for
|
||||
inclusion into a point release:
|
||||
</p>
|
||||
<ol>
|
||||
<li> Fixes a security issue, preferably identified by a <strong>CVE
|
||||
number</strong> issued by <a href="http://cve.mitre.org/">http://cve.mitre.org/</a>.
|
||||
|
||||
</li><li> Fixes a documented bug in <a href="https://trac.ffmpeg.org">https://trac.ffmpeg.org</a>.
|
||||
|
||||
</li><li> Improves the included documentation.
|
||||
|
||||
</li><li> Retains both source code and binary compatibility with previous
|
||||
point releases of the same release branch.
|
||||
</li></ol>
|
||||
|
||||
<p>The order for checking the rules is (1 OR 2 OR 3) AND 4.
|
||||
</p>
|
||||
|
||||
<a name="Release-Checklist"></a>
|
||||
<h4 class="subsection">1.10.2 Release Checklist<span class="pull-right"><a class="anchor hidden-xs" href="#Release-Checklist" aria-hidden="true">#</a> <a class="anchor hidden-xs"href="#toc-Release-Checklist" aria-hidden="true">TOC</a></span></h4>
|
||||
|
||||
<p>The release process involves the following steps:
|
||||
</p>
|
||||
<ol>
|
||||
<li> Ensure that the <samp>RELEASE</samp> file contains the version number for
|
||||
the upcoming release.
|
||||
|
||||
</li><li> Add the release at <a href="https://trac.ffmpeg.org/admin/ticket/versions">https://trac.ffmpeg.org/admin/ticket/versions</a>.
|
||||
|
||||
</li><li> Announce the intent to do a release to the mailing list.
|
||||
|
||||
</li><li> Make sure all relevant security fixes have been backported. See
|
||||
<a href="https://ffmpeg.org/security.html">https://ffmpeg.org/security.html</a>.
|
||||
|
||||
</li><li> Ensure that the FATE regression suite still passes in the release
|
||||
branch on at least <strong>i386</strong> and <strong>amd64</strong>
|
||||
(cf. <a href="#Regression-tests">Regression tests</a>).
|
||||
|
||||
</li><li> Prepare the release tarballs in <code>bz2</code> and <code>gz</code> formats, and
|
||||
supplementing files that contain <code>gpg</code> signatures
|
||||
|
||||
</li><li> Publish the tarballs at <a href="http://ffmpeg.org/releases">http://ffmpeg.org/releases</a>. Create and
|
||||
push an annotated tag in the form <code>nX</code>, with <code>X</code>
|
||||
containing the version number.
|
||||
|
||||
</li><li> Propose and send a patch to the <strong>ffmpeg-devel</strong> mailing list
|
||||
with a news entry for the website.
|
||||
|
||||
</li><li> Publish the news entry.
|
||||
|
||||
</li><li> Send announcement to the mailing list.
|
||||
</li></ol>
|
||||
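<p>A sketch of the annotated-tag part of the tarball publishing step above;
<code>n2.5.1</code> is a placeholder version number.
</p>
<pre>
git tag -a n2.5.1 -m "FFmpeg 2.5.1 release"
git push origin n2.5.1
</pre>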
<p style="font-size: small;">
This document was generated on <em>January 14, 2015</em> using <a href="http://www.gnu.org/software/texinfo/"><em>makeinfo</em></a>.
</p>
</div>
</body>
</html>
44
Externals/ffmpeg/shared/doc/examples/Makefile
vendored
@ -1,44 +0,0 @@
# use pkg-config for getting CFLAGS and LDLIBS
FFMPEG_LIBS=    libavdevice                        \
                libavformat                        \
                libavfilter                        \
                libavcodec                         \
                libswresample                      \
                libswscale                         \
                libavutil                          \

CFLAGS += -Wall -g
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)

EXAMPLES=       avio_reading                       \
                decoding_encoding                  \
                demuxing_decoding                  \
                extract_mvs                        \
                filtering_video                    \
                filtering_audio                    \
                metadata                           \
                muxing                             \
                remuxing                           \
                resampling_audio                   \
                scaling_video                      \
                transcode_aac                      \
                transcoding                        \

OBJS=$(addsuffix .o,$(EXAMPLES))

# the following examples make explicit use of the math library
avcodec:           LDLIBS += -lm
decoding_encoding: LDLIBS += -lm
muxing:            LDLIBS += -lm
resampling_audio:  LDLIBS += -lm

.phony: all clean-test clean

all: $(OBJS) $(EXAMPLES)

clean-test:
	$(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg

clean: clean-test
	$(RM) $(EXAMPLES) $(OBJS)
23
Externals/ffmpeg/shared/doc/examples/README
vendored
@ -1,23 +0,0 @@
FFmpeg examples README
----------------------

Both of the following use cases rely on pkg-config and make, so make sure
that you have them installed and working on your system.


Method 1: build the installed examples in a generic read/write user directory

Copy to a read/write user directory and just use "make"; it will link
to the libraries on your system, assuming the PKG_CONFIG_PATH is
correctly configured.

Method 2: build the examples in-tree

Assuming you are in the source FFmpeg checkout directory, you need to build
FFmpeg (no need to make install in any prefix). Then just run "make examples".
This will build the examples using the FFmpeg build system. You can clean those
examples using "make examplesclean".

If you want to try the dedicated Makefile examples (to emulate the first
method), go into doc/examples and run a command such as
PKG_CONFIG_PATH=pc-uninstalled make.
134
Externals/ffmpeg/shared/doc/examples/avio_reading.c
vendored
@ -1,134 +0,0 @@
/*
 * Copyright (c) 2014 Stefano Sabatini
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat AVIOContext API example.
 *
 * Make libavformat demuxer access media content through a custom
 * AVIOContext read callback.
 * @example avio_reading.c
 */

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/file.h>

struct buffer_data {
    uint8_t *ptr;
    size_t size; ///< size left in the buffer
};

static int read_packet(void *opaque, uint8_t *buf, int buf_size)
{
    struct buffer_data *bd = (struct buffer_data *)opaque;
    buf_size = FFMIN(buf_size, bd->size);

    printf("ptr:%p size:%zu\n", bd->ptr, bd->size);

    /* copy internal buffer data to buf */
    memcpy(buf, bd->ptr, buf_size);
    bd->ptr  += buf_size;
    bd->size -= buf_size;

    return buf_size;
}

int main(int argc, char *argv[])
{
    AVFormatContext *fmt_ctx = NULL;
    AVIOContext *avio_ctx = NULL;
    uint8_t *buffer = NULL, *avio_ctx_buffer = NULL;
    size_t buffer_size, avio_ctx_buffer_size = 4096;
    char *input_filename = NULL;
    int ret = 0;
    struct buffer_data bd = { 0 };

    if (argc != 2) {
        fprintf(stderr, "usage: %s input_file\n"
                "API example program to show how to read from a custom buffer "
                "accessed through AVIOContext.\n", argv[0]);
        return 1;
    }
    input_filename = argv[1];

    /* register codecs and formats and other lavf/lavc components */
    av_register_all();

    /* slurp file content into buffer */
    ret = av_file_map(input_filename, &buffer, &buffer_size, 0, NULL);
    if (ret < 0)
        goto end;

    /* fill opaque structure used by the AVIOContext read callback */
    bd.ptr  = buffer;
    bd.size = buffer_size;

    if (!(fmt_ctx = avformat_alloc_context())) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    avio_ctx_buffer = av_malloc(avio_ctx_buffer_size);
    if (!avio_ctx_buffer) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
                                  0, &bd, &read_packet, NULL, NULL);
    if (!avio_ctx) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    fmt_ctx->pb = avio_ctx;

    ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open input\n");
        goto end;
    }

    ret = avformat_find_stream_info(fmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not find stream information\n");
        goto end;
    }

    av_dump_format(fmt_ctx, 0, input_filename, 0);

end:
    avformat_close_input(&fmt_ctx);
    /* note: the internal buffer could have changed, and be != avio_ctx_buffer */
    if (avio_ctx) {
        av_freep(&avio_ctx->buffer);
        av_freep(&avio_ctx);
    }
    av_file_unmap(buffer, buffer_size);

    if (ret < 0) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }

    return 0;
}
@ -1,665 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2001 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavcodec API use example.
|
||||
*
|
||||
* @example decoding_encoding.c
|
||||
* Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
|
||||
* not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
|
||||
* format handling
|
||||
*/
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/common.h>
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
|
||||
#define INBUF_SIZE 4096
|
||||
#define AUDIO_INBUF_SIZE 20480
|
||||
#define AUDIO_REFILL_THRESH 4096
|
||||
|
||||
/* check that a given sample format is supported by the encoder */
|
||||
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
const enum AVSampleFormat *p = codec->sample_fmts;
|
||||
|
||||
while (*p != AV_SAMPLE_FMT_NONE) {
|
||||
if (*p == sample_fmt)
|
||||
return 1;
|
||||
p++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* just pick the highest supported samplerate */
|
||||
static int select_sample_rate(AVCodec *codec)
|
||||
{
|
||||
const int *p;
|
||||
int best_samplerate = 0;
|
||||
|
||||
if (!codec->supported_samplerates)
|
||||
return 44100;
|
||||
|
||||
p = codec->supported_samplerates;
|
||||
while (*p) {
|
||||
best_samplerate = FFMAX(*p, best_samplerate);
|
||||
p++;
|
||||
}
|
||||
return best_samplerate;
|
||||
}
|
||||
|
||||
/* select layout with the highest channel count */
|
||||
static int select_channel_layout(AVCodec *codec)
|
||||
{
|
||||
const uint64_t *p;
|
||||
uint64_t best_ch_layout = 0;
|
||||
int best_nb_channels = 0;
|
||||
|
||||
if (!codec->channel_layouts)
|
||||
return AV_CH_LAYOUT_STEREO;
|
||||
|
||||
p = codec->channel_layouts;
|
||||
while (*p) {
|
||||
int nb_channels = av_get_channel_layout_nb_channels(*p);
|
||||
|
||||
if (nb_channels > best_nb_channels) {
|
||||
best_ch_layout = *p;
|
||||
best_nb_channels = nb_channels;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
return best_ch_layout;
|
||||
}
|
||||
|
||||
/*
|
||||
* Audio encoding example
|
||||
*/
|
||||
static void audio_encode_example(const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
int i, j, k, ret, got_output;
|
||||
int buffer_size;
|
||||
FILE *f;
|
||||
uint16_t *samples;
|
||||
float t, tincr;
|
||||
|
||||
printf("Encode audio file %s\n", filename);
|
||||
|
||||
/* find the MP2 encoder */
|
||||
codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate audio codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 64000;
|
||||
|
||||
/* check that the encoder supports s16 pcm input */
|
||||
c->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
if (!check_sample_fmt(codec, c->sample_fmt)) {
|
||||
fprintf(stderr, "Encoder does not support sample format %s",
|
||||
av_get_sample_fmt_name(c->sample_fmt));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* select other audio parameters supported by the encoder */
|
||||
c->sample_rate = select_sample_rate(codec);
|
||||
c->channel_layout = select_channel_layout(codec);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* frame containing input raw audio */
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->nb_samples = c->frame_size;
|
||||
frame->format = c->sample_fmt;
|
||||
frame->channel_layout = c->channel_layout;
|
||||
|
||||
/* the codec gives us the frame size, in samples,
|
||||
* we calculate the size of the samples buffer in bytes */
|
||||
buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
|
||||
c->sample_fmt, 0);
|
||||
if (buffer_size < 0) {
|
||||
fprintf(stderr, "Could not get sample buffer size\n");
|
||||
exit(1);
|
||||
}
|
||||
samples = av_malloc(buffer_size);
|
||||
if (!samples) {
|
||||
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
|
||||
buffer_size);
|
||||
exit(1);
|
||||
}
|
||||
/* setup the data pointers in the AVFrame */
|
||||
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
|
||||
(const uint8_t*)samples, buffer_size, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not setup audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* encode a single tone sound */
|
||||
t = 0;
|
||||
tincr = 2 * M_PI * 440.0 / c->sample_rate;
|
||||
for (i = 0; i < 200; i++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
for (j = 0; j < c->frame_size; j++) {
|
||||
samples[2*j] = (int)(sin(t) * 10000);
|
||||
|
||||
for (k = 1; k < c->channels; k++)
|
||||
samples[2*j + k] = samples[2*j];
|
||||
t += tincr;
|
||||
}
|
||||
/* encode the samples */
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for (got_output = 1; got_output; i++) {
|
||||
ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
fclose(f);
|
||||
|
||||
av_freep(&samples);
|
||||
av_frame_free(&frame);
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
}
|
||||
|
||||
/*
|
||||
* Audio decoding.
|
||||
*/
|
||||
static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int len;
|
||||
FILE *f, *outfile;
|
||||
uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
AVPacket avpkt;
|
||||
AVFrame *decoded_frame = NULL;
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
|
||||
printf("Decode audio file %s to %s\n", filename, outfilename);
|
||||
|
||||
/* find the mpeg audio decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate audio codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "rb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
outfile = fopen(outfilename, "wb");
|
||||
if (!outfile) {
|
||||
av_free(c);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* decode until eof */
|
||||
avpkt.data = inbuf;
|
||||
avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
|
||||
|
||||
while (avpkt.size > 0) {
|
||||
int i, ch;
|
||||
int got_frame = 0;
|
||||
|
||||
if (!decoded_frame) {
|
||||
if (!(decoded_frame = av_frame_alloc())) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
|
||||
if (len < 0) {
|
||||
fprintf(stderr, "Error while decoding\n");
|
||||
exit(1);
|
||||
}
|
||||
if (got_frame) {
|
||||
/* if a frame has been decoded, output it */
|
||||
int data_size = av_get_bytes_per_sample(c->sample_fmt);
|
||||
if (data_size < 0) {
|
||||
/* This should not occur, checking just for paranoia */
|
||||
fprintf(stderr, "Failed to calculate data size\n");
|
||||
exit(1);
|
||||
}
|
||||
for (i=0; i<decoded_frame->nb_samples; i++)
|
||||
for (ch=0; ch<c->channels; ch++)
|
||||
fwrite(decoded_frame->data[ch] + data_size*i, 1, data_size, outfile);
|
||||
}
|
||||
avpkt.size -= len;
|
||||
avpkt.data += len;
|
||||
avpkt.dts =
|
||||
avpkt.pts = AV_NOPTS_VALUE;
|
||||
if (avpkt.size < AUDIO_REFILL_THRESH) {
|
||||
/* Refill the input buffer, to avoid trying to decode
|
||||
* incomplete frames. Instead of this, one could also use
|
||||
* a parser, or use a proper container format through
|
||||
* libavformat. */
|
||||
memmove(inbuf, avpkt.data, avpkt.size);
|
||||
avpkt.data = inbuf;
|
||||
len = fread(avpkt.data + avpkt.size, 1,
|
||||
AUDIO_INBUF_SIZE - avpkt.size, f);
|
||||
if (len > 0)
|
||||
avpkt.size += len;
|
||||
}
|
||||
}
|
||||
|
||||
fclose(outfile);
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
av_frame_free(&decoded_frame);
|
||||
}
|
||||
|
||||
/*
|
||||
* Video encoding example
|
||||
*/
|
||||
static void video_encode_example(const char *filename, int codec_id)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int i, ret, x, y, got_output;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
|
||||
|
||||
printf("Encode video file %s\n", filename);
|
||||
|
||||
/* find the mpeg1 video encoder */
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate video codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 400000;
|
||||
/* resolution must be a multiple of two */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* frames per second */
|
||||
c->time_base = (AVRational){1,25};
|
||||
/* emit one intra frame every ten frames
|
||||
* check frame pict_type before passing frame
|
||||
* to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
|
||||
* then gop_size is ignored and the output of encoder
|
||||
* will always be I frame irrespective to gop_size
|
||||
*/
|
||||
c->gop_size = 10;
|
||||
c->max_b_frames = 1;
|
||||
c->pix_fmt = AV_PIX_FMT_YUV420P;
|
||||
|
||||
if (codec_id == AV_CODEC_ID_H264)
|
||||
av_opt_set(c->priv_data, "preset", "slow", 0);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
frame->format = c->pix_fmt;
|
||||
frame->width = c->width;
|
||||
frame->height = c->height;
|
||||
|
||||
/* the image can be allocated by any means and av_image_alloc() is
|
||||
* just the most convenient way if av_malloc() is to be used */
|
||||
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
|
||||
c->pix_fmt, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw picture buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* encode 1 second of video */
|
||||
for (i = 0; i < 25; i++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
fflush(stdout);
|
||||
/* prepare a dummy image */
|
||||
/* Y */
|
||||
for (y = 0; y < c->height; y++) {
|
||||
for (x = 0; x < c->width; x++) {
|
||||
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
|
||||
}
|
||||
}
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < c->height/2; y++) {
|
||||
for (x = 0; x < c->width/2; x++) {
|
||||
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
|
||||
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
|
||||
frame->pts = i;
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for (got_output = 1; got_output; i++) {
|
||||
fflush(stdout);
|
||||
|
||||
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* add sequence end code to have a real mpeg file */
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
av_freep(&frame->data[0]);
|
||||
av_frame_free(&frame);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* Video decoding example
|
||||
*/
|
||||
|
||||
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
|
||||
char *filename)
|
||||
{
|
||||
FILE *f;
|
||||
int i;
|
||||
|
||||
f = fopen(filename,"w");
|
||||
fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
|
||||
for (i = 0; i < ysize; i++)
|
||||
fwrite(buf + i * wrap, 1, xsize, f);
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
|
||||
AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
|
||||
{
|
||||
int len, got_frame;
|
||||
char buf[1024];
|
||||
|
||||
len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
|
||||
if (len < 0) {
|
||||
fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
|
||||
return len;
|
||||
}
|
||||
if (got_frame) {
|
||||
printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder, no need to free it */
|
||||
snprintf(buf, sizeof(buf), outfilename, *frame_count);
|
||||
pgm_save(frame->data[0], frame->linesize[0],
|
||||
avctx->width, avctx->height, buf);
|
||||
(*frame_count)++;
|
||||
}
|
||||
if (pkt->data) {
|
||||
pkt->size -= len;
|
||||
pkt->data += len;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void video_decode_example(const char *outfilename, const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int frame_count;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
AVPacket avpkt;
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
|
||||
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
|
||||
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
printf("Decode video file %s to %s\n", filename, outfilename);
|
||||
|
||||
/* find the mpeg1 video decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate video codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if(codec->capabilities&CODEC_CAP_TRUNCATED)
|
||||
c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
|
||||
|
||||
/* For some codecs, such as msmpeg4 and mpeg4, width and height
|
||||
MUST be initialized there because this information is not
|
||||
available in the bitstream. */
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "rb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame_count = 0;
|
||||
for (;;) {
|
||||
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
|
||||
if (avpkt.size == 0)
|
||||
break;
|
||||
|
||||
/* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
|
||||
and this is the only method to use them because you cannot
|
||||
know the compressed data size before analysing it.
|
||||
|
||||
BUT some other codecs (msmpeg4, mpeg4) are inherently frame
|
||||
based, so you must call them with all the data for one
|
||||
frame exactly. You must also initialize 'width' and
'height' before decoding, as this information is not
available in the bitstream for these codecs. */
|
||||
|
||||
/* NOTE2: some codecs allow the raw parameters (frame size,
|
||||
sample rate) to be changed at any frame. We handle this, so
|
||||
you should also take care of it */
|
||||
|
||||
/* here, we use a stream based decoder (mpeg1video), so we
|
||||
feed decoder and see if it could decode a frame */
|
||||
avpkt.data = inbuf;
|
||||
while (avpkt.size > 0)
|
||||
if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* some codecs, such as MPEG, transmit the I and P frame with a
|
||||
latency of one frame. You must do the following to have a
|
||||
chance to get the last frame of the video */
|
||||
avpkt.data = NULL;
|
||||
avpkt.size = 0;
|
||||
decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);
|
||||
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
av_frame_free(&frame);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const char *output_type;
|
||||
|
||||
/* register all the codecs */
|
||||
avcodec_register_all();
|
||||
|
||||
if (argc < 2) {
|
||||
printf("usage: %s output_type\n"
|
||||
"API example program to decode/encode a media stream with libavcodec.\n"
|
||||
"This program generates a synthetic stream and encodes it to a file\n"
|
||||
"named test.h264, test.mp2 or test.mpg depending on output_type.\n"
|
||||
"The encoded stream is then decoded and written to a raw data output.\n"
|
||||
"output_type must be chosen between 'h264', 'mp2', 'mpg'.\n",
|
||||
argv[0]);
|
||||
return 1;
|
||||
}
|
||||
output_type = argv[1];
|
||||
|
||||
if (!strcmp(output_type, "h264")) {
|
||||
video_encode_example("test.h264", AV_CODEC_ID_H264);
|
||||
} else if (!strcmp(output_type, "mp2")) {
|
||||
audio_encode_example("test.mp2");
|
||||
audio_decode_example("test.pcm", "test.mp2");
|
||||
} else if (!strcmp(output_type, "mpg")) {
|
||||
video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
|
||||
video_decode_example("test%02d.pgm", "test.mpg");
|
||||
} else {
|
||||
fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
|
||||
output_type);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
@ -1,386 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Demuxing and decoding example.
|
||||
*
|
||||
* Show how to use the libavformat and libavcodec API to demux and
|
||||
* decode audio and video data.
|
||||
* @example demuxing_decoding.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
|
||||
static AVStream *video_stream = NULL, *audio_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
static const char *video_dst_filename = NULL;
|
||||
static const char *audio_dst_filename = NULL;
|
||||
static FILE *video_dst_file = NULL;
|
||||
static FILE *audio_dst_file = NULL;
|
||||
|
||||
static uint8_t *video_dst_data[4] = {NULL};
|
||||
static int video_dst_linesize[4];
|
||||
static int video_dst_bufsize;
|
||||
|
||||
static int video_stream_idx = -1, audio_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
static int audio_frame_count = 0;
|
||||
|
||||
/* The different ways of decoding and managing data memory. You are not
|
||||
* supposed to support all the modes in your application but pick the one most
|
||||
* appropriate to your needs. Look for the use of api_mode in this example to
|
||||
* see what are the differences of API usage between them */
|
||||
enum {
|
||||
API_MODE_OLD = 0, /* old method, deprecated */
|
||||
API_MODE_NEW_API_REF_COUNT = 1, /* new method, using the frame reference counting */
|
||||
API_MODE_NEW_API_NO_REF_COUNT = 2, /* new method, without reference counting */
|
||||
};
|
||||
|
||||
static int api_mode = API_MODE_OLD;
|
||||
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int ret = 0;
|
||||
int decoded = pkt.size;
|
||||
|
||||
*got_frame = 0;
|
||||
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
/* decode video frame */
|
||||
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
printf("video_frame%s n:%d coded_n:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
video_frame_count++, frame->coded_picture_number,
|
||||
av_ts2timestr(frame->pts, &video_dec_ctx->time_base));
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
}
|
||||
} else if (pkt.stream_index == audio_stream_idx) {
|
||||
/* decode audio frame */
|
||||
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
/* Some audio decoders decode only part of the packet, and have to be
|
||||
* called again with the remainder of the packet data.
|
||||
* Sample: fate-suite/lossless-audio/luckynight-partial.shn
|
||||
* Also, some decoders might over-read the packet. */
|
||||
decoded = FFMIN(ret, pkt.size);
|
||||
|
||||
if (*got_frame) {
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
}
|
||||
}
|
||||
|
||||
/* If we use the new API with reference counting, we own the data and need
|
||||
* to de-reference it when we don't use it anymore */
|
||||
if (*got_frame && api_mode == API_MODE_NEW_API_REF_COUNT)
|
||||
av_frame_unref(frame);
|
||||
|
||||
return decoded;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
*stream_idx = ret;
|
||||
st = fmt_ctx->streams[*stream_idx];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec_ctx = st->codec;
|
||||
dec = avcodec_find_decoder(dec_ctx->codec_id);
|
||||
if (!dec) {
|
||||
fprintf(stderr, "Failed to find %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/* Init the decoders, with or without reference counting */
|
||||
if (api_mode == API_MODE_NEW_API_REF_COUNT)
|
||||
av_dict_set(&opts, "refcounted_frames", "1", 0);
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"sample format %s is not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return -1;
|
||||
}
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 4 && argc != 5) {
|
||||
fprintf(stderr, "usage: %s [-refcount=<old|new_norefcount|new_refcount>] "
|
||||
"input_file video_output_file audio_output_file\n"
|
||||
"API example program to show how to read frames from an input file.\n"
|
||||
"This program reads frames from a file, decodes them, and writes decoded\n"
|
||||
"video frames to a rawvideo file named video_output_file, and decoded\n"
|
||||
"audio frames to a rawaudio file named audio_output_file.\n\n"
|
||||
"If the -refcount option is specified, the program use the\n"
|
||||
"reference counting frame system which allows keeping a copy of\n"
|
||||
"the data for longer than one decode call. If unset, it's using\n"
|
||||
"the classic old method.\n"
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
if (argc == 5) {
|
||||
const char *mode = argv[1] + strlen("-refcount=");
|
||||
if (!strcmp(mode, "old")) api_mode = API_MODE_OLD;
|
||||
else if (!strcmp(mode, "new_norefcount")) api_mode = API_MODE_NEW_API_NO_REF_COUNT;
|
||||
else if (!strcmp(mode, "new_refcount")) api_mode = API_MODE_NEW_API_REF_COUNT;
|
||||
else {
|
||||
fprintf(stderr, "unknow mode '%s'\n", mode);
|
||||
exit(1);
|
||||
}
|
||||
argv++;
|
||||
}
|
||||
src_filename = argv[1];
|
||||
video_dst_filename = argv[2];
|
||||
audio_dst_filename = argv[3];
|
||||
|
||||
/* register all formats and codecs */
|
||||
av_register_all();
|
||||
|
||||
/* open input file, and allocate format context */
|
||||
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open source file %s\n", src_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* retrieve stream information */
|
||||
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
|
||||
video_stream = fmt_ctx->streams[video_stream_idx];
|
||||
video_dec_ctx = video_stream->codec;
|
||||
|
||||
video_dst_file = fopen(video_dst_filename, "wb");
|
||||
if (!video_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate image where the decoded image will be put */
|
||||
ret = av_image_alloc(video_dst_data, video_dst_linesize,
|
||||
video_dec_ctx->width, video_dec_ctx->height,
|
||||
video_dec_ctx->pix_fmt, 1);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw video buffer\n");
|
||||
goto end;
|
||||
}
|
||||
video_dst_bufsize = ret;
|
||||
}
|
||||
|
||||
if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
|
||||
audio_stream = fmt_ctx->streams[audio_stream_idx];
|
||||
audio_dec_ctx = audio_stream->codec;
|
||||
audio_dst_file = fopen(audio_dst_filename, "wb");
|
||||
if (!audio_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
/* dump input information to stderr */
|
||||
av_dump_format(fmt_ctx, 0, src_filename, 0);
|
||||
|
||||
if (!audio_stream && !video_stream) {
|
||||
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* When using the new API, you need to use the libavutil/frame.h API, while
|
||||
* the classic frame management is available in libavcodec */
|
||||
if (api_mode == API_MODE_OLD)
|
||||
frame = avcodec_alloc_frame();
|
||||
else
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate frame\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
if (video_stream)
|
||||
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
|
||||
if (audio_stream)
|
||||
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_free_packet(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
printf("Demuxing succeeded.\n");
|
||||
|
||||
if (video_stream) {
|
||||
printf("Play the output video file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
|
||||
video_dst_filename);
|
||||
}
|
||||
|
||||
if (audio_stream) {
|
||||
enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
|
||||
int n_channels = audio_dec_ctx->channels;
|
||||
const char *fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
const char *packed = av_get_sample_fmt_name(sfmt);
|
||||
printf("Warning: the sample format the decoder produced is planar "
|
||||
"(%s). This example will output the first channel only.\n",
|
||||
packed ? packed : "?");
|
||||
sfmt = av_get_packed_sample_fmt(sfmt);
|
||||
n_channels = 1;
|
||||
}
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
|
||||
goto end;
|
||||
|
||||
printf("Play the output audio file with the command:\n"
|
||||
"ffplay -f %s -ac %d -ar %d %s\n",
|
||||
fmt, n_channels, audio_dec_ctx->sample_rate,
|
||||
audio_dst_filename);
|
||||
}
|
||||
|
||||
end:
|
||||
avcodec_close(video_dec_ctx);
|
||||
avcodec_close(audio_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
if (video_dst_file)
|
||||
fclose(video_dst_file);
|
||||
if (audio_dst_file)
|
||||
fclose(audio_dst_file);
|
||||
if (api_mode == API_MODE_OLD)
|
||||
avcodec_free_frame(&frame);
|
||||
else
|
||||
av_frame_free(&frame);
|
||||
av_free(video_dst_data[0]);
|
||||
|
||||
return ret < 0;
|
||||
}
|
185
Externals/ffmpeg/shared/doc/examples/extract_mvs.c
vendored
185
Externals/ffmpeg/shared/doc/examples/extract_mvs.c
vendored
@ -1,185 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
* Copyright (c) 2014 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <libavutil/motion_vector.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL;
|
||||
static AVStream *video_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
|
||||
static int video_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int decoded = pkt.size;
|
||||
|
||||
*got_frame = 0;
|
||||
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
int ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
int i;
|
||||
AVFrameSideData *sd;
|
||||
|
||||
video_frame_count++;
|
||||
sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
|
||||
if (sd) {
|
||||
const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
|
||||
for (i = 0; i < sd->size / sizeof(*mvs); i++) {
|
||||
const AVMotionVector *mv = &mvs[i];
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
|
||||
video_frame_count, mv->source,
|
||||
mv->w, mv->h, mv->src_x, mv->src_y,
|
||||
mv->dst_x, mv->dst_y, mv->flags);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return decoded;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
*stream_idx = ret;
|
||||
st = fmt_ctx->streams[*stream_idx];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec_ctx = st->codec;
|
||||
dec = avcodec_find_decoder(dec_ctx->codec_id);
|
||||
if (!dec) {
|
||||
fprintf(stderr, "Failed to find %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/* Init the video decoder */
|
||||
av_dict_set(&opts, "flags2", "+export_mvs", 0);
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s <video>\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
src_filename = argv[1];
|
||||
|
||||
av_register_all();
|
||||
|
||||
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open source file %s\n", src_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
|
||||
video_stream = fmt_ctx->streams[video_stream_idx];
|
||||
video_dec_ctx = video_stream->codec;
|
||||
}
|
||||
|
||||
av_dump_format(fmt_ctx, 0, src_filename, 0);
|
||||
|
||||
if (!video_stream) {
|
||||
fprintf(stderr, "Could not find video stream in the input, aborting\n");
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate frame\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
|
||||
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_free_packet(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
end:
|
||||
avcodec_close(video_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
return ret < 0;
|
||||
}
|
365
Externals/ffmpeg/shared/doc/examples/filter_audio.c
vendored
365
Externals/ffmpeg/shared/doc/examples/filter_audio.c
vendored
@ -1,365 +0,0 @@
|
||||
/*
|
||||
* copyright (c) 2013 Andrew Kelley
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavfilter API usage example.
|
||||
*
|
||||
* @example filter_audio.c
|
||||
* This example will generate a sine wave audio,
|
||||
* pass it through a simple filter chain, and then compute the MD5 checksum of
|
||||
* the output data.
|
||||
*
|
||||
* The filter chain it uses is:
|
||||
* (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
|
||||
*
|
||||
* abuffer: This provides the endpoint where you can feed the decoded samples.
|
||||
* volume: In this example we hardcode it to 0.90.
|
||||
* aformat: This converts the samples to the samplefreq, channel layout,
|
||||
* and sample format required by the audio device.
|
||||
* abuffersink: This provides the endpoint where you can read the samples after
|
||||
* they have passed through the filter chain.
|
||||
*/
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <math.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libavutil/channel_layout.h"
|
||||
#include "libavutil/md5.h"
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/samplefmt.h"
|
||||
|
||||
#include "libavfilter/avfilter.h"
|
||||
#include "libavfilter/buffersink.h"
|
||||
#include "libavfilter/buffersrc.h"
|
||||
|
||||
#define INPUT_SAMPLERATE 48000
|
||||
#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
|
||||
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0
|
||||
|
||||
#define VOLUME_VAL 0.90
|
||||
|
||||
static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
AVFilterContext **sink)
|
||||
{
|
||||
AVFilterGraph *filter_graph;
|
||||
AVFilterContext *abuffer_ctx;
|
||||
AVFilter *abuffer;
|
||||
AVFilterContext *volume_ctx;
|
||||
AVFilter *volume;
|
||||
AVFilterContext *aformat_ctx;
|
||||
AVFilter *aformat;
|
||||
AVFilterContext *abuffersink_ctx;
|
||||
AVFilter *abuffersink;
|
||||
|
||||
AVDictionary *options_dict = NULL;
|
||||
uint8_t options_str[1024];
|
||||
uint8_t ch_layout[64];
|
||||
|
||||
int err;
|
||||
|
||||
/* Create a new filtergraph, which will contain all the filters. */
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!filter_graph) {
|
||||
fprintf(stderr, "Unable to create filter graph.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* Create the abuffer filter;
|
||||
* it will be used for feeding the data into the graph. */
|
||||
abuffer = avfilter_get_by_name("abuffer");
|
||||
if (!abuffer) {
|
||||
fprintf(stderr, "Could not find the abuffer filter.\n");
|
||||
return AVERROR_FILTER_NOT_FOUND;
|
||||
}
|
||||
|
||||
abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
|
||||
if (!abuffer_ctx) {
|
||||
fprintf(stderr, "Could not allocate the abuffer instance.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* Set the filter options through the AVOptions API. */
|
||||
av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
|
||||
av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set_int(abuffer_ctx, "sample_rate", INPUT_SAMPLERATE, AV_OPT_SEARCH_CHILDREN);
|
||||
|
||||
/* Now initialize the filter; we pass NULL options, since we have already
|
||||
* set all the options above. */
|
||||
err = avfilter_init_str(abuffer_ctx, NULL);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Could not initialize the abuffer filter.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Create volume filter. */
|
||||
volume = avfilter_get_by_name("volume");
|
||||
if (!volume) {
|
||||
fprintf(stderr, "Could not find the volume filter.\n");
|
||||
return AVERROR_FILTER_NOT_FOUND;
|
||||
}
|
||||
|
||||
volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume");
|
||||
if (!volume_ctx) {
|
||||
fprintf(stderr, "Could not allocate the volume instance.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* A different way of passing the options is as key/value pairs in a
|
||||
* dictionary. */
|
||||
av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0);
|
||||
err = avfilter_init_dict(volume_ctx, &options_dict);
|
||||
av_dict_free(&options_dict);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Could not initialize the volume filter.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Create the aformat filter;
|
||||
* it ensures that the output is of the format we want. */
|
||||
aformat = avfilter_get_by_name("aformat");
|
||||
if (!aformat) {
|
||||
fprintf(stderr, "Could not find the aformat filter.\n");
|
||||
return AVERROR_FILTER_NOT_FOUND;
|
||||
}
|
||||
|
||||
aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
|
||||
if (!aformat_ctx) {
|
||||
fprintf(stderr, "Could not allocate the aformat instance.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* A third way of passing the options is in a string of the form
|
||||
* key1=value1:key2=value2.... */
|
||||
snprintf(options_str, sizeof(options_str),
|
||||
"sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
|
||||
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
|
||||
(uint64_t)AV_CH_LAYOUT_STEREO);
|
||||
err = avfilter_init_str(aformat_ctx, options_str);
|
||||
if (err < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Finally create the abuffersink filter;
|
||||
* it will be used to get the filtered data out of the graph. */
|
||||
abuffersink = avfilter_get_by_name("abuffersink");
|
||||
if (!abuffersink) {
|
||||
fprintf(stderr, "Could not find the abuffersink filter.\n");
|
||||
return AVERROR_FILTER_NOT_FOUND;
|
||||
}
|
||||
|
||||
abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
|
||||
if (!abuffersink_ctx) {
|
||||
fprintf(stderr, "Could not allocate the abuffersink instance.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* This filter takes no options. */
|
||||
err = avfilter_init_str(abuffersink_ctx, NULL);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Could not initialize the abuffersink instance.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Connect the filters;
|
||||
* in this simple case the filters just form a linear chain. */
|
||||
err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0);
|
||||
if (err >= 0)
|
||||
err = avfilter_link(volume_ctx, 0, aformat_ctx, 0);
|
||||
if (err >= 0)
|
||||
err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error connecting filters\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Configure the graph. */
|
||||
err = avfilter_graph_config(filter_graph, NULL);
|
||||
if (err < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
*graph = filter_graph;
|
||||
*src = abuffer_ctx;
|
||||
*sink = abuffersink_ctx;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Do something useful with the filtered data: this simple
|
||||
* example just prints the MD5 checksum of each plane to stdout. */
|
||||
static int process_output(struct AVMD5 *md5, AVFrame *frame)
|
||||
{
|
||||
int planar = av_sample_fmt_is_planar(frame->format);
|
||||
int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
int planes = planar ? channels : 1;
|
||||
int bps = av_get_bytes_per_sample(frame->format);
|
||||
int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < planes; i++) {
|
||||
uint8_t checksum[16];
|
||||
|
||||
av_md5_init(md5);
|
||||
av_md5_sum(checksum, frame->extended_data[i], plane_size);
|
||||
|
||||
fprintf(stdout, "plane %d: 0x", i);
|
||||
for (j = 0; j < sizeof(checksum); j++)
|
||||
fprintf(stdout, "%02X", checksum[j]);
|
||||
fprintf(stdout, "\n");
|
||||
}
|
||||
fprintf(stdout, "\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Construct a frame of audio data to be filtered;
|
||||
* this simple example just synthesizes a sine wave. */
|
||||
static int get_input(AVFrame *frame, int frame_num)
|
||||
{
|
||||
int err, i, j;
|
||||
|
||||
#define FRAME_SIZE 1024
|
||||
|
||||
/* Set up the frame properties and allocate the buffer for the data. */
|
||||
frame->sample_rate = INPUT_SAMPLERATE;
|
||||
frame->format = INPUT_FORMAT;
|
||||
frame->channel_layout = INPUT_CHANNEL_LAYOUT;
|
||||
frame->nb_samples = FRAME_SIZE;
|
||||
frame->pts = frame_num * FRAME_SIZE;
|
||||
|
||||
err = av_frame_get_buffer(frame, 0);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
/* Fill the data for each channel. */
|
||||
for (i = 0; i < 5; i++) {
|
||||
float *data = (float*)frame->extended_data[i];
|
||||
|
||||
for (j = 0; j < frame->nb_samples; j++)
|
||||
data[j] = sin(2 * M_PI * (frame_num + j) * (i + 1) / FRAME_SIZE);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
struct AVMD5 *md5;
|
||||
AVFilterGraph *graph;
|
||||
AVFilterContext *src, *sink;
|
||||
AVFrame *frame;
|
||||
uint8_t errstr[1024];
|
||||
float duration;
|
||||
int err, nb_frames, i;
|
||||
|
||||
if (argc < 2) {
|
||||
fprintf(stderr, "Usage: %s <duration>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
duration = atof(argv[1]);
|
||||
nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE;
|
||||
if (nb_frames <= 0) {
|
||||
fprintf(stderr, "Invalid duration: %s\n", argv[1]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
avfilter_register_all();
|
||||
|
||||
/* Allocate the frame we will be using to store the data. */
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating the frame\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
md5 = av_md5_alloc();
|
||||
if (!md5) {
|
||||
fprintf(stderr, "Error allocating the MD5 context\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Set up the filtergraph. */
|
||||
err = init_filter_graph(&graph, &src, &sink);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Unable to init filter graph:");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* the main filtering loop */
|
||||
for (i = 0; i < nb_frames; i++) {
|
||||
/* get an input frame to be filtered */
|
||||
err = get_input(frame, i);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error generating input frame:");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Send the frame to the input of the filtergraph. */
|
||||
err = av_buffersrc_add_frame(src, frame);
|
||||
if (err < 0) {
|
||||
av_frame_unref(frame);
|
||||
fprintf(stderr, "Error submitting the frame to the filtergraph:");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Get all the filtered output that is available. */
|
||||
while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
|
||||
/* now do something with our filtered frame */
|
||||
err = process_output(md5, frame);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error processing the filtered frame:");
|
||||
goto fail;
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
|
||||
if (err == AVERROR(EAGAIN)) {
|
||||
/* Need to feed more frames in. */
|
||||
continue;
|
||||
} else if (err == AVERROR_EOF) {
|
||||
/* Nothing more to do, finish. */
|
||||
break;
|
||||
} else if (err < 0) {
|
||||
/* An error occurred. */
|
||||
fprintf(stderr, "Error filtering the data:");
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
avfilter_graph_free(&graph);
|
||||
av_frame_free(&frame);
|
||||
av_freep(&md5);
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
av_strerror(err, errstr, sizeof(errstr));
|
||||
fprintf(stderr, "%s\n", errstr);
|
||||
return 1;
|
||||
}
|
@ -1,280 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2012 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for audio decoding and filtering
|
||||
* @example filtering_audio.c
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
|
||||
static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int audio_stream_index = -1;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the audio stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find a audio stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
audio_stream_index = ret;
|
||||
dec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the audio decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
|
||||
AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
|
||||
static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
|
||||
static const int out_sample_rates[] = { 8000, -1 };
|
||||
const AVFilterLink *outlink;
|
||||
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
time_base.num, time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer audio sink: to terminate the filter chain. */
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
/* Print summary of the sink buffer
|
||||
* Note: args buffer is reused to store channel layout string */
|
||||
outlink = buffersink_ctx->inputs[0];
|
||||
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
|
||||
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
|
||||
(int)outlink->sample_rate,
|
||||
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
|
||||
args);
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void print_frame(const AVFrame *frame)
|
||||
{
|
||||
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));
|
||||
const uint16_t *p = (uint16_t*)frame->data[0];
|
||||
const uint16_t *p_end = p + n;
|
||||
|
||||
while (p < p_end) {
|
||||
fputc(*p & 0xff, stdout);
|
||||
fputc(*p>>8 & 0xff, stdout);
|
||||
p++;
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet0, packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
int got_frame;
|
||||
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
av_register_all();
|
||||
avfilter_register_all();
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
packet0.data = NULL;
|
||||
packet.data = NULL;
|
||||
while (1) {
|
||||
if (!packet0.data) {
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
packet0 = packet;
|
||||
}
|
||||
|
||||
if (packet.stream_index == audio_stream_index) {
|
||||
got_frame = 0;
|
||||
ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
|
||||
continue;
|
||||
}
|
||||
packet.size -= ret;
|
||||
packet.data += ret;
|
||||
|
||||
if (got_frame) {
|
||||
/* push the audio data from decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered audio from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
print_frame(filt_frame);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
}
|
||||
|
||||
if (packet.size <= 0)
|
||||
av_free_packet(&packet0);
|
||||
} else {
|
||||
/* discard non-wanted packets */
|
||||
av_free_packet(&packet0);
|
||||
}
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
avcodec_close(dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
@ -1,262 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for decoding and filtering
|
||||
* @example filtering_video.c
|
||||
*/
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
const char *filter_descr = "scale=78:24";
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int video_stream_index = -1;
|
||||
static int64_t last_pts = AV_NOPTS_VALUE;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the video stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
video_stream_index = ret;
|
||||
dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the video decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
AVFilter *buffersrc = avfilter_get_by_name("buffer");
|
||||
AVFilter *buffersink = avfilter_get_by_name("buffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
|
||||
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer video source: the decoded frames from the decoder will be inserted here. */
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
time_base.num, time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer video sink: to terminate the filter chain. */
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
|
||||
AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void display_frame(const AVFrame *frame, AVRational time_base)
|
||||
{
|
||||
int x, y;
|
||||
uint8_t *p0, *p;
|
||||
int64_t delay;
|
||||
|
||||
if (frame->pts != AV_NOPTS_VALUE) {
|
||||
if (last_pts != AV_NOPTS_VALUE) {
|
||||
/* sleep roughly the right amount of time;
|
||||
* usleep is in microseconds, just like AV_TIME_BASE. */
|
||||
delay = av_rescale_q(frame->pts - last_pts,
|
||||
time_base, AV_TIME_BASE_Q);
|
||||
if (delay > 0 && delay < 1000000)
|
||||
usleep(delay);
|
||||
}
|
||||
last_pts = frame->pts;
|
||||
}
|
||||
|
||||
/* Trivial ASCII grayscale display. */
|
||||
p0 = frame->data[0];
|
||||
puts("\033c");
|
||||
for (y = 0; y < frame->height; y++) {
|
||||
p = p0;
|
||||
for (x = 0; x < frame->width; x++)
|
||||
putchar(" .-+#"[*(p++) / 52]);
|
||||
putchar('\n');
|
||||
p0 += frame->linesize[0];
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
int got_frame;
|
||||
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
av_register_all();
|
||||
avfilter_register_all();
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet.stream_index == video_stream_index) {
|
||||
got_frame = 0;
|
||||
ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
|
||||
break;
|
||||
}
|
||||
|
||||
if (got_frame) {
|
||||
frame->pts = av_frame_get_best_effort_timestamp(frame);
|
||||
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
}
|
||||
av_free_packet(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
avcodec_close(dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
56
Externals/ffmpeg/shared/doc/examples/metadata.c
vendored
56
Externals/ffmpeg/shared/doc/examples/metadata.c
vendored
@ -1,56 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2011 Reinhard Tartler
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Shows how the metadata API can be used in application programs.
|
||||
* @example metadata.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/dict.h>
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
AVDictionaryEntry *tag = NULL;
|
||||
int ret;
|
||||
|
||||
if (argc != 2) {
|
||||
printf("usage: %s <input_file>\n"
|
||||
"example program to demonstrate the use of the libavformat metadata API.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
av_register_all();
|
||||
if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
|
||||
return ret;
|
||||
|
||||
while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
|
||||
printf("%s=%s\n", tag->key, tag->value);
|
||||
|
||||
avformat_close_input(&fmt_ctx);
|
||||
return 0;
|
||||
}
|
670
Externals/ffmpeg/shared/doc/examples/muxing.c
vendored
670
Externals/ffmpeg/shared/doc/examples/muxing.c
vendored
@ -1,670 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat API example.
|
||||
*
|
||||
* Output a media file in any supported libavformat format. The default
|
||||
* codecs are used.
|
||||
* @example muxing.c
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/avassert.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
#define STREAM_DURATION 10.0
|
||||
#define STREAM_FRAME_RATE 25 /* 25 images/s */
|
||||
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
|
||||
|
||||
#define SCALE_FLAGS SWS_BICUBIC
|
||||
|
||||
// a wrapper around a single output AVStream
|
||||
typedef struct OutputStream {
|
||||
AVStream *st;
|
||||
|
||||
/* pts of the next frame that will be generated */
|
||||
int64_t next_pts;
|
||||
int samples_count;
|
||||
|
||||
AVFrame *frame;
|
||||
AVFrame *tmp_frame;
|
||||
|
||||
float t, tincr, tincr2;
|
||||
|
||||
struct SwsContext *sws_ctx;
|
||||
struct SwrContext *swr_ctx;
|
||||
} OutputStream;
|
||||
|
||||
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
{
|
||||
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
|
||||
|
||||
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
|
||||
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
|
||||
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
|
||||
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
|
||||
{
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, *time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, pkt);
|
||||
return av_interleaved_write_frame(fmt_ctx, pkt);
|
||||
}
|
||||
|
||||
/* Add an output stream. */
|
||||
static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
AVCodec **codec,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int i;
|
||||
|
||||
/* find the encoder */
|
||||
*codec = avcodec_find_encoder(codec_id);
|
||||
if (!(*codec)) {
|
||||
fprintf(stderr, "Could not find encoder for '%s'\n",
|
||||
avcodec_get_name(codec_id));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->st = avformat_new_stream(oc, *codec);
|
||||
if (!ost->st) {
|
||||
fprintf(stderr, "Could not allocate stream\n");
|
||||
exit(1);
|
||||
}
|
||||
ost->st->id = oc->nb_streams-1;
|
||||
c = ost->st->codec;
|
||||
|
||||
switch ((*codec)->type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
c->sample_fmt = (*codec)->sample_fmts ?
|
||||
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
if ((*codec)->supported_samplerates) {
|
||||
c->sample_rate = (*codec)->supported_samplerates[0];
|
||||
for (i = 0; (*codec)->supported_samplerates[i]; i++) {
|
||||
if ((*codec)->supported_samplerates[i] == 44100)
|
||||
c->sample_rate = 44100;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
if ((*codec)->channel_layouts) {
|
||||
c->channel_layout = (*codec)->channel_layouts[0];
|
||||
for (i = 0; (*codec)->channel_layouts[i]; i++) {
|
||||
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
ost->st->time_base = (AVRational){ 1, c->sample_rate };
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
c->codec_id = codec_id;
|
||||
|
||||
c->bit_rate = 400000;
|
||||
/* Resolution must be a multiple of two. */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* timebase: This is the fundamental unit of time (in seconds) in terms
|
||||
* of which frame timestamps are represented. For fixed-fps content,
|
||||
* timebase should be 1/framerate and timestamp increments should be
|
||||
* identical to 1. */
|
||||
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
|
||||
c->time_base = ost->st->time_base;
|
||||
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
|
||||
/* just for testing, we also add B frames */
|
||||
c->max_b_frames = 2;
|
||||
}
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
|
||||
/* Needed to avoid using macroblocks in which some coeffs overflow.
|
||||
* This does not happen with normal video, it just happens here as
|
||||
* the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision = 2;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Some formats want stream headers to be separate. */
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* audio output */
|
||||
|
||||
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
uint64_t channel_layout,
|
||||
int sample_rate, int nb_samples)
|
||||
{
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
int ret;
|
||||
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating an audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->format = sample_fmt;
|
||||
frame->channel_layout = channel_layout;
|
||||
frame->sample_rate = sample_rate;
|
||||
frame->nb_samples = nb_samples;
|
||||
|
||||
if (nb_samples) {
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error allocating an audio buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int nb_samples;
|
||||
int ret;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
c = ost->st->codec;
|
||||
|
||||
/* open it */
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* init signal generator */
|
||||
ost->t = 0;
|
||||
ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
|
||||
/* increment frequency by 110 Hz per second */
|
||||
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
nb_samples = 10000;
|
||||
else
|
||||
nb_samples = c->frame_size;
|
||||
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
|
||||
/* create resampler context */
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
* 'nb_channels' channels. */
|
||||
static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
{
|
||||
AVFrame *frame = ost->tmp_frame;
|
||||
int j, i, v;
|
||||
int16_t *q = (int16_t*)frame->data[0];
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
for (j = 0; j <frame->nb_samples; j++) {
|
||||
v = (int)(sin(ost->t) * 10000);
|
||||
for (i = 0; i < ost->st->codec->channels; i++)
|
||||
*q++ = v;
|
||||
ost->t += ost->tincr;
|
||||
ost->tincr += ost->tincr2;
|
||||
}
|
||||
|
||||
frame->pts = ost->next_pts;
|
||||
ost->next_pts += frame->nb_samples;
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one audio frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVPacket pkt = { 0 }; // data and size must be 0;
|
||||
AVFrame *frame;
|
||||
int ret;
|
||||
int got_packet;
|
||||
int dst_nb_samples;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
c = ost->st->codec;
|
||||
|
||||
frame = get_audio_frame(ost);
|
||||
|
||||
if (frame) {
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(ost->frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(ost->swr_ctx,
|
||||
ost->frame->data, dst_nb_samples,
|
||||
(const uint8_t **)frame->data, frame->nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
exit(1);
|
||||
}
|
||||
frame = ost->frame;
|
||||
|
||||
frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
|
||||
ost->samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing audio frame: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* video output */
|
||||
|
||||
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
AVFrame *picture;
|
||||
int ret;
|
||||
|
||||
picture = av_frame_alloc();
|
||||
if (!picture)
|
||||
return NULL;
|
||||
|
||||
picture->format = pix_fmt;
|
||||
picture->width = width;
|
||||
picture->height = height;
|
||||
|
||||
/* allocate the buffers for the frame data */
|
||||
ret = av_frame_get_buffer(picture, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate frame data.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return picture;
|
||||
}
|
||||
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = ost->st->codec;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
|
||||
/* open the codec */
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* allocate and init a re-usable frame */
|
||||
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
|
||||
if (!ost->frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* If the output format is not YUV420P, then a temporary YUV420P
|
||||
* picture is needed too. It is then converted to the required
|
||||
* output format. */
|
||||
ost->tmp_frame = NULL;
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (!ost->tmp_frame) {
|
||||
fprintf(stderr, "Could not allocate temporary picture\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a dummy image. */
|
||||
static void fill_yuv_image(AVFrame *pict, int frame_index,
|
||||
int width, int height)
|
||||
{
|
||||
int x, y, i, ret;
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(pict);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
i = frame_index;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
|
||||
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static AVFrame *get_video_frame(OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c = ost->st->codec;
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
/* as we only generate a YUV420P picture, we must convert it
|
||||
* to the codec pixel format if needed */
|
||||
if (!ost->sws_ctx) {
|
||||
ost->sws_ctx = sws_getContext(c->width, c->height,
|
||||
AV_PIX_FMT_YUV420P,
|
||||
c->width, c->height,
|
||||
c->pix_fmt,
|
||||
SCALE_FLAGS, NULL, NULL, NULL);
|
||||
if (!ost->sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Could not initialize the conversion context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
|
||||
sws_scale(ost->sws_ctx,
|
||||
(const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
|
||||
0, c->height, ost->frame->data, ost->frame->linesize);
|
||||
} else {
|
||||
fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
|
||||
}
|
||||
|
||||
ost->frame->pts = ost->next_pts++;
|
||||
|
||||
return ost->frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one video frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c;
|
||||
AVFrame *frame;
|
||||
int got_packet = 0;
|
||||
|
||||
c = ost->st->codec;
|
||||
|
||||
frame = get_video_frame(ost);
|
||||
|
||||
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
|
||||
/* a hack to avoid data copy with some raw video muxers */
|
||||
AVPacket pkt;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
if (!frame)
|
||||
return 1;
|
||||
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = ost->st->index;
|
||||
pkt.data = (uint8_t *)frame;
|
||||
pkt.size = sizeof(AVPicture);
|
||||
|
||||
pkt.pts = pkt.dts = frame->pts;
|
||||
av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
|
||||
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
} else {
|
||||
AVPacket pkt = { 0 };
|
||||
av_init_packet(&pkt);
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
|
||||
|
||||
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
avcodec_close(ost->st->codec);
|
||||
av_frame_free(&ost->frame);
|
||||
av_frame_free(&ost->tmp_frame);
|
||||
sws_freeContext(ost->sws_ctx);
|
||||
swr_free(&ost->swr_ctx);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* media file output */
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
OutputStream video_st = { 0 }, audio_st = { 0 };
|
||||
const char *filename;
|
||||
AVOutputFormat *fmt;
|
||||
AVFormatContext *oc;
|
||||
AVCodec *audio_codec, *video_codec;
|
||||
int ret;
|
||||
int have_video = 0, have_audio = 0;
|
||||
int encode_video = 0, encode_audio = 0;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
/* Initialize libavcodec, and register all codecs and formats. */
|
||||
av_register_all();
|
||||
|
||||
if (argc < 2) {
|
||||
printf("usage: %s output_file\n"
|
||||
"API example program to output a media file with libavformat.\n"
|
||||
"This program generates a synthetic audio and video stream, encodes and\n"
|
||||
"muxes them into a file named output_file.\n"
|
||||
"The output format is automatically guessed according to the file extension.\n"
|
||||
"Raw images can also be output by using '%%d' in the filename.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
filename = argv[1];
|
||||
if (argc > 3 && !strcmp(argv[2], "-flags")) {
|
||||
av_dict_set(&opt, argv[2]+1, argv[3], 0);
|
||||
}
|
||||
|
||||
/* allocate the output media context */
|
||||
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
|
||||
if (!oc) {
|
||||
printf("Could not deduce output format from file extension: using MPEG.\n");
|
||||
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
|
||||
}
|
||||
if (!oc)
|
||||
return 1;
|
||||
|
||||
fmt = oc->oformat;
|
||||
|
||||
/* Add the audio and video streams using the default format codecs
|
||||
* and initialize the codecs. */
|
||||
if (fmt->video_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&video_st, oc, &video_codec, fmt->video_codec);
|
||||
have_video = 1;
|
||||
encode_video = 1;
|
||||
}
|
||||
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
|
||||
have_audio = 1;
|
||||
encode_audio = 1;
|
||||
}
|
||||
|
||||
/* Now that all the parameters are set, we can open the audio and
|
||||
* video codecs and allocate the necessary encode buffers. */
|
||||
if (have_video)
|
||||
open_video(oc, video_codec, &video_st, opt);
|
||||
|
||||
if (have_audio)
|
||||
open_audio(oc, audio_codec, &audio_st, opt);
|
||||
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
/* open the output file, if needed */
|
||||
if (!(fmt->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open '%s': %s\n", filename,
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the stream header, if any. */
|
||||
ret = avformat_write_header(oc, &opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file: %s\n",
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
while (encode_video || encode_audio) {
|
||||
/* select the stream to encode */
|
||||
if (encode_video &&
|
||||
(!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
|
||||
audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
|
||||
encode_video = !write_video_frame(oc, &video_st);
|
||||
} else {
|
||||
encode_audio = !write_audio_frame(oc, &audio_st);
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the trailer, if any. The trailer must be written before you
|
||||
* close the CodecContexts open when you wrote the header; otherwise
|
||||
* av_write_trailer() may try to use memory that was freed on
|
||||
* av_codec_close(). */
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
if (have_video)
|
||||
close_stream(oc, &video_st);
|
||||
if (have_audio)
|
||||
close_stream(oc, &audio_st);
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
/* Close the output file. */
|
||||
avio_closep(&oc->pb);
|
||||
|
||||
/* free the stream */
|
||||
avformat_free_context(oc);
|
||||
|
||||
return 0;
|
||||
}
165 Externals/ffmpeg/shared/doc/examples/remuxing.c vendored
@ -1,165 +0,0 @@
/*
 * Copyright (c) 2013 Stefano Sabatini
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat/libavcodec demuxing and muxing API example.
 *
 * Remux streams from one container format to another.
 * @example remuxing.c
 */

#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

int main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    const char *in_filename, *out_filename;
    int ret, i;

    if (argc < 3) {
        printf("usage: %s input output\n"
               "API example program to remux a media file with libavformat and libavcodec.\n"
               "The output format is guessed according to the file extension.\n"
               "\n", argv[0]);
        return 1;
    }

    in_filename  = argv[1];
    out_filename = argv[2];

    av_register_all();

    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }

    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    ofmt = ofmt_ctx->oformat;

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }

    while (1) {
        AVStream *in_stream, *out_stream;

        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;

        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];

        log_packet(ifmt_ctx, &pkt, "in");

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(ofmt_ctx, &pkt, "out");

        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
    }

    av_write_trailer(ofmt_ctx);
end:

    avformat_close_input(&ifmt_ctx);

    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }

    return 0;
}
@ -1,214 +0,0 @@
/*
 * Copyright (c) 2012 Stefano Sabatini
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @example resampling_audio.c
 * libswresample API use example.
 */

#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>

static int get_format_from_sample_fmt(const char **fmt,
                                      enum AVSampleFormat sample_fmt)
{
    int i;
    struct sample_fmt_entry {
        enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
    } sample_fmt_entries[] = {
        { AV_SAMPLE_FMT_U8,  "u8",    "u8"    },
        { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
        { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
        { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
        { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
    };
    *fmt = NULL;

    for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
        struct sample_fmt_entry *entry = &sample_fmt_entries[i];
        if (sample_fmt == entry->sample_fmt) {
            *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
            return 0;
        }
    }

    fprintf(stderr,
            "Sample format %s not supported as output format\n",
            av_get_sample_fmt_name(sample_fmt));
    return AVERROR(EINVAL);
}

/**
 * Fill dst buffer with nb_samples, generated starting from t.
 */
static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
{
    int i, j;
    double tincr = 1.0 / sample_rate, *dstp = dst;
    const double c = 2 * M_PI * 440.0;

    /* generate sin tone with 440Hz frequency and duplicated channels */
    for (i = 0; i < nb_samples; i++) {
        *dstp = sin(c * *t);
        for (j = 1; j < nb_channels; j++)
            dstp[j] = dstp[0];
        dstp += nb_channels;
        *t += tincr;
    }
}

int main(int argc, char **argv)
{
    int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
    int src_rate = 48000, dst_rate = 44100;
    uint8_t **src_data = NULL, **dst_data = NULL;
    int src_nb_channels = 0, dst_nb_channels = 0;
    int src_linesize, dst_linesize;
    int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
    enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
    const char *dst_filename = NULL;
    FILE *dst_file;
    int dst_bufsize;
    const char *fmt;
    struct SwrContext *swr_ctx;
    double t;
    int ret;

    if (argc != 2) {
        fprintf(stderr, "Usage: %s output_file\n"
                "API example program to show how to resample an audio stream with libswresample.\n"
                "This program generates a series of audio frames, resamples them to a specified "
                "output format and rate and saves them to an output file named output_file.\n",
                argv[0]);
        exit(1);
    }
    dst_filename = argv[1];

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
        fprintf(stderr, "Could not open destination file %s\n", dst_filename);
        exit(1);
    }

    /* create resampler context */
    swr_ctx = swr_alloc();
    if (!swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* set options */
    av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
    av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);

    av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
    av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);

    /* initialize the resampling context */
    if ((ret = swr_init(swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        goto end;
    }

    /* allocate source and destination samples buffers */

    src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
    ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
                                             src_nb_samples, src_sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate source samples\n");
        goto end;
    }

    /* compute the number of converted samples: buffering is avoided
     * ensuring that the output buffer will contain at least all the
     * converted input samples */
    max_dst_nb_samples = dst_nb_samples =
        av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);

    /* buffer is going to be directly written to a rawaudio file, no alignment */
    dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
    ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
                                             dst_nb_samples, dst_sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate destination samples\n");
        goto end;
    }

    t = 0;
    do {
        /* generate synthetic audio */
        fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);

        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
                                        src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
        if (dst_nb_samples > max_dst_nb_samples) {
            av_freep(&dst_data[0]);
            ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
                                   dst_nb_samples, dst_sample_fmt, 1);
            if (ret < 0)
                break;
            max_dst_nb_samples = dst_nb_samples;
        }

        /* convert to destination format */
        ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            goto end;
        }
        dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
                                                 ret, dst_sample_fmt, 1);
        if (dst_bufsize < 0) {
            fprintf(stderr, "Could not get sample buffer size\n");
            goto end;
        }
        printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
        fwrite(dst_data[0], 1, dst_bufsize, dst_file);
    } while (t < 10);

    if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
        goto end;
    fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
            "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
            fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);

end:
    fclose(dst_file);

    if (src_data)
        av_freep(&src_data[0]);
    av_freep(&src_data);

    if (dst_data)
        av_freep(&dst_data[0]);
    av_freep(&dst_data);

    swr_free(&swr_ctx);
    return ret < 0;
}
140 Externals/ffmpeg/shared/doc/examples/scaling_video.c vendored
@ -1,140 +0,0 @@
/*
 * Copyright (c) 2012 Stefano Sabatini
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libswscale API use example.
 * @example scaling_video.c
 */

#include <libavutil/imgutils.h>
#include <libavutil/parseutils.h>
#include <libswscale/swscale.h>

static void fill_yuv_image(uint8_t *data[4], int linesize[4],
                           int width, int height, int frame_index)
{
    int x, y;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            data[0][y * linesize[0] + x] = x + y + frame_index * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
            data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
        }
    }
}

int main(int argc, char **argv)
{
    uint8_t *src_data[4], *dst_data[4];
    int src_linesize[4], dst_linesize[4];
    int src_w = 320, src_h = 240, dst_w, dst_h;
    enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
    const char *dst_size = NULL;
    const char *dst_filename = NULL;
    FILE *dst_file;
    int dst_bufsize;
    struct SwsContext *sws_ctx;
    int i, ret;

    if (argc != 3) {
        fprintf(stderr, "Usage: %s output_file output_size\n"
                "API example program to show how to scale an image with libswscale.\n"
                "This program generates a series of pictures, rescales them to the given "
                "output_size and saves them to an output file named output_file.\n"
                "\n", argv[0]);
        exit(1);
    }
    dst_filename = argv[1];
    dst_size     = argv[2];

    if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
        fprintf(stderr,
                "Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
                dst_size);
        exit(1);
    }

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
        fprintf(stderr, "Could not open destination file %s\n", dst_filename);
        exit(1);
    }

    /* create scaling context */
    sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
                             dst_w, dst_h, dst_pix_fmt,
                             SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws_ctx) {
        fprintf(stderr,
                "Impossible to create scale context for the conversion "
                "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
                av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
                av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
        ret = AVERROR(EINVAL);
        goto end;
    }

    /* allocate source and destination image buffers */
    if ((ret = av_image_alloc(src_data, src_linesize,
                              src_w, src_h, src_pix_fmt, 16)) < 0) {
        fprintf(stderr, "Could not allocate source image\n");
        goto end;
    }

    /* buffer is going to be written to rawvideo file, no alignment */
    if ((ret = av_image_alloc(dst_data, dst_linesize,
                              dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
        fprintf(stderr, "Could not allocate destination image\n");
        goto end;
    }
    dst_bufsize = ret;

    for (i = 0; i < 100; i++) {
        /* generate synthetic video */
        fill_yuv_image(src_data, src_linesize, src_w, src_h, i);

        /* convert to destination format */
        sws_scale(sws_ctx, (const uint8_t * const*)src_data,
                  src_linesize, 0, src_h, dst_data, dst_linesize);

        /* write scaled image to file */
        fwrite(dst_data[0], 1, dst_bufsize, dst_file);
    }

    fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
            "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
            av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);

end:
    fclose(dst_file);
    av_freep(&src_data[0]);
    av_freep(&dst_data[0]);
    sws_freeContext(sws_ctx);
    return ret < 0;
}
755 Externals/ffmpeg/shared/doc/examples/transcode_aac.c vendored
@ -1,755 +0,0 @@
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* simple audio converter
|
||||
*
|
||||
* @example transcode_aac.c
|
||||
* Convert an input audio file to AAC in an MP4 container using FFmpeg.
|
||||
* @author Andreas Unterweger (dustsigns@gmail.com)
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/avio.h"
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
||||
#include "libavutil/audio_fifo.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/frame.h"
|
||||
#include "libavutil/opt.h"
|
||||
|
||||
#include "libswresample/swresample.h"
|
||||
|
||||
/** The output bit rate in kbit/s */
|
||||
#define OUTPUT_BIT_RATE 48000
|
||||
/** The number of output channels */
|
||||
#define OUTPUT_CHANNELS 2
|
||||
/** The audio sample output format */
|
||||
#define OUTPUT_SAMPLE_FORMAT AV_SAMPLE_FMT_S16
|
||||
|
||||
/**
|
||||
* Convert an error code into a text message.
|
||||
* @param error Error code to be converted
|
||||
* @return Corresponding error text (not thread-safe)
|
||||
*/
|
||||
static const char *get_error_text(const int error)
|
||||
{
|
||||
static char error_buffer[255];
|
||||
av_strerror(error, error_buffer, sizeof(error_buffer));
|
||||
return error_buffer;
|
||||
}
|
||||
|
||||
/** Open an input file and the required decoder. */
|
||||
static int open_input_file(const char *filename,
|
||||
AVFormatContext **input_format_context,
|
||||
AVCodecContext **input_codec_context)
|
||||
{
|
||||
AVCodec *input_codec;
|
||||
int error;
|
||||
|
||||
/** Open the input file to read from it. */
|
||||
if ((error = avformat_open_input(input_format_context, filename, NULL,
|
||||
NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open input file '%s' (error '%s')\n",
|
||||
filename, get_error_text(error));
|
||||
*input_format_context = NULL;
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Get information on the input file (number of streams etc.). */
|
||||
if ((error = avformat_find_stream_info(*input_format_context, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open find stream info (error '%s')\n",
|
||||
get_error_text(error));
|
||||
avformat_close_input(input_format_context);
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Make sure that there is only one stream in the input file. */
|
||||
if ((*input_format_context)->nb_streams != 1) {
|
||||
fprintf(stderr, "Expected one audio input stream, but found %d\n",
|
||||
(*input_format_context)->nb_streams);
|
||||
avformat_close_input(input_format_context);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Find a decoder for the audio stream. */
|
||||
if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codec->codec_id))) {
|
||||
fprintf(stderr, "Could not find input codec\n");
|
||||
avformat_close_input(input_format_context);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Open the decoder for the audio stream to use it later. */
|
||||
if ((error = avcodec_open2((*input_format_context)->streams[0]->codec,
|
||||
input_codec, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open input codec (error '%s')\n",
|
||||
get_error_text(error));
|
||||
avformat_close_input(input_format_context);
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Save the decoder context for easier access later. */
|
||||
*input_codec_context = (*input_format_context)->streams[0]->codec;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Open an output file and the required encoder.
|
||||
* Also set some basic encoder parameters.
|
||||
* Some of these parameters are based on the input file's parameters.
|
||||
*/
|
||||
static int open_output_file(const char *filename,
|
||||
AVCodecContext *input_codec_context,
|
||||
AVFormatContext **output_format_context,
|
||||
AVCodecContext **output_codec_context)
|
||||
{
|
||||
AVIOContext *output_io_context = NULL;
|
||||
AVStream *stream = NULL;
|
||||
AVCodec *output_codec = NULL;
|
||||
int error;
|
||||
|
||||
/** Open the output file to write to it. */
|
||||
if ((error = avio_open(&output_io_context, filename,
|
||||
AVIO_FLAG_WRITE)) < 0) {
|
||||
fprintf(stderr, "Could not open output file '%s' (error '%s')\n",
|
||||
filename, get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Create a new format context for the output container format. */
|
||||
if (!(*output_format_context = avformat_alloc_context())) {
|
||||
fprintf(stderr, "Could not allocate output format context\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/** Associate the output file (pointer) with the container format context. */
|
||||
(*output_format_context)->pb = output_io_context;
|
||||
|
||||
/** Guess the desired container format based on the file extension. */
|
||||
if (!((*output_format_context)->oformat = av_guess_format(NULL, filename,
|
||||
NULL))) {
|
||||
fprintf(stderr, "Could not find output file format\n");
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
av_strlcpy((*output_format_context)->filename, filename,
|
||||
sizeof((*output_format_context)->filename));
|
||||
|
||||
/** Find the encoder to be used by its name. */
|
||||
if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC))) {
|
||||
fprintf(stderr, "Could not find an AAC encoder.\n");
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/** Create a new audio stream in the output file container. */
|
||||
if (!(stream = avformat_new_stream(*output_format_context, output_codec))) {
|
||||
fprintf(stderr, "Could not create new stream\n");
|
||||
error = AVERROR(ENOMEM);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/** Save the encoder context for easiert access later. */
|
||||
*output_codec_context = stream->codec;
|
||||
|
||||
/**
|
||||
* Set the basic encoder parameters.
|
||||
* The input file's sample rate is used to avoid a sample rate conversion.
|
||||
*/
|
||||
(*output_codec_context)->channels = OUTPUT_CHANNELS;
|
||||
(*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
|
||||
(*output_codec_context)->sample_rate = input_codec_context->sample_rate;
|
||||
(*output_codec_context)->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
(*output_codec_context)->bit_rate = OUTPUT_BIT_RATE;
|
||||
|
||||
/**
|
||||
* Some container formats (like MP4) require global headers to be present
|
||||
* Mark the encoder so that it behaves accordingly.
|
||||
*/
|
||||
if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
(*output_codec_context)->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
/** Open the encoder for the audio stream to use it later. */
|
||||
if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open output codec (error '%s')\n",
|
||||
get_error_text(error));
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
avio_closep(&(*output_format_context)->pb);
|
||||
avformat_free_context(*output_format_context);
|
||||
*output_format_context = NULL;
|
||||
return error < 0 ? error : AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Initialize one data packet for reading or writing. */
|
||||
static void init_packet(AVPacket *packet)
|
||||
{
|
||||
av_init_packet(packet);
|
||||
/** Set the packet data and size so that it is recognized as being empty. */
|
||||
packet->data = NULL;
|
||||
packet->size = 0;
|
||||
}
|
||||
|
||||
/** Initialize one audio frame for reading from the input file */
|
||||
static int init_input_frame(AVFrame **frame)
|
||||
{
|
||||
if (!(*frame = av_frame_alloc())) {
|
||||
fprintf(stderr, "Could not allocate input frame\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the audio resampler based on the input and output codec settings.
|
||||
* If the input and output sample formats differ, a conversion is required
|
||||
* libswresample takes care of this, but requires initialization.
|
||||
*/
|
||||
static int init_resampler(AVCodecContext *input_codec_context,
|
||||
AVCodecContext *output_codec_context,
|
||||
SwrContext **resample_context)
|
||||
{
|
||||
int error;
|
||||
|
||||
/**
|
||||
* Create a resampler context for the conversion.
|
||||
* Set the conversion parameters.
|
||||
* Default channel layouts based on the number of channels
|
||||
* are assumed for simplicity (they are sometimes not detected
|
||||
* properly by the demuxer and/or decoder).
|
||||
*/
|
||||
*resample_context = swr_alloc_set_opts(NULL,
|
||||
av_get_default_channel_layout(output_codec_context->channels),
|
||||
output_codec_context->sample_fmt,
|
||||
output_codec_context->sample_rate,
|
||||
av_get_default_channel_layout(input_codec_context->channels),
|
||||
input_codec_context->sample_fmt,
|
||||
input_codec_context->sample_rate,
|
||||
0, NULL);
|
||||
if (!*resample_context) {
|
||||
fprintf(stderr, "Could not allocate resample context\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
/**
|
||||
* Perform a sanity check so that the number of converted samples is
|
||||
* not greater than the number of samples to be converted.
|
||||
* If the sample rates differ, this case has to be handled differently
|
||||
*/
|
||||
av_assert0(output_codec_context->sample_rate == input_codec_context->sample_rate);
|
||||
|
||||
/** Open the resampler with the specified parameters. */
|
||||
if ((error = swr_init(*resample_context)) < 0) {
|
||||
fprintf(stderr, "Could not open resample context\n");
|
||||
swr_free(resample_context);
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Initialize a FIFO buffer for the audio samples to be encoded. */
|
||||
static int init_fifo(AVAudioFifo **fifo)
|
||||
{
|
||||
/** Create the FIFO buffer based on the specified output sample format. */
|
||||
if (!(*fifo = av_audio_fifo_alloc(OUTPUT_SAMPLE_FORMAT, OUTPUT_CHANNELS, 1))) {
|
||||
fprintf(stderr, "Could not allocate FIFO\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Write the header of the output file container. */
|
||||
static int write_output_file_header(AVFormatContext *output_format_context)
|
||||
{
|
||||
int error;
|
||||
if ((error = avformat_write_header(output_format_context, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not write output file header (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Decode one audio frame from the input file. */
|
||||
static int decode_audio_frame(AVFrame *frame,
|
||||
AVFormatContext *input_format_context,
|
||||
AVCodecContext *input_codec_context,
|
||||
int *data_present, int *finished)
|
||||
{
|
||||
/** Packet used for temporary storage. */
|
||||
AVPacket input_packet;
|
||||
int error;
|
||||
init_packet(&input_packet);
|
||||
|
||||
/** Read one audio frame from the input file into a temporary packet. */
|
||||
if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
|
||||
/** If we are at the end of the file, flush the decoder below. */
|
||||
if (error == AVERROR_EOF)
|
||||
*finished = 1;
|
||||
else {
|
||||
fprintf(stderr, "Could not read frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode the audio frame stored in the temporary packet.
|
||||
* The input audio stream decoder is used to do this.
|
||||
* If we are at the end of the file, pass an empty packet to the decoder
|
||||
* to flush it.
|
||||
*/
|
||||
if ((error = avcodec_decode_audio4(input_codec_context, frame,
|
||||
data_present, &input_packet)) < 0) {
|
||||
fprintf(stderr, "Could not decode frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_free_packet(&input_packet);
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* If the decoder has not been flushed completely, we are not finished,
|
||||
* so that this function has to be called again.
|
||||
*/
|
||||
if (*finished && *data_present)
|
||||
*finished = 0;
|
||||
av_free_packet(&input_packet);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize a temporary storage for the specified number of audio samples.
|
||||
* The conversion requires temporary storage due to the different format.
|
||||
* The number of audio samples to be allocated is specified in frame_size.
|
||||
*/
|
||||
static int init_converted_samples(uint8_t ***converted_input_samples,
|
||||
AVCodecContext *output_codec_context,
|
||||
int frame_size)
|
||||
{
|
||||
int error;
|
||||
|
||||
/**
|
||||
* Allocate as many pointers as there are audio channels.
|
||||
* Each pointer will later point to the audio samples of the corresponding
|
||||
* channels (although it may be NULL for interleaved formats).
|
||||
*/
|
||||
if (!(*converted_input_samples = calloc(output_codec_context->channels,
|
||||
sizeof(**converted_input_samples)))) {
|
||||
fprintf(stderr, "Could not allocate converted input sample pointers\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/**
|
||||
* Allocate memory for the samples of all channels in one consecutive
|
||||
* block for convenience.
|
||||
*/
|
||||
if ((error = av_samples_alloc(*converted_input_samples, NULL,
|
||||
output_codec_context->channels,
|
||||
frame_size,
|
||||
output_codec_context->sample_fmt, 0)) < 0) {
|
||||
fprintf(stderr,
|
||||
"Could not allocate converted input samples (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_freep(&(*converted_input_samples)[0]);
|
||||
free(*converted_input_samples);
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert the input audio samples into the output sample format.
|
||||
* The conversion happens on a per-frame basis, the size of which is specified
|
||||
* by frame_size.
|
||||
*/
|
||||
static int convert_samples(const uint8_t **input_data,
|
||||
uint8_t **converted_data, const int frame_size,
|
||||
SwrContext *resample_context)
|
||||
{
|
||||
int error;
|
||||
|
||||
/** Convert the samples using the resampler. */
|
||||
if ((error = swr_convert(resample_context,
|
||||
converted_data, frame_size,
|
||||
input_data , frame_size)) < 0) {
|
||||
fprintf(stderr, "Could not convert input samples (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Add converted input audio samples to the FIFO buffer for later processing. */
|
||||
static int add_samples_to_fifo(AVAudioFifo *fifo,
|
||||
uint8_t **converted_input_samples,
|
||||
const int frame_size)
|
||||
{
|
||||
int error;
|
||||
|
||||
/**
|
||||
* Make the FIFO as large as it needs to be to hold both,
|
||||
* the old and the new samples.
|
||||
*/
|
||||
if ((error = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size)) < 0) {
|
||||
fprintf(stderr, "Could not reallocate FIFO\n");
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Store the new samples in the FIFO buffer. */
|
||||
if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
|
||||
frame_size) < frame_size) {
|
||||
fprintf(stderr, "Could not write data to FIFO\n");
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read one audio frame from the input file, decodes, converts and stores
|
||||
* it in the FIFO buffer.
|
||||
*/
|
||||
static int read_decode_convert_and_store(AVAudioFifo *fifo,
|
||||
AVFormatContext *input_format_context,
|
||||
AVCodecContext *input_codec_context,
|
||||
AVCodecContext *output_codec_context,
|
||||
SwrContext *resampler_context,
|
||||
int *finished)
|
||||
{
|
||||
/** Temporary storage of the input samples of the frame read from the file. */
|
||||
AVFrame *input_frame = NULL;
|
||||
/** Temporary storage for the converted input samples. */
|
||||
uint8_t **converted_input_samples = NULL;
|
||||
int data_present;
|
||||
int ret = AVERROR_EXIT;
|
||||
|
||||
/** Initialize temporary storage for one input frame. */
|
||||
if (init_input_frame(&input_frame))
|
||||
goto cleanup;
|
||||
/** Decode one frame worth of audio samples. */
|
||||
if (decode_audio_frame(input_frame, input_format_context,
|
||||
input_codec_context, &data_present, finished))
|
||||
goto cleanup;
|
||||
/**
|
||||
* If we are at the end of the file and there are no more samples
|
||||
* in the decoder which are delayed, we are actually finished.
|
||||
* This must not be treated as an error.
|
||||
*/
|
||||
if (*finished && !data_present) {
|
||||
ret = 0;
|
||||
goto cleanup;
|
||||
}
|
||||
/** If there is decoded data, convert and store it */
|
||||
if (data_present) {
|
||||
/** Initialize the temporary storage for the converted input samples. */
|
||||
if (init_converted_samples(&converted_input_samples, output_codec_context,
|
||||
input_frame->nb_samples))
|
||||
goto cleanup;
|
||||
|
||||
/**
|
||||
* Convert the input samples to the desired output sample format.
|
||||
* This requires a temporary storage provided by converted_input_samples.
|
||||
*/
|
||||
if (convert_samples((const uint8_t**)input_frame->extended_data, converted_input_samples,
|
||||
input_frame->nb_samples, resampler_context))
|
||||
goto cleanup;
|
||||
|
||||
/** Add the converted input samples to the FIFO buffer for later processing. */
|
||||
if (add_samples_to_fifo(fifo, converted_input_samples,
|
||||
input_frame->nb_samples))
|
||||
goto cleanup;
|
||||
ret = 0;
|
||||
}
|
||||
ret = 0;
|
||||
|
||||
cleanup:
|
||||
if (converted_input_samples) {
|
||||
av_freep(&converted_input_samples[0]);
|
||||
free(converted_input_samples);
|
||||
}
|
||||
av_frame_free(&input_frame);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize one input frame for writing to the output file.
|
||||
* The frame will be exactly frame_size samples large.
|
||||
*/
|
||||
static int init_output_frame(AVFrame **frame,
|
||||
AVCodecContext *output_codec_context,
|
||||
int frame_size)
|
||||
{
|
||||
int error;
|
||||
|
||||
/** Create a new frame to store the audio samples. */
|
||||
if (!(*frame = av_frame_alloc())) {
|
||||
fprintf(stderr, "Could not allocate output frame\n");
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the frame's parameters, especially its size and format.
|
||||
* av_frame_get_buffer needs this to allocate memory for the
|
||||
* audio samples of the frame.
|
||||
* Default channel layouts based on the number of channels
|
||||
* are assumed for simplicity.
|
||||
*/
|
||||
(*frame)->nb_samples = frame_size;
|
||||
(*frame)->channel_layout = output_codec_context->channel_layout;
|
||||
(*frame)->format = output_codec_context->sample_fmt;
|
||||
(*frame)->sample_rate = output_codec_context->sample_rate;
|
||||
|
||||
/**
|
||||
* Allocate the samples of the created frame. This call will make
|
||||
* sure that the audio frame can hold as many samples as specified.
|
||||
*/
|
||||
if ((error = av_frame_get_buffer(*frame, 0)) < 0) {
|
||||
fprintf(stderr, "Could allocate output frame samples (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_frame_free(frame);
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Encode one frame worth of audio to the output file. */
|
||||
static int encode_audio_frame(AVFrame *frame,
|
||||
AVFormatContext *output_format_context,
|
||||
AVCodecContext *output_codec_context,
|
||||
int *data_present)
|
||||
{
|
||||
/** Packet used for temporary storage. */
|
||||
AVPacket output_packet;
|
||||
int error;
|
||||
init_packet(&output_packet);
|
||||
|
||||
/**
|
||||
* Encode the audio frame and store it in the temporary packet.
|
||||
* The output audio stream encoder is used to do this.
|
||||
*/
|
||||
if ((error = avcodec_encode_audio2(output_codec_context, &output_packet,
|
||||
frame, data_present)) < 0) {
|
||||
fprintf(stderr, "Could not encode frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_free_packet(&output_packet);
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Write one audio frame from the temporary packet to the output file. */
|
||||
if (*data_present) {
|
||||
if ((error = av_write_frame(output_format_context, &output_packet)) < 0) {
|
||||
fprintf(stderr, "Could not write frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_free_packet(&output_packet);
|
||||
return error;
|
||||
}
|
||||
|
||||
av_free_packet(&output_packet);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Load one audio frame from the FIFO buffer, encode and write it to the
|
||||
* output file.
|
||||
*/
|
||||
static int load_encode_and_write(AVAudioFifo *fifo,
|
||||
AVFormatContext *output_format_context,
|
||||
AVCodecContext *output_codec_context)
|
||||
{
|
||||
/** Temporary storage of the output samples of the frame written to the file. */
|
||||
AVFrame *output_frame;
|
||||
/**
|
||||
* Use the maximum number of possible samples per frame.
|
||||
* If there is less than the maximum possible frame size in the FIFO
|
||||
* buffer use this number. Otherwise, use the maximum possible frame size
|
||||
*/
|
||||
const int frame_size = FFMIN(av_audio_fifo_size(fifo),
|
||||
output_codec_context->frame_size);
|
||||
int data_written;
|
||||
|
||||
/** Initialize temporary storage for one output frame. */
|
||||
if (init_output_frame(&output_frame, output_codec_context, frame_size))
|
||||
return AVERROR_EXIT;
|
||||
|
||||
/**
|
||||
* Read as many samples from the FIFO buffer as required to fill the frame.
|
||||
* The samples are stored in the frame temporarily.
|
||||
*/
|
||||
if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
|
||||
fprintf(stderr, "Could not read data from FIFO\n");
|
||||
av_frame_free(&output_frame);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Encode one frame worth of audio samples. */
|
||||
if (encode_audio_frame(output_frame, output_format_context,
|
||||
output_codec_context, &data_written)) {
|
||||
av_frame_free(&output_frame);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
av_frame_free(&output_frame);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Write the trailer of the output file container. */
|
||||
static int write_output_file_trailer(AVFormatContext *output_format_context)
|
||||
{
|
||||
int error;
|
||||
if ((error = av_write_trailer(output_format_context)) < 0) {
|
||||
fprintf(stderr, "Could not write output file trailer (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}

/** Convert an audio file to an AAC file in an MP4 container. */
int main(int argc, char **argv)
{
    AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
    AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
    SwrContext *resample_context = NULL;
    AVAudioFifo *fifo = NULL;
    int ret = AVERROR_EXIT;

    if (argc < 3) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        exit(1);
    }

    /** Register all codecs and formats so that they can be used. */
    av_register_all();
    /** Open the input file for reading. */
    if (open_input_file(argv[1], &input_format_context,
                        &input_codec_context))
        goto cleanup;
    /** Open the output file for writing. */
    if (open_output_file(argv[2], input_codec_context,
                         &output_format_context, &output_codec_context))
        goto cleanup;
    /** Initialize the resampler to be able to convert audio sample formats. */
    if (init_resampler(input_codec_context, output_codec_context,
                       &resample_context))
        goto cleanup;
    /** Initialize the FIFO buffer to store audio samples to be encoded. */
    if (init_fifo(&fifo))
        goto cleanup;
    /** Write the header of the output file container. */
    if (write_output_file_header(output_format_context))
        goto cleanup;

    /**
     * Loop as long as we have input samples to read or output samples
     * to write; abort as soon as we have neither.
     */
    while (1) {
        /** Use the encoder's desired frame size for processing. */
        const int output_frame_size = output_codec_context->frame_size;
        int finished = 0;

        /**
         * Make sure that there is one frame worth of samples in the FIFO
         * buffer so that the encoder can do its work.
         * Since the decoder's and the encoder's frame size may differ, we
         * need the FIFO buffer to store as many frames worth of input samples
         * as it takes to make up at least one frame worth of output samples.
         */
        while (av_audio_fifo_size(fifo) < output_frame_size) {
            /**
             * Decode one frame worth of audio samples, convert it to the
             * output sample format and put it into the FIFO buffer.
             */
            if (read_decode_convert_and_store(fifo, input_format_context,
                                              input_codec_context,
                                              output_codec_context,
                                              resample_context, &finished))
                goto cleanup;

            /**
             * If we are at the end of the input file, we continue
             * encoding the remaining audio samples to the output file.
             */
            if (finished)
                break;
        }

        /**
         * If we have enough samples for the encoder, we encode them.
         * At the end of the file, we pass the remaining samples to
         * the encoder.
         */
        while (av_audio_fifo_size(fifo) >= output_frame_size ||
               (finished && av_audio_fifo_size(fifo) > 0))
            /**
             * Take one frame worth of audio samples from the FIFO buffer,
             * encode it and write it to the output file.
             */
            if (load_encode_and_write(fifo, output_format_context,
                                      output_codec_context))
                goto cleanup;

        /**
         * If we are at the end of the input file and have encoded
         * all remaining samples, we can exit this loop and finish.
         */
        if (finished) {
            int data_written;
            /** Flush the encoder as it may have delayed frames. */
            do {
                if (encode_audio_frame(NULL, output_format_context,
                                       output_codec_context, &data_written))
                    goto cleanup;
            } while (data_written);
            break;
        }
    }

    /** Write the trailer of the output file container. */
    if (write_output_file_trailer(output_format_context))
        goto cleanup;
    ret = 0;

cleanup:
    if (fifo)
        av_audio_fifo_free(fifo);
    swr_free(&resample_context);
    if (output_codec_context)
        avcodec_close(output_codec_context);
    if (output_format_context) {
        avio_closep(&output_format_context->pb);
        avformat_free_context(output_format_context);
    }
    if (input_codec_context)
        avcodec_close(input_codec_context);
    if (input_format_context)
        avformat_close_input(&input_format_context);

    return ret;
}
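
/*
 * The main loop above relies on AVAudioFifo to decouple the decoder's frame
 * size from the encoder's frame size. A minimal, self-contained sketch of
 * that buffering pattern (the sample format, channel count and the names
 * decoded_data, decoded_nb_samples, frame and encoder_frame_size are
 * placeholders, not part of this example):
 *
 *     AVAudioFifo *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_FLTP, 2, 1);
 *     if (!fifo)
 *         return AVERROR(ENOMEM);
 *     // Append decoded samples; the FIFO grows automatically as needed.
 *     if (av_audio_fifo_write(fifo, (void **)decoded_data,
 *                             decoded_nb_samples) < decoded_nb_samples)
 *         return AVERROR_EXIT;
 *     // Drain in encoder-sized chunks once enough samples are buffered.
 *     while (av_audio_fifo_size(fifo) >= encoder_frame_size)
 *         av_audio_fifo_read(fifo, (void **)frame->data, encoder_frame_size);
 *     av_audio_fifo_free(fifo);
 */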
583
Externals/ffmpeg/shared/doc/examples/transcoding.c
vendored
@@ -1,583 +0,0 @@
/*
 * Copyright (c) 2010 Nicolas George
 * Copyright (c) 2011 Stefano Sabatini
 * Copyright (c) 2014 Andrey Utkin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * API example for demuxing, decoding, filtering, encoding and muxing
 * @example transcoding.c
 */

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>

static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;

static int open_input_file(const char *filename)
{
    int ret;
    unsigned int i;

    ifmt_ctx = NULL;
    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *stream;
        AVCodecContext *codec_ctx;
        stream = ifmt_ctx->streams[i];
        codec_ctx = stream->codec;
        /* Reencode video & audio and remux subtitles etc. */
        if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* Open decoder */
            ret = avcodec_open2(codec_ctx,
                    avcodec_find_decoder(codec_ctx->codec_id), NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                return ret;
            }
        }
    }

    av_dump_format(ifmt_ctx, 0, filename, 0);
    return 0;
}

static int open_output_file(const char *filename)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;
    int ret;
    unsigned int i;

    ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }

        in_stream = ifmt_ctx->streams[i];
        dec_ctx = in_stream->codec;
        enc_ctx = out_stream->codec;

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* In this example, we choose transcoding to the same codec. */
            encoder = avcodec_find_encoder(dec_ctx->codec_id);
            if (!encoder) {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }

            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters. */
            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                enc_ctx->height = dec_ctx->height;
                enc_ctx->width = dec_ctx->width;
                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                /* take first format from list of supported formats */
                enc_ctx->pix_fmt = encoder->pix_fmts[0];
                /* video time_base can be set to whatever is handy and supported by encoder */
                enc_ctx->time_base = dec_ctx->time_base;
            } else {
                enc_ctx->sample_rate = dec_ctx->sample_rate;
                enc_ctx->channel_layout = dec_ctx->channel_layout;
                enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                /* take first format from list of supported formats */
                enc_ctx->sample_fmt = encoder->sample_fmts[0];
                enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
            }

            /* Third parameter can be used to pass settings to encoder */
            ret = avcodec_open2(enc_ctx, encoder, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open encoder for stream #%u\n", i);
                return ret;
            }
        } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%u is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        } else {
            /* if this stream must be remuxed */
            ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
                                       ifmt_ctx->streams[i]->codec);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
                return ret;
            }
        }

        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    av_dump_format(ofmt_ctx, 0, filename, 1);

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }

    /* init muxer, write output file header */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }

    return 0;
}
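
/*
 * As the comment in open_output_file() above notes, the third parameter of
 * avcodec_open2() can carry encoder settings. A small sketch of passing
 * private options via an AVDictionary (the option name "preset" is an
 * encoder-specific example, not something this program requires):
 *
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "preset", "medium", 0); // consumed by encoders that know it
 *     ret = avcodec_open2(enc_ctx, encoder, &opts);
 *     av_dict_free(&opts);                       // unrecognized entries remain in opts
 *     if (ret < 0)
 *         return ret;
 */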

static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
        AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    AVFilter *buffersrc = NULL;
    AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        snprintf(args, sizeof(args),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                dec_ctx->time_base.num, dec_ctx->time_base.den,
                dec_ctx->sample_aspect_ratio.num,
                dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        if (!dec_ctx->channel_layout)
            dec_ctx->channel_layout =
                av_get_default_channel_layout(dec_ctx->channels);
        snprintf(args, sizeof(args),
                "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
                dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                av_get_sample_fmt_name(dec_ctx->sample_fmt),
                dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
                (uint8_t*)&enc_ctx->channel_layout,
                sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    } else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
            &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

static int init_filters(void)
{
    const char *filter_spec;
    unsigned int i;
    int ret;
    filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
    if (!filter_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        filter_ctx[i].buffersrc_ctx  = NULL;
        filter_ctx[i].buffersink_ctx = NULL;
        filter_ctx[i].filter_graph   = NULL;
        if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
                || ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
            continue;

        if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            filter_spec = "null"; /* passthrough (dummy) filter for video */
        else
            filter_spec = "anull"; /* passthrough (dummy) filter for audio */
        ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
                ofmt_ctx->streams[i]->codec, filter_spec);
        if (ret)
            return ret;
    }
    return 0;
}
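
/*
 * init_filters() only installs the passthrough filters "null" and "anull",
 * so frames are re-encoded unchanged. Any libavfilter graph description can
 * be supplied instead, as long as its output still matches the encoder set
 * up in open_output_file(). A small sketch (the concrete filter strings are
 * illustrative, not part of this example):
 *
 *     if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
 *         filter_spec = "hflip";        // e.g. mirror the picture
 *     else
 *         filter_spec = "volume=0.5";   // e.g. halve the audio volume
 *
 * Filters that change frame properties (size, sample rate, ...) would also
 * require adjusting the corresponding encoder parameters.
 */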

static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
        (ifmt_ctx->streams[stream_index]->codec->codec_type ==
         AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

    if (!got_frame)
        got_frame = &got_frame_local;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
            filt_frame, got_frame);
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    /* prepare packet for muxing */
    enc_pkt.stream_index = stream_index;
    av_packet_rescale_ts(&enc_pkt,
                         ofmt_ctx->streams[stream_index]->codec->time_base,
                         ofmt_ctx->streams[stream_index]->time_base);

    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}
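
/*
 * encode_write_frame() rescales the packet timestamps from the encoder time
 * base to the stream time base before muxing. Conceptually this amounts to
 *
 *     pkt->pts      = av_rescale_q(pkt->pts,      codec_time_base, stream_time_base);
 *     pkt->dts      = av_rescale_q(pkt->dts,      codec_time_base, stream_time_base);
 *     pkt->duration = av_rescale_q(pkt->duration, codec_time_base, stream_time_base);
 *
 * which is roughly what av_packet_rescale_ts() bundles into one call (sketch
 * only; the real helper also leaves AV_NOPTS_VALUE fields untouched).
 */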

static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
    int ret;
    AVFrame *filt_frame;

    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
            frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }

    /* pull filtered frames from the filtergraph */
    while (1) {
        filt_frame = av_frame_alloc();
        if (!filt_frame) {
            ret = AVERROR(ENOMEM);
            break;
        }
        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
                filt_frame);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion
             */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            av_frame_free(&filt_frame);
            break;
        }

        filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
        ret = encode_write_frame(filt_frame, stream_index, NULL);
        if (ret < 0)
            break;
    }

    return ret;
}

static int flush_encoder(unsigned int stream_index)
{
    int ret;
    int got_frame;

    if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
                CODEC_CAP_DELAY))
        return 0;

    while (1) {
        av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
        ret = encode_write_frame(NULL, stream_index, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
    }
    return ret;
}
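
/*
 * flush_encoder() drains only the encoder side; this example does not drain
 * decoders that have delay. Those could be drained in the same way before
 * flushing the encoders, by feeding an empty packet until got_frame stays 0.
 * A rough sketch, reusing the dec_func/frame/got_frame variables from main()
 * below (not part of this example):
 *
 *     AVPacket flush_pkt = { .data = NULL, .size = 0 };
 *     do {
 *         ret = dec_func(ifmt_ctx->streams[i]->codec, frame,
 *                        &got_frame, &flush_pkt);
 *     } while (ret >= 0 && got_frame);
 */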

int main(int argc, char **argv)
{
    int ret;
    AVPacket packet = { .data = NULL, .size = 0 };
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
                stream_index);

        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ifmt_ctx->streams[stream_index]->codec->time_base);
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                avcodec_decode_audio4;
            ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
                    &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            } else {
                av_frame_free(&frame);
            }
        } else {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_free_packet(&packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
end:
    av_free_packet(&packet);
    av_frame_free(&frame);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_close(ifmt_ctx->streams[i]->codec);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
            avcodec_close(ofmt_ctx->streams[i]->codec);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}
Some files were not shown because too many files have changed in this diff.