From: Good Guy Date: Tue, 17 Mar 2020 01:47:13 +0000 (-0600) Subject: Olaf fixes + Andrea images X-Git-Tag: 2021-05~109 X-Git-Url: https://cinelerra-gg.org/git/?a=commitdiff_plain;h=880f5be6425028cf9a0c07143f62cce0a82ea6bd;p=goodguy%2Fcin-manual-latex.git Olaf fixes + Andrea images --- diff --git a/CinelerraGG_Manual.tex b/CinelerraGG_Manual.tex index 16bd6b9..8e856bf 100644 --- a/CinelerraGG_Manual.tex +++ b/CinelerraGG_Manual.tex @@ -8,7 +8,7 @@ svgnames, \input{common/packages.tex} % common packages \input{common/settings.tex} -%\includeonly{common/title,parts/}% Introduction,parts/Installation,parts/Windows +%\includeonly{common/title,parts/Installation}% ,parts/Introduction,parts/Windows \begin{document} diff --git a/common/title.tex b/common/title.tex index 956c5dd..29e25cb 100644 --- a/common/title.tex +++ b/common/title.tex @@ -1,5 +1,5 @@ -% Cover sheet with full logo, realized -% whith eso-pic as background image. +% Cover sheet with full logo, +% realized with eso-pic as background image. 
\thispagestyle{empty} \newcommand\CinBackgroundLogo{% \put(0,0){% @@ -25,9 +25,9 @@ \clearpage % Remove the logo \ClearShipoutPicture% -\thispagestyle{empty} % Title page definition +\thispagestyle{empty} %\providecommand{\HUGE}{\Huge}% if not using memoir \newlength{\drop}% for my convenience diff --git a/images/align.png b/images/align.png index 6371ff4..ef0830c 100644 Binary files a/images/align.png and b/images/align.png differ diff --git a/images/cut.png b/images/cut.png index 8c3125a..2c2d759 100644 Binary files a/images/cut.png and b/images/cut.png differ diff --git a/images/editing-img001.png b/images/editing-img001.png index 0659fde..d6b6136 100644 Binary files a/images/editing-img001.png and b/images/editing-img001.png differ diff --git a/images/inter-view02.png b/images/inter-view02.png index eac5b54..b0d302b 100644 Binary files a/images/inter-view02.png and b/images/inter-view02.png differ diff --git a/images/j-cut.png b/images/j-cut.png index a5e4bdd..1cb7b6a 100644 Binary files a/images/j-cut.png and b/images/j-cut.png differ diff --git a/images/lenght.png b/images/lenght.png index 4a9210d..96afbd2 100644 Binary files a/images/lenght.png and b/images/lenght.png differ diff --git a/images/nesting.png b/images/nesting.png index 14d5a69..39dc63c 100644 Binary files a/images/nesting.png and b/images/nesting.png differ diff --git a/images/overlay.png b/images/overlay.png index 7b637c7..4cb33c9 100644 Binary files a/images/overlay.png and b/images/overlay.png differ diff --git a/images/patchbay01.png b/images/patchbay01.png index 51b9db5..a28436e 100755 Binary files a/images/patchbay01.png and b/images/patchbay01.png differ diff --git a/images/raw.png b/images/raw.png index 0e51479..f7e39ed 100644 Binary files a/images/raw.png and b/images/raw.png differ diff --git a/images/reverse01.png b/images/reverse01.png index a80ec8d..a1b40e5 100644 Binary files a/images/reverse01.png and b/images/reverse01.png differ diff --git a/images/reverse02.png 
b/images/reverse02.png index b060344..a03d83b 100644 Binary files a/images/reverse02.png and b/images/reverse02.png differ diff --git a/images/shuffle.png b/images/shuffle.png index f453cfe..053d9e6 100644 Binary files a/images/shuffle.png and b/images/shuffle.png differ diff --git a/images/snap.png b/images/snap.png index 863c31b..e16ba34 100644 Binary files a/images/snap.png and b/images/snap.png differ diff --git a/images/stream.png b/images/stream.png index f57b975..3c82d07 100644 Binary files a/images/stream.png and b/images/stream.png differ diff --git a/images/timeline.png b/images/timeline.png index 6df6a03..274b247 100644 Binary files a/images/timeline.png and b/images/timeline.png differ diff --git a/images/trim-display.png b/images/trim-display.png index 78374a6..8756fbe 100644 Binary files a/images/trim-display.png and b/images/trim-display.png differ diff --git a/images/trim.png b/images/trim.png index 793a988..2d3ec28 100644 Binary files a/images/trim.png and b/images/trim.png differ diff --git a/parts/Attributes.tex b/parts/Attributes.tex index eeb6d0e..8709c5f 100644 --- a/parts/Attributes.tex +++ b/parts/Attributes.tex @@ -1,40 +1,53 @@ \chapter{Project and Media Attributes}% \label{cha:project_and_media_attributes} -When you play media files in \CGG{}, the media files have a certain number of tracks, frame size, sample size, and so on. -No matter what attributes the media file has, it is played back according to the project attributes. -So, if an audio file's sample rate is different than the project attributes, it is resampled. -Similarly, if a video file's frame size is different than the project attributes, the video is composited on a black frame, either cropped or bordered with black. - -The project attributes are adjusted in \texttt{file $\rightarrow$ Set Format} (figure~\ref{fig:set-format}) or can be created in \texttt{File $\rightarrow$ New}. 
-When you adjust project settings in \texttt{File $\rightarrow$ New}, a new empty timeline is created. -Every timeline created from this point on uses the same settings. -When you adjust settings in \texttt{Settings $\rightarrow$ Format}, media on the timeline is left unchanged. -But every timeline created from this point uses the same settings. - -\begin{figure}[htpb] - \centering - \includegraphics[width=0.6\linewidth]{set-format.png} - \caption{Set Format window - note the Audio Channel positions} - \label{fig:set-format} +When you play media files in \CGG{}, the media files have a certain +number of tracks, frame size, sample size, and so on. No matter +what attributes the media file has, it is played back according to +the project attributes. So, if an audio file's sample rate is +different than the project attributes, it is resampled. Similarly, +if a video file's frame size is different than the project +attributes, the video is composited on a black frame, either cropped +or bordered with black. + +The project attributes are adjusted in \texttt{file $\rightarrow$ +Set Format} (figure~\ref{fig:set-format}) or can be created in +\texttt{File $\rightarrow$ New}. When you adjust project settings +in \texttt{File $\rightarrow$ New}, a new empty timeline is created. +Every timeline created from this point on uses the same settings. +When you adjust settings in \texttt{Settings $\rightarrow$ Format}, +media on the timeline is left unchanged. But every timeline created +from this point uses the same settings. + +\begin{figure}[htpb]\centering +\includegraphics[width=0.6\linewidth]{set-format.png} + \caption{Set Format window - note the Audio Channel positions} + \label{fig:set-format} \end{figure} -In addition to the standard settings for sample rate, frame rate, and frame size, \CGG{} uses some less traditional settings like channel positions, color model, and aspect ratio. -The aspect ratio refers to the screen aspect ratio. 
- -Edit decision lists , the EDL stored in XML, save the project settings. -Formats which contain media but no edit decisions just add data to the tracks. -Keep in mind details such as if your project sample rate is 48\,kHz and you load a sound file with 96\,kHz, you will still be playing it at 48\,kHz. -Or if you load an EDL file at 96\,kHz and the current project sample rate is 48\,kHz, you will change it to 96\,kHz. - -The New Project window has some options that are different than the Set Format window as you can see by comparing figure~\ref{fig:set-format} above with this figure~\ref{fig:new-project}. -Mostly notably is the field for a directory path and a Project Name. - -\begin{figure}[htpb] - \centering - \includegraphics[width=0.7\linewidth]{new-project.png} - \caption{New Project dialog window} - \label{fig:new-project} +In addition to the standard settings for sample rate, frame rate, +and frame size, \CGG{} uses some less traditional settings like +channel positions, color model, and aspect ratio. The aspect ratio +refers to the screen aspect ratio. + +Edit decision lists , the EDL stored in XML, save the project +settings. Formats which contain media but no edit decisions just +add data to the tracks. Keep in mind details such as if your +project sample rate is 48\,kHz and you load a sound file with +96\,kHz, you will still be playing it at 48\,kHz. Or if you load an +EDL file at 96\,kHz and the current project sample rate is 48\,kHz, +you will change it to 96\,kHz. + +The New Project window has some options that are different than the +Set Format window as you can see by comparing +figure~\ref{fig:set-format} above with this +figure~\ref{fig:new-project}. Mostly notably is the field for a +directory path and a Project Name. + +\begin{figure}[htpb] \centering +\includegraphics[width=0.7\linewidth]{new-project.png} + \caption{New Project dialog window} + \label{fig:new-project} \end{figure} Explanation of the various fields is described next. 
@@ -44,108 +57,152 @@ Explanation of the various fields is described next. \begin{description} - \item[Presets:] - select an option from this menu to have all the project settings set to one of the known standards. Some of the options are 1080P/24, 1080I, 720P/60, PAL, NTSC, YouTube, and CD audio. - - \item[Tracks:] - (in New Project menu only) sets the number of audio tracks for the new project. Tracks can be added or deleted later, but this option is on the New Project menu for convenience. - - \item[Samplerate:] - sets the samplerate of the audio. The project samplerate does not have to be the same as the media sample rate that you load. Media is resampled to match the project sample rate. - - \item[Channels:] - sets the number of audio channels for the new project. The number of audio channels does not have to be the same as the number of tracks. - - \item[Channel positions:] - the currently enabled audio channels and their positions in the audio panning boxes in the track patchbay are displayed in the channel position widget in the Set Format window. - You can see this display on the left side in figure~\ref{fig:set-format} above. - Channel positions are not in New Project window. - - The channels are numbered. - When rendered, the output from channel 1 is rendered to the first output track in the file or the first sound card channel of the sound card. - Later channels are rendered to output tracks numbered consecutively. - The audio channel positions correspond to where in the panning widgets each of the audio outputs is located. - The closer the panning position is to one of the audio outputs, the more signal that speaker gets. - Click on a speaker icon and drag to change the audio channel location. - The speakers can be in any orientation. - A different speaker arrangement is stored for every number of audio channels since normally you do not want the same speaker arrangement for different numbers of channels. 
- - Channel positions is the only setting that does not affect the output necessarily. - It is merely a convenience, so that when more than two channels are used, the pan controls on the timeline can distinguish between them. - It has nothing to do with the actual arrangement of speakers. - Different channels can be positioned very close together to make them have the same output. - +\item[Presets:] select an option from this menu to have all the +project settings set to one of the known standards. Some of the +options are 1080P/24, 1080I, 720P/60, PAL, NTSC, YouTube, and CD +audio. + +\item[Tracks:] (in New Project menu only) sets the number of audio +tracks for the new project. Tracks can be added or deleted later, +but this option is on the New Project menu for convenience. + +\item[Samplerate:] sets the samplerate of the audio. The project +samplerate does not have to be the same as the media sample rate +that you load. Media is resampled to match the project sample rate. + +\item[Channels:] sets the number of audio channels for the new +project. The number of audio channels does not have to be the same +as the number of tracks. + +\item[Channel positions:] the currently enabled audio channels and +their positions in the audio panning boxes in the track patchbay are +displayed in the channel position widget in the Set Format window. +You can see this display on the left side in +figure~\ref{fig:set-format} above. Channel positions are not in New +Project window. + + The channels are numbered. When rendered, the output from channel +1 is rendered to the first output track in the file or the first +sound card channel of the sound card. Later channels are rendered +to output tracks numbered consecutively. The audio channel +positions correspond to where in the panning widgets each of the +audio outputs is located. The closer the panning position is to one +of the audio outputs, the more signal that speaker gets. 
Click on a +speaker icon and drag to change the audio channel location. The +speakers can be in any orientation. A different speaker arrangement +is stored for every number of audio channels since normally you do +not want the same speaker arrangement for different numbers of +channels. + + Channel positions is the only setting that does not affect the +output necessarily. It is merely a convenience, so that when more +than two channels are used, the pan controls on the timeline can +distinguish between them. It has nothing to do with the actual +arrangement of speakers. Different channels can be positioned very +close together to make them have the same output. \end{description} + \section{Video attributes}% \label{sec:video_attributes} \begin{description} - \item[Tracks:] - (in New Project menu only) sets the number of video tracks the new project is assigned. - Tracks can be added or deleted later, but options are provided here for convenience. - - \item[Framerate:] - sets the framerate of the video. - The project framerate does not have to be the same as an individual media file frame rate that you load. - Media is reframed to match the project framerate. - - \item[Canvas size:] - sets the size of the video output. - In addition, each track also has its own frame size. - Initially, the New Project dialog creates video tracks whose size match the video output. - The video track sizes can be changed later without changing the video output. - - \item[Aspect ratio:] - sets the aspect ratio; this aspect ratio refers to the screen aspect ratio. - The aspect ratio is applied to the video output. - The aspect ratio can be different than the ratio that results from the formula: $\dfrac{h}{v}$ (the number of horizontal pixels divided into the number of vertical pixels). - If the aspect ratio differs from the results of the formula above, your output will be in non-square pixels. 
- - \item[Auto aspect ratio:] - if this option is checked, the Set Format dialog always recalculates the Aspect ratio setting based upon the given Canvas size. This ensures pixels are always square. - - \item[Color model:] - the internal color space of \CGG{} is X11 sRGB without color profile. \CGG{} always switches to sRGB when applying filters or using the compositing engine. Different case for decoding/playback or encoding/output; the project will be stored in the color model video that is selected in the dropdown. - Color model is important for video playback because video has the disadvantage of being slow compared to audio. - Video is stored on disk in one colormodel, usually a YUV derivative. - When played back, \CGG{} decompresses it from the file format directly into the format of the output device. - If effects are processed, the program decompresses the video into an intermediate colormodel first and then converts it to the format of the output device. - The selection of an intermediate colormodel determines how fast and accurate the effects are. - A list of the current colormodel choices follows. - - \begin{description} - \item[RGB-8 bit] - Allocates 8\,bits for the R, G, and B channels and no alpha. This is normally used for uncompressed media with low dynamic range. - \item[RGBA-8 bit] - Allocates an alpha channel to the 8\,bit RGB colormodel. It can be used for overlaying multiple tracks.\\ - \item[RGB-Float] - Allocates a 32\,bit float for the R, G, and B channels and no alpha. This is used for high dynamic range processing with no transparency. - \item[RGBA-Float] - This adds a 32\,bit float for alpha to RGB-Float. It is used for high dynamic range processing with transparency. Or when we don't want to lose data during workflow, for example in color correction, key extraction and motion tracking. \\ - \item[YUV-8 bit] - Allocates 8\,bits for Y, U, and V. 
This is used for low dynamic range operations in which the media is compressed in the YUV color space. Most compressed media is in YUV and this derivative allows video to be processed fast with the least color degradation. - \item[YUVA-8 bit] - Allocates an alpha channel to the 8\,bit YUV colormodel for transparency. - \end{description} - In order to do effects which involve alpha channels, a colormodel with an alpha channel must be selected. - These are RGBA-8 bit, YUVA-8 bit, and RGBA-Float. - The 4 channel colormodels are slower than 3\,channel colormodels, with the slowest being RGBA-Float. - Some effects, like fade, work around the need for alpha channels while other effects, like chromakey, require an alpha channel in order to be functional. - So in order to get faster results, it is always a good idea to try the effect without alpha channels to see if it works before settling on an alpha channel and slowing it down. - - When using compressed footage, YUV colormodels are usually faster than RGB colormodels. - They also destroy fewer colors than RGB colormodels. - If footage stored as JPEG or MPEG is processed many times in RGB, the colors will fade whereas they will not fade if processed in YUV. - Years of working with high dynamic range footage has shown floating point RGB to be the best format for high dynamic range. - 16 bit integers were used in the past and were too lossy and slow for the amount of improvement. - RGB float does not destroy information when used with YUV source footage and also supports brightness above 100\,\%. - Be aware that some effects, like Histogram, still clip above 100\,\% when in floating point. - - \item[Interlace mode:] - this is mostly obsolete in the modern digital age, but may be needed for older media such as that from broadcast TV. Interlacing uses two fields to create a frame. One field contains all odd-numbered lines in the image; the other contains all even-numbered lines. 
Interlaced fields are stored in alternating lines of interlaced source footage. The alternating lines missing on each output frame are interpolated. +\item[Tracks:] (in New Project menu only) sets the number of video +tracks the new project is assigned. Tracks can be added or deleted +later, but options are provided here for convenience. + +\item[Framerate:] sets the framerate of the video. The project +framerate does not have to be the same as an individual media file +frame rate that you load. Media is reframed to match the project +framerate. + +\item[Canvas size:] sets the size of the video output. In addition, +each track also has its own frame size. Initially, the New Project +dialog creates video tracks whose size match the video output. The +video track sizes can be changed later without changing the video +output. + +\item[Aspect ratio:] sets the aspect ratio; this aspect ratio refers +to the screen aspect ratio. The aspect ratio is applied to the +video output. The aspect ratio can be different than the ratio that +results from the formula: $\dfrac{h}{v}$ (the number of horizontal +pixels divided into the number of vertical pixels). If the aspect +ratio differs from the results of the formula above, your output +will be in non-square pixels. + +\item[Auto aspect ratio:] if this option is checked, the Set Format +dialog always recalculates the Aspect ratio setting based upon the +given Canvas size. This ensures pixels are always square. + +\item[Color model:] the internal color space of \CGG{} is X11 sRGB +without color profile. \CGG{} always switches to sRGB when applying +filters or using the compositing engine. Different case for +decoding/playback or encoding/output; the project will be stored in +the color model video that is selected in the dropdown. Color model +is important for video playback because video has the disadvantage +of being slow compared to audio. Video is stored on disk in one +colormodel, usually a YUV derivative. 
When played back, \CGG{} +decompresses it from the file format directly into the format of the +output device. If effects are processed, the program decompresses +the video into an intermediate colormodel first and then converts it +to the format of the output device. The selection of an +intermediate colormodel determines how fast and accurate the effects +are. A list of the current colormodel choices follows. + + \begin{description} + \item[RGB-8 bit] Allocates 8\,bits for the R, G, and B channels +and no alpha. This is normally used for uncompressed media with low +dynamic range. + \item[RGBA-8 bit] Allocates an alpha channel to the 8\,bit RGB +colormodel. It can be used for overlaying multiple tracks. + \item[RGB-Float] Allocates a 32\,bit float for the R, G, and B +channels and no alpha. This is used for high dynamic range +processing with no transparency. + \item[RGBA-Float] This adds a 32\,bit float for alpha to +RGB-Float. It is used for high dynamic range processing with +transparency. Or when we don't want to lose data during workflow, +for example in color correction, key extraction and motion +tracking. + \item[YUV-8 bit] Allocates 8\,bits for Y, U, and V. This is used +for low dynamic range operations in which the media is compressed in +the YUV color space. Most compressed media is in YUV and this +derivative allows video to be processed fast with the least color +degradation. + \item[YUVA-8 bit] Allocates an alpha channel to the 8\,bit YUV +colormodel for transparency. + \end{description} In order to do effects which involve alpha +channels, a colormodel with an alpha channel must be selected. +These are RGBA-8 bit, YUVA-8 bit, and RGBA-Float. The 4 channel +colormodels are slower than 3\,channel colormodels, with the slowest +being RGBA-Float. Some effects, like fade, work around the need for +alpha channels while other effects, like chromakey, require an alpha +channel in order to be functional. 
So in order to get faster +results, it is always a good idea to try the effect without alpha +channels to see if it works before settling on an alpha channel and +slowing it down. + + When using compressed footage, YUV colormodels are usually faster +than RGB colormodels. They also destroy fewer colors than RGB +colormodels. If footage stored as JPEG or MPEG is processed many +times in RGB, the colors will fade whereas they will not fade if +processed in YUV\@. Years of working with high dynamic range footage +has shown floating point RGB to be the best format for high dynamic +range. 16 bit integers were used in the past and were too lossy and +slow for the amount of improvement. RGB float does not destroy +information when used with YUV source footage and also supports +brightness above 100\,\%. Be aware that some effects, like +Histogram, still clip above 100\,\% when in floating point. + +\item[Interlace mode:] this is mostly obsolete in the modern digital +age, but may be needed for older media such as that from broadcast +TV\@. Interlacing uses two fields to create a frame. One field +contains all odd-numbered lines in the image; the other contains all +even-numbered lines. Interlaced fields are stored in alternating +lines of interlaced source footage. The alternating lines missing on +each output frame are interpolated. 
\end{description} - - - +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "../CinelerraGG_Manual" +%%% End: diff --git a/parts/Editing.tex b/parts/Editing.tex index 446f508..d7b791e 100644 --- a/parts/Editing.tex +++ b/parts/Editing.tex @@ -7,8 +7,8 @@ The timeline is where all editing decisions are made (figure~\ref{fig:timeline}) \begin{figure}[htpb] \centering - \includegraphics[width=0.8\linewidth]{timeline.png} - \caption{Timeline editing session using the upcoming Cinfinity theme.} + \includegraphics[width=1.0\linewidth]{timeline.png} + \caption{Timeline editing session} \label{fig:timeline} \end{figure} @@ -17,11 +17,15 @@ timeline. The active region is determined first by the presence of in/out point timeline. If those do not exist the highlighted region is used. To reiterate, \emph{highlighting} is done in \emph{cut and paste mode} by moving the insertion point with the mouse in the timeline -to where you want to start. Then hold down the LMB, drag the mouse to where you want -the end point to be and release the LMB. In \emph{drag and drop mode}, the method to create a highlighted +to where you want to start. Then hold down the LMB\@, drag the mouse to where you want +the end point to be and release the LMB\@. In \emph{drag and drop mode}, the method to create a highlighted selection is to hold down the Ctrl key and double click with the LMB with the mouse over that column. - If no highlighted region exists, the insertion point is used as the start of the active region. Some commands treat all the space to the right of the insertion point as active while others treat the active length as 0 if no end point for the active region is defined. +If no highlighted region exists, the insertion point is used as the +start of the active region. Some commands treat all the space to +the right of the insertion point as active while others treat the +active length as 0 (zero) if no end point for the active region is +defined. 
Most importantly, editing decisions never affect source material meaning that it is non-destructive editing. So not only does your original media stay completely untouched, it is much faster than if you had to copy all the media affected by an edit. Editing only affects pointers to source material, so if you want to have a new modified media file at the end of your editing session which represents the editing decisions, you need to render it. Saving and loading your edit decisions is explained in the Load, Save and the EDL section and rendering is explained in the section on Rendering. @@ -38,10 +42,14 @@ On the left of the timeline is a region known as the patchbay. The patchbay ena \item[Expander] which is a down arrow on the right side, is for viewing more options on the patchbay and for viewing the effects represented on the track. You can just click on the expander to expand or collapse the patchbay and the track. If it is pointing sideways, the track is collapsed. If it is pointing down, the track is expanded. Existing effects appear below the media for the track. \end{description} -\noindent Below the textbox name are several toggles referred to as \textit{attributes} for different features (currently there are 5 as shown in figure~\ref{fig:patchbay01}). If the toggle button is shadowed by a color, the feature is enabled . If the toggle is the background color of most of the window, it is disabled. Click -on the toggle to enable/disable the feature. +\noindent Below the textbox name are several toggles referred to as +\textit{attributes} for different features (currently there are 5 as +shown in figure~\ref{fig:patchbay01}). If the toggle button is +shadowed by a color, the feature is enabled. If the toggle is the +background color of most of the window, it is disabled. Click on the +toggle to enable/disable the feature. 
-\begin{wrapfigure}[15]{O}{0.3\linewidth} +\begin{wrapfigure}[12]{O}{0.3\linewidth} \vspace{-2ex} \centering \includegraphics[width=0.79\linewidth]{patchbay01.png} @@ -77,7 +85,7 @@ The \textit{attributes} are described here next. \begin{figure}[htpb] \centering - \includegraphics[width=0.7\linewidth]{overlay.png} + \includegraphics[width=0.65\linewidth]{overlay.png} \caption{Video Overlay, audio Pan and Nudge.} \label{fig:overlay} \end{figure} @@ -93,7 +101,7 @@ Several convenience functions are provided for automatically setting the panning \begin{description} \item[Audio$\rightarrow$Map 1:1] This maps every track to its own channel and wraps around when all the channels are allocated. It is most useful for making 2 tracks with 2 channels map to stereo and for making 6 tracks with 6 channels map to a 6 channel sound card. - \item[Audio$\rightarrow$Map 5.1:2] This maps 6 tracks to 2 channels. The project should have 2 channels when using this function. Go to \texttt{Settings $\rightarrow$ Format} to set the output channels to 2. This is most useful for down-mixing 5.1 audio to to stereo (for more information refer to Configuration, Settings and Preferences section \ref{sub:audio_out_section}). + \item[Audio$\rightarrow$Map 5.1:2] This maps 6 tracks to 2 channels. The project should have 2 channels when using this function. Go to \texttt{Settings $\rightarrow$ Format} to set the output channels to 2. This is most useful for down-mixing 5.1 audio to stereo (for more information refer to Configuration, Settings and Preferences section~\ref{sub:audio_out_section}). \end{description} \paragraph{Standard audio mappings} Although \CGG{} lets you map any audio track to any speaker, there are standard mappings you should use to ensure the media can be played back elsewhere. Also, most audio encoders require the audio tracks to be mapped to standard speaker numbers or they will not work. @@ -215,7 +223,7 @@ Using labels and In/Out points are useful in editing audio. 
You can set In/Out To obtain a clip on the timeline exactly as you saw in the Viewer, you must necessarily move the In mark back from the beginning before the first desired frame or move the Out mark forward after the last desired frame, depending on the \textit{Always show next frame} setting. -Some of the confusion can be attributed to the fact that the Viewer shows frames, while the markers determine spaces, i.e. times, that are not visible between frames. You have to think of each frame as being delimited by two spaces -- one preceding and one following. The In mark is always placed before the displayed frame and the Out mark is always placed after the displayed frame, while taking into account in its calculations whether the \textit{Always show next frame }option is used or not. If you just remember that the reference of the markers is in the middle of the icon, you will avoid confusion. +Some of the confusion can be attributed to the fact that the Viewer shows frames, while the markers determine spaces, i.e.\ times, that are not visible between frames. You have to think of each frame as being delimited by two spaces -- one preceding and one following. The In mark is always placed before the displayed frame and the Out mark is always placed after the displayed frame, while taking into account in its calculations whether the \textit{Always show next frame }option is used or not. If you just remember that the reference of the markers is in the middle of the icon, you will avoid confusion. \paragraph{Overwrite} To perform overwriting within the timeline paste on a selected region (highlighted or between In/Out points). The selected region will be overwritten. If the clip pasted from the clipboard @@ -231,7 +239,7 @@ pasted one after the other, keeping the same order they have on the stack. 
\paragraph{Split --- blade cut and hard edges:} You can cut the tracks into 2 pieces on the timeline by putting the hairline cursor on the place you want to do a cut and then using the character “x” or the scissors tool (figure~\ref{fig:cut}). \begin{wrapfigure}[16]{O}{0.3\linewidth} - \vspace{1ex} + \vspace{-2ex} \centering \includegraphics[width=0.9\linewidth]{cut.png} \caption{Blade cut} @@ -292,7 +300,7 @@ When an edit is marked as selected, it can be cut/copied into the paste clip buf The \textit{edits} popup is activated on a track and a red and yellow colored reticle appears to temporarily mark the location when you click on the middle mouse button. An expanded explanation is provided below. \begin{center} - \begin{tabular}{l p{12.9cm}} + \begin{tabular}{l p{11cm}} \toprule \textbf{Key} & \textbf{Operations} \\ \midrule @@ -303,7 +311,7 @@ The \textit{edits} popup is activated on a track and a red and yellow colored re \end{center} \begin{center} - \begin{longtable}{l l p{11cm}} + \begin{longtable}{l l p{9.4cm}} \toprule \textbf{Popup Label} & \textbf{Key} & \textbf{Operation} \\ \midrule \endhead @@ -467,7 +475,7 @@ you can operate the following buttons to display what you need to see and to mov Figure~\ref{fig:inter-view02} displays Inter-View window and its relation to the timeline, viewer, and compositor. 
\begin{figure}[ht] \centering - \includegraphics[width=0.9\linewidth]{inter-view02.png} + \includegraphics[width=1.0\linewidth]{inter-view02.png} \caption{Inter-View mode and the timeline} \label{fig:inter-view02} \end{figure} @@ -512,7 +520,7 @@ Instead of using the \# number on the main menu to close the current EDL, both t \begin{figure}[h] \centering \includegraphics[width=0.8\linewidth]{editing-img001.png} - \caption{Once you have an an Open EDL, there are 2 ways to close it.} + \caption{Once you have an Open EDL, the easiest way to close it.} \label{fig:open_edl} \end{figure} \relax @@ -591,22 +599,13 @@ Here is a step by step example of how you can use \textit{File by Reference}: timeline the changes you just made in the previous step. \end{enumerate} -\begin{comment} -\begin{figure}[htpb] - \centering - \includegraphics[width=0.6\linewidth]{lenght.png} - \caption{Edit Length window} - \label{fig:lenght} -\end{figure} -\end{comment} - \subsection{Edit Length}% \label{sub:edit-lenght} To set the length of an edit in the timeline, select the region which contains the edit to be modified. Now select the menu bar \texttt{Edit $\rightarrow$ Edit Length}\dots menu item to activate the \textit{edit length} popup (figure~\ref{fig:lenght}). The duration of the edit can be reset by entering the desired edit length in seconds. Pressing OK will change all of the selected edits (in armed tracks) to the specified length. \begin{figure}[htpb] \centering - \includegraphics[width=0.6\linewidth]{lenght.png} + \includegraphics[width=0.5\linewidth]{lenght.png} \caption{Edit Length window} \label{fig:lenght} \end{figure} @@ -630,7 +629,7 @@ alignment boundary time. Align Edits works best if there are an equal number of The first two screenshots in figure~\ref{fig:align} show the Before, the Highlighted Edits to be manipulated, and the After results for the Align Edits. The third screenshot \textit{adds silence} in the second section as noted in red letters. 
\begin{figure}[htpb] \centering - \includegraphics[width=0.8\linewidth]{align.png} + \includegraphics[width=1.0\linewidth]{align.png} \caption{Align edits} \label{fig:align} \end{figure} @@ -640,18 +639,18 @@ The first two screenshots in figure~\ref{fig:align} show the Before, the Highlig The Reverse Edits can be useful to change the order of 2 edits in the case where you would like to put a \textit{teaser} section that occurred in the middle of a movie at the beginning instead, that is, reversed positions. To operate, highlight completely the edit areas you would like reversed and then use the pulldown \texttt{Edit $\rightarrow$ Reverse Edits}. -Figure~\ref{fig:reverse01} shows the selected / highlighted area to which Edits will be applied. Note the first edit is 00002, followed by 00003, 00004, and 00005 in that order. +Figure~\ref{fig:reverse01} shows the selected / highlighted area to which Edits will be applied. Note the first edit is 0002, followed by 0003, 0004, and 0005 in that order. \begin{figure}[htpb] \centering - \includegraphics[width=0.8\linewidth]{reverse01.png} - \caption{Selected area for edits aligment} + \includegraphics[width=0.9\linewidth]{reverse01.png} + \caption{Selected area for Reverse Edits} \label{fig:reverse01} \end{figure} -Figure~\ref{fig:reverse02} shows the results of executing \textit{Reverse Edits}. Now you will see the reversed order of 00005, 00004, 00003, and last 00002. +Figure~\ref{fig:reverse02} shows the results of executing \textit{Reverse Edits}. Now you will see the reversed order of 0005, 0004, 0003, and last 0002. 
\begin{figure}[htpb] \centering - \includegraphics[width=0.8\linewidth]{reverse02.png} + \includegraphics[width=0.9\linewidth]{reverse02.png} \caption{Results of the Reverse Edits} \label{fig:reverse02} \end{figure} @@ -661,10 +660,10 @@ Figure~\ref{fig:reverse02} shows the results of executing \textit{Reverse Edits} The file pulldown \texttt{Edit $\rightarrow$ Shuffle Edits} will randomly exchange the location of the edits. This feature can be used to change the order of the music like you would do from your MP4 player where you have a playlist of your favorite music. Or perhaps you are creating an advertisement background, you can randomly change it, thus the viewer sees a different order of scenes each time shown. -Figure~\ref{fig:shuffle} illustrating Shuffle Edits of the highlighted area of the first screenshot on the page. Note the permutation of the fragments resulting in 00003 now being first, then 00005, 00002, and 00004 last. +Figure~\ref{fig:shuffle} illustrates Shuffle Edits of the highlighted area of the first screenshot on the page. Note the permutation of the fragments resulting in 0002 now being first, then 0004, 0003, and 0005 last. \begin{figure}[htpb] \centering - \includegraphics[width=0.8\linewidth]{shuffle.png} + \includegraphics[width=0.9\linewidth]{shuffle.png} \caption{Shuffle edits: the edits are permutated} \label{fig:shuffle} \end{figure} @@ -794,8 +793,8 @@ Now to use this feature, create a track with edits that have trims on the left a \paragraph{Cutting/Snapping edits} cuts from an edit handle to the insert point. There are Edit Panel buttons which normally are used to move to the previous or next edit handle/label. -\begin{wrapfigure}[5]{r}{0.2\linewidth} - \vspace{-1ex} +\begin{wrapfigure}[3]{r}{0.2\linewidth} + \vspace{-2ex} \centering \includegraphics[width=0.7\linewidth]{snap.png} \end{wrapfigure} @@ -824,7 +823,7 @@ The EDL session and the rendered output are visually equivalent.
Nested assets It is somewhat important to note that nested assets and nested clips will have index files automatically created. These index files can start to clutter up your \texttt{\$HOME/.bcast5} directory with files named \texttt{Nested\_\#\#\#.idx} and you may want to periodically delete any index files which are no longer in use. -\paragraph{Nested Clips} It is also possible to create \textit{clips} and convert them to \textit{nested edl}. This is done by first creating a clip using the standard cut, clipboard, paste, and/or edit panel buttons. Now, using the resources \textit{clip} folder, select a clip to be nested, and use the right mouse button to select a clip. This activates the clip popup menu. Select the \textit{Nest to media} menu item, and the clip will be converted to a \textit{Nested: Clip}. Conversely, you can select a \textit{Nested: Clip}, use the \textit{EDL to clip} menu item, and the clip will be reverted to a \textit{Clip}. This works similarly to the group / un-group editing features of many graphic design editing programs, but in this case the groups are rendered compositions (figure~\ref{fig:nesting}). +\paragraph{Nested Clips} It is also possible to create \textit{clips} and convert them to \textit{nested edl}. This is done by first creating a clip using the standard cut, clipboard, paste, and/or edit panel buttons. Now, using the resources \textit{clip} folder, select a clip to be nested, and use the right mouse button to select a clip. This activates the clip popup menu. Select the \textit{Nest to media} menu item, and the clip will be converted to a \textit{Nested: Clip} and put in Media folder. Conversely, you can select a \textit{Nested: Clip}, use the \textit{EDL to clip} menu item, and the clip will be reverted to a \textit{Clip}. This works similarly to the group / un-group editing features of many graphic design editing programs, but in this case the groups are rendered compositions (figure~\ref{fig:nesting}). 
Nested clips can be proxied and when they are, the resulting files are placed in the user's \$HOME/Videos directory by default. This can be modified by changing @@ -832,7 +831,7 @@ are placed in the user's \$HOME/Videos directory by default. This can be modifi \texttt{Settings $\rightarrow$ Preferences $\rightarrow$ Interface} tab, Nested Proxy Path. \begin{figure}[htpb] \centering - \includegraphics[width=0.9\linewidth]{nesting.png} + \includegraphics[width=1.0\linewidth]{nesting.png} \caption{Nested clips in Timeline and Resources window} \label{fig:nesting} \end{figure} @@ -853,7 +852,7 @@ nesting display a clip without having to actually use the Render menu. Now you can add a Reverse effect, Color3way plugin for black and white, and use the Speed auto to get the 60 seconds down to only 10 seconds. \item[Example 2:] You are working on a complex project with a team in a separate -location. You create some sub projects, i.e. sequences, that you or the +location. You create some sub projects, i.e.\ sequences, that you or the team will use in the Master project to merge the sequences in the right order and to make the final color correction steps. \end{description} @@ -924,7 +923,7 @@ A default shuttlerc file is automatically used when a shuttle device is plugged \subsection{How to Modify the Default Key Settings}% \label{sub:modify_default_key_settings} -Detailed information on how to modify your local .shuttlerc file is described next, but if you need help you can request more information in the forum at {\small \url{https://cinelerra-gg.org}}. In the \texttt{shuttlerc} file, a \# always represents a comment and blank lines are ignored. The first thing you must do is copy the system supplied \texttt{shuttlerc} file to your \texttt{\$HOME} directory and rename it as \texttt{.shuttlerc} (with a period). 
+Detailed information on how to modify your local \texttt{.shuttlerc} file is described next, but if you need help you can request more information in the forum at {\small \url{https://cinelerra-gg.org}}. In the \texttt{shuttlerc} file, a \# always represents a comment and blank lines are ignored. The first thing you must do is copy the system supplied \texttt{shuttlerc} file to your \texttt{\$HOME} directory and rename it as \texttt{.shuttlerc} (with a period). The \texttt{shuttlerc} file has sections that in the case of \CGG{}, represent different windows allowing you to set the keys, K1-K15 for the Pro and K5-K9 for the Xpress, the shuttle wheel positions of S0/S1/S-1 for stop, S2 through S7 for wheeling to the right, and S-7 through S-2 for wheeling to the left for reverse. Then there is JR to jog right (clockwise) and JL to jog left (counter-clockwise) for the inner smaller wheel for single frame movement. See the key arrangement on a later page for location of the keys for each of the two different shuttles. @@ -1216,3 +1215,9 @@ The following is the default setting for the ShuttlePROv2 and ShuttleXpress (tab \bottomrule \end{tabular}} \end{table} + + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "../CinelerraGG_Manual" +%%% End: diff --git a/parts/FFmpeg.tex b/parts/FFmpeg.tex index 42aa0cd..5a0ce7d 100644 --- a/parts/FFmpeg.tex +++ b/parts/FFmpeg.tex @@ -18,7 +18,7 @@ ffmpeg probes late and it reads \textit{Currently: Try FFMpeg last}. The initia the icon is on, that is, ffmpeg probes first. Suggestion is to leave it on except in a few special cases where it may be better to have early probes disabled. When you mouse over the main menu FF toggle button, the text displays ffmpeg's \textit{Currently} set position. Just left mouse click to change to the other setting. -The ffmpeg early probe state is saved between sessions and is also affected by choices made in Probe Order (refer to section \ref{sub:probe_order_loading_media}). 
It is important to note that the various file indexes may need to be rebuilt if you change which codec is being used to decode the file. There is a warning popup to remind you when you change the default ffmpeg early probe state (unless you have checked the box to no longer show the warning). You can easily rebuild the index for a specific media file by going to the Resources window, right mouse click on that media, and choose \texttt{Rebuild Index} from the popup choices. +The ffmpeg early probe state is saved between sessions and is also affected by choices made in Probe Order (refer to section~\ref{sub:probe_order_loading_media}). It is important to note that the various file indexes may need to be rebuilt if you change which codec is being used to decode the file. There is a warning popup to remind you when you change the default ffmpeg early probe state (unless you have checked the box to no longer show the warning). You can easily rebuild the index for a specific media file by going to the Resources window, right mouse click on that media, and choose \texttt{Rebuild Index} from the popup choices. Figure~\ref{fig:ff} shows (1) reddish colored FF in upper right hand corner of main window indicating that ffmpeg early probes is enabled; (2) \textit{Try FFMpeg last} indicator message for ffmpeg early probes enabled (note that the color is different because you highlighted the icon); and (3) black colored FF indicates ffmpeg will be used last and you are changing the behavior so that \CGG{} warns you accordingly. @@ -112,8 +112,8 @@ There are 4 special id's recognized by \CGG{} which cause special processing. T \begin{description} \item[duration] overrides the probe duration when opening media for decoding - \item[video\_filter] adds a video stream filter, eg. edgedetect,\dots at the stream level - \item[audio\_filter] adds an audio stream filter, eg. 
echo,\dots at the stream level + \item[video\_filter] adds a video stream filter, e.g.\ edgedetect,\dots at the stream level + \item[audio\_filter] adds an audio stream filter, e.g.\ echo,\dots at the stream level + \item[loglevel] sets the library logging level, as quiet, panic, \dots verbose, debug \end{description} @@ -277,7 +277,20 @@ Then to use and to get 10 bit depth and preserve depth from decode to encode: There are thousands of options for using ffmpeg. Now it is possible to \textit{view} the available options for a particular video and audio choice by using the \textit{wrench icon} and then clicking on the \textit{view} box. FFmpeg has to be the selected file format for this feature to be visible. It makes it a lot easier since only the applicable options show up as opposed to everything that ffmpeg can do. These options are just \textit{Hints} and some may be missing due to the way that ffmpeg options are coded -- \CGG{} shows the option data ffmpeg has exposed. -As an example, instead of reading the entire 264 library information, you only have to look at the shown available options. Both the video and the audio are browsable. The options visible in the \textit{Audio/Video Preset} textbox are the final values which are used when rendering once you have checked OK. For assistance in choosing the options you want, use the view popup to see the objects that go with the selected format tool, highlight the option, modify the parameter value in the box at the top of the \textit{Options} window based on what you want, and then click apply. Updated parameter values or new parameters will be appended at the bottom. Note that when you highlight an option, a tooltip will show up when available in the lower right hand corner which describes the option. Also note that the Format and Codec types are shown on the top line of the Options window. +As an example, instead of reading the entire 264 library +information, you only have to look at the shown available options.
+Both the video and the audio are browsable. The options visible in +the \textit{Audio/Video Preset} textbox are the final values which +are used when rendering once you have checked OK\@. For assistance +in choosing the options you want, use the view popup to see the +objects that go with the selected format tool, highlight the option, +modify the parameter value in the box at the top of the +\textit{Options} window based on what you want, and then click +apply. Updated parameter values or new parameters will be appended +at the bottom. Note that when you highlight an option, a tooltip +will show up when available in the lower right hand corner which +describes the option. Also note that the Format and Codec types are +shown on the top line of the Options window. Parameters exist in 3 layers: ffmpeg, codec, and an interface layer. You can apply parameters to each layer. The top 2 layers are accessed with the Kind popup menu. The ffmpeg layer is topmost, and is selected as Kind: ffmpeg. It can specify many of the more common parameters, such as the bitrate, quality, and so on. The middle layer is selected as Kind: codec. These options can specialize your choices, and frequently includes presets and profiles useful for coding well known parameter sets, like \textit{profile=high422}, \textit{preset=medium}, or \textit{tune=film}, etc. The interface layer may or may not be available. It is usually accessible only by an \textit{opts} parameter, like \texttt{x264-params key=value:key=value}:\dots These options are passed directly to the low level codec library. diff --git a/parts/Installation.tex b/parts/Installation.tex index 958e88b..09d83c6 100644 --- a/parts/Installation.tex +++ b/parts/Installation.tex @@ -382,7 +382,7 @@ done \label{sub:cloning_the_repository_for_faster_updates} If you want to avoid downloading the software every time an update -is available you need to create a local "repository" or repo. 
The +is available you need to create a local ``repository'' or repo. The repo is a directory where you first do a \texttt{git clone}. For the initial git clone, set up a local area for the repository storage, referred to as \texttt{}. The \texttt{git @@ -395,7 +395,7 @@ repo path is always a perfect clone of the main repo. \paragraph{Setting up the initial clone}% \label{par:setting_up_the_initial_clone} -You may want to add "\texttt{ -{}-depth 1}" before \texttt{cin5} +You may want to add ``\verb|--depth 1|'' before \texttt{cin5} because this will clone faster and is smaller, but has no history. \begin{lstlisting}[style=sh] @@ -584,8 +584,7 @@ yourself, there are pre-built dynamic or static binaries for various versions of Ubuntu, Mint, Suse, Fedora, Debian, Centos, Arch, and Slackware linux as well as Gentoo and FreeBSD. % -A Windows 10 version installation is described in -\ref{sec:ms_windows10}. There are also 32-bit i686 Ubuntu, Debian, +A Windows 10 version installation is described in~\ref{sec:ms_windows10}. There are also 32-bit i686 Ubuntu, Debian, and Slackware versions available. These are updated on a fairly regular basis as long as significant code changes have been made. They are in subdirectories of: @@ -628,7 +627,7 @@ the file \texttt{md5sum.txt} to ensure the channel correctly transmits the package. There is a \href{https://cinelerra-gg.org/download/README.pkgs}{README.pkgs} file in the \texttt{download} directory with instructions so you -can \textit{ cut and paste} and avoid typos; it is also shown +can \textit{cut and paste} and avoid typos; it is also shown next. % FIXME (!) It doesn't work that way. The text is set as it is @@ -827,7 +826,7 @@ is Defender). Below are the steps for installation: \item Choose your desired directory by clicking on Browse button. Choose \textit{All Users (Recommended)} and then click \textit{Next}. \item Choose the local package directory where you would like your installation files to be placed. 
Click \textit{Next}. \item Choose \textit{Direct Connection} if you are using Internet with plug and play device. Click \textit{Next}. - \item Choose any download site preferably "cygwin.mirror.constant.com" and then click \textit{Next}. + \item Choose any download site preferably ``cygwin.mirror.constant.com'' and then click \textit{Next}. \item For list of things to install, leave all set to \textit{Default} except these to \textit{Install} instead: \begin{tabular}{ll} @@ -955,7 +954,7 @@ Running gdb from inside a desktop resident console (not a cygwin64 window) will There are also some special complete distribution systems available that include \CGG{} for audio and video production capabilities. \subsection{AV Linux} -\label{sec:AV Linux} +\label{sec:AV_Linux} \textbf{AV Linux} is a downloadable/installable shared snapshot ISO image based on Debian. It provides the user an easy method to @@ -967,7 +966,7 @@ Click here for the \href{http://www.bandshed.net/avlinux/}{homepage of AV Linux}. \subsection{Bodhi Linux} -\label{sec:Bodhi Linux} +\label{sec:Bodhi_Linux} \textbf{Bodhi Linux Media} is a free and open source distribution that comes with a curated list of open source software for digital diff --git a/parts/Keyframes.tex b/parts/Keyframes.tex index 8cefa99..a43e4e5 100644 --- a/parts/Keyframes.tex +++ b/parts/Keyframes.tex @@ -3,15 +3,19 @@ The word \textit{keyframe} has at least 3 contextual meanings in the NLE environment. First, the oldest meaning, is the \textit{I-Frame} definition used in codecs algorithms. These are \textit{key} frames that begin a new sequence of pictures, and are anchor points for repositioning (seeks). Next are the automation parameter data points. These are usually input to primitive math forms, like translation and zoom. And last are blobs of data that are chunks of parameters to plugins that can do almost anything. 
The data can be a simple value, like a fader value, or more complex like a group of points and colors in a sketcher plugin keyframe. The word keyframe has changed a lot in meaning. In the context of \CGG{}, keyframes are data values that have been associated to the timeline which affect the media presentation. So a keyframe no longer refers to a frame, but to a position on the timeline. -In \CGG{}, there are two general types of keyframe data, \textit{automation keyfra\-mes} (autos) which are drawn as colored lines and box icons overlayed at a point on a media track, and \textit{plugin keyframes} which are drawn as gold key symbols on a plugin bar of a track. \quad -\includegraphics[height=\baselineskip]{auto.png} -Auto $\leftarrow$ Keyframe $\rightarrow$ -Plugin \includegraphics[height=\baselineskip]{plugin.png} - -\section{Automation Keyframes / Autos}% +In \CGG{}, there are two general types of keyframe data, +\textit{automation keyframes} (autos) which are drawn as colored +lines and box icons overlayed at a point on a media track, and +\textit{plugin keyframes} which are drawn as gold key symbols on a +plugin bar of a +track. \includegraphics[height=\baselineskip]{auto.png} Auto +$\leftarrow$ Keyframe $\rightarrow$ Plugin +\includegraphics[height=\baselineskip]{plugin.png} + +\section{Automation Keyframes\,/\,Autos}% \label{sec:automation_keyframes_autos} -The \textit{autos} are created by clicking on an \textit{automation curve} to establish the time position for the new keyframe anchor point. The basic nature of these simple auto values make them primitive operations that are easy to apply when needed. +The \textit{autos} are created by clicking on an \textit{automation curve} to establish the time position for the new keyframe anchor point. The basic nature of these simple auto values makes them primitive operations that are easy to apply when needed. There are many automation curve types, and most are not normally visible or clickable.
To make them visible, use the \texttt{View} pulldown, or open the \texttt{Window $\rightarrow$ Show Overlays}. This window allows toggling of the parameters in the View pulldown but is more convenient because you can leave the window up to change values quickly. If all of the automation curves are turned on, the timeline will be quite cluttered, and so usually only the parameters of interest are enabled during use. When keyframes are selected, they are drawn on the timeline over the tracks to which they apply. The keyframe is represented on the timeline as a little square on the curve, for example as in fade, or as a symbol as in a mask. This square, timeline attachment point, can be used for positioning by clicking on a keyframe anchor and using drag and drop to set the new position. @@ -19,7 +23,7 @@ The automation keyframes include: mute/play audio; camera translation x,y and zoom; projector translation x,y and zoom; fade blending; audio panning; overlay mode; mask point sets and sampling speed. -Except for the mask auto, the values are all simple numbers. Mute is different from the other autos in that it is simply a toggle of either on or off. Mute keyframes determine where the track is processed but not rendered to the output. An example usage would be to use auto keyframes to fade in a clip by setting the transparency to $100\%$ at the first keyframe and adding another keyframe 5 seconds later in the timeline with a transparency of $0\%$. +Except for the mask auto, the values are all simple numbers. Mute is different from the other autos in that it is simply a toggle of either on or off. Mute keyframes determine where the track is processed but not rendered to the output. An example usage would be to use auto keyframes to fade in a clip by setting the transparency to $100\%$ at the first keyframe and adding another keyframe 5 seconds later in the timeline with a transparency of $0\%$. 
The Keyframes pulldown on the main timeline is used for Cut, Copy, Paste, Clear, Change to linear, Change to smooth, Create curve type of Smooth, Linear, Tangent, or Disjoint, Copy default keyframe or Paste default keyframe. If you right click on a curve keyframe on the timeline, a set of options popup including the choices \textit{keyframe type} (such as Fade, Speed, etc.), Hide keyframe type, Delete keyframe, Copy keyframe, smooth curve, linear segments, tangent edit, or disjoint edit. @@ -62,7 +66,7 @@ To make it easier to navigate curve keyframes, since there is not much room on t \end{enumerate} \end{itemize} -You can click mouse button 3 on a keyframe box and a menu pops up with the first menu item showing the keyframe type. The top menu item can be activated for immediate access to update the automation keyframe value. Some keyframe types, which have values that can be manipulated in another way than by dragging the color coded line, now show up with a different colored background to make them more visible. Keep in mind that Zoombar ranges/values must be set to appropriate values when working with specific keyframe types, such as Fade or Speed. If you do not see the auto line in the visible area of the video track, try the key combination Alt-f or select the speed in the \textit{Automation Type} drop-down menu at the bottom of the main window. To the right of this field is \textit{Automation Range} where you can set the display ratio of these lines. Simply change the values until the lines are visible again. +You can click mouse button 3 on a keyframe box and a menu pops up with the first menu item showing the keyframe type. The top menu item can be activated for immediate access to update the automation keyframe value. Some keyframe types, which have values that can be manipulated in another way than by dragging the color coded line, now show up with a different colored background to make them more visible. 
Keep in mind that Zoombar ranges/values must be set to appropriate values when working with specific keyframe types, such as Fade or Speed. If you do not see the auto line in the visible area of the video track, try the key combination Alt-f or select the speed in the \textit{Automation Type} drop-down menu at the bottom of the main window. To the right of this field is \textit{Automation Range} where you can set the display ratio of these lines. Simply change the values until the lines are visible again. Figure~\ref{fig:overlays1} and figure~\ref{fig:fade} shows several color coded lines for different key\-fra\-mes and specifically the slider bar for the Fade keyframe. It is in the same color as the color coded keyframe type line which is the same color which would be shown in the \textit{Show overlays} window figure~\ref{fig:overlays_window}. @@ -95,7 +99,7 @@ subsequently adjusting that slope (figure~\ref{fig:controls}). To modify a curr you just right mouse it and change to either Tangent or Disjoint edit. In the screencast to the right, the Fade Auto has pink colored curves and control points are seen as dashed lines next to the keyframe box with black filled circles on each end of the line. Use the Ctrl key with the left mouse button to modify the control point lines. -\section{Speed / Fade Automation Usage and Auto Gang}% +\section{Speed\,/\,Fade Automation Usage and Auto Gang}% \label{sec:speed_fade_automation_gang} Speed automation resamples the data at a higher or lower playback rate. Speed automation can operate @@ -152,10 +156,10 @@ all sessions. The intent is to make a parameter set that is likely to be reused It may be useful to create a default keyframe which has specific desirable values for later use. To do this, set the timeline to position 0 and be sure to disable \textit{generate keyframes while tweaking}. This will create a default keyframe at the beginning of the timeline which contains global parameters for the entire duration. 
Or if you have copied a non-default keyframe via Keyframes pulldown \textit{copy default keyframe}, it can be stored as the default keyframe by calling \texttt{keyframes $\rightarrow$ paste default keyframe}. After using paste default keyframe to convert a non-default keyframe into a default keyframe, you will not see the value of the default keyframe reflected until all the non-default keyframes are removed. -The \texttt{keyframes $\rightarrow$ copy default keyframe} and \texttt{keyframes} $\rightarrow$ \texttt{paste} {\texttt{de\-fault keyframe} allow conversion of the default keyframe to a non-default keyframe. +The \texttt{keyframes $\rightarrow$ copy default keyframe} and \texttt{keyframes} $\rightarrow$ \texttt{paste} \texttt{default keyframe} allow conversion of the default keyframe to a non-default keyframe. -\texttt{Keyframes $\rightarrow$ copy default keyframe} copies the default keyframe to the clipboard, no matter what region of the timeline is selected. -The \texttt{keyframes $\rightarrow$ paste} \texttt{keyframes} function may then be used to paste the clipboard as a non-default keyframe. +\texttt{Keyframes $\rightarrow$ copy default keyframe} copies the default keyframe to the clipboard, no matter what region of the timeline is selected. +The \texttt{keyframes $\rightarrow$ paste} \texttt{keyframes} function may then be used to paste the clipboard as a non-default keyframe. \textit{Typeless keyframes} enabled under the Settings pulldown allow keyframes from any track to be pasted on either audio or video tracks. Ordinarily audio keyframes can only be pasted to another audio track and video keyframes can only be pasted to another video track. @@ -190,7 +194,7 @@ Image translation, motion direction, and speed determine the results. Motion va \section{More about Editing Keyframes}% \label{sec:more_about_editing_keyframes} -Keyframes can be shifted around and moved between tracks on the timeline using similar cut and paste operations to editing media. 
Only the keyframes selected in the View menu are affected by keyframe editing operations. +Keyframes can be shifted around and moved between tracks on the timeline using similar cut and paste operations to editing media. Only the keyframes selected in the View menu are affected by keyframe editing operations. An often used keyframe editing operation is replication of some curve from one track to the other to make a stereo pair. The first step is to solo the source track's record patch by Shift-clicking on the \textit{arm track} icon in the patchbay. Then either set In/Out points or highlight the desired region of keyframes. Go to \texttt{keyframes $\rightarrow$ copy keyframes} to copy them to the clipboard. Solo the destination track's record patch by Shift-clicking on it and go to \texttt{keyframes $\rightarrow$ paste keyframes} to paste the clipboard. Another common application for keyframe modification is to highlight a region on the timeline which contains multiple keyframes that you want to modify. Then when you adjust a parameter or set of parameters, the change will be applied to all keyframes within the selection instead of a new keyframe being created. This only works when the keyframe stores multiple parameters and only for mask and effect keyframes. Other types of keyframes are generated as usual. @@ -200,3 +204,9 @@ And there is an easy way to delete keyframes besides selecting a region and usin \label{sec:allow_keyframes_spanning} When you create a drag selection and you modify a value in a plugin then everything in the selection gets modified the same. It uses the previous keyframe and if there is no previous keyframe, then the default keyframe in your \texttt{\$HOME/.bcast5} definitions is used.
+ + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "../CinelerraGG_Manual" +%%% End: diff --git a/parts/Loadandsave.tex b/parts/Loadandsave.tex index c80d7c3..1104921 100644 --- a/parts/Loadandsave.tex +++ b/parts/Loadandsave.tex @@ -15,9 +15,9 @@ The EDL contains all the project settings and locations of every edit. Instead of media, the file contains pointers to the original media files on disk. EDL files are specific to \CGG{}. -The EDL files generally have an extension of .xml. +The EDL files generally have an extension of \texttt{.xml}. The purpose of the EDL is to separate the media from all of the editing operations so that the original media remains intact. -When the .xml file is loaded, changes to the attributes of the current project are made based on the EDL. Edit decision lists are text files which means they can be edited in a text editor. EDL and XML are used interchangeably. +When the \texttt{.xml} file is loaded, changes to the attributes of the current project are made based on the EDL\@. Edit decision lists are text files which means they can be edited in a text editor. EDL and XML are used interchangeably. \section{Supported File Formats}% \label{sec:supported_file_formats} @@ -41,7 +41,7 @@ What is an MPEG file? A very common file format is MPEG because it works with m \subsection{Working with Still Images}% \label{sub:working_with_still_images} -Still images are played from 1 to any number of times, over and over; they have no duration. You can load still images on video tracks just like you do for any video file. When loaded on the track, use the down arrow on the timeline so you can see the single frame. To extend the length of the image, drag its boundaries just as you would do with regular video media. You can drag the boundaries of a still image as much as you want. Images in \CGG{} have the ability to be dragged to an infinite length. Alternatively, you can define the initial length of the loaded images. 
The parameter is set in the Images section of the \texttt{Settings $\rightarrow$ ~Preferences $\rightarrow$ ~Recording} window. +Still images are played from 1 to any number of times, over and over; they have no duration. You can load still images on video tracks just like you do for any video file. When loaded on the track, use the down arrow on the timeline so you can see the single frame. To extend the length of the image, drag its boundaries just as you would do with regular video media. You can drag the boundaries of a still image as much as you want. Images in \CGG{} have the ability to be dragged to an infinite length. Alternatively, you can define the initial length of the loaded images. The parameter is set in the Images section of the \texttt{Settings $\rightarrow$ Preferences $\rightarrow$ Recording} window. Unless your original material comes from a digital source using its best resolution (like a digital camera), the first thing you might have to do before you can use it is to somehow capture the assets into a usable digital medium. For old photos, paper maps, drawings or diagrams, you can scan them into a file format like PNG, TIF, TGA or JPG files by using a digital scanner. @@ -83,7 +83,7 @@ jpeglist.sh //file.jpg //DSC*.jpg An example output file from running this script residing in the directory where \texttt{DSC*.jpg} files exist is shown below. -To use this, turn off ffmpeg probes first, and open \texttt{timelapse.jpg} using File ~$\rightarrow$ ~Load files. +To use this, turn off ffmpeg probes first, and open \texttt{timelapse.jpg} using File $\rightarrow$ Load files. \begin{lstlisting}[style=sh,caption={Example: timelapse.jpg},captionpos=t] JPEGLIST @@ -133,9 +133,9 @@ This will access the media using ffmpeg which is slower so be patient. \hspace{4em} {\small \url{https://www.cybercom.net/~dcoffin/dcraw/}} -For example, included is the Canon Powershot SX60 (newly available in August, 2014). 
Because ffmpeg tries to load \textit{any and every} file if \textit{Try Ffmpeg first} is enabled. it will make an attempt to load Raw Camera files first before any other file driver gets the chance. In addition, there is the possibility that dcraw could conflict with the standard TIFF format, since it might be seen as format type \textit{tiff-pipe}. Therefore it is necessary to specifically enable CR2 and either move it to the top or disable \textit{FFMPEG\_Early} and enable \textit{FFMPEG\_late} in the \textit{Probe Order} as described in another section (\ref{sub:probe_order_loading_media} and \ref{sec:ffmpeg_early_probe_explanation}). These changed settings will be retained across \CGG{} sessions in .\texttt{bcast5}. Raw Camera mode is most likely going to be used by expert camera users. +For example, included is the Canon Powershot SX60 (newly available in August, 2014). Because ffmpeg tries to load \textit{any and every} file if \textit{Try Ffmpeg first} is enabled. It will make an attempt to load Raw Camera files first before any other file driver gets the chance. In addition, there is the possibility that dcraw could conflict with the standard TIFF format, since it might be seen as format type \textit{tiff-pipe}. Therefore it is necessary to specifically enable CR2 and either move it to the top or disable \textit{FFMPEG\_Early} and enable \textit{FFMPEG\_late} in the \textit{Probe Order} as described in another section (\ref{sub:probe_order_loading_media} and \ref{sec:ffmpeg_early_probe_explanation}). These changed settings will be retained across \CGG{} sessions in \texttt{.bcast5}. Raw Camera mode is most likely going to be used by expert camera users. -The first screenshot in figure~\ref{fig:raw} as in \texttt{Settings $\rightarrow$ ~Preferences $\rightarrow$ ~Playback A} Tab, shows the default checked settings of \textit{Interpolate CR2 images} and \textit{White balance CR2 images} which display the raw images in a way that you expect. 
However, you may want to uncheck them to ensure that no program manipulation has modified your images so that you can add plugins or make your own modifications. Unchecked indicates that the images are as closest as possible to unadulterated raw. +The first screenshot in figure~\ref{fig:raw} as in \texttt{Settings $\rightarrow$ Preferences $\rightarrow$ Playback A} Tab, shows the default checked settings of \textit{Interpolate CR2 images} and \textit{White balance CR2 images} which display the raw images in a way that you expect. However, you may want to uncheck them to ensure that no program manipulation has modified your images so that you can add plugins or make your own modifications. Unchecked indicates that the images are as closest as possible to unadulterated raw. The second screenshot showing CR2 for Raw Camera highlighed/enabled in the Preferences Probes’ screen. @@ -183,7 +183,7 @@ All data that you work with in \CGG{} is acquired either by loading from disk or \CGG{} lets you change what happens when you load a file. In the Load dialog window go to the Insertion strategy box and select one of the options in the drop down menu. Each of these options loads the file a different way. \begin{description} - \item [Replace current project:] all tracks in the current project are deleted and a set of new tracks are created to match the source file. Project attributes are only changed when loading XML. If multiple files are selected for loading, \CGG{} adds a set of new tracks for each file. New resources are created in the Resources Window, replacing the current ones. + \item [Replace current project:] all tracks in the current project are deleted and a set of new tracks are created to match the source file. Project attributes are only changed when loading XML\@. If multiple files are selected for loading, \CGG{} adds a set of new tracks for each file. New resources are created in the Resources Window, replacing the current ones. 
\item [Replace current project and concatenate tracks: ] same as replace current project, except that if multiple files are selected, \CGG{} will concatenate the tracks of each file, inserting different source files in the same set of tracks, one after another, in alphanumeric order, starting at 0. New resources are created in the Resources Window, replacing the current ones. Files go across the timeline. \end{description} For ffmpeg and mpeg files, when the Insertion strategy methodology in the \texttt{File $\rightarrow$ Load files} pulldown is chosen to be either \textit{Replace current project} or \textit{Replace current project and concatenate tracks}, the basic session format parameters are reinitialized to match new media. This selects the default asset and determines its width, height, and video length, frame rate, calculates the colormodel, and assumes square pixels to make an intelligent guess about aspect ratio for video. For audio, the sample rate, audio length, and channel count (mono, stereo, or 5.1) are reinitialized. In addition the \textit{Track Size} will be computed and is reinitialized to match the new loaded media. When using \textit{replace} type insertion strategy, the new asset list is the only media in use so that this update saves the user from immediately needing to change the session format to match the only possibility. @@ -279,7 +279,7 @@ The order change will not take effect until you click on the checkmark in both t \subsection{Program Selection Support after Load}% \label{sub:program_selection_support_load} -Some kinds of media have \textit{program} streams, like captured mpeg broadcast stream data. For example, you may be able to \textit{tune} to channel 9, but be able to see 9-1, 9-2, and 9-3 on your TV. If you open a capture of this kind of media, all of the channels are present in the timeline. To select and view just one program, you can use Alt-1 to select program 1, or Alt-2 to select program 2, etc. up to Alt-8. 
This will remove all of the other unrelated tracks and reset the format. This feature can be used even if there is only one program, by pressing Alt-1, and the effect will be to reset the session format to the parameters from the media probe. Note that there may be several audio \textit{programs} associated to a video stream; +Some kinds of media have \textit{program} streams, like captured mpeg broadcast stream data. For example, you may be able to \textit{tune} to channel 9, but be able to see 9-1, 9-2, and 9-3 on your TV\@. If you open a capture of this kind of media, all of the channels are present in the timeline. To select and view just one program, you can use Alt-1 to select program 1, or Alt-2 to select program 2, etc.\ up to Alt-8. This will remove all of the other unrelated tracks and reset the format. This feature can be used even if there is only one program, by pressing Alt-1, and the effect will be to reset the session format to the parameters from the media probe. Note that there may be several audio \textit{programs} associated to a video stream; for example, there may be dialog in another language or some kind of descriptive dialog. Since the first associated audio is always selected, this may not produce the intended results. \begin{figure}[htpb] @@ -299,7 +299,7 @@ You can save your work as a project, which is what is loaded in \CGG{} now, or a \subsection{Saving Project Files}% \label{sub:saving_project_files} -Saving XML files is useful to save the current state of \CGG{} before quitting an editing session. \CGG{} saves projects as XML files. There are a few options you can use to save your work via the File pulldown menu: \textit{Save}, \textit{Save as\dots}, \textit{Export project}, \textit{Save backup}. You can either overwrite an existing file or enter a new filename. \CGG{} automatically concatenates .xml to the filename if no .xml extension is given. 
+Saving XML files is useful to save the current state of \CGG{} before quitting an editing session. \CGG{} saves projects as XML files. There are a few options you can use to save your work via the File pulldown menu: \textit{Save}, \textit{Save as\dots}, \textit{Export project}, \textit{Save backup}. You can either overwrite an existing file or enter a new filename. \CGG{} automatically concatenates \texttt{.xml} to the filename if no \texttt{.xml} extension is given. When \CGG{} saves a file, it saves the EDL of the current project but does not save any media, instead just pointers to the original media files. For each media file, the XML file stores either an absolute path or just the relative path. If the media is in the same directory as the XML file, a relative path is saved. If it is in a different directory, an absolute path is saved. @@ -314,10 +314,6 @@ Real-time effects in an XML file have to be re-created every time you play it ba \subsection{Export Project – Save or Moving Project to another Computer}% \label{sub:export_project} -A File pulldown called \textit{Export Project\dots} is also available (figure~\ref{fig:export}). Although, it can be used in the same manner as the other \textit{save} options, it is very useful when it is necessary to move a project to another computer that may have a different top level directory structure or if you want to include subdirectories to better organize your files. - -Originally, the easiest way to maintain a project for moving to another computer, was to put all of the files in a single directory with no subdirectories along with the EDL saved .xml file. This is commonly called a \textit{flat} file structure. So if the media was in the same directory as the XML file, a relative path was saved. If it was in a different directory, an absolute path was saved. 
- \begin{figure}[htpb] \centering \includegraphics[width=0.6\linewidth]{export.png} @@ -325,6 +321,10 @@ Originally, the easiest way to maintain a project for moving to another computer \label{fig:export} \end{figure} +A File pulldown called \textit{Export Project\dots} is also available (figure~\ref{fig:export}). Although, it can be used in the same manner as the other \textit{save} options, it is very useful when it is necessary to move a project to another computer that may have a different top level directory structure or if you want to include subdirectories to better organize your files. + +Originally, the easiest way to maintain a project for moving to another computer, was to put all of the files in a single directory with no subdirectories along with the EDL saved \texttt{.xml} file. This is commonly called a \textit{flat} file structure. So if the media was in the same directory as the XML file, a relative path was saved. If it was in a different directory, an absolute path was saved. + \noindent Definition of Fields: \begin{description} @@ -380,3 +380,8 @@ Some notes to keep in mind about Perpetual session are: \item to start \CGG{} without using your Perpetual session data even if enabled, use your\_cinelerra\_path\texttt{/cin/bin -S} \end{itemize} + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "../CinelerraGG_Manual" +%%% End: diff --git a/parts/Overlays.tex b/parts/Overlays.tex index 693c7fb..7b8a4ad 100644 --- a/parts/Overlays.tex +++ b/parts/Overlays.tex @@ -64,7 +64,7 @@ D = Destination S = Source a = alpha c = chroma (color) -|| = OR (logical operator); +|| = OR (logical operator); ? : = if (true/false) ... then (conditional ternary operator) \end{lstlisting} @@ -203,3 +203,8 @@ Typical operations from popular \textit{paint} packages. \item[Softlight:] Darkens or lightens the colors, dependent on the source color value. If the source color is lighter than 0.5, the destination is lightened. 
If the source color is darker than $0.5$, the destination is darkened, as if it were burned in. The degree of darkening or lightening is proportional to the difference between the source color and $0.5$. If it is equal to $0.5$, the destination is unchanged. Using pure black or white produces a distinctly darker or lighter area, but does not result in pure black or white. The effect is similar to shining a diffused spotlight on the destination. A layer with pure black or white becomes markedly darker or lighter, but does not become pure black or white. Soft light is not related to “Hard light” in anything but the name, but it does tend to make the edges softer and the colors not so bright. Math formula is the same as used by Gimp; SVG formula differs. \end{description} + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "../CinelerraGG_Manual" +%%% End: diff --git a/parts/Plugins.tex b/parts/Plugins.tex index bc70102..d4d476f 100644 --- a/parts/Plugins.tex +++ b/parts/Plugins.tex @@ -1,8 +1,8 @@ \chapter{Plugins}% \label{cha:plugins} -There are realtime effects -- these are the most useful and probably all you will ever need -- and rendered effects. -The rendered effects are discussed separately in the \nameref{sec:rendered_effects} section. +There are realtime effects -- these are the most useful and probably all you will ever need -- and rendered effects. +The rendered effects are discussed separately in the \nameref{sec:rendered_effects} section. Effect plugins modify the track when played, according to how they are set, with no permanent storage of the output except when the project is rendered. There are many Plugins in \CGG{} Infinity which are actually quite easy to use just by experimenting with them. The plugins are shown and selected from the \textit{Resources window} (figure~\ref{fig:video-plugins}). They are described in more detail later. 
\begin{figure}[htpb] @@ -26,16 +26,16 @@ Note that when you change the plugin icons, your session will automatically save \end{figure} \begin{figure}[htpb] - \centering + \centering \begin{tikzpicture}[scale=1, transform shape] \node (img1) [yshift=0cm, xshift=0cm, rotate=0] {\includegraphics[width=0.5\linewidth]{plugin-icons.png}}; \node [yshift=-8mm, xshift=-1cm,anchor=east] at (img1.north west) (Preferences) {Preferences Window}; \node [yshift=-17mm, xshift=-1cm,anchor=east] at (img1.north west) (Tab) {Tab section}; - \node [yshift=-41mm, xshift=-1cm,anchor=east] at (img1.north west) (Icon) {Plugin icon choices}; + \node [yshift=-41mm, xshift=-1cm,anchor=east] at (img1.north west) (Icon) {Plugin icon choices}; \draw [->, line width=1mm] (Preferences) edge ([yshift=-8mm] img1.north west); \draw [->, line width=1mm] (Tab) edge ([yshift=-17mm] img1.north west); - \draw [->, line width=1mm] (Icon) edge ([yshift=-41mm] img1.north west); - \end{tikzpicture} + \draw [->, line width=1mm] (Icon) edge ([yshift=-41mm] img1.north west); + \end{tikzpicture} \caption{Screencast showing the screen to change your plugin icons set} \label{fig:plugin-icons} \end{figure} @@ -57,7 +57,7 @@ Besides the magnifying glass, for Show Controls, on the effect colored bar benea \begin{wrapfigure}[2]{r}{0.3\linewidth} \vspace{-3ex} \centering - \includegraphics[width=0.7\linewidth]{button-options.png} + \includegraphics[width=0.7\linewidth]{button-options.png} \end{wrapfigure} The rightmost knob is used to Turn Off/Turn On the effect where the default is On. This is useful to easily see that the plugin is doing what you expect. The leftmost symbol that looks like a gear is for \textit{Preset Edit} and its usage is described in the section \nameref{sec:saved_plugin_preset}. @@ -110,7 +110,7 @@ If shared effects or shared tracks are available, they appear in the shared effe Shared effects and shared tracks allow very unique things to be done. 
In the case of a shared effect, the shared effect is treated like a copy of the original effect, except that in the shared effect the GUI can not be brought up. All configuration of the shared effect is determined by the GUI of the original effect and only the GUI of the original effect can be brought up. -When a shared effect is played back, it is processed just like a normal effect except the configuration is copied from the original effect. Some effects detect when they are being shared. These effects determine what tracks are sharing them and either mix the two tracks together or use one track to stage some value. +When a shared effect is played back, it is processed just like a normal effect except the configuration is copied from the original effect. Some effects detect when they are being shared. These effects determine what tracks are sharing them and either mix the two tracks together or use one track to stage some value. When an original track has a shared track as one of its effects, the shared track itself is used as a \textit{realtime} effect. This is more commonly known as \textit{bouncing tracks} but \CGG{} achieves the same operation by attaching shared tracks. The fade and any effects in the shared track are applied to the original track. Once the shared track has processed the data, the original track performs any effects which come below the shared track and then composites it on the output. @@ -124,7 +124,7 @@ In order to prevent the shared track from mixing the same data as the original t \begin{wrapfigure}[4]{r}{0.3\linewidth} \vspace{-2ex} \centering - \includegraphics[width=0.7\linewidth]{preset.png} + \includegraphics[width=0.7\linewidth]{preset.png} \end{wrapfigure} Note that using this is directly changing a keyframe object so you will only want to modify parameters you are familiar with. Most of the data is obvious and safe to change. 
@@ -132,16 +132,16 @@ A Presets button on the plugin bar to the left of the Controls and On/Off button %\todo{I can't to remedy}% \begin{figure}[htpb] - \centering + \centering \begin{tikzpicture}[scale=1, transform shape] \node (img1) [yshift=0cm, xshift=0cm, rotate=0] {\includegraphics[width=0.6\linewidth]{preset02.png}}; \node [yshift=-30mm, xshift=-1cm,anchor=east] at (img1.north west) (Green) {A user preset Green}; \node [yshift=-101mm, xshift=-1cm,anchor=south east,text width=10em, inner ysep=-3mm] at (img1.north west) (Textbox) {Textbox to type in the title for the chosen preset or name for a new preset.}; - \node [yshift=-110mm, xshift=-1cm,anchor=north east,text width=10em,inner ysep=-3mm] at (img1.north west) (Save) {Use the Delete, Save or Apply button for operation.}; + \node [yshift=-110mm, xshift=-1cm,anchor=north east,text width=10em,inner ysep=-3mm] at (img1.north west) (Save) {Use the Delete, Save or Apply button for operation.}; \draw [->, line width=1mm] (Green) edge ([yshift=-30mm] img1.north west); \draw [->, line width=1mm] (Textbox.south east) -- ([yshift=-101mm] img1.north west); - \draw [->, line width=1mm] (Save.north east) -- ([yshift=-110mm] img1.north west); - \end{tikzpicture} + \draw [->, line width=1mm] (Save.north east) -- ([yshift=-110mm] img1.north west); + \end{tikzpicture} \caption{Screencast shows 4 Factory presets as preceded by an *.} \label{fig:preset02} \end{figure} @@ -178,9 +178,19 @@ Maybe you just don't ever use certain plugins or would prefer to only find the o \subsection{Updatable Icon Image Support}% \label{sub:updatable_icon_image_support} -When running \CGG{} Infinity builtin icons are loaded before the program starts. Png files in the path: \\ \texttt{picon/picon\_set\_name} \\ -are searched before the images loaded into memory. 
Override \texttt{icon.png} files must be put into the path: \\ \texttt{/picon/picon\_set\_name} \\ -There are currently 4 sets of icons and their directory names are \textit{cinfinity} (the default) and \textit{cinfinity2}, \textit{original} (the long-time original set), and \textit{smoother} (generally was in use by some of the themes). An example, to replace the cinfinity icon of Blue Banana with a red apple instead, create your .png file as desired, and replace the file in: \\ +When running \CGG{} Infinity builtin icons are loaded before the +program starts. Png files in the path:\\ +\texttt{picon/picon\_set\_name}\\ +are searched before the images loaded into memory. Override +\texttt{icon.png} files must be put into the path:\\ +\texttt{/picon/picon\_set\_name}\\ +There are currently 4 sets of icons and their directory names are +\textit{cinfinity} (the default) and \textit{cinfinity2}, +\textit{original} (the long-time original set), and +\textit{smoother} (generally was in use by some of the themes). An +example, to replace the cinfinity icon of Blue Banana with a red +apple instead, create your \texttt{.png} file as desired, and replace the +file in:\\ \texttt{/bin/plugins/picon/cinfinity/bluebanana.png}. For most User installs, the \texttt{.png} file will be located at: @@ -211,14 +221,14 @@ The \CGG{} program looks for a plugin icon in two places: \item If there is no corresponding \texttt{.png} file for a plugin, the program uses a built-in default: \begin{itemize} \item ordinary video plugins use 3 vertical color bars as a default; - \item ffmpeg plugins use the words \textit{FF} on yellow colored background as a default icon; - \item audio and ladspa plugins use a green-colored audio wave for a default. + \item ffmpeg plugins use the words \textit{FF} on yellow colored background as a default icon; + \item audio and ladspa plugins use a green-colored audio wave for a default. 
\end{itemize} \end{enumerate} -\begin{figure}[htpb] +\begin{figure}[htpb] \centering - \includegraphics[width=0.05\linewidth]{audio-default.png} -\end{figure} + \includegraphics[width=0.05\linewidth]{audio-default.png} +\end{figure} Keep in mind these points for newly created plugin icons: @@ -226,15 +236,15 @@ Keep in mind these points for newly created plugin icons: \item All included icon images become part of open source, in the public domain, and not proprietary. \item The preferred format is $52 \times 52$, $8\,bit$ /color RGB or RGBA, non-interlaced. \item Since plugin icons are used by different themes, it is recommended that a \textit{transparent background} be used. Otherwise some color background that looks good for one theme may not for another. - \item In order to test a new icon, you have to have write permission in the: \\ - \texttt{/plugins} directory so you may have to become the root user to copy the .png file to the correct location. + \item In order to test a new icon, you have to have write permission in the: \\ + \texttt{/plugins} directory so you may have to become the root user to copy the \texttt{.png} file to the correct location. \item If there is currently no theme-specific \texttt{.png} files present, it may be necessary to first create the theme directory in \texttt{plugins} as \texttt{} in order to put the \texttt{.png} files in that subdirectory. \item Make sure that the \textit{ownership} and file \textit{permissions} match the existing directory and files. \item All ffmpeg icons must begin with \texttt{ff\_.png} (Resources window title will still be F\_\dots) - \item For ladspa, check in the \texttt{} directory (\texttt{\$HOME/.bcast5} normally) and look for the text file \texttt{\$HOME/.bcast5/ladspa\_plugins\dots} for the names of the ladspa libraries which correspond to plugin names where the needed name is the basename of the \texttt{.so} file. 
+ \item For ladspa, check in the \texttt{} directory (\texttt{\$HOME/.bcast5} normally) and look for the text file \texttt{\$HOME/.bcast5/ladspa\_plugins\dots} for the names of the ladspa libraries which correspond to plugin names where the needed name is the basename of the \texttt{.so} file. For example \texttt{pha\-sers\_1217.so} would need to have a \texttt{phasers\_1217.png} file. There may be multiple plugins in a single “so” file which means that you can only have 1 icon to represent all of the plugins in that file; again as in phasers. \item Once you have placed the .png file in the correct spot, you will have to restart \CGG{} to test it. - \item To submit your .png file for inclusion into \CGG{} Infinity for all to enjoy, it is best to upload it to any datafilehost and notify the community via email with any informative documentation. + \item To submit your \texttt{.png} file for inclusion into \CGG{} Infinity for all to enjoy, it is best to upload it to any datafilehost and notify the community via email with any informative documentation. \end{itemize} \subsection{Example of new Plugin Icon Testing}% @@ -255,7 +265,7 @@ cd /plugins # go to the correct directory mkdir -p picon/yournamehere # create subdirectory if does not exist ls -l picon/* # list the picon directories # check for existence (and permissions) -cp yourpicon.png ff\_aeval.png # Copy your example .png file +cp yourpicon.png ff_aeval.png # Copy your example .png file \end{lstlisting} Restart cin by changing \texttt{Settings$\rightarrow$ Preferences$\rightarrow$ Appearance} and in \textit{Plugins icons} choose a directory. @@ -289,8 +299,8 @@ Highlight the set you want to turn on and a check mark appears to show it is act \subsection{Expanders for Plugin Subtrees in the Resources Window}% \label{sub:expanders_plugin_subtrees} -To accentuate a set of common plugins, there are \textit{expander} arrows on the left side of the Resources window. 
You will see these expanders only when in \textit{Display text} mode, not \textit{icon} mode. -\CGG{}’s default setup is in the file \texttt{\$CIN\_DAT/expan\-ders.txt} but if the user wants their own specific setup and if the file in \texttt{\$HOME/.\\bcast5/expanders.txt} exists, it will take precedence. +To accentuate a set of common plugins, there are \textit{expander} arrows on the left side of the Resources window. You will see these expanders only when in \textit{Display text} mode, not \textit{icon} mode. +\CGG{}’s default setup is in the file \texttt{\$CIN\_DAT/expan\-ders.txt} but if the user wants their own specific setup and if the file in \texttt{\$HOME/.\\bcast5/expanders.txt} exists, it will take precedence. If there are recommendations for other relevant categories, they can be added. The subtree structure is applicable to any of the \textit{Video Effects/Transitions} or \textit{Audio Effects/Transitions}. You can not sort once an expansion is in effect (figure~\ref{fig:expander}). The \texttt{expanders.txt} file has very specific requirements. The most specific is that there are no blanks -- you must use tabs only. A \# (pound sign) can be used in column 1 to indicate a comment. Here is a short example: @@ -319,7 +329,7 @@ Audio Effects \subsection{Speed-up of Ffmpeg plugin usage with OPTS files}% \label{sub:speedup_ffmpeg_plugin_opts} -You can speed up some ffmpeg plugins that are quite time-consuming and use a lot of CPU. For a specific color-based example, \CGG{} uses 6 primary rendering color models. All of them have 3 components at full scale. Direct usage of a particular ffmpeg plugin from the ffmpeg command line might handle the planar at less than full scale chroma (yuv420), which means there is less data to manipulate. But when cinelerra loads a video it uses full scale color models. In other words: +You can speed up some ffmpeg plugins that are quite time-consuming and use a lot of CPU\@. 
For a specific color-based example, \CGG{} uses 6 primary rendering color models. All of them have 3 components at full scale. Direct usage of a particular ffmpeg plugin from the ffmpeg command line might handle the planar at less than full scale chroma (yuv420), which means there is less data to manipulate. But when cinelerra loads a video it uses full scale color models. In other words: \begin{itemize}[noitemsep] \item \CGG{} uses \textit{yuv444} @@ -371,8 +381,8 @@ It is an effect that modulates the signal, varies the pitch up and down (instead \begin{description} \item[Voices per channel]: number of items we want to put in the effect. Using more than 4 voices creates sound artifacts that lose the feel of a human voice choir, but can still be used as an artificial sound effect. - \item[Phase offset] (ms): is the constant delay, i.e. the amount of delay of the voices compared to the original signal. - \item[Depth] (ms): is the oscillating delay, i.e. the delay in the oscillation of the various voices from the original signal. + \item[Phase offset] (ms): is the constant delay, i.e.\ the amount of delay of the voices compared to the original signal. + \item[Depth] (ms): is the oscillating delay, i.e.\ the delay in the oscillation of the various voices from the original signal. \item[Rate] (Hz): is the speed at which we apply the oscillating delay. In other words, the speed at which the oscillations occur. \item[Wetnwss] (db): Indicates how much of the original (dry) signal is taken into account compared to delayed voices. \end{description} @@ -503,7 +513,7 @@ Echo is reflection of sound. This plugin could be used to add echoing to video o \begin{description} \item[Level] represents the volume adjustment. - \item[Atten] is attenuation which is a general term that refers to any reduction in the echo reflection. Sometimes called \textit{loss}, attenuation is a natural consequence of signal transmission over long distances. 
+ \item[Atten] is attenuation which is a general term that refers to any reduction in the echo reflection. Sometimes called \textit{loss}, attenuation is a natural consequence of signal transmission over long distances. \item[Offset] is the lag in the attenuated echo signal. Offset means adding a DC level to a signal. It offsets the signal up or down in a DC sense without changing the size of the AC part of the signal. When you add an audio clip to the Timeline, the clip plays back from the beginning of the source audio file. The point in the audio file where the clip starts playing is called the offset. By default, a clip’s offset is zero, the beginning of the source audio file. You can change the offset so that the clip starts playing from a later point in the source audio file. \end{description} @@ -551,7 +561,7 @@ The delay introduced consists of two distinct components: the \textit{constant d \begin{description} \item[Phase Offset]: it is the constant delay. Once set, its value does not change (unless we change it, for example by use of keyframes) for the duration of the effect. \item[Starting phase] \%: is the point of oscillation where we start the oscillating delay; basically it is the attack value at which the effect starts the calculations. Not to be confused with the point on the timeline where we apply the effect. It only matches this for the 0\% value. The position on the timeline where we want to start the flanger at a given starting phase value can be chosen using keyframes. - \item[Depth]: It is the oscillating delay. This value determines the amplitude variation of the delayed (wet) signal phase. this oscillation will be maintained for the entire duration of the effect unless we change it. + \item[Depth]: It is the oscillating delay. This value determines the amplitude variation of the delayed (wet) signal phase. This oscillation will be maintained for the entire duration of the effect unless we change it. 
\item[Rate]: is the speed at which we apply the oscillating delay. Low values indicate a lower oscillation frequency, a high value a rapid succession of oscillations. \item[Wetness]: indicates how much of the original (dry) signal is taken into account compared to the delayed (wet) signal. \end{description} @@ -579,7 +589,7 @@ Reverses the numerical sign of the digital audio. There are no controls. \subsection{Live Audio}% \label{sub:live_audio} -The Live Audio effect reads audio directly from the sound card input. It replaces any audio on the track so it is normally applied to an empty track. To use Live Audio, highlight a horizontal region of an audio track or define In and Out points. Then drop the Live Audio effect into it. Create extra tracks and attach shared copies of the first Live Audio effect to the other tracks to have extra channels recorded. Live Audio uses the sound driver selected in \texttt{Settings $\rightarrow$ Preferences $\rightarrow$ Playback A $\rightarrow$ Audio Out for recording}, but unlike recording it uses the playback buffer size as the recording buffer size and it uses the project sample rate as the sampling rate. These settings are critical since some sound drivers can not record in the same sized buffer they play back in. +The Live Audio effect reads audio directly from the sound card input. It replaces any audio on the track so it is normally applied to an empty track. To use Live Audio, highlight a horizontal region of an audio track or define In and Out points. Then drop the Live Audio effect into it. Create extra tracks and attach shared copies of the first Live Audio effect to the other tracks to have extra channels recorded. Live Audio uses the sound driver selected in \texttt{Settings $\rightarrow$ Preferences $\rightarrow$ Playback A $\rightarrow$ Audio Out for recording}, but unlike recording it uses the playback buffer size as the recording buffer size and it uses the project sample rate as the sampling rate. 
These settings are critical since some sound drivers can not record in the same sized buffer they play back in. Live audio has been most reliable when ALSA is the recording driver and the playback fragment size is $2048$. Drop other effects after Live Audio to process sound card input in realtime. With live audio there is no read-ahead, so effects like compressor will either delay if they have read-ahead enabled or playback will under-run. A potential problem is that sometimes the recording clock on the sound card is slightly slower than the playback clock. The recording eventually falls behind and playback sounds choppy. Live Audio does not work in reverse. @@ -612,7 +622,7 @@ Allows you to convert an audio file from one sample rate to another. This effect \begin{tabular}{l l} \toprule Input / output > 1 & fast rate \\ - Input / output < 1 & slow rate \\ + Input / output < 1 & slow rate \\ \bottomrule \end{tabular} \end{center} @@ -620,7 +630,7 @@ Allows you to convert an audio file from one sample rate to another. This effect \subsection{Reverb}% \label{sub:reverb} -Reverb uses reflections of sound to add depth and fullness; the sound will seem to come from a space that can go from a small bare room to large natural valleys, cathedrals, etc. The reverb is made up of a group of echoes that occur at the same time making it feel like a single effect. +Reverb uses reflections of sound to add depth and fullness; the sound will seem to come from a space that can go from a small bare room to large natural valleys, cathedrals, etc. The reverb is made up of a group of echoes that occur at the same time making it feel like a single effect. Basically simulates creation of a large number of reflections, like lots of walls, which build up and then decay. You can use the reverb plugin to mix tracks together to simulate ambiance because it is a multitrack effect. The configuration window (figure~\ref{fig:reverb}) shows a graph of the full band pass filter frequencies. 
@@ -714,7 +724,7 @@ LV2 is an open standard for audio plugins using a simple interface with extensio Typically, a user OS has specialized package groups installed. It is difficult to create one build of \CGG{} to accommodate all potential LV2 plugins. Specifically for the \textit{Calf-Studio LV2 plugins}, you should install the \textit{Calf Plugins} package. The user’s computer must have \textit{gtk-2-runtime} installed, which seems to be automatically done already for most distros. For users doing their own builds, you can build \CGG{} without LV2 support by including \texttt{-{}-without-lv2} in the configure step. The default build is \texttt{-{}-with-lv2=yes} and requires that \textit{GTK-2-devel} must be installed or the build will fail and notify you. -LV2 plugins have their own category in the \textit{Audio Plugins Visibility} as lv2. There is a simple text interface which is available via the usual \textit{Show controls} button when the plugin is attached to the audio track. This window has a Reset button to get back to the default settings. To change a value of one of the parameters, highlight that parameter and type in the new value in the topmost text box and then hit Apply to take effect -- the reason for requiring hitting apply is so that the audio is not moving all over the place while you are still typing a value. More easily, you can just move the \textit{pot dial} or the \textit{slider} bar which take effect automatically. +LV2 plugins have their own category in the \textit{Audio Plugins Visibility} as lv2. There is a simple text interface which is available via the usual \textit{Show controls} button when the plugin is attached to the audio track. This window has a Reset button to get back to the default settings. 
To change a value of one of the parameters, highlight that parameter and type in the new value in the topmost text box and then hit Apply to take effect -- the reason for requiring hitting apply is so that the audio is not moving all over the place while you are still typing a value. More easily, you can just move the \textit{pot dial} or the \textit{slider} bar which take effect automatically. \CGG{}’s buffer size setting may cause a delay in activation of the changes you make taking effect, so you can lessen the time by using a small buffer. Notice that $1024$ samples at $48000$ samples per sec is only $\frac{1}{50}^{th}$ a second. This is not a lot of time to shuffle a bunch of stuff. Short buffers produce low latency, but no time for complex programs or lots of stacked effects. Bigger buffers allow for more complex setups. @@ -731,12 +741,12 @@ export LV2_PATH=/tmp/j/balance.lv2/usr/local/lib/lv2/:/usr/local/lv2 If there is no default \texttt{LV2\_PATH} set automatically, the value will be \texttt{\$CIN\_DAT/\\lv2}, which is a placeholder only so that no lv2 plugins will be loaded. When there is no system \texttt{LV2\_PATH} set it is important to note, that if you do want lv2 plugins loaded, you must set the correct path in: -\texttt{Settings $\rightarrow$ Preferences $\rightarrow$ Interface tab $\rightarrow$ Default LV2 $\rightarrow$ direc\-tory +\texttt{Settings $\rightarrow$ Preferences $\rightarrow$ Interface tab $\rightarrow$ Default LV2 $\rightarrow$ direc\-tory path name} When you change this field, cin will automatically restart and load the newly specified lv2 plugins. 
If when switching \texttt{LV2\_PATH} or if the lv2 audio plugins are not displayed/usable in the Resources window, you can execute a reload via: -\texttt{Settings $\rightarrow$ Preferences $\rightarrow$ Interface tab $\rightarrow$ Reload plugin in\-dex} +\texttt{Settings $\rightarrow$ Preferences $\rightarrow$ Interface tab $\rightarrow$ Reload plugin in\-dex} or else before you bring up \CGG{}, delete \texttt{\$HOME/.bcast5/\CGG{}\_\\plugins} so that the plugins get properly reloaded. There are some lv2 plugins that display a \textit{glitzy} UI (User Interface); for example the \textit{Calf plugins}. For these LV2 plugins, if you want that to automatically come up without having to click on the UI button on the simplified UI interface, there is a flag to enable that. It is at: @@ -782,7 +792,7 @@ For example: \label{fig:calf02} \end{figure} -\section[Video Effects --- Native]{Video Effects -- Native}}% +\section[Video Effects --- Native]{Video Effects -- Native}% \label{sec:video_effects_native} \settocdepth{subsection} @@ -900,7 +910,7 @@ There are two panes separated by long horizontal lines (through the middle of th This section is used to select the target color domain. First, a short explanation about alpha. The alpha channel used in BlueBanana is not transparency (\textit{matte}); it is used as the \textit{Selection mask}. Alpha plane is the alpha channel of the current image. So that: -RGBA = red/green/blue color planes, alpha data plane. +RGBA = red/green/blue color planes, alpha data plane. YUVA = luma/Cb/Cr color values, alpha data plane. The alpha data normally is input to the blending operations in the patchbay overlay mode. The alpha data usually creates the appearance of stacking order, and determines which color planes are visible in the rendered result. When BlueBanana is used, the meaning of the alpha data is changed to the selection. 
It is useful to think of the alpha data as more solid when it is transparency in blending, and more selected when it is used in BlueBanana. In both cases, the greater the alpha value, the more the effect is expressed. @@ -911,19 +921,19 @@ Let us now examine the instruments in \textbf{pane 1}: \begin{description} \item[Combine Selection] The selection is the intersection or union of two pixel masks. Mathematically, $A$ and $B$ are normalized, (scaled to between $0$ and $1$) and used as selection mask weights. - + $Intersection (\cap) = A\times B$ - + $Union (\cup)= A+B-A\times B$ - + where $A$ is the input alpha plane as a mask, $1$=selected, $0.4$=partially selected, and $0$=not selected; $B$ is the color selection of trims and feathers made by varying the sliders. - + The result is a new alpha plane, which will be output (if \textit{End Mask} is not set). The $0\dots1$ selection values are used to weight the color transformation filters if/when they are active and operate a change. The color adjustment filters available in Pane \#$2$ can change red, green, blue, and remap hue, saturation, value in the pane. There is also \textit{fade} which applies to the color channels and \textit{alpha} which applies to the resulting alpha plane. - + The basic plan is to either: - + - reduce a selection area by intersection (Combine selection off) $A \times B$ - + - increase a selection area by union (Combine selection on) $A+B-A\times B$ \item[Mask Selection] applies the current mask to the selection, such that the mask clips/expands the selection. When mask selection is enabled, the result of the and/or will be stored to the alpha result, but when mask selection is unchecked the mask is ignored and the selection is not modified. The selection is used to weight the effect of the filtering, or to control the output alpha. \item[End mask] only visible when \textit{Mask Selection} is checked. 
End Mask causes the entire alpha plane to @@ -934,13 +944,13 @@ Let us now examine the instruments in \textbf{pane 1}: \item[End Mask as used in Chroma-key Filtering:] In cases where the selection is for a chroma-key, you are interested in the alpha channel for blending, like \textit{Normal} or \textit{SrcOver}. So for this usage of the BlueBanana, don't check the End Mask. \end{description} \item[Invert Selection] reverse target color domain, which is 1 minus selection. - \item[Mark Selected Areas] when this box is checked, the chosen colors are presented in an animated + \item[Mark Selected Areas] when this box is checked, the chosen colors are presented in an animated diagonally striped pattern. \item[Hue] select a hue domain; click on the Pick button to select or check the box to the left of hue or uncheck to ignore. - \item[Saturation] select a saturation domain; click on the Pick button to select or check the box to the left. + \item[Saturation] select a saturation domain; click on the Pick button to select or check the box to the left. \item[Value] select a value domain; click on the Pick button to select or check the box to the left. \item[Fill] will fill more area or less area of your selected region. This describes how it works. Fill control is an automated way of doing grow and shrink on the selected area, to fill in small holes, or get rid of scattered speckles. If none of the Hue, Saturation, or Value sliders are active -- meaning that the whole frame is selected -- the Fill slider will have no effect even when enabled. The word fill will appear ghosted to indicate this. - + The three lower handles in the fill slider correspond to \textit{Shrink} (the left hand slider), \textit{Final} (the middle slider), and \textit{Grow} (the right hand slider). 
These are used in combination to alter the selection by first growing it by the amount specified by the right hand Grow slider, shrinking it to the amount specified by the left hand Shrink slider, and then growing it again to the final size specified by the middle Final slider. The top slider then feathers the resulting selection. Growing the selection and then shrinking it has the effect of filling small holes in the selected area. Similarly, shrinking and then growing tends to remove small flecks of unwanted selection. The Final slider specifies the overall desired shrinkage or growth of the selection when finished. To specify a pure Grow or Shrink operation, set the Final slider and the Grow/Shrink slider to the same value and leave the other slider at zero. \item[Pre-erode] this control reverses the order of operation to Shrink, then Grow, then Final. The change is subtle on most images, but overall removes more small features because it first removes flecks before filling in holes. @@ -954,7 +964,7 @@ This section is used to modify the color of your selection. Descriptive commenta \begin{description} \item[Filter Active] checkbox to indicate that the modifications will be shown. \item[Color Adjustment ] For Color Adjustment, RGB can be used as color weights while the HSV can transform color. - For the following items there are three sections on the slider. The \textit{center} section represents the nominal $0\%-100\%$ range; the \textit{left} section represents negative values, and the \textit{right} section represents values greater than $100\%$. Values can be out-of-range within BlueBanana without clipping, but they will clip once they leave the plugin. + For the following items there are three sections on the slider. The \textit{center} section represents the nominal $0\%-100\%$ range; the \textit{left} section represents negative values, and the \textit{right} section represents values greater than $100\%$. 
Values can be out-of-range within BlueBanana without clipping, but they will clip once they leave the plugin.
 \item[RGB] affect the color channels individually.
 \begin{description}
 \item[Red] modification color; click the Reset button to revert to default. Values are reflected in numerical textboxes on the right-hand side.
@@ -1066,7 +1076,7 @@ It is important to note as you play or change the frame, the plugin re-computes
 \item[Activate processing] when checked, the c41 operation is used to render the image.
 \item[Compute negfix values] computes the current negative values of the image (inside the box).
 \item[Show active area] draws horizontal and vertical grid lines displaying the boxed area.
- \item[Postprocess] when checked, applies contrast/brightness values as defined in $coef\frac{1}{2}$. 
+ \item[Postprocess] when checked, applies contrast/brightness values as defined in $coef\frac{1}{2}$.
 \end{description}
 \paragraph{Values:}
 \begin{description}
@@ -1153,7 +1163,12 @@ Start with \textit{Hue Tolerance} at $10\%$, \textit{Min Bright\-ness} at $0$, \
 \item[Saturation:] Increase \textit{Min Saturation} so that only the background is masked out, and not parts of the foreground. \textit{Saturation Offset} can be used to change this, but for now leave it set to $0$.
 \end{description}
 
-Check what it looks like at this stage, your mask should be pretty clean. Toggle \textit{Show Mask} to check what it looks like, it should be OK. If not, repeat steps $1 to 4$ to get a better key. The rest of the controls are useful to smear the mask to help compositing later on. They will help you to make your key look much cleaner.
+Check what it looks like at this stage, your mask should be pretty
+clean. Toggle \textit{Show Mask} to check what it looks like, it
+should be OK\@. If not, repeat steps $1$ to $4$ to get a better
+key. The rest of the controls are useful to smear the mask to help
+compositing later on. They will help you to make your key look much
+cleaner.
\begin{description} \item[Slope:] For now, the mask is a full on/ full off mask that can be really harsh and not necessarily what you are looking for. \textit{In Slope} and \textit{Out Slope} will help you to smooth that key. In Slope leaves more colors in the mask, Out Slope takes more colors out of the mask. The colors that are borderline in the mask will see their alpha channel reduced by half instead of being completely on or off. @@ -1184,7 +1199,7 @@ Together with \textit{Histogram Bezier / Curves} Color 3 Way is the main tool of \item In addition to the three reset buttons, each slider and each wheel has its own Clear button, to return it to the default value without affecting the others. \end{itemize} -This plugin allows maximum control over the result and maximum precision of adjustments when used simultaneously with the control monitors, i.e. \textit{Waveform}, \textit{RGB Parade} and \textit{Vectorscope}. It is important to keep in mind that the three zones are not clearly separated, but slightly overlapping. This results in less precision but looks better for more smooth shades. By varying the values on the color wheels all RGB channels are affected simultaneously, which can result in unwanted color dominance. Saturation is also affected and must therefore be monitored. +This plugin allows maximum control over the result and maximum precision of adjustments when used simultaneously with the control monitors, i.e.\ \textit{Waveform}, \textit{RGB Parade} and \textit{Vectorscope}. It is important to keep in mind that the three zones are not clearly separated, but slightly overlapping. This results in less precision but looks better for more smooth shades. By varying the values on the color wheels all RGB channels are affected simultaneously, which can result in unwanted color dominance. Saturation is also affected and must therefore be monitored. 
To use more precisely, drag the \textit{crosshair} with the mouse in the desired area and then adjust in steps of $0.001$ using the up/down and right/left arrows on the keyboard. The most common use cases (but can be adapted to virtually any situation) of the plugin are: @@ -1192,7 +1207,7 @@ The most common use cases (but can be adapted to virtually any situation) of the \item White balancing. \item Expand/compress contrast. \item Mitigate under and over exposure. - \item Balance colors, i.e. eliminate color dominance. + \item Balance colors, i.e.\ eliminate color dominance. \item Color matching Shot to Shot. \item Create a Stylized look. \end{itemize} @@ -1215,8 +1230,8 @@ Since \textit{complementary colors} are neutralized, to eliminate a \textit{colo \label{sub:color_space} This plugin is a tool that can be used to convert your input media, such as a recording from your camera, -from one color space/range to another. It works for both RGB and YUV as set by your project format. -Options are BT601, BT709, or BT2020 for Color Space input and output and JPEG or MPEG for Color Range +from one color space/range to another. It works for both RGB and YUV as set by your project format. +Options are BT601, BT709, or BT2020 for Color Space input and output and JPEG or MPEG for Color Range input and output. The Inverse option checkbox is available in case your media was rendered in the wrong color space or range so that you can fix it. @@ -1259,7 +1274,7 @@ To start, if not already checked, turn on drag. In the composer window select an \item[Threshold:] slider goes from $0\, to\, 1$. Increasing the threshold, increases the area to be filled or masked. You can also use the mouse wheel to scroll the slider. \item[Drag:] for ease of use. \item[Reset:] button to revert to only the default middle point with all others being deleted. 
- \item[ListBox:] "E" for Enabled with $*$ marking that; "X" is the point’s $x$ coordinate; "Y" is the point’s $y$ coordinate; "T" is the threshold value of $X,Y$ point; \textit{Tag} represents the \# of the selected + \item[ListBox:] "E" for Enabled with $*$ marking that; "X" is the point’s $x$ coordinate; "Y" is the point’s $y$ coordinate; "T" is the threshold value of $X,Y$ point; \textit{Tag} represents the \# of the selected point. \item[Hints:] for usage shortcuts. \end{description} @@ -1318,7 +1333,13 @@ It allows you to obtain a rectangle from the frame, whose dimensions are fully a \subsection{DeScratch}% \label{sub:descratch} -The descratch video plugin can be used to remove vertical scratches from film. It can also be used, after image rotation, to remove horizontal noise lines that may appear on analog \textit{VHS} captures. For best results \textit{YUV} should be the video format; however if your format is \textit{RGB}, it will first be converted to YUV. There are many tuneable parameters necessary to get good results for your specific film. +The descratch video plugin can be used to remove vertical scratches +from film. It can also be used, after image rotation, to remove +horizontal noise lines that may appear on analog \textit{VHS} +captures. For best results \textit{YUV} should be the video format; +however if your format is \textit{RGB}, it will first be converted +to YUV\@. There are many tuneable parameters necessary to get good +results for your specific film. Figure~\ref{fig:descratch01} shows a list of the parameter descriptions: @@ -1341,7 +1362,7 @@ Figure~\ref{fig:descratch01} shows a list of the parameter descriptions: \item[asymmetry] maximum asymmetry of surrounding pixels. 
\item[Mode] \textit{None}; \textit{Low}=black; \textit{High}=white; \textit{All}=both; - \textit{y} -- processing mode for \textit{luma} plane; + \textit{y} -- processing mode for \textit{luma} plane; \textit{u}-- processing mode for \textit{chroma u} plane; \textit{v} -- processing mode for \textit{chroma v} plane. \item[width min/max] minimal scratch width in pixels and maximum scratch width in pixels. @@ -1462,11 +1483,11 @@ Whatever the visual content of the frame, the Foreground plugin application appl \subsubsection*{Theory behind the Frames to Fields and Fields to Frames plugins} \label{ssub:theory_frames_fields} -Historically, CRT-type TVs used interlaced signals to save bandwidth. An interlaced video consists of two \textit{fields} that are read and drawn on the screen one after the other. Each field must be played at a framerate double that of the resulting video. In two steps the complete frame will be reconstructed. +Historically, CRT-type TVs used interlaced signals to save bandwidth. An interlaced video consists of two \textit{fields} that are read and drawn on the screen one after the other. Each field must be played at a framerate double that of the resulting video. In two steps the complete frame will be reconstructed. -frame 1 $\implies$ F1-field1 (\textit{Top} or \textit{Odd}), F1-field2 (\textit{Bottom} or \textit{Even}) +Frame 1 $\implies$ F1-field1 (\textit{Top} or \textit{Odd}), F1-field2 (\textit{Bottom} or \textit{Even}) -frame 2 $\implies$ F2-field1, F2-field2 +Frame 2 $\implies$ F2-field1, F2-field2 Interlaced video reading: $F1-f1$ then $F1-f2$ then $F2-f1$ then $F2-f2$ \dots There may be visual problems if the Top type interlacing is read according to a Bottom scheme. So it's important to know if a video is Top or Bottom. Generally an \textit{HD} video is Top; a \textit{DV} video (both PAL and NTSC) is Bottom; \textit{SD} (PAL) is Top; \textit{SD} (NTSC) is Bottom (but not always). 
Instead, high-definition videos need to be more compressed and this contrasts with the interlacing that is little and badly compressible, so modern videos are mostly \textit{progressive}. @@ -1477,7 +1498,7 @@ There may be visual problems if the Top type interlacing is read according to a \begin{enumerate} \item upload an interlaced video to the Timeline and Resources and play it for viewing. \item The video presents visual artifacts because PC monitors are progressive. - \item In the Resources window, open the media \textit{Info} with the right mouse button. Below you can see that the \textit{asset's interlacing} is active. It has four options for settings interlacing type: \textit{Unknown}, \textit{Top Fields first}, \textit{Bottom Fields first}, and \textit{Not interlaced}. If the file is (H)DV type, recognition and configuration is done automatically. All other media types will be set \textit{unknown}. We need to set the type of interlacing, so we have to manually set the interlacing. + \item In the Resources window, open the media \textit{Info} with the right mouse button. Below you can see that the \textit{asset's interlacing} is active. It has four options for settings interlacing type: \textit{Unknown}, \textit{Top Fields first}, \textit{Bottom Fields first}, and \textit{Not interlaced}. If the file is (H)DV type, recognition and configuration is done automatically. All other media types will be set \textit{unknown}. We need to set the type of interlacing, so we have to manually set the interlacing. \item Now we can to use the \textit{Frames to Fields} plugin, but we have to configure it and act manually. \end{enumerate} @@ -1489,7 +1510,7 @@ This plugin is only useful if its output is pulled with doubled framerate with r \label{ssub:processing_interlace_footage} \begin{enumerate} - \item Create a new project with doubled frame rate. That is, make it $50\,fps$ if your source footage is $25i$. + \item Create a new project with doubled frame rate. 
That is, make it $50\,fps$ if your source footage is $25i$. In \texttt{Resources $\rightarrow$ Media $\rightarrow$ Info} set the interlaced type (or unknown) \item Insert your source footage onto a video track in the timeline. Now, \CGG{} will playback each frame of your footage twice. There will be visual artifacts because the video is interlaced and the @@ -1522,7 +1543,24 @@ In its simplest form, highlight a region of the track to freeze, drop the \textt \textit{Log} camera images store colors in a $logarithmic$ scale. The blacks in these images are nearly $0$ and the whites are supposed to be infinity. The graphics card and most video codecs store colors in a $linear$ scale but \CGG{} keeps log camera images in their original logarithmic scale when it renders them. This is necessary because the raw image parser can not always decode the proper gamma ($\gamma$) values for the images. It also does its processing in $16\,bit$ integers, which takes away a lot of information. -Mathematically, the gamma function is exponential ($output = input^{\gamma}$) and therefore the inverse of the logarithmic function [$output = \log(input)$]. Actually the formula used by the \CGG{} plugin is: $output = input^{\frac{1}{\gamma}}$ which allows for a range of values $0 \div 1.0$. The gamma effect converts the logarithmic colors to linear colors through a \textit{gamma value} and a \textit{maximum value}. The gamma value determines how steep the output curve is (i.e. the value of the gamma parameter; for color space Rec709 is $2.4$ ($\frac{1}{\gamma} =0.41\dots$), for sRGB is $2.2$ ($\frac{1}{\gamma} =0.45\dots$), etc.). The maximum value is where $1.0$ in the output corresponds to maximum brightness in the input. It serves to avoid clipped values because it allows you to set the maximum value of the output, $1.0$, whenever range adjustment is done (see figure~\ref{fig:gamma01}). 
It is important to adjust the two parameters accurately in order to avoid undesired and unexpected effects, such as excessive values, unbalanced image, incorrect linearization, etc. +Mathematically, the gamma function is exponential +($output = input^{\gamma}$) and therefore the inverse of the +logarithmic function [$output = \log(input)$]. Actually the formula +used by the \CGG{} plugin is: $output = input^{\frac{1}{\gamma}}$ +which allows for a range of values $0 \div 1.0$. The gamma effect +converts the logarithmic colors to linear colors through a +\textit{gamma value} and a \textit{maximum value}. The gamma value +determines how steep the output curve is (i.e.\ the value of the +gamma parameter; for color space Rec709 is $2.4$ +($\frac{1}{\gamma} =0.41\dots$), for sRGB is $2.2$ +($\frac{1}{\gamma} =0.45\dots$), etc.). The maximum value is where +$1.0$ in the output corresponds to maximum brightness in the +input. It serves to avoid clipped values because it allows you to +set the maximum value of the output, $1.0$, whenever range +adjustment is done (see figure~\ref{fig:gamma01}). It is important +to adjust the two parameters accurately in order to avoid undesired +and unexpected effects, such as excessive values, unbalanced image, +incorrect linearization, etc. \begin{figure}[htpb] \centering @@ -1605,25 +1643,25 @@ The histogram allows an immediate view of the contrast amplitude of an image wit \includegraphics[width=0.8\linewidth]{histogram.png} \caption{Master Histogram and RGB Histogram} \label{fig:histogram} -\end{figure} +\end{figure} -The Histogram is always performed in floating point RGB regardless of the project color space. The histogram has two sets of transfer parameters: the \textit{input transfer} and the \textit{output transfer}. 
The input transfer has value on the horizontal axis of $x$; it is a scale of values ranging from 0 to 255 in the case of an $8\,bit$ image, or it can have normalized values in the range ($0-1.0$) or even be a scale in percentage ($0-100\%$). In the output transfer (the $y\,axis$) is represented the number of times (that is, $y$) a given value $x$ appears. A higher column ($y$ greater) indicates that many pixels have the corresponding value $x$; a lower column indicates that fewer pixels have that value. On the left we have the minimum value $0$, which is the black point. On the right we have the maximum value $1.0$ which is the white point. The intermediate values pass smoothly from one extreme to the other. The three important points (including the midtones, i.e. the Master Offset) are indicated by cursors (small triangles) at the base of the histogram. You can adjust them to change the values of the three points if you want. +The Histogram is always performed in floating point RGB regardless of the project color space. The histogram has two sets of transfer parameters: the \textit{input transfer} and the \textit{output transfer}. The input transfer has value on the horizontal axis of $x$; it is a scale of values ranging from 0 to 255 in the case of an $8\,bit$ image, or it can have normalized values in the range ($0-1.0$) or even be a scale in percentage ($0-100\%$). In the output transfer (the $y\,axis$) is represented the number of times (that is, $y$) a given value $x$ appears. A higher column ($y$ greater) indicates that many pixels have the corresponding value $x$; a lower column indicates that fewer pixels have that value. On the left we have the minimum value $0$, which is the black point. On the right we have the maximum value $1.0$ which is the white point. The intermediate values pass smoothly from one extreme to the other. 
The three important points (including the midtones, i.e.\ the Master Offset) are indicated by cursors (small triangles) at the base of the histogram. You can adjust them to change the values of the three points if you want. -There are 4 possible histograms in the histogram viewer. The red, green, blue histograms show the input histograms for red, green, blue and multiply them by an input transfer to get the output red, green, blue. Then the output red, green, blue is scaled by an output transfer. The scaled red, green, blue is converted into a value and plotted on the value histogram. The value histogram thus changes depending on the settings for red, green, blue. The value transfers are applied uniformly to R, G, B after their color transfers are applied. Mathematically, it is said that the values of $x$ are linked to the values of $y$ by a transfer function. This function is represented by a line that leaves the values of $x$ and $y$ unchanged, but we can intervene by modifying this line with the cursors. +There are 4 possible histograms in the histogram viewer. The red, green, blue histograms show the input histograms for red, green, blue and multiply them by an input transfer to get the output red, green, blue. Then the output red, green, blue is scaled by an output transfer. The scaled red, green, blue is converted into a value and plotted on the value histogram. The value histogram thus changes depending on the settings for red, green, blue. The value transfers are applied uniformly to R, G, B after their color transfers are applied. Mathematically, it is said that the values of $x$ are linked to the values of $y$ by a transfer function. This function is represented by a line that leaves the values of $x$ and $y$ unchanged, but we can intervene by modifying this line with the cursors. -You need to select which transfer to view by selecting one of the channels on the top of the histogram. You can also choose whether to display the master, i.e. 
only the values of the \textit{luma}, or show the \textit{Parade}, i.e. the three RGB channels. You can switch from one to the other with the two buttons in the upper right corner. The input transfer is defined by a graph overlaid on the histogram; this is a straight line. Video entering the histogram is first plotted on the histogram plot, then it is translated so output values now equal the output values for each input value on the input graph. +You need to select which transfer to view by selecting one of the channels on the top of the histogram. You can also choose whether to display the master, i.e.\ only the values of the \textit{luma}, or show the \textit{Parade}, i.e.\ the three RGB channels. You can switch from one to the other with the two buttons in the upper right corner. The input transfer is defined by a graph overlaid on the histogram; this is a straight line. Video entering the histogram is first plotted on the histogram plot, then it is translated so output values now equal the output values for each input value on the input graph. After the input transfer, the image is processed by the output transfer. The output transfer is simply a minimum and maximum to scale the input colors to. Input values of $100\%$ are scaled down to the output's maximum. Input values of $0\%$ are scaled up to the output minimum. Input values below $0$ are always clamped to $0$ and input values above $100\%$ are always clamped to $100\%$. Click and drag on the output gradient's triangles to change it. It also has textboxes to enter values into. Enable the \textit{Automatic} toggle to have the histogram calculate an automatic input transfer for the red, green, and blue but not the value. It does this by scaling the middle $99\%$ of the pixels to take $100\%$ of the histogram width. The number of pixels permitted to pass through is set by the \textit{Threshold} textbox. A threshold of $0.99$ scales the input so $99\%$ of the pixels pass through. 
Smaller thresholds permit fewer pixels to pass through and make the output look more contrasty. \textit{Plot histogram} is a checkbox that enables plotting the histogram. -\textit{Split output} is a checkbox that enables a diagonal split showing in the compositor. +\textit{Split output} is a checkbox that enables a diagonal split showing in the compositor. \textit{Reset} returns the four curves to their initial state (neutral) as well as the Value/RGB histogram buttons. \subsection{Histogram Bezier / Curves}% \label{sub:histogram_bezier_curves} -Histogram Bézier allows an immediate view of the contrast amplitude of an image with its distribution of luma and colors values using a piecewise linear method. In addition it uses a Bézier curve (parametric) on the histogram plot. When mapping color spaces, it has a variety of presentations to get smoother transitions and more pleasing output. It uses more general remapping, not just straight lines but more contour lines. Curves are perhaps the most powerful and sophisticated tool for color correction. For some repetitive details, see the previous description of the Histogram plugin. Histogram Bézier is keyframable. +Histogram Bézier allows an immediate view of the contrast amplitude of an image with its distribution of luma and colors values using a piecewise linear method. In addition it uses a Bézier curve (parametric) on the histogram plot. When mapping color spaces, it has a variety of presentations to get smoother transitions and more pleasing output. It uses more general remapping, not just straight lines but more contour lines. Curves are perhaps the most powerful and sophisticated tool for color correction. For some repetitive details, see the previous description of the Histogram plugin. Histogram Bézier is keyframable. The input graph is edited by adding and removing any number of points. Click and drag anywhere in the input graph to create a point and move it. 
Click on an existing point to make it active and move it. The active point is always indicated by being filled in. The active point's input X and output Y values are given in textboxes on top of the window. The input and output color of the point can be changed through these textboxes. Points can be deleted by first selecting a point and then dragging it to the other side of an adjacent point. They can also be deleted by selecting them and hitting delete (figure~\ref{fig:bezier}). @@ -1646,7 +1684,19 @@ The input graph is edited by adding and removing any number of points. Click and \item \textit{Interpolation:} type of algorithm for the parametric curves; linear, polynomial and Bezier. \end{itemize} -Curves are used by introducing \textit{control points} simply with the left mouse button and adjusting the value by dragging and dropping. If you drag along the horizontal line only, you change the value of $x$ and you can read this value in the input $x$ textbox. If you drag along the vertical line only, you change the value of $y$ and you can read the value in the input $y$ textbox. This is the output value. The newly clicked control point becomes active and is full green in color. To delete a point we have to make it active and then press the Del key, or we can drag the point beyond the position of another control point to its right or left or, finally, pressing RMB. The control points corresponding to the black point and the white point are automatically created from the beginning, to fix their values and prevent clipping. +Curves are used by introducing \textit{control points} simply with +the left mouse button and adjusting the value by dragging and +dropping. If you drag along the horizontal line only, you change the +value of $x$ and you can read this value in the input $x$ +textbox. If you drag along the vertical line only, you change the +value of $y$ and you can read the value in the input $y$ +textbox. This is the output value. 
The newly clicked control point +becomes active and is full green in color. To delete a point we have +to make it active and then press the Del key, or we can drag the +point beyond the position of another control point to its right or +left or, finally, pressing RMB\@. The control points corresponding to +the black point and the white point are automatically created from +the beginning, to fix their values and prevent clipping. Curves are generally adjusted by introducing several control points, some to be kept fixed (as anchors) to prevent curve modification beyond them, and others to be dragged to make the desired correction. The power of the curves lies in being able to circumscribe a small interval at will and intervene only on this without involving the remaining parts of the frame. The precision with which you can work is such that you can almost arrive at a secondary color correction. @@ -1665,17 +1715,17 @@ The \textit{Polynomial} and \textit{Bézier} types introduce \textit{control han Some examples of the use of curves to demonstrate the variety of possible interventions (figure~\ref{fig:ex-bezier}): \begin{itemize} - \item Scale the image values by increasing the white point or decreasing the white point (gain up and gain down). + \item Scale the image values by increasing the white point or decreasing the white point (gain up and gain down). You can decide the scaling value with the formula: $(Input \div Output) = Scale Factor$ \item Limit a value beyond a certain point of brightness (clamp to the value $0.587$ in the figure). - \item S-shaped curve to increase contrast without changing the black and white point (i.e. without \textit{clipping}). + \item S-shaped curve to increase contrast without changing the black and white point (i.e.\ without \textit{clipping}). \item Make a real \textit{Luma Key} by bringing a certain value of gray to $100\%$ (white) and lowering everything else to $0\%$ (black). 
The slope of the two sides indicates how much we want to fade the edges of the matte obtained.
\end{itemize}

\subsection{HolographicTV}%
\label{sub:holographictv}

-Incoming objects are projected like holovision seen in the movie Stars Wars as in R2-D2's video message projector of the Princess Leia. You need a movie or background image and above it a track containing the figure on which to apply the effect. This must have a transparent background. There are no configuration parameters; it only has to be applied to the upper track (figure~\ref{fig:holographictv}).
+Incoming objects are projected like holovision seen in the movie Star Wars as in R2-D2's video message projector of Princess Leia. You need a movie or background image and above it a track containing the figure on which to apply the effect. This must have a transparent background. There are no configuration parameters; it only has to be applied to the upper track (figure~\ref{fig:holographictv}). 
This effect originated from {\small \url{https://effectv.com}}.

@@ -1712,19 +1762,19 @@ The practical use of \textit{Interpolate Video} is a little different than the t

\subsection{Inverse Telecine}%
\label{sub:inverse_telecine}

-This is the most effective deinterlacing tool when the footage is a video transfer of a film. This can be used to solve the problem, i.e., undo the damage caused by making film into a TV broadcast.
-That process came about because film is at 24\,\emph{fps} while TV is at 29.97\,\emph{fps} and fields are at 60.
-So the film was converted from 24\,\emph{fps} to 60\,\emph{fps}.
-Roughly speaking, converting every 4 frames into 5 frames plus a slight slow down in speed.
-Then the 60\,\emph{fps} was down-sampled to 30\,\emph{fps} by extracting odd and even lines and interlacing the lines.
+This is the most effective deinterlacing tool when the footage is a video transfer of a film. This can be used to solve the problem, i.e., undo the damage caused by making film into a TV broadcast. 
+That process came about because film is at 24\,\emph{fps} while TV is at 29.97\,\emph{fps} and fields are at 60. +So the film was converted from 24\,\emph{fps} to 60\,\emph{fps}. +Roughly speaking, converting every 4 frames into 5 frames plus a slight slow down in speed. +Then the 60\,\emph{fps} was down-sampled to 30\,\emph{fps} by extracting odd and even lines and interlacing the lines. This process is referred to as \textit{three-two pull down} ($3:2$ pull down) in filmmaking and television production for the post production process of transferring film to video. The three-two pull down is where the telecine adds a third video field (a half frame) to every second video frame, but the untrained eye cannot see the addition of this extra video field. -The \texttt{IVTC} effect is primarily a way to convert \textit{interlaced} video to \textit{progressive} video. +The \texttt{IVTC} effect is primarily a way to convert \textit{interlaced} video to \textit{progressive} video. It reverses the effect of three patterns of interlacing. In the next lines \texttt{A}, \texttt{B}, and \texttt{C} represent fields. \texttt{A AB BC CD D} - -\texttt{AB CD CD DE EF} + +\texttt{AB CD CD DE EF} \texttt{Automatic} @@ -1850,7 +1900,7 @@ Motion tracking parameters: \begin{description} \item[Track translation] Enables translation operations. The motion tracker tracks $X$ and $Y$ motion in the master layer and adjusts $X$ and $Y$ motion in the target layer. - + \item[Translation block size] For the translation operations, a block is compared to a number of neighboring blocks to find the one with the least difference. The size of the Match Box to search for is given by this parameter. \item[Translation search radius] The size of the area to scan for the translation block. \item[Translation search steps] Ideally the search operation would compare the translation block with every other pixel in the translation search radius. 
To speed this operation up, a subset of the total positions is searched. Then the search area is narrowed and re-scanned by the same number of search steps until the motion is known to $\frac{1}{4}$ pixel accuracy. @@ -1961,7 +2011,7 @@ C - has only object2 visible \label{ssub:tips} \begin{enumerate} - \item The motion vector is a text file located in \texttt{/tmp}. We can open it with a plain editor and modify the wrong $X\,Y$ coordinates, i.e. those that deviate from the linearity, to correct the errors that always happen when we perform a motion tracking (jumps). It can be a long and tedious job, but it leads to good results. + \item The motion vector is a text file located in \texttt{/tmp}. We can open it with a plain editor and modify the wrong $X\,Y$ coordinates, i.e.\ those that deviate from the linearity, to correct the errors that always happen when we perform a motion tracking (jumps). It can be a long and tedious job, but it leads to good results. \item You can try tracking using reverse playback of the track. Sometimes it may lead to a better calculation. \end{enumerate} @@ -1999,7 +2049,7 @@ This effect makes video tracks appears as a painting. It can be controlled by \t This effect can combine several tracks by using the so called Overlayer. This is a basic internal device normally used by \CGGI{} to create the dissolve transitions and for compositing the final output of every track onto the output bitmap. The Overlayer has the ability to combine one or several image layers on top of a bottom layer. It can do this combining of images in several different (and switchable) output modes such as \textit{Normal}, \textit{Additive}, \textit{Subtractive}, \textit{Multiply} (Filter), \textit{Divide}, \textit{Max} and \textit{Replace}. For a detailed list refer to the on \hyperref[cha:overlays]{Overlays} chapter -- PorterDuff. 
-The \textit{overlay} plugin enables the use of this Overlayer device in the middle of any plugin stack, opening endless filtering and processing possibilities. It is only useful as a \textit{shared plugin} (i.e. a multitrack plugin). To use the overlay plugin:
+The \textit{overlay} plugin enables the use of this Overlayer device in the middle of any plugin stack, opening endless filtering and processing possibilities. It is only useful as a \textit{shared plugin} (i.e.\ a multitrack plugin). To use the overlay plugin:

\begin{enumerate}
	\item Add the effect to Track A.
@@ -2026,13 +2076,13 @@ In (figure~\ref{fig:perspective}) you can see that there are four options for th
	\item[Default] if OpenGL is being used with your graphics card, this will be the option in effect. If no OpenGL, then it will be Cubic.
	\item[Nearest] using software, nearest neighbor can look step-py.
	\item[Linear] software implementation of a linear algorithm.
-	\item[Cubic] smoothest looking on the
+	\item[Cubic] smoothest looking on the edges and considered the best.
\end{description}

Key Presses for using the Perspective plugin:

-\begin{tabular}{l l}
+\begin{tabular}{l l} 
	\toprule
	Left mouse button & drags the corner that is closest to current location \\
	Alt key + left mouse & translates the perspective; drags the whole image \\
@@ -2043,7 +2093,7 @@ Key Presses for using the Perspective plugin:

Note that the red color lines in the box show the composer boundary.

-In order to see endpoints that go off the screen, you can use the zoom slider which changes only the zoom view and does nothing else. The slider uses a logarithmic scale ranging from $\frac{1}{100} to 100$.
+In order to see endpoints that go off the screen, you can use the zoom slider which changes only the zoom view and does nothing else. The slider uses a logarithmic scale ranging from $\frac{1}{100}$ to $100$. 

Figure~\ref{fig:perspective01} show the results of the 4 different smoothing options.
@@ -2102,8 +2152,19 @@ Figure~\ref{fig:radial} has the parameters: $Angle=-35$ and $Steps=2$. \subsection{ReframeRT}% \label{sub:reframert} -ReframeRT changes the number of frames in a sequence of video directly from the timeline. The faster method for getting the same results as this plugin is to use the \textit{speed curve} which was a later addition. But if you need very precise results, \textit{ReframeRT} is most useful. There are two ways to do this, which can be selected from the checkboxes in the configuration GUI. The first \textit{Stretch} mode changes the number of frames in the sequence, and therefore its length, but not the framerate. The \textit{Downsample} mode instead keeps the length of the movie by varying the framerate. -It is important to understand that the plugin works by varying the frames, the possible change of \textit{fps} is only a side effect of the creation of new frames due to interpolation. +ReframeRT changes the number of frames in a sequence of video +directly from the timeline. The faster method for getting the same +results as this plugin is to use the \textit{speed curve} which was +a later addition. But if you need very precise results, +\textit{ReframeRT} is most useful. There are two ways to do this, +which can be selected from the checkboxes in the configuration +GUI\@. The first \textit{Stretch} mode changes the number of frames in +the sequence, and therefore its length, but not the framerate. The +\textit{Downsample} mode instead keeps the length of the movie by +varying the framerate. It is important to understand that the +plugin works by varying the frames, the possible change of +\textit{fps} is only a side effect of the creation of new frames due +to interpolation. \subsubsection*{Stretch}% \label{ssub:stretch} @@ -2118,9 +2179,12 @@ That is, one input frame of the original movie corresponds to $8$ new output fra The stretch mode has the effect of changing the length of output video by the inverse of the scale factor. 
If the scale factor is greater than $1$, the output will end before the end of the sequence on the timeline. If it is less than $1$, the output will end after the end of the sequence on the timeline. The ReframeRT effect must be lengthened to the necessary length to accommodate the scale factor. Change the length of the effect by clicking on the endpoint of the effect and dragging. -Although stretch mode changes the number of the frames read from its input, it does not change the framerate of the input. Effects before ReframeRT assume the same frame rate as ReframeRT. -In stretch mode to create a fast play effect enter a value greater than $1$ to get accelerated playback. -For a slow motion effect, use ReframeRT in stretch mode with a value less than $1$. +Although stretch mode changes the number of the frames read from its +input, it does not change the framerate of the input. Effects before +ReframeRT assume the same frame rate as ReframeRT\@. In stretch +mode to create a fast play effect enter a value greater than $1$ to +get accelerated playback. For a slow motion effect, use ReframeRT +in stretch mode with a value less than $1$. \textit{Example:} you have a clip that you want to put in slow motion. The clip starts at $33.792\, seconds$ and ends at $39.765$. The clip is $5.973\, seconds$ long. You want to play it at $\frac{4}{10}^{ths}$ normal speed. You divide the clip length by the playback speed ($5.973\div0.4$) to get a final clip length of $14.9325\,seconds$. You create an in point at the start of your clip: $33.792\,seconds$. You put an out point $14.9325\,seconds$ later, at $48.7245\,seconds$ ($33.792 + 14.9325$). You attach a \texttt{ReframeRT} effect, set it to $0.4$ and stretch. You change the out point at $48.7245$ to an in point. You start your next clip after the slow motion effect at the $48.7245$ out point. 
You can do this without making any calculations by first applying the effect and then lengthening or shortening the bar to where the stretched movie ends. @@ -2240,7 +2304,7 @@ With the Scale Ratio plugin you can manipulate your video to maintain the pixel This plugin is designed to smooth out non-moving areas of a video clip (figure~\ref{fig:staveraging}). \vspace{2ex} -\begin{wrapfigure}[20]{O}{0.4\linewidth} +\begin{wrapfigure}[20]{O}{0.4\linewidth} % \vspace{-4ex} \includegraphics[width=0.9\linewidth]{staveraging.png} \caption{STA control window} @@ -2300,12 +2364,19 @@ Drag must be checked on to edit the data. Drag must be checked off to use \texti If the drag button flickers when clicked then another window has drag focus. Un-focus it first. Turn drag off to see what the sketcher figure will look like when rendered. -Curves can be any number of points or just a single point. -You must create a \textit{new} curve if you want a new string of points not connected to the current curve. -Highlight the curve $id\,\#$ for a specific curve to modify its values. -{Left mouse click or right mouse click on an existing point on the highlighted line/curve $id\,\#$ will automatically highlight the selected point in the Point section of the gui and turn red in the image. We can drag this point to the desired position using CTRL + LMB; or we can translate the entire selected curve using CTRL + RMB. - -With the mouse wheel we can rotate our curves; the center of rotation is given by the position of the cursor. Normally the scroll is slow to be more precise. If you make a fast scroll you have an acceleration that allows you to make complete rotations in a short time. Using the mouse wheel with the SHIFT key we can instead scale the size of the curve; the fixed scaling point is still given by the mouse position. +Curves can be any number of points or just a single point. 
You must +create a \textit{new} curve if you want a new string of points not +connected to the current curve. Highlight the curve $id\,\#$ for a +specific curve to modify its values. +% +Left mouse click or right mouse click on an existing point on the +highlighted line/curve $id\,\#$ will automatically highlight the +selected point in the Point section of the gui and turn red in the +image. We can drag this point to the desired position using +\texttt{CTRL + LMB}; or we can translate the entire selected curve +using \texttt{CTRL + RMB}. + +With the mouse wheel we can rotate our curves; the center of rotation is given by the position of the cursor. Normally the scroll is slow to be more precise. If you make a fast scroll you have an acceleration that allows you to make complete rotations in a short time. Using the mouse wheel with the SHIFT key we can instead scale the size of the curve; the fixed scaling point is still given by the mouse position. There will always be $1$ empty curve automatically defined when you start or even reset. You can not delete the empty default curve but you can use it for a curve. @@ -2318,8 +2389,8 @@ There is no \textit{undo} recorded between gui updates. It is recommended that y \small \begin{longtable}{{m{15em}m{13em}m{12em}}} \caption{Sketcher controls} - \label{tabular:sketcher controls} \\ % note that double backslash is mandatory here - \toprule + \label{tabular:sketcher_controls} \\ % note that double backslash is mandatory here + \toprule \textbf{Compositor: Mouse usage} & \textbf{Compositor: Action} & \textbf{Plugin GUI}\\\midrule @@ -2362,7 +2433,7 @@ There is no \textit{undo} recorded between gui updates. It is recommended that y delete key+shift & deletes highlighted curves & Del button in Curve section \\ - \bottomrule + \bottomrule \end{longtable} \end{center} @@ -2450,7 +2521,7 @@ Inside the time average effect is an accumulation buffer and a divisor. 
A number \label{sub:timefront} Space-temporal warping enables time to flow differently at different locations in the video (figure~\ref{fig:timefront}). -\begin{wrapfigure}[13]{O}{0.3\linewidth} +\begin{wrapfigure}[13]{O}{0.3\linewidth} \vspace{-2ex} \includegraphics[width=0.8\linewidth]{timefront.png} \caption{Temporal bands for Timefront} @@ -2505,7 +2576,7 @@ The titler supports mainly \textit{TTF}, true type fonts. It supports others but # /usr/lib/cinelerra/fonts ttmkfdir && mv fonts.scale fonts.dir \end{lstlisting} -and restart \CGG{}. The new fonts should appear. The usage of ttmkfdir changes frequently so this technique might not work. +and restart \CGG{}. The new fonts should appear. The usage of ttmkfdir changes frequently so this technique might not work. If the video is displayed on a consumer TV, the outer border is going to be cropped by $5\%$ on each side. To avoid text which is too close to the edge looking bad, you may want to enable the \textit{title-safe} tool in the compositor window. The text should not cross the inner rectangle. 
@@ -2533,9 +2604,9 @@ If the video is displayed on a consumer TV, the outer border is going to be crop \small \begin{longtable}{{m{6em}m{14em}m{14em}}} \caption{Titler attributes} - \label{tabular:titler attributes} \\ % note that double backslash is mandatory here - \toprule - \multicolumn{1}{c}{\textbf{Attribute name}}&\multicolumn{1}{c}{\textbf{Attribute value}}&\multicolumn{1}{c}{\textbf{Notes}}\\ + \label{tabular:titler_attributes} \\ % note that double backslash is mandatory here + \toprule + \multicolumn{1}{c}{\textbf{Attribute name}}&\multicolumn{1}{c}{\textbf{Attribute value}}&\multicolumn{1}{c}{\textbf{Notes}}\\ \midrule % \endhead % here the common page header color & color name such as RED from \textit{/guicast/colors.h} & @@ -2579,7 +2650,7 @@ If the video is displayed on a consumer TV, the outer border is going to be crop smooth & add anti-aliasing to smooth jagged edges & Turn off smooth for chroma key - \\\bottomrule + \\\bottomrule \end{longtable} \end{center} @@ -2652,10 +2723,10 @@ export BC_FONT_PATH= export BC_FONT_PATH=/usr/share/fonts \end{lstlisting} -The current set of fonts in \CGG{}'s directory will be automatically included and will be the default set if this environment variable is not set. Keep in mind that if you add a lot of fonts, it will considerably slow down the startup every time you bring up the Title plugin. +The current set of fonts in \CGG{}'s directory will be automatically included and will be the default set if this environment variable is not set. Keep in mind that if you add a lot of fonts, it will considerably slow down the startup every time you bring up the Title plugin. -If you want to only have a limited number of fonts set up, you can manipulate the \CGG{} directory directly at \texttt{ /bin/plug\-ins/fonts}. -Here you will find the default set of fonts that come with the install. 
Copy any other fonts you would like to include here with read permission, delete any fonts you do not want to have, then execute \texttt{mkfontscale} which creates the file \texttt{fonts.scale} that \CGG{} will read. However, the next time you install a new version of \CGG{}, your changes will be written over so you will have to make sure to save them elsewhere and then re-establish. +If you want to only have a limited number of fonts set up, you can manipulate the \CGG{} directory directly at \texttt{ /bin/plug\-ins/fonts}. +Here you will find the default set of fonts that come with the install. Copy any other fonts you would like to include here with read permission, delete any fonts you do not want to have, then execute \texttt{mkfontscale} which creates the file \texttt{fonts.scale} that \CGG{} will read. However, the next time you install a new version of \CGG{}, your changes will be written over so you will have to make sure to save them elsewhere and then re-establish. If you have problems with a specific font or set of fonts, there is a debug option available to determine which font is an issue. When starting \CGG{}, you should set up the variable: @@ -2973,7 +3044,7 @@ src=typ git,tar,git=url,tar=url: git (default git_url), tar (default: tar_url) \end{lstlisting} Once thirdparty/opencv is built, it will be reused. Use target \texttt{mrclean} to remove \texttt{thirdparty/opencv*}. -Network access is required to obtain the OpenCV source, at least once. You will need a minimum of +Network access is required to obtain the OpenCV source, at least once. You will need a minimum of $4\, GB$ in the thirdparty build directory and more time to compile. 
To get opencv built in the easiest way possible (need internet access because builds directly from the opencv github but this changes wildly): @@ -2985,10 +3056,10 @@ To get opencv built in the easiest way possible (need internet access because bu \subsection{Using OpenCV Plugins from the Automatic Builds}% \label{sub:using_opencv_automatic_builds} -The OpenCV plugins are built only in the 64-bit tarball builds, both static and dynamic. However, due to size these plugins are not included with pkgs. But it is relatively easy to add the current 6 plugins for your distro via a simple procedure of copying the plugins from the tarball to the cin5 install plugin path. They are: +The OpenCV plugins are built only in the 64-bit tarball builds, both static and dynamic. However, due to size these plugins are not included with pkgs. But it is relatively easy to add the current 6 plugins for your distro via a simple procedure of copying the plugins from the tarball to the cin5 install plugin path. They are: \begin{lstlisting}[style=sh] -cin/plugins/opencv/findobj.plugin +cin/plugins/opencv/findobj.plugin cin/plugins/opencv/flowobj.plugin cin/plugins/opencv/gaborobj.plugin cin/plugins/opencv/moveobj.plugin @@ -3001,10 +3072,10 @@ cin/plugins/opencv/stylizeobj.plugin \item look in {\small \url{https://cinelerra-gg.org/download/tars}} to see your distro name's static tar; \item download the corresponding distro static tarball; for example for arch: -\end{enumerate} +\end{enumerate} {\small \url{https://cinelerra-gg.org/download/tars/cinelerra-5.1-arch-{date}-x86_64-static.txz}} - + \begin{enumerate}[resume] \item create a temporary directory on your computer; \item \texttt{cd} that-directory; @@ -3012,7 +3083,7 @@ cin/plugins/opencv/stylizeobj.plugin \item \texttt{cp plugins/*obj.plugin /.} (note the period on the end!) \item Start \CGG{} and look for the six plugins in Video Effects; \item To reverse this, simply delete the six plugin files (eg. 
-
+ 
  \texttt{rm /usr/lib*/cin*/*obj.plugin}).
\end{enumerate}

@@ -3110,10 +3181,15 @@ The following steps were used to set up the example in figure~\ref{fig:findobj}.
 \begin{itemize}
 \item $1^{st}$ track should be the \textit{scene}; that is the output
 \item $2^{nd}$ track is the \textit{object} to find
- \item $3^{rd}$ track is the \textit{replacement} object
+ \item $3^{rd}$ track is the \textit{replacement} object 
 \end{itemize}
 \item Drag the \textit{Find Object} plugin onto track \#1.
- \item On each of other $2$ tracks, click the right mouse button; choose \textit{attach effect}, highlight \textit{Find Object} in the \textit{shared effect} column, and click OK. All three tracks should now have a findobj plugin effect bar. One, (probably the first one) is the master plugin and the others are attached input tracks.
+ \item On each of the other $2$ tracks, click the right mouse button;
+ choose \textit{attach effect}, highlight \textit{Find Object}
+ in the \textit{shared effect} column, and click OK\@. All
+ three tracks should now have a findobj plugin effect bar. One
+ (probably the first one) is the master plugin and the others
+ are attached input tracks.
 \item Check the plugin show icon on the master track to bring up the controls for the FindObj plugin. You will see that \textit{Use FLANN} is checked for using nearest neighbors.
 \item Set \textit{Output scene}, \textit{Object}, and \textit{Replacement object} layers’ track number accordingly (numbered from zero).
 \item Check \textit{Draw scene} border and you will see a white border in the compositor window surrounding the whole image of the scene. This assumes default settings for \textit{Scene center X,Y}(at $50\%$), and area \textit{W,H} ($100\%$). Adjust these however you need to via the dials or more simply by checking \textit{Drag} and dragging any of the $9$ drag points. As shown above this in the controls, units are in $0-100\%$.
@@ -3190,8 +3266,11 @@ Figure~\ref{fig:stylize} show the images after adding the 6 various styles. \label{fig:stylize} \end{figure} -\section[FFmpeg Audio and Video Plugins]{FFmpeg Audio and Video Plugins\protect\footnote{credit to WPfilmmaker for the Ffmpeg info description lines taken from his contributed pdf}}% +\section[FFmpeg Audio and Video Plugins]{FFmpeg Audio and Video Plugins}% \label{sec:ffmpeg_audio_video_plugins} +% FIXME not in the section +% \protect\footnote{credit to WPfilmmaker for the Ffmpeg info +% description lines taken from his contributed pdf} \CGGI{} currently comes with more than $140$ of the video plugins and $55$ of the audio plugins developed by the FFmpeg project {\small \url{www.ffmpeg.org}}. These plugins do not have a GUI with buttons like the rest of plugins, therefore to change settings it is necessary to change the variables by hand by highlighting the \textit{option}, typing a value in the \textit{Range} box, and then hitting the \textit{Apply} button. Many of these plugins provide tooltips at the bottom right corner of the window when the option is highlighted. A \textit{slider} bar and a \textit{dial} for numerical values can be used to easily vary the values which take effect immediately. @@ -3222,228 +3301,360 @@ Simply drag and drop the plugin on the timeline. To enter the settings option, o Some of the ffmpeg plugins are not usable with \CGGI{} due to input/output requirements. Also, some do not come with legal initial supplied values for the parameters (ffmpeg works on filtergraph, while \CGG{} works on stack). These plugins get tested at least once and if they crash, cause problems, or are deemed unusable, they are commented out in the \texttt{plugin.opts} file in the \CGG{} ffmpeg subdirectory. Generally they are not retested so if the ffmpeg software changes them making them usable, they will still not be accessible until the \texttt{plugin.opts} file is changed. 
You can easily retest these, by temporarily uncommenting the copy of the \texttt{plugin.opts} file in the \texttt{bin} subdirectory and using \texttt{Settings $\rightarrow$ Preferences $\rightarrow$ Interface tab $\rightarrow$ Reload plugin index} FFmpeg’s plugin guide is at the link: - -{\small \url{https://ffmpeg.org/ffmpeg-filters.html}} +\href{https://ffmpeg.org/ffmpeg-filters.html}{ffmpeg-filters}. \subsection{FFmpeg Audio Plugins}% \label{sub:ffmpeg_audio_plugins} -\noindent \textbf{F\_abench:} Benchmark part of a filtergraph.\\ -\textbf{F\_acompressor:} Audio compressor.\\ -\textbf{F\_acontrast:} Simple audio dynamic range compression/expansion filter.\\ -\textbf{F\_acrusher:} Reduces audio bit resolution.\\ -\textbf{F\_acue:} Delay filtering to match a cue.\\ -\textbf{F\_adelay:} Delays one or more audio channels. \\ -\textbf{F\_aderivative:} Compute derivative of input audio.\\ -\textbf{F\_aecho:} Adds echoing to the audio.\\ -\textbf{F\_aemphasis:} Audio emphasis.\\ -\textbf{F\_aeval:} Filters audio signal according to a specific expression.\\ -\textbf{F\_afade:} Fades in/out input audio. 
\\ -\textbf{F\_aformat:} Convert the input audio to one of he specified formats.\\ -\textbf{F\_agate:} Audio gate.\\ -\textbf{F\_aintegral:} Compute integral of input audio.\\ -\textbf{F\_allpass:} Applies a two-pole all-pass filter.\\ -\textbf{F\_aloop:} Loops audio samples.\\ -\textbf{F\_anoisesrc:} Generates a noise audio signal.\\ -\textbf{F\_aperms:} Set permissions for the output audio frame.\\ -\textbf{F\_aphaser:} Adds a phasing effect to the audio.\\ -\textbf{F\_arealtime:} Slows down filtering to match realtime.\\ -\textbf{F\_aresample:} Resamples audio data.\\ -\textbf{F\_asetrate:} Change the sample rate without altering the data.\\ -\textbf{F\_astats:} Shows time domain statistics about audio frames.\\ -\textbf{F\_atempo:} Adjusts audio tempo.\\ -\textbf{F\_atrim:} Pick one continuous section from the input, drop the rest.\\ -\textbf{F\_bandpass:} Applies a two-pole Butterworth band-pass filter.\\ -\textbf{F\_bandreject:} Applies a two-pole Butterworth band-reject filter.\\ -\textbf{F\_bass:} Boosts or cuts lower frequencies.\\ -\textbf{F\_biquad:} Applies a biquad IIR filter with the given coefficents.\\ -\textbf{F\_chorus:} Adds a chorus effect to the audio.\\ -\textbf{F\_compand:} Compresses or expands audio dynamic range.\\ -\textbf{F\_compensationdelay:} audio compensation delay line.\\ -\textbf{F\_crossfeed:} Apply headphone crossfeed which blends the left and right channels of a stereo audio recording. It is mainly used to reduce extreme stereo separation of low frequencies in order to produce more speaker like sound.\\ -\textbf{F\_crystalizer:} Simple Expand Audio Dynamic Range filter.\\ -\textbf{F\_dcshift:} Applies a DC shift to the audio.\\ -\textbf{F\_drmeter:} Measure audio dynamic range where setting window length in seconds is used to split audio into segments of equal length.\\ -\textbf{F\_dyaudnorm:} Dynamic Audio Normalizer. 
When using this plugin, be sure to \textit{attach effect} to all audio tracks by dragging the plugin to the $1^{st}$ audio track and then right mouse clicking all subsequent audio tracks which brings up an menu. Highlight the effect shown in the middle section and click OK.\\ -\textbf{F\_earwax:} Widens the stereo image. When using this plugin, be sure to \textit{attach effect} to all audio tracks by dragging the plugin to the $1^{st}$ audio track and then right mouse clicking all subsequent audio tracks which brings up an menu. Highlight the effect shown in the middle section and click OK.\\ -\textbf{F\_equalizer:} Applies two-pole peaking equalization (EQ) filter.\\ -\textbf{F\_extrastereo:} Increases difference between stereo audio channels. When using this plugin, be sure to \textit{attach effect} to all audio tracks by dragging the plugin to the $1^{st}$ audio track and then right mouse clicking all subsequent audio tracks which brings up an menu. Highlight the effect shown in the middle section and click OK.\\ -\textbf{F\_flanger:} Applies a flanging effect to the audio.\\ -\textbf{F\_haas:} Apply Haas Stereo Enhancer for a more natural sounding pan effect or more clarity in the center of the mix. With this filter applied to mono signals it gives some directionality and stretches its stereo image.\\ -\textbf{F\_highpass:} Applies a high-pass filer with $3\,dB$ point frequency.\\ -\textbf{F\_hilbert:} Generate a Hilbert transform FIR coefficients.\\ -\textbf{F\_loudnorm:} \textit{EBU R128} loudness normalization.\\ -\textbf{F\_lowpass:} Applies a low-pass filter with $3\,dB$ point frequency.\\ -\textbf{F\_mcompand:} Multiband Compress or expand audiodynamic range. 
The input audio is divided into bands which is like the crossover of a loudspeaker, resulting in flat frequency response when absent compander action.\\ -\textbf{F\_pan:} Remix channels with coefficients (panning).\\ -\textbf{F\_silenceremove:} Removes silence.\\ -\textbf{F\_sine:} Generate sine wave audio signal.\\ -\textbf{F\_stereotools:} Applies various stereo tools. When using this plugin, be sure to \textit{attach effect} to all audio tracks by dragging the plugin to the $1^{st}$ audio track and then right mouse clicking all subsequent audio tracks which brings up an menu. Highlight the effect shown in the middle section and click OK.\\ -\textbf{F\_stereowiden:} Applies stereo widening effect. When using this plugin, be sure to \textit{attach effect} to all audio tracks by dragging the plugin to the $1^{st}$ audio track and then right mouse clicking all subsequent audio tracks which brings up an menu. Highlight the effect shown in the middle section and click OK.\\ -\textbf{F\_treble:} Boosts or cuts upper frequencies.\\ -\textbf{F\_tremolo:} Applies tremolo effect.\\ -\textbf{F\_vibrato:} Applies vibrato effect.\\ -\textbf{F\_volume:} Change input volume.\\ -\textbf{F\_volumedetect:} Detect audio volume. +The following is a list of the integrated audio plug-ins. +\begin{description} +\item [F\_abench]~\\Benchmark part of a filtergraph. +\item [F\_acompressor]~\\Audio compressor. +\item [F\_acontrast]~\\Simple audio dynamic range + compression/expansion filter. +\item [F\_acrusher]~\\Reduces audio bit resolution. +\item [F\_acue]~\\Delay filtering to match a cue. +\item [F\_adelay]~\\Delays one or more audio channels. +\item [F\_aderivative]~\\Compute derivative of input audio. +\item [F\_aecho]~\\Adds echoing to the audio. +\item [F\_aemphasis]~\\Audio emphasis. +\item [F\_aeval]~\\Filters audio signal according to a + specific expression. +\item [F\_afade]~\\Fades in/out input audio. 
+\item [F\_aformat]~\\Convert the input audio to one of the + specified formats. +\item [F\_agate]~\\Audio gate. +\item [F\_aintegral]~\\Compute integral of input audio. +\item [F\_allpass]~\\Applies a two-pole all-pass filter. +\item [F\_aloop]~\\Loops audio samples. +\item [F\_anoisesrc]~\\Generates a noise audio signal. +\item [F\_aperms]~\\Set permissions for the output audio + frame. +\item [F\_aphaser]~\\Adds a phasing effect to the audio. +\item [F\_arealtime]~\\Slows down filtering to match realtime. +\item [F\_aresample]~\\Resamples audio data. +\item [F\_asetrate]~\\Change the sample rate without altering + the data. +\item [F\_astats]~\\Shows time domain statistics about audio + frames. +\item [F\_atempo]~\\Adjusts audio tempo. +\item [F\_atrim]~\\Pick one continuous section from the input, + drop the rest. +\item [F\_bandpass]~\\Applies a two-pole Butterworth band-pass + filter. +\item [F\_bandreject]~\\Applies a two-pole Butterworth + band-reject filter. +\item [F\_bass]~\\Boosts or cuts lower frequencies. +\item [F\_biquad]~\\Applies a biquad IIR filter with the given + coefficients. +\item [F\_chorus]~\\Adds a chorus effect to the audio. +\item [F\_compand]~\\Compresses or expands audio dynamic + range. +\item [F\_compensationdelay]~\\Audio compensation delay line. +\item [F\_crossfeed]~\\Apply headphone crossfeed which blends + the left and right channels of a stereo audio recording. It is + mainly used to reduce extreme stereo separation of low frequencies + in order to produce more speaker like sound. +\item [F\_crystalizer]~\\Simple Expand Audio Dynamic Range + filter. +\item [F\_dcshift]~\\Applies a DC shift to the audio. +\item [F\_drmeter]~\\Measure audio dynamic range where setting + window length in seconds is used to split audio into segments of + equal length. +\item [F\_dyaudnorm]~\\Dynamic Audio Normalizer.
 When using + this plugin, be sure to \textit{attach effect} to all audio tracks + by dragging the plugin to the $1^{st}$ audio track and then right + mouse clicking all subsequent audio tracks which brings up a + menu. Highlight the effect shown in the middle section and click OK. +\item [F\_earwax]~\\Widens the stereo image. When using this + plugin, be sure to \textit{attach effect} to all audio tracks by + dragging the plugin to the $1^{st}$ audio track and then right mouse + clicking all subsequent audio tracks which brings up a + menu. Highlight the effect shown in the middle section and click OK. +\item [F\_equalizer]~\\Applies two-pole peaking equalization + (EQ) filter. +\item [F\_extrastereo]~\\Increases difference between stereo + audio channels. When using this plugin, be sure to \textit{attach + effect} to all audio tracks by dragging the plugin to the $1^{st}$ + audio track and then right mouse clicking all subsequent audio + tracks which brings up a menu. Highlight the effect shown in the + middle section and click OK. +\item [F\_flanger]~\\Applies a flanging effect to the audio. +\item [F\_haas]~\\Apply Haas Stereo Enhancer for a more + natural sounding pan effect or more clarity in the center of the + mix. With this filter applied to mono signals it gives some + directionality and stretches its stereo image. +\item [F\_highpass]~\\Applies a high-pass filter with $3\,dB$ + point frequency. +\item [F\_hilbert]~\\Generate a Hilbert transform FIR + coefficients. +\item [F\_loudnorm]~\\\textit{EBU R128} loudness + normalization. +\item [F\_lowpass]~\\Applies a low-pass filter with $3\,dB$ + point frequency. +\item [F\_mcompand]~\\Multiband Compress or expand + audio dynamic range. The input audio is divided into bands which is + like the crossover of a loudspeaker, resulting in flat frequency + response when absent compander action. +\item [F\_pan]~\\Remix channels with coefficients (panning). +\item [F\_silenceremove]~\\Removes silence.
+\item [F\_sine]~\\Generate sine wave audio signal. +\item [F\_stereotools]~\\Applies various stereo tools. When + using this plugin, be sure to \textit{attach effect} to all audio + tracks by dragging the plugin to the $1^{st}$ audio track and then + right mouse clicking all subsequent audio tracks which brings up an + menu. Highlight the effect shown in the middle section and click OK. +\item [F\_stereowiden]~\\Applies stereo widening effect. When + using this plugin, be sure to \textit{attach effect} to all audio + tracks by dragging the plugin to the $1^{st}$ audio track and then + right mouse clicking all subsequent audio tracks which brings up an + menu. Highlight the effect shown in the middle section and click OK. +\item [F\_treble]~\\Boosts or cuts upper frequencies. +\item [F\_tremolo]~\\Applies tremolo effect. +\item [F\_vibrato]~\\Applies vibrato effect. +\item [F\_volume]~\\Change input volume. +\item [F\_volumedetect]~\\Detect audio volume. +\end{description} \subsection{FFmpeg Video Plugins}% \label{sub:ffmpeg_video_plugins} -\noindent \textbf{F\_amplify:} Amplify changes between successive video frames.\\ -\textbf{F\_atadenoise:} Apply an Adaptive Temporal Averaging Denoiser.\\ -\textbf{F\_avgblur:} Apply average blur filter.\\ -\textbf{F\_bbox:} Compute bounding box for each frame.\\ -\textbf{F\_bench:} Benchmarks part of a filtergraph.\\ -\textbf{F\_bitplaneoise:} Measure bit plane noise.\\ -\textbf{F\_blackdetect:} Detect video intervals that are (almost) black.\\ -\textbf{F\_blackframe:} Detect frames that are (almost) black.\\ -\textbf{F\_boxblur:} Blurs the input video. Through the settings you are able to change the power and the radius of the boxblur applied to luma, chroma and alpha.\\ -\textbf{F\_bwdif:} Deinterlaces the input image.\\ -\textbf{F\_chromakey:} Turns a certain color into transparency. 
Operates on YUV colors.\\ -\textbf{F\_ciescope:} Video CIE scope.\\ -\textbf{F\_color:} Provide an uniformly colored input.\\ -\textbf{F\_colorbalance:} Adjusts the color balance.\\ -\textbf{F\_colorchannelmixer:} Adjusts colors by mixing color channels.\\ -\textbf{F\_colorkey:} Turns a certain color into transparency. Operates on RGB colors.\\ -\textbf{F\_colorlevels:} Adjusts the color levels.\\ -\textbf{F\_colormatrix:} Converts color matrix.\\ -\textbf{F\_colorspace:} Converts color space.\\ -\textbf{F\_cover\_rect:} Find and cover a user specified object.\\ -\textbf{F\_crop:} Crops the input video.\\ -\textbf{F\_cropdetect:} Auto-detect crop size.\\ -\textbf{F\_curves:} Adjust components curves.\\ -\textbf{F\_datascope:} Video data analysis.\\ -\textbf{F\_dctdnoiz:} Denoise frames using $2D DCT$.\\ -\textbf{F\_deband:} Debands video.\\ -\textbf{F\_deblock:} Deblocks video.\\ -\textbf{F\_deflate:} Applies deflate effect.\\ -\textbf{F\_deflicker:} Remove temporal frame luminance variations.\\ -\textbf{F\_dejudder:} Removes judder produced by pullup.\\ -\textbf{F\_delogo:} Removes logo from input video. When using this plugin a green box will appear on the screen, once the logo is inside the box the plugin will hide it. Through the settings you can specify the position of the logo to hide (on a $X-Y axis$) and the size of the box (so you can adjust it to the size of the logo). \\ -\textbf{F\_deshake:} Stabilizes shaky video.\\ -\textbf{F\_despill:} Remove uwanted foregrond colors, caused by reflected color of green or blue screen.\\ -\textbf{F\_dilation:} Applies dilation effect.\\ -\textbf{F\_doubleweave:} Weave input video fields into double number of frames.\\ -\textbf{F\_drawbox:} Draws a colored box on the input video. 
Through the settings you are able to choose the position of the box on X/Y coordinates, the size of the box, the color and the thickness of the lines.\\ -\textbf{F\_drawgraph:} Draw a graph using input video metadata.\\ -\textbf{F\_drawgrid:} Draws a colored grid on the input video. Through the settings you can select the horizontal and the vertical offset, set the width and height of the grid cell, and the color and thickness of the lines. When using the Presets button on the plugin bar on the timeline, you can choose a preset of \textit{rule\_of\_thirds}. The Rule of Thirds is a $3\times3$ grid on top of an image which is commonly used in filmmaking. The concept is that you align the key elements in the image using this grid at the intersection of the lines or along and within the vertical/horizontal lines.\\ -\textbf{F\_edgedetect:} Detects and draws edge.\\ -\textbf{F\_elbg:} Apply posterize effect, using the ELBG algorithm.\\ -\textbf{F\_entropy:} Measure video frames entropy.\\ -\textbf{F\_eq:} Adjusts brightness, contrast, gamma and saturation.\\ -\textbf{F\_erosion:} Applies erosion effect.\\ -\textbf{F\_fade:} Fade in/out input video.\\ -\textbf{F\_fftdnoiz:} Denoise frames using $3D FFT$.\\ -\textbf{F\_fftfilt:} Apply arbitrary expressions to pixels in frequency domain.\\ -\textbf{F\_field:} Extract a field from the input video.\\ -\textbf{F\_fieldorder:} Set the field order.\\ -\textbf{F\_fillborders:} Fill borders of the input video.\\ -\textbf{F\_floodfill:} Fill area of the same color with another color.\\ -\textbf{F\_format:} Convert the input video to one of the specified pixel formats.\\ -\textbf{F\_framerate:} Upsamples or downsamples progressive source between specified frame rates.\\ -\textbf{F\_framestep:} Select one frame every N frames.\\ -\textbf{F\_fspp:} Applies Fast Simple Post-processing filter.\\ -\textbf{F\_gblur:} Apply Gaussian Blur filter.\\ -\textbf{F\_gradfun:} Debands video quickly using gradients.\\ -\textbf{F\_graphmonitor:} 
Show various filtergraph stats.\\ -\textbf{F\_greyedge:} Estimates scene illumination by grey edge assumption.\\ -\textbf{F\_haldclutsrc:} Provide an identity Hald CLUT.\\ -\textbf{F\_hflip:} Horizontally flips the input video.\\ -\textbf{F\_histeq:} Applies global color histogram equalization.\\ -\textbf{F\_histogram:} Computes and draws a histogram.\\ -\textbf{F\_hqdn3d:} Applies a High Quality 3D Denoiser.\\ -\textbf{F\_hqx:} Scales the input by 2, 3 or 4 using the $hq*x$ magnification algorithm.\\ -\textbf{F\_hue:} Adjust the hue and saturation of the input video.\\ -\textbf{F\_idet:} Interlace detect Filter.\\ -\textbf{F\_il:} Deinterleaves or interleaves fields.\\ -\textbf{F\_inflate:} Applies inflate effect.\\ -\textbf{F\_interlace:} Convert progressive video into interlaced.\\ -\textbf{F\_kerndeint:} Applies kernel deinterlacing to the input.\\ -\textbf{F\_lenscorrection:} Rectifies the image by correcting for lens distortion.\\ -\textbf{F\_life:} Generate a life pattern.\\ -\textbf{F\_limiter:} Limit pixels components to the specified range. -\textbf{F\_loop:} Loops video frames.\\ -\textbf{F\_lumakey:} Turns a cerai luma into transparency.\\ -\textbf{F\_lut:} Compute and apply a lookup table to the RGB/YUV input video. -\textbf{F\_lut1d:} Adjust colors using a 1D LUT.\\ -\textbf{F\_lut3d:} Apply a 3D LUT (lookup table) to an input video. 
LUTs are used to map one color space to another and are frequently supplied with high-end cameras as a .cube file to use as input.\\ -\textbf{F\_lutrgb:} Compute and apply a lookup table to the RGB input video.\\ -\textbf{F\_lutyuv:} Combine and apply a lookup table to the YUV input video.\\ -\textbf{F\_mandelbrot:} Render a Mandelbrot fractal.\\ -\textbf{F\_mcdeint:} Applies motion compensating deinterlacing.\\ -\textbf{F\_mestimate:} Generate motion vectors.\\ -\textbf{F\_mpdecimate:} Remove near-duplicate frames.\\ -\textbf{F\_mptestsrc:} Generate various test pattern.\\ -\textbf{F\_negate:} Negates input video.\\ -\textbf{F\_nlmeans:} Non-local means denoiser. Example usage is for recovery of VHS tapes which look bad.\\ -\textbf{F\_noise:} Adds noise to the video. Through the settings you can select the variables of the noise (strength, flag and seed).\\ -\textbf{F\_normalize:} Normalize RGB video.\\ -\textbf{F\_oscilloscope:} $2D$ video oscilloscope. Useful to measure spatial impulse, step responses, and chroma delays.\\ -\textbf{F\_owdenoise:} Denoises using wavelets.\\ -\textbf{F\_pad:} Add paddings to the input image, and place the original input at the provided $x, y$ coordinates.\\ -\textbf{F\_pal100bars:} Generate PAL $100\%$ color bars.\\ -\textbf{F\_pal75bars:} Generate PAL $75\%$ color bars.\\ -\textbf{F\_perms:} Set permissions for the output video frame.\\ -\textbf{F\_perspective:} Corrects the perspective of video.\\ -\textbf{F\_phase:} Phases shift fields.\\ -\textbf{F\_pixscope:} Pixel data analysis for checking color and levels. 
It will display sample values of color channels.\\ -\textbf{F\_pp:} Filters video using libpostproc.\\ -\textbf{F\_pp7:} Applies Postprocessing 7 filter.\\ -\textbf{F\_prewitt:} Apply prewitt operator.\\ -\textbf{F\_pseudocolor:} Make pseudocolored video frames.\\ -\textbf{F\_readeia608:} Read \textit{EIA-608} Closed Caption codes from input video \& write to frame metadata.\\ -\textbf{F\_readvitc:} Reads vertical interval timecode and writes it to frame metadata.\\ -\textbf{F\_realtime:} Slows down filtering to match realtime.\\ -\textbf{F\_removegrain:} Removes grain.\\ -\textbf{F\_repeatfields:} Hard repeat fields based on MPEG repeat field flag.\\ -\textbf{F\_rgbtestsrc:} Generate RGB test pattern.\\ -\textbf{F\_roberts:} Apply roberts cross operator which performs a simple/quick $2D$ spatial gradient measurement on the video (usually a grayscale image). It highlights regions of high spatial -frequency which most ikely correspond to edges.\\ -\textbf{F\_rotate:} Rotates the input image.\\ -\textbf{F\_sab:} Applies shape adaptive blur.\\ -\textbf{F\_scale:} Scale the input video size and/or convert the image format.\\ -\textbf{F\_separatefields:} Split input video frames into fields.\\ -\textbf{F\_setparams:} Force field, or color property for the output video frame.\\ -\textbf{F\_setrange:} Force color range for the output video frame.\\ -\textbf{F\_showpalette:} Display frame palette.\\ -\textbf{F\_shuffleframes:} Shuffles video frames.\\ -\textbf{F\_shuffleplanes:} Shuffles video planes.\\ -\textbf{F\_signalstats:} Separates statistics from video analysis.\\ -\textbf{F\_smartblur:} Blurs the input video without impacting the outlines. 
Through the settings you can select the radius, the strength and the threshold of luma and chroma.\\ -\textbf{F\_smptebars:} Generate SMPTE color bars.\\ -\textbf{F\_smptehdbars:} Generate SMPTE HD color bars.\\ -\textbf{F\_sobel:} Applies sobel operator.\\ -\textbf{F\_spp:} Applies a simple post processing filter.\\ -\textbf{F\_stereo3d:} Converts video stereoscopic $3D$ view.\\ -\textbf{F\_super2xsai:} Scales the input by 2x using the $Super2xSal$ pixel art algorithm.\\ -\textbf{F\_swaprect:} Swaps 2 rectangular objects in video.\\ -\textbf{F\_swapuv:} Swaps U and V components.\\ -\textbf{F\_tblend:} Blend successive frames.\\ -\textbf{F\_testsrc:} Generate test pattern.\\ -\textbf{F\_testsrc2:} Generate another test pattern.\\ -\textbf{F\_tile:} Tile several successive frames together.\\ -\textbf{F\_tinterlace:} Performs temporal field interlacing.\\ -\textbf{F\_tlut2:} Compute and apply a lookup table from 2 successive frames.\\ -\textbf{F\_tmix:} Mix successive video frames.\\ -\textbf{F\_transpose:} Transposes input video.\\ -\textbf{F\_unsharp:} Sharpen or blur the input videlo.\\ -\textbf{F\_uspp:} Applies Ultra Simple/Slow Post-processing filter.\\ -\textbf{F\_vaguedenoiser:} Applies a Wavelet based Denoiser.\\ -\textbf{F\_vectorscope:} Video vectorscope.\\ -\textbf{F\_vflip:} Flips the input video vertically.\\ -\textbf{F\_vfrdet:} Variable frame rate detect filter.\\ -\textbf{F\_vibrance:} Boost or alter saturation.\\ -\textbf{F\_vignette:} Makes or reverses a vignette effect. 
Through the settings you can set the circle center position on a $X-Y axis$,choose the angle, the aspect and set the dithering of the vignette.\\ -\textbf{F\_w3dif:} Applies Martin Weston three field deinterlace.\\ -\textbf{F\_waveform:} Video waveform monitor.\\ -\textbf{F\_weave:} Weaves input video fields into frames.\\ -\textbf{F\_xbr:} Scales the input using $xBR$ algorithm.\\ -\textbf{F\_yadif:} Deinterlaces the input image.\\ -\textbf{F\_yuvtestsrc:} Generate YUV test pattern.\\ -\textbf{F\_zoompan:} Applies Zoom \& Pan effect. - -\section[Rendered Effects]{Rendered Effects\protect\footnote{This capability is going to be deleted in the future unless receive notification of need to keep}}% +The following is a list of the integrated video plug-ins. +\begin{description} +\item [F\_amplify]~\\Amplify changes between successive video + frames. +\item [F\_atadenoise]~\\Apply an Adaptive Temporal Averaging + Denoiser. +\item [F\_avgblur]~\\Apply average blur filter. +\item [F\_bbox]~\\Compute bounding box for each frame. +\item [F\_bench]~\\Benchmarks part of a filtergraph. +\item [F\_bitplaneoise]~\\Measure bit plane noise. +\item [F\_blackdetect]~\\Detect video intervals that are + (almost) black. +\item [F\_blackframe]~\\Detect frames that are (almost) black. +\item [F\_boxblur]~\\Blurs the input video. Through the + settings you are able to change the power and the radius of the + boxblur applied to luma, chroma and alpha. +\item [F\_bwdif]~\\Deinterlaces the input image. +\item [F\_chromakey]~\\Turns a certain color into + transparency. Operates on YUV colors. +\item [F\_ciescope]~\\Video CIE scope. +\item [F\_color]~\\Provide an uniformly colored input. +\item [F\_colorbalance]~\\Adjusts the color balance. +\item [F\_colorchannelmixer]~\\Adjusts colors by mixing color + channels. +\item [F\_colorkey]~\\Turns a certain color into + transparency. Operates on RGB colors. +\item [F\_colorlevels]~\\Adjusts the color levels. 
+\item [F\_colormatrix]~\\Converts color matrix. +\item [F\_colorspace]~\\Converts color space. +\item [F\_cover\_rect]~\\Find and cover a user specified + object. +\item [F\_crop]~\\Crops the input video. +\item [F\_cropdetect]~\\Auto-detect crop size. +\item [F\_curves]~\\Adjust components curves. +\item [F\_datascope]~\\Video data analysis. +\item [F\_dctdnoiz]~\\Denoise frames using $2D DCT$. +\item [F\_deband]~\\Debands video. +\item [F\_deblock]~\\Deblocks video. +\item [F\_deflate]~\\Applies deflate effect. +\item [F\_deflicker]~\\Remove temporal frame luminance + variations. +\item [F\_dejudder]~\\Removes judder produced by pullup. +\item [F\_delogo]~\\Removes logo from input video. When using + this plugin a green box will appear on the screen, once the logo is + inside the box the plugin will hide it. Through the settings you can + specify the position of the logo to hide (on a $X-Y axis$) and the + size of the box (so you can adjust it to the size of the logo). +\item [F\_deshake]~\\Stabilizes shaky video. +\item [F\_despill]~\\Remove unwanted foreground colors, caused + by reflected color of green or blue screen. +\item [F\_dilation]~\\Applies dilation effect. +\item [F\_doubleweave]~\\Weave input video fields into double + number of frames. +\item [F\_drawbox]~\\Draws a colored box on the input + video. Through the settings you are able to choose the position of + the box on X/Y coordinates, the size of the box, the color and the + thickness of the lines. +\item [F\_drawgraph]~\\Draw a graph using input video + metadata. +\item [F\_drawgrid]~\\Draws a colored grid on the input + video. Through the settings you can select the horizontal and the + vertical offset, set the width and height of the grid cell, and the + color and thickness of the lines. When using the Presets button on + the plugin bar on the timeline, you can choose a preset of + \textit{rule\_of\_thirds}.
The Rule of Thirds is a $3\times3$ grid + on top of an image which is commonly used in filmmaking. The concept + is that you align the key elements in the image using this grid at + the intersection of the lines or along and within the + vertical/horizontal lines. +\item [F\_edgedetect]~\\Detects and draws edge. +\item [F\_elbg]~\\Apply posterize effect, using the ELBG + algorithm. +\item [F\_entropy]~\\Measure video frames entropy. +\item [F\_eq]~\\Adjusts brightness, contrast, gamma and + saturation. +\item [F\_erosion]~\\Applies erosion effect. +\item [F\_fade]~\\Fade in/out input video. +\item [F\_fftdnoiz]~\\Denoise frames using $3D FFT$. +\item [F\_fftfilt]~\\Apply arbitrary expressions to pixels in + frequency domain. +\item [F\_field]~\\Extract a field from the input video. +\item [F\_fieldorder]~\\Set the field order. +\item [F\_fillborders]~\\Fill borders of the input video. +\item [F\_floodfill]~\\Fill area of the same color with + another color. +\item [F\_format]~\\Convert the input video to one of the + specified pixel formats. +\item [F\_framerate]~\\Upsamples or downsamples progressive + source between specified frame rates. +\item [F\_framestep]~\\Select one frame every N frames. +\item [F\_fspp]~\\Applies Fast Simple Post-processing filter. +\item [F\_gblur]~\\Apply Gaussian Blur filter. +\item [F\_gradfun]~\\Debands video quickly using gradients. +\item [F\_graphmonitor]~\\Show various filtergraph stats. +\item [F\_greyedge]~\\Estimates scene illumination by grey + edge assumption. +\item [F\_haldclutsrc]~\\Provide an identity Hald CLUT\@. +\item [F\_hflip]~\\Horizontally flips the input video. +\item [F\_histeq]~\\Applies global color histogram + equalization. +\item [F\_histogram]~\\Computes and draws a histogram. +\item [F\_hqdn3d]~\\Applies a High Quality 3D Denoiser. +\item [F\_hqx]~\\Scales the input by 2, 3 or 4 using the + $hq*x$ magnification algorithm. +\item [F\_hue]~\\Adjust the hue and saturation of the input + video. 
+\item [F\_idet]~\\Interlace detect Filter. +\item [F\_il]~\\Deinterleaves or interleaves fields. +\item [F\_inflate]~\\Applies inflate effect. +\item [F\_interlace]~\\Convert progressive video into + interlaced. +\item [F\_kerndeint]~\\Applies kernel deinterlacing to the + input. +\item [F\_lenscorrection]~\\Rectifies the image by correcting + for lens distortion. +\item [F\_life]~\\Generate a life pattern. +\item [F\_limiter]~\\Limit pixels components to the specified + range. +\item [F\_loop]~\\Loops video frames. +\item [F\_lumakey]~\\Turns a certain luma into transparency. +\item [F\_lut]~\\Compute and apply a lookup table to the + RGB/YUV input video. +\item [F\_lut1d]~\\Adjust colors using a 1D LUT. +\item [F\_lut3d]~\\Apply a 3D LUT (lookup table) to an input + video. LUTs are used to map one color space to another and are + frequently supplied with high-end cameras as a \texttt{.cube} file + to use as input. +\item [F\_lutrgb]~\\Compute and apply a lookup table to the + RGB input video. +\item [F\_lutyuv]~\\Combine and apply a lookup table to the + YUV input video. +\item [F\_mandelbrot]~\\Render a Mandelbrot fractal. +\item [F\_mcdeint]~\\Applies motion compensating + deinterlacing. +\item [F\_mestimate]~\\Generate motion vectors. +\item [F\_mpdecimate]~\\Remove near-duplicate frames. +\item [F\_mptestsrc]~\\Generate various test pattern. +\item [F\_negate]~\\Negates input video. +\item [F\_nlmeans]~\\Non-local means denoiser. Example usage + is for recovery of VHS tapes which look bad. +\item [F\_noise]~\\Adds noise to the video. Through the + settings you can select the variables of the noise (strength, flag + and seed). +\item [F\_normalize]~\\Normalize RGB video. +\item [F\_oscilloscope]~\\$2D$ video oscilloscope. Useful to + measure spatial impulse, step responses, and chroma delays. +\item [F\_owdenoise]~\\Denoises using wavelets. +\item [F\_pad]~\\Add paddings to the input image, and place + the original input at the provided $x, y$ coordinates.
+\item [F\_pal100bars]~\\Generate PAL $100\%$ color bars. +\item [F\_pal75bars]~\\Generate PAL $75\%$ color bars. +\item [F\_perms]~\\Set permissions for the output video frame. +\item [F\_perspective]~\\Corrects the perspective of video. +\item [F\_phase]~\\Phases shift fields. +\item [F\_pixscope]~\\Pixel data analysis for checking color + and levels. It will display sample values of color channels. +\item [F\_pp]~\\Filters video using libpostproc. +\item [F\_pp7]~\\Applies Postprocessing 7 filter. +\item [F\_prewitt]~\\Apply prewitt operator. +\item [F\_pseudocolor]~\\Make pseudocolored video frames. +\item [F\_readeia608]~\\Read \textit{EIA-608} Closed Caption + codes from input video \& write to frame metadata. +\item [F\_readvitc]~\\Reads vertical interval timecode and + writes it to frame metadata. +\item [F\_realtime]~\\Slows down filtering to match realtime. +\item [F\_removegrain]~\\Removes grain. +\item [F\_repeatfields]~\\Hard repeat fields based on MPEG + repeat field flag. +\item [F\_rgbtestsrc]~\\Generate RGB test pattern. +\item [F\_roberts]~\\Apply roberts cross operator which + performs a simple/quick $2D$ spatial gradient measurement on the + video (usually a grayscale image). It highlights regions of high + spatial frequency which most likely correspond to edges. +\item [F\_rotate]~\\Rotates the input image. +\item [F\_sab]~\\Applies shape adaptive blur. +\item [F\_scale]~\\Scale the input video size and/or convert + the image format. +\item [F\_separatefields]~\\Split input video frames into + fields. +\item [F\_setparams]~\\Force field, or color property for the + output video frame. +\item [F\_setrange]~\\Force color range for the output video + frame. +\item [F\_showpalette]~\\Display frame palette. +\item [F\_shuffleframes]~\\Shuffles video frames. +\item [F\_shuffleplanes]~\\Shuffles video planes. +\item [F\_signalstats]~\\Separates statistics from video + analysis. 
+\item [F\_smartblur]~\\Blurs the input video without impacting + the outlines. Through the settings you can select the radius, the + strength and the threshold of luma and chroma. +\item [F\_smptebars]~\\Generate SMPTE color bars. +\item [F\_smptehdbars]~\\Generate SMPTE HD color bars. +\item [F\_sobel]~\\Applies sobel operator. +\item [F\_spp]~\\Applies a simple post processing filter. +\item [F\_stereo3d]~\\Converts video stereoscopic $3D$ view. +\item [F\_super2xsai]~\\Scales the input by 2x using the + $Super2xSal$ pixel art algorithm. +\item [F\_swaprect]~\\Swaps 2 rectangular objects in video. +\item [F\_swapuv]~\\Swaps U and V components. +\item [F\_tblend]~\\Blend successive frames. +\item [F\_testsrc]~\\Generate test pattern. +\item [F\_testsrc2]~\\Generate another test pattern. +\item [F\_tile]~\\Tile several successive frames together. +\item [F\_tinterlace]~\\Performs temporal field interlacing. +\item [F\_tlut2]~\\Compute and apply a lookup table from 2 + successive frames. +\item [F\_tmix]~\\Mix successive video frames. +\item [F\_transpose]~\\Transposes input video. +\item [F\_unsharp]~\\Sharpen or blur the input video. +\item [F\_uspp]~\\Applies Ultra Simple/Slow Post-processing + filter. +\item [F\_vaguedenoiser]~\\Applies a Wavelet based Denoiser. +\item [F\_vectorscope]~\\Video vectorscope. +\item [F\_vflip]~\\Flips the input video vertically. +\item [F\_vfrdet]~\\Variable frame rate detect filter. +\item [F\_vibrance]~\\Boost or alter saturation. +\item [F\_vignette]~\\Makes or reverses a vignette + effect. Through the settings you can set the circle center position + on a $X-Y$ axis, choose the angle, the aspect and set the dithering + of the vignette. +\item [F\_w3fdif]~\\Applies Martin Weston three field + deinterlace. +\item [F\_waveform]~\\Video waveform monitor. +\item [F\_weave]~\\Weaves input video fields into frames. +\item [F\_xbr]~\\Scales the input using $xBR$ algorithm. +\item [F\_yadif]~\\Deinterlaces the input image. 
+\item [F\_yuvtestsrc]~\\Generate YUV test pattern. +\item [F\_zoompan]~\\Applies Zoom \& Pan effect. +\end{description} + + +\section[Rendered Effects]{Rendered Effects}% \label{sec:rendered_effects} +% FIXME +% \protect\footnote{This capability is going to be deleted in the future unless receive notification of need to keep} Besides the \textit{Realtime} effects, as has been described in the previous sections, another type of effect is performed on a section of the track and the result stored somewhere before it is played back. The result is usually pasted into the track to replace the original data. The rendered effects are not listed in the resources window but instead are accessed through the \texttt{Audio $\rightarrow$ Render effect and Video $\rightarrow$ Render effect} menu options. Each of these menu options brings up a dialog for the rendered effect. In the Select an effect dialog is a list of all the realtime and all the rendered effects. The difference here is that the realtime effects are rendered to disk and not applied under the track. Rendered effects apply to only one type of track, either audio or video. If no tracks of the type exist, an error pops up. @@ -3504,3 +3715,8 @@ To create a slow-motion of fast moving video: \subsubsection*{720 to 480}% \label{ssub:720_to_480} + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "../CinelerraGG_Manual" +%%% End: diff --git a/parts/Rendering.tex b/parts/Rendering.tex index 022b80b..1be6e02 100644 --- a/parts/Rendering.tex +++ b/parts/Rendering.tex @@ -1,634 +1,1194 @@ \chapter{Rendering}% \label{cha:rendering} -Rendering takes a section of the timeline, performs all the editing, effects and compositing, and creates a new media file. You can then delete all the source assets, play the rendered file, or bring it back into \CGG{} for more editing. All rendering operations are based on a region of the timeline to be rendered. You need to define this region on the timeline. 
The rendering functions define the region based on a set of rules. When a region is highlighted or in/out points are set, the affected region is rendered. When no region is highlighted, everything after the insertion point is rendered. By -positioning the insertion point at the beginning of a track and unsetting all in/out points, the entire track is rendered. But you also have the choice to render \textit{one frame}. +Rendering takes a section of the timeline, performs all the editing, +effects and compositing, and creates a new media file. You can then +delete all the source assets, play the rendered file, or bring it +back into \CGG{} for more editing. All rendering operations are +based on a region of the timeline to be rendered. You need to +define this region on the timeline. The rendering functions define +the region based on a set of rules. When a region is highlighted or +in/out points are set, the affected region is rendered. When no +region is highlighted, everything after the insertion point is +rendered. By positioning the insertion point at the beginning of a +track and unsetting all in/out points, the entire track is rendered. +But you also have the choice to render \textit{one frame}. \section{Single File Rendering}% \label{sec:single_file_rendering} -Use the File pulldown and select Render to start the render dialog (figure~\ref{fig:render}). Then choose the desired parameters. +Use the File pulldown and select Render to start the render dialog +(figure~\ref{fig:render}). Then choose the desired parameters. -\begin{figure}[htpb] - \centering - \includegraphics[width=0.7\linewidth]{render.png} - \caption{Example of the Render menu} - \label{fig:render} +\begin{figure}[htpb] \centering + \includegraphics[width=0.7\linewidth]{render.png} + \caption{Example of the Render menu} + \label{fig:render} \end{figure} \begin{description} - \item[Select a file to render to:] enter the path and filename to write the rendered file to in the textbox below. 
- \item[File Format:] use the down arrow to see file format options. For ffmpeg, which has its own set of options, you will then have to select an ffmpeg file type from the down arrow choices. The format of the file determines whether you can render audio or video or both. - \item[Render audio tracks:] check this toggle to generate audio tracks - \item[Render video tracks:] check this toggle to generate video tracks. The Render window will sometimes automatically update the Render Audio Tracks or Render Video Tracks checkbox as allowed by the chosen file format, but you should always check (figure~\ref{fig:render01}). For example, if the PNG file format is selected, only the \textit{Render Video Tracks} will be checked. Or if an ffmpeg format is chosen and the file format does not render audio, the \textit{Render Audio Tracks} will be unchecked. The invalid choices will be ghosted out. +\item[Select a file to render to:] enter the path and filename to + write the rendered file to in the textbox below. +\item[File Format:] use the down arrow to see file format options. + For ffmpeg, which has its own set of options, you will then have to + select an ffmpeg file type from the down arrow choices. The format + of the file determines whether you can render audio or video or + both. +\item[Render audio tracks:] check this toggle to generate audio + tracks +\item[Render video tracks:] check this toggle to generate video + tracks. The Render window will sometimes automatically update the + Render Audio Tracks or Render Video Tracks checkbox as allowed by + the chosen file format, but you should always check + (figure~\ref{fig:render01}). For example, if the PNG file format is + selected, only the \textit{Render Video Tracks} will be checked. Or + if an ffmpeg format is chosen and the file format does not render + audio, the \textit{Render Audio Tracks} will be unchecked. The + invalid choices will be ghosted out. 
\end{description} -\begin{figure}[htpb] - \centering - \includegraphics[width=0.7\linewidth]{render01.png} - \caption{Audio and Video tracks automatically checked for Pro file type} - \label{fig:render01} +\begin{figure}[htpb] \centering + \includegraphics[width=0.7\linewidth]{render01.png} + \caption{Audio and Video tracks automatically checked for Pro file + type} + \label{fig:render01} \end{figure} \begin{description} - \item[Wrench:] select the \textit{wrench} next to each toggle to set compression parameters. If the file format can not store audio or video the compression parameters will be blank. If \textit{Render audio tracks} or \textit{Render video tracks} is selected and the file format does not support it, trying to render will result in an error message. More details in the section: \nameref{sub:extra_cin_option_ffmpeg} - \item[Create new file at each label] the option causes a new file to be created when every label in the timeline is encountered – a separate file for each. This is useful for dividing long audio recordings into individual tracks. When using the Render Farm (described later), \textit{Create new file at each label} causes one render farm job to be created at every label instead of using the internal load balancing algorithm to space jobs. If the filename given in the render dialog has a 2 digit number in it, the 2 digit number is overwritten with a different incremental number for every output file. If no 2 digit number is given, \CGG{} automatically concatenates a number to the end of the given filename for every output file. - For example, in the filename \texttt{/movies/track01.wav} the $01$ would be overwritten for every output file. - The filename \texttt{/movies/track.wav}; however, eventually would become \texttt{/movies/track.wav001} and so on. - Filename regeneration is only used when either render farm mode is active or creating new files for every label is active. 
- \item[Render range:] choices are \textit{Project}, \textit{Selection}, \textit{In/Out points}, and \textit{One Frame} for single images like Tiff. For these images, Render range will have \textit{One Frame} automatically checked and all of the others ghosted since nothing else makes sense (figure~\ref{fig:render02}). This makes it easy to set the insertion point where you want the 1 frame to be rendered rather than having to precisely zoom in to set the in/out pointers. Note that whichever Render range is checked, remains checked so that if \textit{One Frame} gets automatically checked, the next time you render it will still be checked and you will have to select a different one if desired. That is why you should always check the settings. +\item[Wrench:] select the \textit{wrench} next to each toggle to set + compression parameters. If the file format can not store audio or + video the compression parameters will be blank. If \textit{Render + audio tracks} or \textit{Render video tracks} is selected and the + file format does not support it, trying to render will result in an + error message. More details in the section: + \nameref{sub:extra_cin_option_ffmpeg} +\item[Create new file at each label] the option causes a new file to + be created when every label in the timeline is encountered – a + separate file for each. This is useful for dividing long audio + recordings into individual tracks. When using the Render Farm + (described later), \textit{Create new file at each label} causes one + render farm job to be created at every label instead of using the + internal load balancing algorithm to space jobs. If the filename + given in the render dialog has a 2 digit number in it, the 2 digit + number is overwritten with a different incremental number for every + output file. If no 2 digit number is given, \CGG{} automatically + concatenates a number to the end of the given filename for every + output file. 
For example, in the filename + \texttt{/movies/track01.wav} the $01$ would be overwritten for every + output file. The filename \texttt{/movies/track.wav}; however, + eventually would become \texttt{/movies/track.wav001} and so on. + Filename regeneration is only used when either render farm mode is + active or creating new files for every label is active. +\item[Render range:] choices are \textit{Project}, + \textit{Selection}, \textit{In/Out points}, and \textit{One Frame} + for single images like Tiff. For these images, Render range will + have \textit{One Frame} automatically checked and all of the others + ghosted since nothing else makes sense (figure~\ref{fig:render02}). + This makes it easy to set the insertion point where you want the 1 + frame to be rendered rather than having to precisely zoom in to set + the in/out pointers. Note that whichever Render range is checked, + remains checked so that if \textit{One Frame} gets automatically + checked, the next time you render it will still be checked and you + will have to select a different one if desired. That is why you + should always check the settings. \end{description} -\begin{figure}[htpb] - \centering - \includegraphics[width=0.7\linewidth]{render02.png} - \caption{Render menu displaying a PNG \textit{one frame} option} - \label{fig:render02} +\begin{figure}[htpb] \centering + \includegraphics[width=0.7\linewidth]{render02.png} + \caption{Render menu displaying a PNG \textit{one frame} option} + \label{fig:render02} \end{figure} \begin{description} - \item[Beep on done:] as a convenience when a render is complete, check this box. It gives you the chance to work on something else while waiting and still be immediately notified when the render is complete. - \item[Render Profile:] another convenience feature to take advantage of if you use specific render formats - frequently, is to save that profile for future usage without having to set it up again. 
- \item[Save Profile:] after setting up your render preference formats, use the save profile button to save it. - \item[Delete Profile:] if you want to delete a saved profile, highlight the one you no longer want and delete. - \item[Insertion strategy:] select an insertion mode from the available choices as seen when you click on the down arrow on the right hand side of the option. The insertion modes are the same as with loading files. In the case if you select “insert nothing” the file will be written out to disk without changing the current project. For other insertion strategies be sure to prepare the timeline to have the output inserted at the right position before the rendering operation is finished. - - Even if you only have audio or only have video rendered, a paste insertion strategy will behave like a normal paste operation, erasing any selected region of the timeline and pasting just the data that was rendered. If you render only audio and have some video tracks armed, the video tracks will get truncated while the audio output is pasted into the audio tracks. +\item[Beep on done:] as a convenience when a render is complete, + check this box. It gives you the chance to work on something else + while waiting and still be immediately notified when the render is + complete. +\item[Render Profile:] another convenience feature to take advantage + of if you use specific render formats frequently, is to save that + profile for future usage without having to set it up again. +\item[Save Profile:] after setting up your render preference + formats, use the save profile button to save it. +\item[Delete Profile:] if you want to delete a saved profile, + highlight the one you no longer want and delete. +\item[Insertion strategy:] select an insertion mode from the + available choices as seen when you click on the down arrow on the + right hand side of the option. The insertion modes are the same as + with loading files. 
In the case if you select “insert nothing” the + file will be written out to disk without changing the current + project. For other insertion strategies be sure to prepare the + timeline to have the output inserted at the right position before + the rendering operation is finished. + + Even if you only have audio or only have video rendered, a paste + insertion strategy will behave like a normal paste operation, + erasing any selected region of the timeline and pasting just the + data that was rendered. If you render only audio and have some + video tracks armed, the video tracks will get truncated while the + audio output is pasted into the audio tracks. \end{description} \section{Batch Rendering}% \label{sec:batch_rendering} -Batch Rendering automates the rendering of audio/video files in that you can establish a set of job parameters, save them, and use them repeatedly. It also allows for \CGG{} to be run by external programs, with no need for the user to manually interact with the user interface (figure~\ref{fig:batch01}). - -If you want to render many projects to media files without having to constantly set up the render dialog for each one, batch rendering is a more efficient method of rendering. In the Batch Render menu, you specify one or more \CGG{} project XML files, the EDL, to render and unique output files for each. (The EDL is the Edit Decision List or the set of changes to be applied to the project and media files.) Then \CGG{} loads each project file and renders it automatically. The project XML files, combined with the settings for rendering an output file, are called a batch. This allows a large amount of media to be processed without user intervention. 
- -\begin{figure}[htpb] - \centering - \includegraphics[width=0.8\linewidth]{batch01.png} - \caption{Example of the Batch Render menu} - \label{fig:batch01} +Batch Rendering automates the rendering of audio/video files in that +you can establish a set of job parameters, save them, and use them +repeatedly. It also allows for \CGG{} to be run by external +programs, with no need for the user to manually interact with the +user interface (figure~\ref{fig:batch01}). + +If you want to render many projects to media files without having to +constantly set up the render dialog for each one, batch rendering is +a more efficient method of rendering. In the Batch Render menu, you +specify one or more \CGG{} project XML files, the EDL, to render and +unique output files for each. (The EDL is the Edit Decision List or +the set of changes to be applied to the project and media files.) +Then \CGG{} loads each project file and renders it +automatically. The project XML files, combined with the settings for +rendering an output file, are called a batch. This allows a large +amount of media to be processed without user intervention. + +\begin{figure}[htpb] \centering + \includegraphics[width=0.8\linewidth]{batch01.png} + \caption{Example of the Batch Render menu} + \label{fig:batch01} \end{figure} -The first thing to do when preparing to do batch rendering is to create one or more \CGG{} projects to be rendered and save them as a normal project, such as \texttt{ProjectA.xml}. The batch renderer requires a separate project file for every batch to be rendered. You can use the same \CGG{} project file if you are rendering to different output files, as in an example where you might be creating the same output video in different file formats. - -To create a project file which can be used in batch render, set up your project and define the region to be rendered either by highlighting it, setting in/out points around it, or positioning the insertion point before it. 
Then save the project as usual to your \texttt{project.xm}l file. Define as many projects as needed this way. The batch renderer takes the active region from the EDL file for rendering. If we have not set active regions, it is better to bring the insertion point to the beginning of the timeline to avoid possible problems with the rendering. - -With all the \CGG{} xml project files prepared with active regions, go to \texttt{File $\rightarrow$ Batch Render}. This brings up the batch render dialog. The interface for batch rendering is more complex than for single file rendering. A list of batches must be defined before starting a batch rendering operation. The table of batches appears on the bottom of the batch render dialog and is called \textit{Batches to render}. Above this are the configuration parameters for a single batch; a batch is simply a pairing of a project file with a choice of output file and render settings. - -Set the \textit{Output path}, \textit{File format}, \textit{Audio}, \textit{Video}, and \textit{Create new file at each label} parameters as if you were rendering a single file. These parameters apply to only one batch. In addition to the standard rendering parameters, you must select the \textit{EDL Path} to be the project file (such as \texttt{ProjectA.xml}) that will be used in the batch job. In this case, \textit{EDL Path} is not related in anyway with the EDL files as created by \texttt{File/Export EDL}. In batch render mode the program will not overwrite an existing output file and will simply fail, so make sure that no files with the same name as the output files exist before starting. - -If the batches to render list is empty or nothing is highlighted, click \texttt{New} to create a new batch. The new batch will contain all the parameters you just set. Repeatedly press the \texttt{New} button to create more batches with the same parameters. 
When you highlight any batch, you can edit the configuration on the top of the batch render window. The highlighted batch is always synchronized to the information displayed. You can easily change the order in which the batch jobs are rendered, by clicking and dragging a batch to a different position. Hit \texttt{Delete} to permanently remove a highlighted batch. In the list box is a column which enables or disables the batch with an \texttt{X} meaning the batch job is enabled and will be run. This way batches can be skipped without being deleted. Click on the \texttt{Enabled} column in the list box to enable or disable a batch. - -The description of each of the columns in the batch list are as follows: +The first thing to do when preparing to do batch rendering is to +create one or more \CGG{} projects to be rendered and save them as a +normal project, such as \texttt{ProjectA.xml}. The batch renderer +requires a separate project file for every batch to be rendered. +You can use the same \CGG{} project file if you are rendering to +different output files, as in an example where you might be creating +the same output video in different file formats. + +To create a project file which can be used in batch render, set up +your project and define the region to be rendered either by +highlighting it, setting in/out points around it, or positioning the +insertion point before it. Then save the project as usual to your +\texttt{project.xml} file. Define as many projects as needed this +way. The batch renderer takes the active region from the EDL file +for rendering. If we have not set active regions, it is better to +bring the insertion point to the beginning of the timeline to avoid +possible problems with the rendering. + +With all the \CGG{} xml project files prepared with active regions, +go to \texttt{File $\rightarrow$ Batch Render}. This brings up the +batch render dialog. The interface for batch rendering is more +complex than for single file rendering. 
A list of batches must be +defined before starting a batch rendering operation. The table of +batches appears on the bottom of the batch render dialog and is +called \textit{Batches to render}. Above this are the configuration +parameters for a single batch; a batch is simply a pairing of a +project file with a choice of output file and render settings. + +Set the \textit{Output path}, \textit{File format}, \textit{Audio}, +\textit{Video}, and \textit{Create new file at each label} +parameters as if you were rendering a single file. These parameters +apply to only one batch. In addition to the standard rendering +parameters, you must select the \textit{EDL Path} to be the project +file (such as \texttt{ProjectA.xml}) that will be used in the batch +job. In this case, \textit{EDL Path} is not related in anyway with +the EDL files as created by \texttt{File/Export EDL}. In batch +render mode the program will not overwrite an existing output file +and will simply fail, so make sure that no files with the same name +as the output files exist before starting. + +If the batches to render list is empty or nothing is highlighted, +click \texttt{New} to create a new batch. The new batch will contain +all the parameters you just set. Repeatedly press the \texttt{New} +button to create more batches with the same parameters. When you +highlight any batch, you can edit the configuration on the top of +the batch render window. The highlighted batch is always +synchronized to the information displayed. You can easily change +the order in which the batch jobs are rendered, by clicking and +dragging a batch to a different position. Hit \texttt{Delete} to +permanently remove a highlighted batch. In the list box is a column +which enables or disables the batch with an \texttt{X} meaning the +batch job is enabled and will be run. This way batches can be +skipped without being deleted. Click on the \texttt{Enabled} column +in the list box to enable or disable a batch. 
+ +The description of each of the columns in the batch list are as +follows: \begin{description} - \item[Enabled:] an X in this column means the batch job will be run. - \item[Labeled:] an \texttt{X} in this column goes hand in hand with create new file at each label. - \item[Output:] path and filename for the generated output. - \item[EDL:] the path and filename of the source EDL for the batch job. - \item[Elapsed:] the amount of time taken to render the batch if finished. If field is empty, it did not run. -\end{description} -To start rendering from the first enabled batch, hit \texttt{Start}. Once rendering, the main window shows the progress of the batch. After each batch finishes, the elapsed column in the batch list is updated and the next batch is rendered until all the enabled batches are finished. The currently rendering batch is always highlighted red. To stop rendering before the batches are finished without closing the batch render dialog, hit \texttt{Stop}. To stop rendering before the batches are finished and close the batch render dialog, hit \texttt{Close}. Or you can exit the batch render dialog whether or not anything is being rendered, by hitting \texttt{Close}. - -You can automate \CGG{} batch renders from other programs. In the batch render dialog, once you have created your list of batch render jobs, you can click the button \texttt{Save Jobs} and choose a file to save your batch render list to. Once you have created this file, you can start up a batch render without needing to interact with the \CGG{} user interface. From a shell prompt, from a script, or other program, execute: +\item[Enabled:] an X in this column means the batch job will be run. +\item[Labeled:] an \texttt{X} in this column goes hand in hand with + create new file at each label. +\item[Output:] path and filename for the generated output. +\item[EDL:] the path and filename of the source EDL for the batch + job. 
+\item[Elapsed:] the amount of time taken to render the batch if + finished. If field is empty, it did not run. +\end{description} To start rendering from the first enabled batch, +hit \texttt{Start}. Once rendering, the main window shows the +progress of the batch. After each batch finishes, the elapsed column +in the batch list is updated and the next batch is rendered until +all the enabled batches are finished. The currently rendering batch +is always highlighted red. To stop rendering before the batches are +finished without closing the batch render dialog, hit \texttt{Stop}. +To stop rendering before the batches are finished and close the +batch render dialog, hit \texttt{Close}. Or you can exit the batch +render dialog whether or not anything is being rendered, by hitting +\texttt{Close}. + +You can automate \CGG{} batch renders from other programs. In the +batch render dialog, once you have created your list of batch render +jobs, you can click the button \texttt{Save Jobs} and choose a file +to save your batch render list to. Once you have created this file, +you can start up a batch render without needing to interact with the +\CGG{} user interface. From a shell prompt, from a script, or other +program, execute: \begin{lstlisting}[style=sh] - {path_to_cinelerra} -r batchjob.xml -\end{lstlisting} -substituting your actual filename for \texttt{batchjob.xml}. When invoked with these parameters, \CGG{} will start up and perform the rendering jobs in that list, without creating its usual windows. +{path_to_cinelerra}/cin -r batchjob.xml +\end{lstlisting} substituting your actual filename for +\texttt{batchjob.xml}. When invoked with these parameters, \CGG{} +will start up and perform the rendering jobs in that list, without +creating its usual windows. \subsection{Command Line Rendering}% \label{sub:command_line_rendering} -The command line rendering method consists of a way to load the current set of batch rendering jobs and process them without a GUI. 
This is useful if you want to do rendering on the other side of a low bandwidth network and you have access to a high powered computer located elsewhere. Setting up all the parameters for this operation is somewhat difficult. That is why the command line aborts if any output files already exist. - -To perform rendering from the command line, first run \CGG{} in graphical mode. Go to \texttt{File $\rightarrow$ Batch Render}. Create the batches you intend to render in the batch window and close the window. This saves the batches in a file. Set up the desired render farm attributes in \texttt{Settings $\rightarrow$ Preferences} and quit out of \CGG{} if you want to use the Render Farm capability. These settings are used the next time command line rendering is used to process the current set of batch jobs without a GUI. +The command line rendering method consists of a way to load the +current set of batch rendering jobs and process them without a +GUI\@. This is useful if you want to do rendering on the other side +of a low bandwidth network and you have access to a high powered +computer located elsewhere. Setting up all the parameters for this +operation is somewhat difficult. That is why the command line aborts +if any output files already exist. + +To perform rendering from the command line, first run \CGG{} in +graphical mode. Go to \texttt{File $\rightarrow$ Batch + Render}. Create the batches you intend to render in the batch window +and close the window. This saves the batches in a file. Set up the +desired render farm attributes in \texttt{Settings $\rightarrow$ + Preferences} and quit out of \CGG{} if you want to use the Render +Farm capability. These settings are used the next time command line +rendering is used to process the current set of batch jobs without a +GUI\@. 
On the command line run: \begin{lstlisting}[style=sh] -cinelerra -r +cin -r \end{lstlisting} \subsection{More about Save/Use EDL and Save/Load Jobs}% \label{sub:more_save_use_edl_jobs} -The \texttt{File $\rightarrow$ Batch Render} pulldown brings up the Batch Render window to be used for batch rendering as well as DVD/BD creation. There are some additional buttons that can save time and mistakes. These are described next. +The \texttt{File $\rightarrow$ Batch Render} pulldown brings up the +Batch Render window to be used for batch rendering as well as DVD/BD +creation. There are some additional buttons that can save time and +mistakes. These are described next. -The \textit{Save to EDL Path} and \textit{Use Current EDL} buttons can be valuable tools for advanced usage or for developers doing testing. Description of how you can expect them to work will help to illustrate how to take advantage of their capabilities. +The \textit{Save to EDL Path} and \textit{Use Current EDL} buttons +can be valuable tools for advanced usage or for developers doing +testing. Description of how you can expect them to work will help +to illustrate how to take advantage of their capabilities. \begin{description} - \item[Save to EDL Path] if you have made a change to the EDL, use this button to save the changes so - that they will be used in the render operation. Although you can get the same results by using - \texttt{File $\rightarrow$ Save\dots}, this capability was initially added to assist developers in testing the batch jobs needed to create dvd/bluray media as it keeps the work focused in a single window and retains the original - job name. An example --you have everything all set up with a new job in the Batch Render window - using \texttt{generic.xml} for the EDL path and with a job name of \texttt{original\_name.xml}. Then you realize - that you forgot to cut out a section in the media that is not wanted in the final product. 
You can cut - that out and then \textit{Save to EDL Path} so your change will be in effect for the rendering. Without this - button, you would be using the EDL you started with and the cut would be ignored. Alternatively, if - the cut changes are saved via \texttt{File $\rightarrow$ Save as}\dots with a filename of \texttt{new.xml} and then you use \textit{Save to EDL Path}, the current highlighted job displayed in the window as \texttt{original\_name.xml} will be - replaced with \texttt{new.xml}. However, it is important to note that the result will be saved with the name - \texttt{original\_name} – that is, the new content from \texttt{new.xml} but with the old name of \texttt{original\_name.xml}. - \item[Use Current EDL] if you are working on media and still testing out the results, you can take - advantage of this click-box to quickly get results. Basically, you change the media, save that change - with another name (in order to preserve the original name in case you don't like the changes), and - press \textit{Use Current EDL}. As an example, a user creates a new job in the Batch Render window - using the current media, previously defined in generic.xml, with the EDL path of \texttt{generic.xml}. The - user then changes the media on the timeline, saves the changes via \texttt{File $\rightarrow$ Save as\dots} with a new - name, such as \texttt{new\_name.xml}, and then clicks on \textit{Use Current EDL}. In this case, the EDL path - listbox will be automatically updated to the \texttt{new\_name.xml} and the current existing highlighted job will be replaced with the \texttt{new\_name.xml} in the EDL column. - \item[Save Jobs] when you have set up the batch jobs the way you want and you think you may have to - run them more than once, it is beneficial to save the jobs for later use so you easily run them again. - \item[Load Jobs] reload a previous set of saved jobs. 
This can come in handy if you did not have the - time to render them when you originally set them up, if you need to rerun, or if you got interrupted. - \item[Warn if Jobs/Session mismatched] After you set up your render and press Start, the program checks to see if the current EDL session matches your Batch Render job. If the EDL has - been changed since the batch job was created, it warns you so that you have the opportunity to \textit{Save to EDL} path to record those changes. Otherwise, you can dismiss that warning box, disable the warning message by unchecking the box and use the original values. If you never want to be warned about the mismatches, leave the box unchecked (figure~\ref{fig:batch02}). +\item[Save to EDL Path] if you have made a change to the EDL, use + this button to save the changes so that they will be used in the + render operation. Although you can get the same results by using + \texttt{File $\rightarrow$ Save\dots}, this capability was initially + added to assist developers in testing the batch jobs needed to + create dvd/bluray media as it keeps the work focused in a single + window and retains the original job name. An example --you have + everything all set up with a new job in the Batch Render window + using \texttt{generic.xml} for the EDL path and with a job name of + \texttt{original\_name.xml}. Then you realize that you forgot to + cut out a section in the media that is not wanted in the final + product. You can cut that out and then \textit{Save to EDL Path} so + your change will be in effect for the rendering. Without this + button, you would be using the EDL you started with and the cut + would be ignored. Alternatively, if the cut changes are saved via + \texttt{File $\rightarrow$ Save as}\dots with a filename of + \texttt{new.xml} and then you use \textit{Save to EDL Path}, the + current highlighted job displayed in the window as + \texttt{original\_name.xml} will be replaced with \texttt{new.xml}. 
+ However, it is important to note that the result will be saved with
+ the name \texttt{original\_name} -- that is, the new content from
+ \texttt{new.xml} but with the old name of
+ \texttt{original\_name.xml}.
+\item[Use Current EDL] if you are working on media and still testing
+ out the results, you can take advantage of this click-box to quickly
+ get results. Basically, you change the media, save that change with
+ another name (in order to preserve the original name in case you
+ don't like the changes), and press \textit{Use Current EDL}. As an
+ example, a user creates a new job in the Batch Render window using
+ the current media, previously defined in generic.xml, with the EDL
+ path of \texttt{generic.xml}. The user then changes the media on
+ the timeline, saves the changes via \texttt{File $\rightarrow$ Save
+ as\dots} with a new name, such as \texttt{new\_name.xml}, and then
+ clicks on \textit{Use Current EDL}. In this case, the EDL path
+ listbox will be automatically updated to the \texttt{new\_name.xml}
+ and the current existing highlighted job will be replaced with the
+ \texttt{new\_name.xml} in the EDL column.
+\item[Save Jobs] when you have set up the batch jobs the way you
+ want and you think you may have to run them more than once, it is
+ beneficial to save the jobs for later use so you can easily run them
+ again.
+\item[Load Jobs] reload a previous set of saved jobs. This can come
+ in handy if you did not have the time to render them when you
+ originally set them up, if you need to rerun, or if you got
+ interrupted.
+\item[Warn if Jobs/Session mismatched] After you set up your render
+ and press Start, the program checks to see if the current EDL
+ session matches your Batch Render job. If the EDL has been changed
+ since the batch job was created, it warns you so that you have the
+ opportunity to \textit{Save to EDL} path to record those changes. 
+ Otherwise, you can dismiss that warning box, disable the warning + message by unchecking the box and use the original values. If you + never want to be warned about the mismatches, leave the box + unchecked (figure~\ref{fig:batch02}). \end{description} -\begin{figure}[htpb] - \centering - \includegraphics[width=0.8\linewidth]{batch02.png} - \caption{Batch render with the 4 ghosted buttons on the right side + the Warning message below} - \label{fig:batch02} +\begin{figure}[htpb] \centering + \includegraphics[width=0.8\linewidth]{batch02.png} + \caption{Batch render with the 4 ghosted buttons on the right side + + the Warning message below} + \label{fig:batch02} \end{figure} \section{Background Rendering}% \label{sec:background_rendering} -Background rendering causes temporary output to be rendered constantly while the timeline is being modified. The temporary output is displayed during playback whenever possible. This is useful for transitions and previewing effects that are too slow to display in real time. If a Render Farm is enabled, the render farm is used for background rendering. This gives you the potential for real-time effects if enough network bandwidth and CPU nodes exist. - -Background rendering is enabled in the \texttt{Performance} tab of the \texttt{Preferences} window. It has one interactive function \texttt{Settings $\rightarrow$ Toggle background rendering}. This sets the point where background rendering starts up to the position of the insertion point. If any video exists, a red bar appears in the time ruler showing what has been background rendered (figure~\ref{fig:back-ren02}). - -\begin{figure}[htpb] - \centering - \includegraphics[width=0.8\linewidth]{back-ren02.png} - \caption{Settings Background Rendering} - \label{fig:back-ren02} +Background rendering causes temporary output to be rendered +constantly while the timeline is being modified. The temporary +output is displayed during playback whenever possible. 
This is +useful for transitions and previewing effects that are too slow to +display in real time. If a Render Farm is enabled, the render farm +is used for background rendering. This gives you the potential for +real-time effects if enough network bandwidth and CPU nodes exist. + +Background rendering is enabled in the \texttt{Performance} tab of +the \texttt{Preferences} window. It has one interactive function +\texttt{Settings $\rightarrow$ Toggle background rendering}. This +sets the point where background rendering starts up to the position +of the insertion point. If any video exists, a red bar appears in +the time ruler showing what has been background rendered +(figure~\ref{fig:back-ren02}). + +\begin{figure}[htpb] \centering + \includegraphics[width=0.8\linewidth]{back-ren02.png} + \caption{Settings Background Rendering} + \label{fig:back-ren02} \end{figure} -It is often useful to insert an effect or a transition and then select \texttt{Settings $\rightarrow$ Toggle background rendering} right before the effect to preview it in real time and full frame rates (figure~\ref{fig:back-ren}). +It is often useful to insert an effect or a transition and then +select \texttt{Settings $\rightarrow$ Toggle background rendering} +right before the effect to preview it in real time and full frame +rates (figure~\ref{fig:back-ren}). -\begin{figure}[htpb] - \centering - \includegraphics[width=0.8\linewidth]{back-ren.png} - \caption{Timeline with the top red bar} - \label{fig:back-ren} +\begin{figure}[htpb] \centering + \includegraphics[width=0.8\linewidth]{back-ren.png} + \caption{Timeline with the top red bar} + \label{fig:back-ren} \end{figure} \begin{description} - \item[Frames per background rendering job] This only works if a Render Farm is being used; otherwise, background rendering creates a single job for the entire timeline. The number of frames specified here is scaled to the relative CPU speed of rendering nodes and used in a single render farm job. 
The optimum number is 10 - 30 since network bandwidth is used to initialize each job. - \item[Frames to preroll background] This is the number of frames to render ahead of each background rendering job. Background rendering is degraded when preroll is used since the jobs are small. When using background rendering, this number is ideally 0. Some effects may require 3 frames of preroll. - \item[Output for background rendering] Background rendering generates a sequence of image files in a certain directory. This parameter determines the filename prefix of the image files. It should be accessible to every node in the render farm by the same path. Since hundreds of thousands of image files are usually created, ls commands will not work in the background rendering directory. The browse button for this option normally will not work either, but the configuration button for this option works. - \item[File format] The file format for background rendering has to be a sequence of images. The format of the image sequences determines the quality and speed of playback. JPEG generally works well. +\item[Frames per background rendering job] This only works if a + Render Farm is being used; otherwise, background rendering creates a + single job for the entire timeline. The number of frames specified + here is scaled to the relative CPU speed of rendering nodes and used + in a single render farm job. The optimum number is 10 - 30 since + network bandwidth is used to initialize each job. +\item[Frames to preroll background] This is the number of frames to + render ahead of each background rendering job. Background rendering + is degraded when preroll is used since the jobs are small. When + using background rendering, this number is ideally 0. Some effects + may require 3 frames of preroll. +\item[Output for background rendering] Background rendering + generates a sequence of image files in a certain directory. This + parameter determines the filename prefix of the image files. 
It + should be accessible to every node in the render farm by the same + path. Since hundreds of thousands of image files are usually + created, ls commands will not work in the background rendering + directory. The browse button for this option normally will not work + either, but the configuration button for this option works. +\item[File format] The file format for background rendering has to + be a sequence of images. The format of the image sequences + determines the quality and speed of playback. JPEG generally works + well. \end{description} \section{Render Farm Usage}% \label{sec:render_farm_usage} -Render Farm uses background rendering, a feature of \CGG{} where the video is rendered in the background, to speed up rendering significantly. Because rendering is memory and cpu intensive, using multiple computers on a network via a render farm is a significant gain. With \CGG{} installed on all nodes, the master node and the clients communicate via a network port that you specify. - -\CGG{} can distribute the rendering tasks over the network to the other computers of the Render Farm. The render farm software tries to process all of the rendering in parallel so that several computers can be used to render the results. The \textit{Total jobs to create} in the setup or labels on the timeline are used to divide a render job into that specified number of tasks. Each background job is assigned a timeline segment to process and the jobs are sent to the various computer nodes depending upon the load balance. The jobs are processed by the nodes separately and written to individual files. You will have to put the files back together via a load with concatenation, or typically by using a command line tool from a script. +Render Farm uses background rendering, a feature of \CGG{} where the +video is rendered in the background, to speed up rendering +significantly. 
Because rendering is memory and cpu intensive, using +multiple computers on a network via a render farm is a significant +gain. With \CGG{} installed on all nodes, the master node and the +clients communicate via a network port that you specify. + +\CGG{} can distribute the rendering tasks over the network to the +other computers of the Render Farm. The render farm software tries +to process all of the rendering in parallel so that several +computers can be used to render the results. The \textit{Total jobs + to create} in the setup or labels on the timeline are used to divide +a render job into that specified number of tasks. Each background +job is assigned a timeline segment to process and the jobs are sent +to the various computer nodes depending upon the load balance. The +jobs are processed by the nodes separately and written to individual +files. You will have to put the files back together via a load with +concatenation, or typically by using a command line tool from a +script. \subsection{Basic Steps to Start a Render Farm}% \label{sub:basic_steps_start_render_farm} -The following steps are just a guideline to start your render farm. It is assumed that you already have the master and client nodes communication, shared filesystem, permissions and usernames synched. +The following steps are just a guideline to start your render farm. +It is assumed that you already have the master and client nodes +communication, shared filesystem, permissions and usernames synched. 
\begin{enumerate} - \item On the master computer, use \texttt{Settings} $\rightarrow$ \texttt{Preferences} $\rightarrow$ \texttt{Performance} \texttt{tab} to set up a Render Farm: - \begin{itemize} - \item check the \textit{Use render farm} box; - \item in the \textit{Hostname} box, keyin your hostname or ip address such as 192.168.1.12 or \textit{localhost}; - \item enter in a port number such as 401--405 (only a root user can use privileged ports) or $1025$ and click on \textit{Add Nodes}; - \item you will see something like the following in the Nodes listbox to the right:\newline - \begin{tabular}{lllc} - On & Hostname & Port & Framerate \\\midrule - X & 192.168.1.12 & 401 & 0.0 \\ - X & 192.168.1.12 & 402 & 0.0 \\ - X & 192.168.1.12 & 403 & 0.0 \\ - X & 192.168.1.12 & 404 & 0.0 \\ - X & 192.168.1.12 & 405 & 0.0 \\ - X & localhost & 406 & 0.0 \\ - X & localhost & 407 & 0.0 \\ - \end{tabular} - \item set the Total number of jobs to create; - \item click OK on the bottom of the Preferences window. 
- \end{itemize} - \item On the client computers ($192.168.1.12$), start 5 background \CGG{} tasks via: - \begin{lstlisting}[style=sh] -cd /{path_to_cinelerra} -cin -d 401 -cin -d 402 +\item On the master computer, use \texttt{Settings} $\rightarrow$ + \texttt{Preferences} $\rightarrow$ \texttt{Performance} \texttt{tab} + to set up a Render Farm: + \begin{itemize} + \item check the \textit{Use render farm} box; + \item in the \textit{Hostname} box, keyin your hostname or ip + address such as 192.168.1.12 or \textit{localhost}; + \item enter in a port number such as 401--405 (only a root user + can use privileged ports) or $1025$ and click on \textit{Add Nodes}; + \item you will see something like the following in the Nodes + listbox to the right:\newline + \begin{tabular}{lllc} On & Hostname & Port & Framerate + \\\midrule + X & 192.168.1.12 & 401 & 0.0 \\ + X & 192.168.1.12 & 402 & 0.0 \\ + X & 192.168.1.12 & 403 & 0.0 \\ + X & 192.168.1.12 & 404 & 0.0 \\ + X & 192.168.1.12 & 405 & 0.0 \\ + X & localhost & 406 & 0.0 \\ + X & localhost & 407 & 0.0 \\ + \end{tabular} + \item set the Total number of jobs to create; + \item click OK on the bottom of the Preferences window. + \end{itemize} +\item On the client computers ($192.168.1.12$), start 5 background + \CGG{} tasks via: +\begin{lstlisting}[style=sh] +cd {path_to_cinelerra} +cin -d 401 cin -d 402 ... cin -d 405 - \end{lstlisting} - \item On the master node (localhost), start the 2 background \CGG{} tasks via: - \begin{lstlisting}[style=sh] -cd /{path_to_cinelerra} +\end{lstlisting} +\item On the master node (localhost), start the 2 background \CGG{} + tasks via: +\begin{lstlisting}[style=sh] +cd {path_to_cinelerra} cin -d 406 cin -d 407 - \end{lstlisting} - \item When your video is ready, setup a render job via \texttt{File $\rightarrow$ Render} or \texttt{File $\rightarrow$ Batch Render} and check OK. 
- \item The results will be in the shared file \texttt{path/filename} that you selected in the render menu with the - additional numbered job section on the end as $001, 002, 003, \dots 099$ (example, \texttt{video.webm001}). - \item When finished, load your new files on new tracks via \texttt{File $\rightarrow$ Load} \textit{concatenate to existing tracks} or if you used ffmpeg, run \textit{RenderMux} from the Shell Scripts icon. - \item If you plan on doing more rendering, you can just leave the master/client jobs running to use again - and avoid having to restart them. Or you can kill them when you no longer are using them. +\end{lstlisting} +\item When your video is ready, setup a render job via \texttt{File + $\rightarrow$ Render} or \texttt{File $\rightarrow$ Batch Render} + and check OK. +\item The results will be in the shared file \texttt{path/filename} + that you selected in the render menu with the additional numbered + job section on the end as $001, 002, 003, \dots 099$ (example, + \texttt{video.webm001}). +\item When finished, load your new files on new tracks via + \texttt{File $\rightarrow$ Load} \textit{concatenate to existing + tracks} or if you used ffmpeg, run \textit{RenderMux} from the Shell + Scripts icon. +\item If you plan on doing more rendering, you can just leave the + master/client jobs running to use again and avoid having to restart + them. Or you can kill them when you no longer are using them. \end{enumerate} \subsection{Render Farm Menu and Parameter Description}% \label{sub:render_farm_parameter_description} -Below we describe the Performance tab for configuring a render farm (figure~\ref{fig:farm}). +Below we describe the Performance tab for configuring a render farm +(figure~\ref{fig:farm}). 
-\begin{figure}[htpb] - \centering - \includegraphics[width=0.8\linewidth]{farm.png} - \caption{Settings $\rightarrow$ Preferences, Performance tab, menu to set up your Render Farm} - \label{fig:farm} +\begin{figure}[htpb] \centering + \includegraphics[width=0.8\linewidth]{farm.png} + \caption{Settings $\rightarrow$ Preferences, Performance tab, menu + to set up your Render Farm} + \label{fig:farm} \end{figure} \begin{description} - \item[Project SMP cpus] although this field is not Render Farm specific, it is useful for \CGG{} to have the CPU count and for using multiple threads. - \item[Use render farm] check this to turn on the render farm option. Once checked ALL rendering will be done via the farm including the usual Render (\texttt{Shift-R}). You may want to turn if off for small jobs. - \item[Nodes listbox] displays all the nodes on the render farm and shows which ones are currently enabled. The Nodes listbox has 4 columns -- On, Hostname, Port, Framerate -- which show the current values. An \textit{X} in the \textit{On} designates that that host is currently enabled; \textit{Hostname} shows the name of the host; \textit{Port} shows the port number that host uses; and \textit{Framerate} will either be zero initially or the current framerate value. - \item[Hostname] this field is used to edit the hostname of an existing node or enter a new node. - \item[Port] keyin the port number of an existing or new node here. You can also type in a range of port numbers using a hyphen, for example $1501-1505$ when you need to add many. - \item[Apply Changes] this will allow you to edit an existing node and to then commit the changes to hostname and port. The changes will not be committed if you do not click the OK button. - \item[Add Nodes] Create a new node with the hostname and port settings. - \item[Sort nodes] sorts the nodes list based on the hostname. - \item[Delete Nodes] deletes whatever node is highlighted in the nodes list. 
You can highlight several at once to have them all deleted. - \item[Client Watchdog Timeout] a default value of $15$ seconds is used here and the tumbler increments by $15$ seconds. A value of $0$ (zero) disables the watchdog so that if you have a slow client, it will not kill the render job while waiting for that client to respond. - \item[Total jobs to create] determines the number of jobs to dispatch to the render farm. Total jobs is used to divide a render job into that specified number of tasks. Each background job is assigned a timeline segment to process. The render farm software tries to process all of the rendering in parallel so that several computers can be used to render the results. - - To start, if you have computers of similar speed, a good number for \textit{Total jobs to create} is the number of computers multiplied by $3$. You will want to adjust this according to the capabilities of your computers and after viewing the framerates. Multiply them by $1$ to have one job dispatched for every node. If you have $10$ client nodes and one master node, specify $33$ to have a well balanced render farm. - \item[(overridden if new file at each label is checked)] instead of the number of jobs being set to \textit{Total jobs to create}, there will be a job created for each labeled section. If in the render menu, the option \textit{Create new file at each label} is selected when no labels exist, only one job will be created. It may be quite advantageous to set labels at certain points in the video to ensure that a key portion of the video will not be split into two different jobs. - \item[Reset rates] sets the framerate for all the nodes to $0$. Frame rates are used to scale job sizes based on CPU speed of the node. Frame rates are calculated only when render farm is enabled. +\item[Project SMP cpus] although this field is not Render Farm + specific, it is useful for \CGG{} to have the CPU count and for + using multiple threads. 
+\item[Use render farm] check this to turn on the render farm option.
+ Once checked ALL rendering will be done via the farm including the
+ usual Render (\texttt{Shift-R}). You may want to turn it off for
+ small jobs.
+\item[Nodes listbox] displays all the nodes on the render farm and
+ shows which ones are currently enabled. The Nodes listbox has 4
+ columns -- On, Hostname, Port, Framerate -- which show the current
+ values. An \textit{X} in the \textit{On} designates that that host
+ is currently enabled; \textit{Hostname} shows the name of the host;
+ \textit{Port} shows the port number that host uses; and
+ \textit{Framerate} will either be zero initially or the current
+ framerate value.
+\item[Hostname] this field is used to edit the hostname of an
+ existing node or enter a new node.
+\item[Port] key in the port number of an existing or new node here.
+ You can also type in a range of port numbers using a hyphen, for
+ example $1501-1505$ when you need to add many.
+\item[Apply Changes] this will allow you to edit an existing node
+ and to then commit the changes to hostname and port. The changes
+ will not be committed if you do not click the OK button.
+\item[Add Nodes] Create a new node with the hostname and port
+ settings.
+\item[Sort nodes] sorts the nodes list based on the hostname.
+\item[Delete Nodes] deletes whatever node is highlighted in the
+ nodes list. You can highlight several at once to have them all
+ deleted.
+\item[Client Watchdog Timeout] a default value of $15$ seconds is
+ used here and the tumbler increments by $15$ seconds. A value of
+ $0$ (zero) disables the watchdog so that if you have a slow client,
+ it will not kill the render job while waiting for that client to
+ respond.
+\item[Total jobs to create] determines the number of jobs to
+ dispatch to the render farm. Total jobs is used to divide a render
+ job into that specified number of tasks. Each background job is
+ assigned a timeline segment to process. 
The render farm software + tries to process all of the rendering in parallel so that several + computers can be used to render the results. + + To start, if you have computers of similar speed, a good number + for \textit{Total jobs to create} is the number of computers + multiplied by $3$. You will want to adjust this according to the + capabilities of your computers and after viewing the framerates. + Multiply them by $1$ to have one job dispatched for every node. If + you have $10$ client nodes and one master node, specify $33$ to have + a well balanced render farm. +\item[(overridden if new file at each label is checked)] instead of + the number of jobs being set to \textit{Total jobs to create}, there + will be a job created for each labeled section. If in the render + menu, the option \textit{Create new file at each label} is selected + when no labels exist, only one job will be created. It may be quite + advantageous to set labels at certain points in the video to ensure + that a key portion of the video will not be split into two different + jobs. +\item[Reset rates] sets the framerate for all the nodes to $0$. + Frame rates are used to scale job sizes based on CPU speed of the + node. Frame rates are calculated only when render farm is enabled. \end{description} -Framerates can really affect how the Render Farm works. The first time you use the render farm all of the rates are displayed as $0$ in the \texttt{Settings $\rightarrow$ Preferences}, Performance tab in the Nodes box. As rendering occurs, all of the nodes send back framerate values to the master node and the preferences page is updated with these values. A rate accumulates based on speed. Once all nodes have a rate of non-zero, the program gives out less work to lower rated nodes in an effort to make the total time for the render to be almost constant. 
-Initially, when the framerate scaling values are zero, the program just uses package length -- render size -divided by the number of packages to portion out the work (if not labels). If something goes wrong or the rates become suspect, then all of the rest of the work will be dumped into the last job. When this happens, you really should \textit{reset rates} for the next render farm session to restart with a good balance. +Framerates can really affect how the Render Farm works. The first +time you use the render farm all of the rates are displayed as $0$ +in the \texttt{Settings $\rightarrow$ Preferences}, Performance tab +in the Nodes box. As rendering occurs, all of the nodes send back +framerate values to the master node and the preferences page is +updated with these values. A rate accumulates based on speed. Once +all nodes have a rate of non-zero, the program gives out less work +to lower rated nodes in an effort to make the total time for the +render to be almost constant. Initially, when the framerate scaling +values are zero, the program just uses package length -- render size +divided by the number of packages to portion out the work (if not +labels). If something goes wrong or the rates become suspect, then +all of the rest of the work will be dumped into the last job. When +this happens, you really should \textit{reset rates} for the next +render farm session to restart with a good balance. \begin{lstlisting}[style=sh] - {cinelerra pathname} -h #displays some of the options. +{path_to_cinelerra}/cin -h # displays some of the options. \end{lstlisting} \subsection{Detailed Setup Description}% \label{sub:detailed_setup_description} -{\color{red} CAUTION }, any exact command lines worked as of $01/2018$ on a Fedora system. These can change over time and on different operating systems/levels. Always check/verify any command line before using. +{\color{red} CAUTION }, any exact command lines worked as of +$01/2018$ on a Fedora system. 
These can change over time and on +different operating systems/levels. Always check/verify any command +line before using. \begin{description} - \item[Set up \CGG{}] A \CGG{} render farm is organized into a master node and any number of client nodes. The master node is the computer which is running the gui. The client nodes are anywhere else on the network with \CGG{} installed and are run from the command line. Before you start the master node for \CGG{}, you need to set up a shared filesystem on the disk storage node as this is the node that will have the common volume where all the data will be stored. - The location of the project and its files should be the same in the client computers as in the master computer and to avoid problems of permissions, it is better to use the same user in master and clients. - For example, if you have the project in \texttt{/home//project-video} you must create the same directory path on the clients, but empty. Sharing the directory of the location of your project on the master computer can be done with NFS as described next. Alternatively, you can look up on the internet how to use Samba to share a directory. - \item[Create a shared filesystem and mount using NFS] All nodes in the render farm should use the same filesystem with the same paths to the project files on all of the master and client nodes. This is easiest to do by setting up an NFS shared disk system. - \begin{enumerate} - \item On each of the computers, install the nfs software if not already installed. For example, on Debian 9 - you will need to run: (be sure to check/verify before using any command line): - \begin{lstlisting}[style=sh] +\item[Set up \CGG{}] A \CGG{} render farm is organized into a master + node and any number of client nodes. The master node is the + computer which is running the gui. The client nodes are anywhere + else on the network with \CGG{} installed and are run from the + command line. 
Before you start the master node for \CGG{}, you need + to set up a shared filesystem on the disk storage node as this is + the node that will have the common volume where all the data will be + stored. The location of the project and its files should be the + same in the client computers as in the master computer and to avoid + problems of permissions, it is better to use the same user in master + and clients. For example, if you have the project in + \texttt{/home//project-video} you must create the same + directory path on the clients, but empty. Sharing the directory of + the location of your project on the master computer can be done with + NFS as described next. Alternatively, you can look up on the + internet how to use Samba to share a directory. +\item[Create a shared filesystem and mount using NFS] All nodes in + the render farm should use the same filesystem with the same paths + to the project files on all of the master and client nodes. This is + easiest to do by setting up an NFS shared disk system. + \begin{enumerate} + \item On each of the computers, install the nfs software if not + already installed. For example, on Debian 9 you will need to run: + (be sure to check/verify before using any command line): +\begin{lstlisting}[style=sh] apt-get install nfs-kernel-server - \end{lstlisting} - \item On the computer that contains the disk storage to be shared, define the network filesystem. For - example to export \texttt{/tmp}, edit the \texttt{/etc/exports} file to add the following line: - \begin{lstlisting}[style=sh] -192.168.1.0/24(rw,fsid=1,no_root_squash,sync,no_subtree_check) - \end{lstlisting} - \item Next reset the exported nfs directories using: - \begin{lstlisting}[style=sh] +\end{lstlisting} + \item On the computer that contains the disk storage to be shared, + define the network filesystem. 
For example to export \texttt{/tmp}, + edit the \texttt{/etc/exports} file to add the following line: +\begin{lstlisting}[style=sh] + 192.168.1.0/24(rw,fsid=1,no_root_squash,sync,no_subtree_check) +\end{lstlisting} + \item Next reset the exported nfs directories using: +\begin{lstlisting}[style=sh] exportfs -ra - \end{lstlisting} - and you may have to start or restart nfs: - \begin{lstlisting}[style=sh] +\end{lstlisting} and you may have to start or restart nfs: +\begin{lstlisting}[style=sh] systemctl restart nfs - \end{lstlisting} - \item Each of the render farm computers must mount the exported nfs target path. To see the exports - which are visible from a client, login as root to the client machine and keyin: - \begin{lstlisting}[style=sh] -showmount -e #using the ip address of the storage host - \end{lstlisting} - \item to access the host disk storage from the other computers in the render farm, mount the nfs export on - the corresponding target path: (be sure to check/verify before using any command line): - \begin{lstlisting}[style=sh] -mount -t nfs :/ - \end{lstlisting} - where \texttt{} is the storage host directory, and \texttt{} is the network address of the storage host. - Because all of the computers must have the same directory path, create that same directory path with the same uid/gid/permissions on each storage client computer ahead of time. - \item To make this permanent across reboots on the client nodes, add the following line to \texttt{/etc/fstab}: - \begin{lstlisting}[style=sh] +\end{lstlisting} + \item Each of the render farm computers must mount the exported + nfs target path. 
To see the exports which are visible from a + client, login as root to the client machine and keyin: +\begin{lstlisting}[style=sh] +showmount -e #using the ip address of the storage host +\end{lstlisting} + \item to access the host disk storage from the other computers in + the render farm, mount the nfs export on the corresponding target + path: (be sure to check/verify before using any command line): +\begin{lstlisting}[style=sh] +mount -t nfs :/ +\end{lstlisting} where \texttt{} is the storage host + directory, and \texttt{} is the network address of the + storage host. Because all of the computers must have the same + directory path, create that same directory path with the same + uid/gid/permissions on each storage client computer ahead of time. + \item To make this permanent across reboots on the client nodes, + add the following line to \texttt{/etc/fstab}: +\begin{lstlisting}[style=sh] {masternode}:/nfsshare /mnt nfs defaults 0 0 - \end{lstlisting} - You can make this permanent on the disk storage host BUT the command lines shown, which were - correct in January 2018 on Fedora, may be different for your operating system or in the future. In - addition if your network is not up, there may be numerous problems. If you make a mistake, your - system may not boot. To make permanent, add the following line to \texttt{/etc/fstab}: - \begin{lstlisting}[style=sh] +\end{lstlisting} You can make this permanent on the disk storage + host BUT the command lines shown, which were correct in January 2018 + on Fedora, may be different for your operating system or in the + future. In addition if your network is not up, there may be + numerous problems. If you make a mistake, your system may not boot. 
+ To make permanent, add the following line to \texttt{/etc/fstab}: +\begin{lstlisting}[style=sh] 192.168.1.12:/tmp /tmp nfs rw,async,hard,intr,noexec,noauto 0 0 - \end{lstlisting} - You will still have to mount the above manually because of the \textit{noauto} parameter but you won’t - have to remember all of the other necessary parameters. Depending on your expertise level, you can - change that. +\end{lstlisting} You will still have to mount the above manually + because of the \textit{noauto} parameter but you won’t have to + remember all of the other necessary parameters. Depending on your + expertise level, you can change that. - Later, to remove access to the storage host filesystem: - \begin{lstlisting}[style=sh] + Later, to remove access to the storage host filesystem: +\begin{lstlisting}[style=sh] umount - \end{lstlisting} - - Be aware that you may have to adjust any security or firewalls you have in place. \textit{Most firewalls will require extra rules to allow nfs access}. Many have built-in configurations for this. - \end{enumerate} - \item[Configure Rendering on Master Node] There is 1 master node which is running the \CGG{} gui and where the video will be edited and the command given to start up the rendering. Any number of client computers can be run from the command line only, so they can be headless since no X or any graphical libraries are needed. Of course, the \CGG{} software must be installed on each of the client computers. - \begin{enumerate} - \item Assuming you already have \CGG{} installed on the master node, start \CGG{} by clicking on the - icon or by typing the following command on the terminal screen: \texttt{/{cinelerra\_path}/cin}. - \item Use the file pulldown \texttt{Settings $\rightarrow$ Preferences}, the Performance tab, to set up your Render Farm - options in the Render Farm pane. - \item Check the \textit{Use render farm} option. 
By default, once you enable the option of Render Farm, rendering is usually done using the render farm. Batch rendering can be done locally, or farmed. - \item Add the hostname or the IP address of each of the client nodes in the Hostname textbox and the port - number that you want to use in the Port textbox. You can make sure a port number is not already in - use by keying in on the command line: - \begin{lstlisting}[style=sh] +\end{lstlisting} + + Be aware that you may have to adjust any security or firewalls + you have in place. \textit{Most firewalls will require extra rules + to allow nfs access}. Many have built-in configurations for this. + \end{enumerate} +\item[Configure Rendering on Master Node] There is 1 master node + which is running the \CGG{} gui and where the video will be edited + and the command given to start up the rendering. Any number of + client computers can be run from the command line only, so they can + be headless since no X or any graphical libraries are needed. Of + course, the \CGG{} software must be installed on each of the client + computers. + \begin{enumerate} + \item Assuming you already have \CGG{} installed on the master + node, start \CGG{} by clicking on the icon or by typing the + following command on the terminal screen: + \texttt{/{cinelerra\_path}/cin}. + \item Use the file pulldown \texttt{Settings $\rightarrow$ + Preferences}, the Performance tab, to set up your Render Farm + options in the Render Farm pane. + \item Check the \textit{Use render farm} option. By default, once + you enable the option of Render Farm, rendering is usually done + using the render farm. Batch rendering can be done locally, or + farmed. + \item Add the hostname or the IP address of each of the client + nodes in the Hostname textbox and the port number that you want to + use in the Port textbox. 
You can make sure a port number is not + already in use by keying in on the command line: +\begin{lstlisting}[style=sh] netstat -n -l -4 --protocol inet - \end{lstlisting} - Next, click on the \textit{Add Nodes} - button and then you will see that host appear in the Nodes list box to the right. The \texttt{X} in the first - column of the nodes box denotes that the node is active. To review the \textit{standard} port allocations, - check the \texttt{/etc/services} file. - \item Enter the total jobs that you would like to be used in the \textit{Total job} textbox. - \item The default watchdog timer initial state is usually just fine but can be adjusted later if needed. - \item Click OK on the Preferences window when done. - \end{enumerate} - \item[Create Workflow] While working on the master computer, it is recommended that you keep all the resources being used on the same shared disk. Load your video/audio piece and do your editing and preparation. Add any desired plugins, such as a Title, to fine-tune your work. You want to make sure your video is ready to be rendered into the final product. - \item[Start the Client Nodes] To start up the client nodes run \CGG{} from the command line on each of the client computers using the following command: - \begin{lstlisting}[style=sh] -/{cinelerra_pathname}/cin -d [port #] ; \#for example /mnt1/bin/cinelerra -d 401 - \end{lstlisting} - This starts \CGG{} in command prompt mode so that it listens to the specified port number for commands from the master node for rendering. When you start each of the clients up, you will see some messages scroll by as each client is created on that computer, such as: - \begin{lstlisting}[style=sh] +\end{lstlisting} Next, click on the \textit{Add Nodes} button and + then you will see that host appear in the Nodes list box to the + right. The \texttt{X} in the first column of the nodes box denotes + that the node is active. 
To review the \textit{standard} port
+ allocations, check the \texttt{/etc/services} file.
+ \item Enter the total jobs that you would like to be used in the
+ \textit{Total job} textbox.
+ \item The default watchdog timer initial state is usually just
+ fine but can be adjusted later if needed.
+ \item Click OK on the Preferences window when done.
+ \end{enumerate}
+\item[Create Workflow] While working on the master computer, it is
+ recommended that you keep all the resources being used on the same
+ shared disk. Load your video/audio piece and do your editing and
+ preparation. Add any desired plugins, such as a Title, to fine-tune
+ your work. You want to make sure your video is ready to be rendered
+ into the final product.
+\item[Start the Client Nodes] To start up the client nodes run
+ \CGG{} from the command line on each of the client computers using
+ the following command:
+\begin{lstlisting}[style=sh]
+/{cinelerra_pathname}/cin -d [port number]
+# for example:
+/mnt1/bin/cinelerra -d 401
+\end{lstlisting} This starts \CGG{} in command prompt mode so that
+ it listens to the specified port number for commands from the master
+ node for rendering. When you start each of the clients up, you will
+ see some messages scroll by as each client is created on that
+ computer, such as:
+\begin{lstlisting}[style=sh]
RenderFarmClient::main_loop: client started
RenderFarmClient::main_loop: Session started from 127.0.0.1
- \end{lstlisting}
- As it completes its jobs, you will should see:
- \begin{lstlisting}[style=sh]
+\end{lstlisting} As it completes its jobs, you should see:
+\begin{lstlisting}[style=sh]
RenderFarmClientThread::run: Session finished
- \end{lstlisting}
- A quick way to start a sequence of clients is to use:
- \begin{lstlisting}[style=sh]
-or n in `seq 1501 1505`; do cin -d $n; done
- \end{lstlisting}
- \item[Render Using Render Farm] After you have followed the preceding steps, you are ready to use the render farm. 
Click on \texttt{File $\rightarrow$ Render}\dots which opens the render dialog. The most important point here is to use for \textit{the Output path / Select a file to render to} a path/file name that is on the shared volume that is also mounted on the clients. Click on OK to render. The \CGG{} program divides the timeline into the number of jobs specified by the user. These jobs are then dispatched to the various nodes depending upon the load balance. The first segment will always render on the master node and the other segments will be farmed out to the render nodes. Batch Rendering, as well as BD/DVD rendering, may use the render farm. Each line in the batchbay can enable/disable the render farm. Typically, video can be rendered into many file segments and concatenated, but normally audio is rendered as one monolithic file (not farmed). - - Another performance feature which can use the Render Farm is \textit{Background Rendering}. This is also enabled on the \texttt{Preferences $\rightarrow$ Performances} tab. The background render function generates a set of image files by pre-rendering the timeline data on the fly. As the timeline is update by editing, the image data is re-rendered to a \textit{background render} storage path. The Render Farm will be used for this operation if it is enabled at the same time as the \textit{background render} feature. - \item[Assemble the Output Files] Once all of the computer jobs are complete, you can put the output files together by using the shell script, \textit{RenderMux} (from the menubar \textit{scripts} button just above FF), if the files were rendered using ffmpeg, or you can load these by creating a new track and specifying concatenate to existing tracks in the load dialog in the correct numerical order. 
File types which support direct copy can be concatenated into a single file by rendering to the same file format with render farm disabled as long as the track dimensions, output dimensions, and asset dimensions are equal.
+\end{lstlisting} A quick way to start a sequence of clients is to
+ use:
+\begin{lstlisting}[style=sh,mathescape]
+for n in `seq 1501 1505`; do
+ cin -d $\$$n
+done
+\end{lstlisting}
+\item[Render Using Render Farm] After you have followed the
+ preceding steps, you are ready to use the render farm. Click on
+ \texttt{File $\rightarrow$ Render}\dots which opens the render
+ dialog. The most important point here is to use for \textit{the
+ Output path / Select a file to render to} a path/file name that is
+ on the shared volume that is also mounted on the clients. Click on
+ OK to render. The \CGG{} program divides the timeline into the
+ number of jobs specified by the user. These jobs are then
+ dispatched to the various nodes depending upon the load balance. The
+ first segment will always render on the master node and the other
+ segments will be farmed out to the render nodes. Batch Rendering,
+ as well as BD/DVD rendering, may use the render farm. Each line in
+ the batchbay can enable/disable the render farm. Typically, video
+ can be rendered into many file segments and concatenated, but
+ normally audio is rendered as one monolithic file (not farmed).
+
+ Another performance feature which can use the Render Farm is
+ \textit{Background Rendering}. This is also enabled on the
+ \texttt{Preferences $\rightarrow$ Performances} tab. The background
+ render function generates a set of image files by pre-rendering the
+ timeline data on the fly. As the timeline is updated by editing, the
+ image data is re-rendered to a \textit{background render} storage
+ path. The Render Farm will be used for this operation if it is
+ enabled at the same time as the \textit{background render} feature. 
+\item[Assemble the Output Files] Once all of the computer jobs are + complete, you can put the output files together by using the shell + script, \textit{RenderMux} (from the menubar \textit{scripts} button + just above FF), if the files were rendered using ffmpeg, or you can + load these by creating a new track and specifying concatenate to + existing tracks in the load dialog in the correct numerical order. + File types which support direct copy can be concatenated into a + single file by rendering to the same file format with render farm + disabled as long as the track dimensions, output dimensions, and + asset dimensions are equal. \end{description} -\subsection{Quick and Easy Render Farm Setup – The Buddy System Way}% +\subsection{Quick and Easy Render Farm Setup – The Buddy System + Way}% \label{sub:buddy_system_way} -These steps are for quickly setting up render farm with the least amount of additional system work, but it is non-optimal. It is useful in situations where a few people all show up with their laptops to work together on the same video/audio file and you don’t want to bother setting up NFS for a shared disk. +These steps are for quickly setting up render farm with the least +amount of additional system work, but it is non-optimal. It is +useful in situations where a few people all show up with their +laptops to work together on the same video/audio file and you don’t +want to bother setting up NFS for a shared disk. \begin{enumerate} - \item Make sure the \CGG{} program is installed on all of the computers and the network between the - main computer and the client computers is working. Use the same version if possible. - \item Load your video file on the master node and use \texttt{File $\rightarrow$ Save as}\dots to save it to \texttt{/tmp}. 
- \item Move that same file with the same name to \texttt{/tmp} on all of the client computers via rsh or sneaker net -- the ONLY reason you are doing this is to avoid having to set up NFS or Samba on the buddy client - laptops that show up! - \item Edit your video/audio file to get it the way you want it and add the plugins, such as a Title, etc. - \item Check for a set of unused ports in \texttt{/etc/services} file, if username is root usually $401-425$ are - available; if non-root, then $1024-1079$. - \item On the master computer, in \texttt{Settings $\rightarrow$ Preferences, Performance} tab: - \begin{itemize} - \item check the box \textit{Use render farm} - \item keyin localhost for the hostname or an ip address of the buddy client node - \item keyin the desired port number for each client; and use \textit{Add Node} for each host - \item set total jobs to the number of client computers $+1$ multiplied by $3$ (or proportion to client speeds) - \item check OK - \end{itemize} - \item On each buddy client, create a job for each port: - \begin{lstlisting}[style=sh] +\item Make sure the \CGG{} program is installed on all of the + computers and the network between the main computer and the client + computers is working. Use the same version if possible. +\item Load your video file on the master node and use \texttt{File + $\rightarrow$ Save as}\dots to save it to \texttt{/tmp}. +\item Move that same file with the same name to \texttt{/tmp} on all + of the client computers via rsh or sneaker net -- the ONLY reason + you are doing this is to avoid having to set up NFS or Samba on the + buddy client laptops that show up! +\item Edit your video/audio file to get it the way you want it and + add the plugins, such as a Title, etc. +\item Check for a set of unused ports in \texttt{/etc/services} + file, if username is root usually $401-425$ are available; if + non-root, then $1024-1079$. 
+\item On the master computer, in \texttt{Settings $\rightarrow$ + Preferences, Performance} tab: + \begin{itemize} + \item check the box \textit{Use render farm} + \item keyin localhost for the hostname or an ip address of the + buddy client node + \item keyin the desired port number for each client; and use + \textit{Add Node} for each host + \item set total jobs to the number of client computers $+1$ + multiplied by $3$ (or proportion to client speeds) + \item check OK + \end{itemize} +\item On each buddy client, create a job for each port: +\begin{lstlisting}[style=sh] /{cinelerra_pathname}/cin -d port# - \end{lstlisting} - \item On the master, bring up the render menu and name the output files, for example \texttt{/tmp/myoutput.mp4}. - \item The client nodes output results will be on their local \texttt{/tmp} filesystems so you will have to again use - \textit{rsh/ftp} or \textit{usb sneaker net} to move them over to the main computer. File names will be the render - job output file name with port number tacked on (e.g. \texttt{/tmp/hb.mp4001...mp4005}). - \item Load the files by concatenate to existing track on the master node or use RenderMux shell script. +\end{lstlisting} +\item On the master, bring up the render menu and name the output + files, for example \texttt{/tmp/myoutput.mp4}. +\item The client nodes output results will be on their local + \texttt{/tmp} filesystems so you will have to again use + \textit{rsh/ftp} or \textit{usb sneaker net} to move them over to + the main computer. File names will be the render job output file + name with port number tacked on + (e.g. \texttt{/tmp/hb.mp4001...mp4005}). +\item Load the files by concatenate to existing track on the master + node or use RenderMux shell script. 
\end{enumerate}

\subsection{Multi-core Computers Render Farm Setup}%
\label{sub:multi_core_render_farm_setup}

-If you are lucky enough to have a computer with a large cpu core count, setting up a render farm
-can really take advantage of using all of the cpus. This is much faster than the default automatic
-threading capability. Since you don’t need to communicate with other computers, you will not have
-to be concerned about TCP communication or shared disks/files. When you are going to be doing other
-work simultaneously while rendering a large job, you will want to leave some of the cpus available
-for that. Be sure to set “Project SMP cpus” in the Settings→Preferences, Performance tab to your
-CPU count.
+If you are lucky enough to have a computer with a large cpu core
+count, setting up a render farm can really take advantage of using
+all of the cpus. This is much faster than the default automatic
+threading capability. Since you don’t need to communicate with other
+computers, you will not have to be concerned about TCP communication
+or shared disks/files. When you are going to be doing other work
+simultaneously while rendering a large job, you will want to leave
+some of the cpus available for that. Be sure to set “Project SMP
+cpus” in the Settings→Preferences, Performance tab to your CPU
+count.

\subsection{Troubleshooting Tips and Warnings}%
\label{sub:troubleshhoting_tips_warnings}

-\noindent If you have problems running the Render Farm. Here is a list of items to check.
+\noindent If you have problems running the Render Farm, here is a
+list of items to check.

\begin{itemize}
- \item \CGG{} must be installed on the master node and all client machines.
- \item It is best to have the same username available on all nodes to avoid problems with access rights.
- \item Check file permissions and ownership to ensure that the clients all have access.
- \item If a node does not have access to an input asset it will not die, but just display error messages. 
- \item If a node can not access an output asset, the rendering will abort. - \item A port in use when stopped may take up to $30$ seconds to time out before you can restart the jobs. - \item Each of the port combinations have to be unique across clients, and not already in use in the network. - \item \CGG{} load balances on a first come, first serve basis. If the last section of the video is sent to the - slowest node, the render job will have to wait for the slowest node to finish. It would be better to - start on the slowest node with the earlier section of the video so keep that in mind when designating - port numbers. - \item If not running as root, a port number in the higher range of $1024$ and above must be used instead of - the $400+$ range. - \item The master and client jobs on the ports do not go away so if you want to stop them, you will have to - kill them via: \texttt{kill PID\#}. - \item Check to see if there are services listening on the ports to use: \texttt{netstat -n -l -4 --protocol inet} - \item There is a watchdog timer in \CGG{} and if there is no response from a client in the designated - number of seconds, it will kill the render job. - \item The \textit{localhost} should exist as $127.0.0.1$ in \texttt{/etc/hosts} and as the \texttt{lo} network device in ifconfig. - \item If the job loads become unbalanced, you may want to \textit{reset rates} to start over for new framerates. - \item If jobs are split in a key section on the timeline, you may wish to \textit{use labels} to prevent this. - \item For testing purposes, you may want to start a client in the foreground using \texttt{-f} instead of \texttt{-d}. - \item If one of the client computers is unavailable, check to see if there is an \texttt{X} to the left of the \texttt{nodename} - in the Nodes listbox. Check the \texttt{X} to disable it which sets ON to OFF. 
- \item A red message in the lower left hand corner of the main timeline that reads \textit{Failed to start render - farm} often means that the client \CGG{} programs were not started up. - \item A message of \texttt{RenderFarmWatchdog::run 1 killing server thread \\ \#address\#} means that the client did - not respond in time. You can adjust the timer in \texttt{Settings $\rightarrow$ Preferences, Performance} tab. - \item When you get the message \texttt{RenderFarmClient::main\_loop: bind port 400: Address already in use}, use a different port. - \item A message of \texttt{RenderFarmServerThread::open\_client: unknown host abcompany} means that the - hostname of abcompany is not in \texttt{/etc/hosts} so you will have to add it or use the ip address instead. - \item There are numerous error messages associated with file \textit{open/close/status} or problems with the file - that should be dealt with according to what is printed out. - \item Other illustrative messages may be shown such as: \texttt{RenderFarmClientThread:: run: Session finished}. +\item \CGG{} must be installed on the master node and all client + machines. +\item It is best to have the same username available on all nodes to + avoid problems with access rights. +\item Check file permissions and ownership to ensure that the + clients all have access. +\item If a node does not have access to an input asset it will not + die, but just display error messages. +\item If a node can not access an output asset, the rendering will + abort. +\item A port in use when stopped may take up to $30$ seconds to time + out before you can restart the jobs. +\item Each of the port combinations have to be unique across + clients, and not already in use in the network. +\item \CGG{} load balances on a first come, first serve basis. If + the last section of the video is sent to the slowest node, the + render job will have to wait for the slowest node to finish. 
It + would be better to start on the slowest node with the earlier + section of the video so keep that in mind when designating port + numbers. +\item If not running as root, a port number in the higher range of + $1024$ and above must be used instead of the $400+$ range. +\item The master and client jobs on the ports do not go away so if + you want to stop them, you will have to kill them via: \texttt{kill + PID\#}. +\item Check to see if there are services listening on the ports to + use: \texttt{netstat -n -l -4 --protocol inet} +\item There is a watchdog timer in \CGG{} and if there is no + response from a client in the designated number of seconds, it will + kill the render job. +\item The \textit{localhost} should exist as $127.0.0.1$ in + \texttt{/etc/hosts} and as the \texttt{lo} network device in + ifconfig. +\item If the job loads become unbalanced, you may want to + \textit{reset rates} to start over for new framerates. +\item If jobs are split in a key section on the timeline, you may + wish to \textit{use labels} to prevent this. +\item For testing purposes, you may want to start a client in the + foreground using \texttt{-f} instead of \texttt{-d}. +\item If one of the client computers is unavailable, check to see if + there is an \texttt{X} to the left of the \texttt{nodename} in the + Nodes listbox. Check the \texttt{X} to disable it which sets ON to + OFF. +\item A red message in the lower left hand corner of the main + timeline that reads \textit{Failed to start render farm} often means + that the client \CGG{} programs were not started up. +\item A message of \texttt{RenderFarmWatchdog::run 1 killing server + thread \\ \#address\#} means that the client did not respond in + time. You can adjust the timer in \texttt{Settings $\rightarrow$ + Preferences, Performance} tab. +\item When you get the message \texttt{RenderFarmClient::main\_loop: + bind port 400: Address already in use}, use a different port. 
+\item A message of \texttt{RenderFarmServerThread::open\_client: + unknown host abcompany} means that the hostname of abcompany is not + in \texttt{/etc/hosts} so you will have to add it or use the ip + address instead. +\item There are numerous error messages associated with file + \textit{open/close/status} or problems with the file that should be + dealt with according to what is printed out. +\item Other illustrative messages may be shown such as: + \texttt{RenderFarmClientThread:: run: Session finished}. \end{itemize} -And here are a couple of more tips for making Render Farm specific for your setup. +And here are a couple of more tips for making Render Farm specific +for your setup. \begin{itemize} - \item Because \textit{index files} speed up displaying the video you may want to share these files -with the clients on a shared filesystem. More information on index files configuration is outlined in -\ref{sub:index_file_section}. - \item Or, one of the convenient features of Cinelerra is the redirection of the path - via \texttt{CIN\_CONFIG} as in: +\item Because \textit{index files} speed up displaying the video you + may want to share these files with the clients on a shared + filesystem. More information on index files configuration is + outlined in~\ref{sub:index_file_section}. +\item Or, one of the convenient features of Cinelerra is the + redirection of the path via \texttt{CIN\_CONFIG} as in: \begin{lstlisting}[style=sh] -CIN_CONFIG=// //cin -\end{lstlisting} -This means that you can make project related configurations that do not impact the default \texttt{\$HOME} config. You can either export your default \texttt{\$HOME} config or the \texttt{CIN\_CONFIG} config to use on the render farm. +CIN_CONFIG="//" cin +\end{lstlisting} This means that you can make project related + configurations that do not impact the default \texttt{\$HOME} + config. 
You can either export your default \texttt{\$HOME} config + or the \texttt{CIN\_CONFIG} config to use on the render farm. \end{itemize} \paragraph{Warnings} -If one of the render farm computers is connected to the internet, you should use a firewall to maintain the safety of all of the computers. The ports have to be reachable for the intranet but you do not want the ports to be open to the outside. +If one of the render farm computers is connected to the internet, +you should use a firewall to maintain the safety of all of the +computers. The ports have to be reachable for the intranet but you +do not want the ports to be open to the outside. \section{Some Specific Rendering}% \label{sec:some_specific_rendering} -\noindent The next few pages relate to rendering for specific common cases. +\noindent The next few pages relate to rendering for specific common +cases. \subsection{FFmpeg Common H.264 Rendering}% \label{sub:ffmpeg_h264_rendering} -Because H.264 is so widely used, the method in \CGG{} Infinity is outlined below. These setup steps make it easy to just get started. +Because H.264 is so widely used, the method in \CGG{} Infinity is +outlined below. These setup steps make it easy to just get started. \begin{itemize} - \item File $\rightarrow$ Render - \item File Format $\rightarrow$ FFMPEG + mp4 - \item Video Wrench $\rightarrow$ Preset $\rightarrow$ h264.mp4 + bitrate: 6000000 (or whatever) + OK - \item Audio Wrench $\rightarrow$ Preset $\rightarrow$ h265.mp4 + bitrate: 224000 (or whatever) + OK - \item Set your target path in: Render $\rightarrow$ Select a file to render to - \item Set your timeline in: Render $\rightarrow$ Render range + click Project - \item Set your insertion strategy: Replace project (or whatever) - \item Press OK to start rendering. 
+\item File $\rightarrow$ Render +\item File Format $\rightarrow$ FFMPEG + mp4 +\item Video Wrench $\rightarrow$ Preset $\rightarrow$ h264.mp4 + + bitrate: 6000000 (or whatever) + OK +\item Audio Wrench $\rightarrow$ Preset $\rightarrow$ h265.mp4 + + bitrate: 224000 (or whatever) + OK +\item Set your target path in: Render $\rightarrow$ Select a file to + render to +\item Set your timeline in: Render $\rightarrow$ Render range + + click Project +\item Set your insertion strategy: Replace project (or whatever) +\item Press OK to start rendering. \end{itemize} \subsection{Lossless Rendering}% \label{sub:loseeless_rendering} -Lossless means that in the compression of a file, all of the original data, every single bit, can be recovered when the file is uncompressed. This is different than \textit{lossy compression} where some data is permanently deleted so that when uncompressed, all of the original data can not be exactly recovered. Lossy is generally used for video and sound, where a certain amount of information loss will not be detected by most users or the playback hardware does not reproduce it anyway -- it is a trade-off between file size and image/sound quality. The files created will be more than 10 times larger than usual. Most players will not be able to decode lossless as the bitrate will overwhelm the device. - -For x264 lossless compression to work, the only color model allowed here is yuv420p. Any other specification will be converted to yuv420p and the data will be modified. Also, keep in mind that the YUV color model has to be converted to RGB, which also modifies the data. - -To use x264 lossless rendering -- choose File format of ffmpeg, m2ts in the Render window. Click on the Video wrench, which brings up the Video Preset window and scroll down in the Compression filebox and choose \texttt{lossless.m2ts}. 
\textit{Preset=medium} is the default, but can be varied from \textit{ultrafast} (least amount of compression, but biggest file size) to \textit{veryslow} (most amount of compression, but still HUGE) in the parameter box where you see $qp=0$. This option is also available for bluray creation. +Lossless means that in the compression of a file, all of the +original data, every single bit, can be recovered when the file is +uncompressed. This is different than \textit{lossy compression} +where some data is permanently deleted so that when uncompressed, +all of the original data can not be exactly recovered. Lossy is +generally used for video and sound, where a certain amount of +information loss will not be detected by most users or the playback +hardware does not reproduce it anyway -- it is a trade-off between +file size and image/sound quality. The files created will be more +than 10 times larger than usual. Most players will not be able to +decode lossless as the bitrate will overwhelm the device. + +For x264 lossless compression to work, the only color model allowed +here is yuv420p. Any other specification will be converted to +yuv420p and the data will be modified. Also, keep in mind that the +YUV color model has to be converted to RGB, which also modifies the +data. + +To use x264 lossless rendering -- choose File format of ffmpeg, m2ts +in the Render window. Click on the Video wrench, which brings up +the Video Preset window and scroll down in the Compression filebox +and choose \texttt{lossless.m2ts}. \textit{Preset=medium} is the +default, but can be varied from \textit{ultrafast} (least amount of +compression, but biggest file size) to \textit{veryslow} (most +amount of compression, but still HUGE) in the parameter box where +you see $qp=0$. This option is also available for bluray creation. 
\subsection{Extra “cin\_” Options for Render with FFmpeg}% \label{sub:extra_cin_option_ffmpeg} -There are several special parameters that can be used in the ffmpeg options file to pass values to the codecs that are not normally available. They're called Global Options. These are explained below. - -\paragraph{cin\_pix\_fmt} The Render menus allows you to choose the codec input pixel format (figure~\ref{fig:yuv420}). The Pixels selection provides the available pixel format options for the chosen codec type; valid choices vary for the different file types. This list represents the formats that the codec advertises. It is not always complete, and it may include options that are not legal with all parameter configurations. - -\begin{figure}[htpb] - \centering - \includegraphics[width=0.6\linewidth]{yuv420.png} - \caption{Render \& Video Preset menus displaying Pixel choices} - \label{fig:yuv420} +There are several special parameters that can be used in the ffmpeg +options file to pass values to the codecs that are not normally +available. They're called Global Options. These are explained +below. + +\paragraph{cin\_pix\_fmt} The Render menus allows you to choose the +codec input pixel format (figure~\ref{fig:yuv420}). The Pixels +selection provides the available pixel format options for the chosen +codec type; valid choices vary for the different file types. This +list represents the formats that the codec advertises. It is not +always complete, and it may include options that are not legal with +all parameter configurations. + +\begin{figure}[htpb] \centering + \includegraphics[width=0.6\linewidth]{yuv420.png} + \caption{Render \& Video Preset menus displaying Pixel choices} + \label{fig:yuv420} \end{figure} \begin{itemize} - \item The \textit{Bitrate}, \textit{Quality}, and \textit{Pixels} fields are only updated when the Video Options are reloaded. This - occurs when you either change the ffmpeg file format, or video presets compression fields. 
- \item If the video options preset has \textit{cin\_pix\_fmt} defined, its value will be loaded as the default. If you - override the default, the value you specify will be used. - \item If the video options preset does not have \textit{cin\_pix\_fmt}, the default pixel format will be computed by ffmpeg (\textit{avcodec\_find\_best\_pix\_fmt\_of\_list}), using the session format as the source choice. The - \textit{best} is usually the format which is most similar in color and depth. - \item If no choices are available, yuv420p for video will be used. - \item You can also specify ffmpeg pixel formats which are not in the list. The list is provided by ffmpeg as input selection, but is more like suggestions than fact. For example, the raw formats can take almost any format, but the rawvideo codec actually specifies no legal formats. +\item The \textit{Bitrate}, \textit{Quality}, and \textit{Pixels} + fields are only updated when the Video Options are reloaded. This + occurs when you either change the ffmpeg file format, or video + presets compression fields. +\item If the video options preset has \textit{cin\_pix\_fmt} + defined, its value will be loaded as the default. If you override + the default, the value you specify will be used. +\item If the video options preset does not have + \textit{cin\_pix\_fmt}, the default pixel format will be computed by + ffmpeg (\textit{avcodec\_find\_best\_pix\_fmt\_of\_list}), using the + session format as the source choice. The \textit{best} is usually + the format which is most similar in color and depth. +\item If no choices are available, yuv420p for video will be used. +\item You can also specify ffmpeg pixel formats which are not in the + list. The list is provided by ffmpeg as input selection, but is + more like suggestions than fact. For example, the raw formats can + take almost any format, but the rawvideo codec actually specifies no + legal formats. 
\end{itemize} -\noindent Some option files provide \textit{cin\_pix\_fmt} to suggest a choice for good quality output or to prevent parameter errors when the other provided parameters conflict with the \textit{best} pixel format. This is the case in \texttt{faststart\_h264.mp4} where the \textit{profile=high} parameter dictates pixel format must be \texttt{yuv420p}. - -\paragraph{cin\_bitrate} If you specify the bitrate, you can not specify the quality.\\ -Example: \textit{cin\_bitrate=2000000} - -\paragraph{cin\_quality} If you specify the quality, you can not specify the bitrate.\\ -Example: \textit{cin\_quality=7} - -\paragraph{cin\_stats\_filename} This parameter is useful for 2 pass operations.\\ -Example: \texttt{cin\_stats\_filename /tmp/cin\_video\_vp9\_webm} - -\paragraph{cin\_sample\_fmt} For audio the preset sample format default is computed in a similar way as stated above for video or can be set with the \textit{cin\_sample\_fmt} parameter (figure~\ref{fig:audio}). If no choices are provided, s16 will be used.\\ -Example: \textit{cin\_sample\_fmt=s16} - -\begin{figure}[htpb] - \centering - \includegraphics[width=0.55\linewidth]{audio.png} - \caption{Render menu showing where Samples is} - \label{fig:audio} +\noindent Some option files provide \textit{cin\_pix\_fmt} to +suggest a choice for good quality output or to prevent parameter +errors when the other provided parameters conflict with the +\textit{best} pixel format. This is the case in +\texttt{faststart\_h264.mp4} where the \textit{profile=high} +parameter dictates pixel format must be \texttt{yuv420p}. 
+ +\paragraph{cin\_bitrate} If you specify the bitrate, you can not +specify the quality.\\ Example: \textit{cin\_bitrate=2000000} + +\paragraph{cin\_quality} If you specify the quality, you can not +specify the bitrate.\\ Example: \textit{cin\_quality=7} + +\paragraph{cin\_stats\_filename} This parameter is useful for 2 pass +operations.\\ Example: \texttt{cin\_stats\_filename + /tmp/cin\_video\_vp9\_webm} + +\paragraph{cin\_sample\_fmt} For audio the preset sample format +default is computed in a similar way as stated above for video or +can be set with the \textit{cin\_sample\_fmt} parameter +(figure~\ref{fig:audio}). If no choices are provided, s16 will be +used.\\ Example: \textit{cin\_sample\_fmt=s16} + +\begin{figure}[htpb] \centering + \includegraphics[width=0.55\linewidth]{audio.png} + \caption{Render menu showing where Samples is} + \label{fig:audio} \end{figure} -\paragraph{Private Options} (muxers). In the window of the \textit{wrench} in addition to the \textit{View} button, which allows more global options and changes to the formats, there is an additional \textit{Format} button that allows you to modify the Private Options, i.e. relating to specific muxing formats. More information on all these options can be found at: {\small \url{https://ffmpeg.org/ffmpeg-all.html#Format-Options}} sections 19 and 21. +\paragraph{Private Options} (muxers). In the window of the +\textit{wrench} in addition to the \textit{View} button, which +allows more global options and changes to the formats, there is an +additional \textit{Format} button that allows you to modify the +Private Options, i.e.\ relating to specific muxing formats. More +information on all these options can be found at +\href{https://ffmpeg.org/ffmpeg-all.html#Format-Options}{ffmpeg.org} +sections 19 and 21. 
\subsection{Two-pass Encoding with FFmpeg}% \label{sub:two_pass_encoding_ffmpeg} -In \CGG{} for two-pass, you need to run ffmpeg twice, with the same settings, except for designating the options of pass 1 for the first pass and then pass 2. In pass 1, a logfile that ffmpeg needs for the second pass is created. In pass 1 the audio codec should be specified that will be used in pass 2. For more information on ffmpeg 2-pass, check {\small \url{https://trac.ffmpeg.org/wiki/Encode/H.264}}. Different libraries may have different requirements and you will probably have to determine this by looking online at ffmpeg or looking directly at that code. +In \CGG{} for two-pass, you need to run ffmpeg twice, with the same +settings, except for designating the options of pass~1 for the first +pass and then pass~2. In pass~1, a logfile that ffmpeg needs for +the second pass is created. In pass~1 the audio codec should be +specified that will be used in pass~2. For more information on +ffmpeg 2-pass, check +\href{https://trac.ffmpeg.org/wiki/Encode/H.264}{ffmpeg.org}. +Different libraries may have different requirements and you will +probably have to determine this by looking online at ffmpeg or +looking directly at that code. 
-This 2 line ffmpeg 2-pass operation can be functionally duplicated in \CGG{} in the steps below them: +This 2 line ffmpeg 2-pass operation can be functionally duplicated +in \CGG{} in the steps below them: \begin{lstlisting}[style=sh] -ffmpeg -y -i input -c:v libx264 -b:v 2600k -pass 1 -c:a aac -b:a 128k -f mp4 /dev/null && \ -ffmpeg -i input -c:v libx264 -b:v 2600k -pass 2 -c:a aac -b:a 128k output.mp4 +ffmpeg -y -i $INPUT \ + -c:v libx264 -b:v 2600k -pass 1 \ + -c:a aac -b:a 128k -f mp4 /dev/null && \ + ffmpeg -i $INPUT \ + -c:v libx264 -b:v 2600k -pass 2 \ + -c:a aac -b:a 128k $OUTPUT.mp4 \end{lstlisting} \begin{enumerate} - \item After you have completed your editing, do a Save Session with \texttt{File $\rightarrow$ Save as}\dots - Before starting, be sure your session is ready for batch render. That is, positioned at the beginning and nothing selected. - \item Bring up \texttt{File $\rightarrow$ Batch Render}\dots where you will do the setup. - \item Click on the \textit{Delete} box to remove old jobs highlighted in the bottom listbox. - \begin{itemize} - \item For the \textit{File Format} choose ffmpeg and mp4 for the type. - \item Set \textit{Output path} to the path and filename for the render output file. - \item Click on \textit{Use Current EDL} to use the designated EDL Path file. - \item Click on \textit{New} and you will see a new highlighted job show up in the listbox at the bottom. - \item Use the Audio wrench to set bitrate to $128000$ ($128k$ as in ffmpeg example above). - \item Click checkmark OK. Open the video tools with the video wrench. - \item Set the Video Compression to \textit{h264.mp4} (as seen in the example). - \item Set the bitrate to $2600000$ ($2600k$ as in ffmpeg example above). 
- \item Add the following 2 lines after the first line: - \begin{lstlisting}[style=sh] +\item After you have completed your editing, do a Save Session with + \texttt{File $\rightarrow$ Save as}\dots Before starting, be sure + your session is ready for batch render. That is, positioned at the + beginning and nothing selected. +\item Bring up \texttt{File $\rightarrow$ Batch Render}\dots where + you will do the setup. +\item Click on the \textit{Delete} box to remove old jobs + highlighted in the bottom listbox. + \begin{itemize} + \item For the \textit{File Format} choose ffmpeg and mp4 for the + type. + \item Set \textit{Output path} to the path and filename for the + render output file. + \item Click on \textit{Use Current EDL} to use the designated EDL + Path file. + \item Click on \textit{New} and you will see a new highlighted job + show up in the listbox at the bottom. + \item Use the Audio wrench to set bitrate to $128000$ ($128k$ as + in ffmpeg example above). + \item Click checkmark OK\@. Open the video tools with the video + wrench. + \item Set the Video Compression to \textit{h264.mp4} (as seen in + the example). + \item Set the bitrate to $2600000$ ($2600k$ as in ffmpeg example + above). + \item Add the following 2 lines after the first line: +\begin{lstlisting}[style=sh] flags +pass1 -passlogfile /tmp/{temporary log file name}.log - \end{lstlisting} - Click checkmark OK. - \end{itemize} - \item Click on \textit{New} to create the second pass job. You will see this second job in the listbox below. - Use the Video wrench and change pass1 to pass2 as follows. - \begin{lstlisting}[style=sh] +passlogfile /tmp/"{temporary log file name}.log" +\end{lstlisting} Click checkmark OK. + \end{itemize} +\item Click on \textit{New} to create the second pass job. You will + see this second job in the listbox below. Use the Video wrench and + change pass1 to pass2 as follows. +\begin{lstlisting}[style=sh] flags +pass2 - \end{lstlisting} - \item Click checkmark OK. 
- \item Click on the \textit{Start} box and watch it go! - \item You can now check the output file for results. At the time this was documented, \textit{rc=2pass} will be - in the output. +\end{lstlisting} +\item Click checkmark OK. +\item Click on the \textit{Start} box and watch it go! +\item You can now check the output file for results. At the time + this was documented, \textit{rc=2pass} will be in the output. \end{enumerate} -If you need to re-render this, the Batch Render will still be set up but you have to click on the \textit{Enabled} column in the listbox to re-enable the jobs to run which puts an X there. Click Start again. You can reuse batch job using the \textit{save jobs} and \textit{load jobs} buttons in the batch render dialog. - -\paragraph{Render shortcuts for webm, h264, h265} are available by using the option files that are already set up for this purpose. Use the render menu as usual, with ffmpeg/mp4, choose h264 or h265 \textit{pass1of2\_h26x} for the video and \textit{passes1and\-2\_h26x} for the audio; -with ffmpeg/webm, choose \textit{pass1of2\_vp9}. When that is finished, you will have to use the render menu again and this time for video, choose \textit{pass2of2\_h26x} or \textit{pass2of2\_vp9}. The logfile is hard coded in the options file so will write over any currently existing logfile if you do not change it before you start the render. - -\paragraph{Requirements for some other libraries} ~\\ (used instead of \textit{flags +pass1} \& \textit{passlogfile}): - +If you need to re-render this, the Batch Render will still be set up +but you have to click on the \textit{Enabled} column in the listbox +to re-enable the jobs to run which puts an X there. Click Start +again. You can reuse batch job using the \textit{save jobs} and +\textit{load jobs} buttons in the batch render dialog. + +\paragraph{Render shortcuts for webm, h264, h265} are available by +using the option files that are already set up for this purpose. 
+Use the render menu as usual, with ffmpeg/mp4, choose h264 or h265 +\textit{pass1of2\_h26x} for the video and +\textit{passes1and\-2\_h26x} for the audio; with ffmpeg/webm, choose +\textit{pass1of2\_vp9}. When that is finished, you will have to use +the render menu again and this time for video, choose +\textit{pass2of2\_h26x} or \textit{pass2of2\_vp9}. The logfile is +hard coded in the options file so will write over any currently +existing logfile if you do not change it before you start the +render. + +\paragraph{Requirements for some other libraries} (used instead +of \textit{flags +pass1} \& \textit{passlogfile}): \begin{description} - \item[x265:] add this line: - \begin{lstlisting}[style=sh] -x265-params=pass=1:stats=/tmp/{temporary log file name}.log - \end{lstlisting} - at the time this document was written, you should see in the output: \\ \textit{stats-read=2} - - \item[libvpx-vp9, xvid, and huffyuv:]~ - - \begin{lstlisting}[style=sh] - cin_stats_filename /tmp/{temporary log file name}.log - flags +pass1 (or flags +pass2 for the second pass) - \end{lstlisting} +\item[x265:] add this line: +\begin{lstlisting}[style=sh] +x265-params=pass=1:stats=/tmp/{temporary-log-file-name}.log +\end{lstlisting} at the time this document was written, you should + see in the output: \textit{stats-read=2} +\item[libvpx-vp9, xvid, and huffyuv:]~ +\begin{lstlisting}[style=sh] +cin_stats_filename /tmp/{temporary-log-file-name}.log +flags +pass1 (or flags +pass2 for the second pass) +\end{lstlisting} \end{description} \textit{NOTE:} for vp9, the best Pixels is \textit{gbrp} @@ -637,21 +1197,20 @@ x265-params=pass=1:stats=/tmp/{temporary log file name}.log \label{sub:use_case_hevc} An example of video profile based on CRF, a quality-controlled -variable bitrate, instead of fixed quality scale (ABR). -HEVC (H.265) was developed as a successor to AVC (H.264) to more +variable bitrate, instead of fixed quality scale (ABR). 
HEVC +(H.265) was developed as a successor to AVC (H.264) to more efficiently compress the future large amounts of data from 2/4/8k -videos. -In comparison to AVC, an average saving of around 30 percent can be -assumed for the same quality. -Because HEVC is not bound to any size format, it is suitable for -virtually any image size. +videos. In comparison to AVC, an average saving of around 30 +percent can be assumed for the same quality. Because HEVC is not +bound to any size format, it is suitable for virtually any image +size. The following example is HD and FullHD oriented and produces a -picture quality similar to the Blu-ray with some limitations. -As container Matroska (\texttt{.mkv}) is used, but also mp4 and others are -possible. +picture quality similar to the Blu-ray with some limitations. As +container Matroska (\texttt{.mkv}) is used, but also mp4 and others +are possible. -\vspace{2ex} \begin{lstlisting}[style=sh] +\begin{lstlisting}[style=sh] matroska libx265 # CRF 16 creates a balanced compromise @@ -670,7 +1229,7 @@ preset=medium # Keyint does FFmpeg automatically, otherwise # the setting must match the frame rate. -#keyint\_min=25 +#keyint_min=25 # Profile does FFmpeg automatically. #profile=high @@ -690,47 +1249,82 @@ pixel_format=yuv420p \noindent \textit{NOTE:} A CRF of 16 delivers satisfactory results in most cases. However, if -the video material is really \emph{grainy}, a CRF~16 can lead to unwanted large files. In this case, a trial export of perhaps one minute should be performed. The resulting bit rate can be used to correct the CRF to 17,\,18,\,19\ldots -- remember, a CRF of 0 means lossless, the higher the number the stronger the lossy compression. The approximate calculation of the final file size can be extrapolated from the sample export. +the video material is really \emph{grainy}, a CRF~16 can lead to +unwanted large files. In this case, a trial export of perhaps one +minute should be performed. 
The resulting bit rate can be used to +correct the CRF to 17,\,18,\,19\ldots -- remember, a CRF of $0$ (zero) +means lossless, the higher the number the stronger the lossy +compression. The approximate calculation of the final file size can +be extrapolated from the sample export. The color space information must be used explicitly so that it can -be included in the video. \CGG{} or FFmpeg does not write it -by itself. Without this information the players (e.\,g.\ \href{https://mpv.io/}{mpv}) stick to the dimensions of the video and take the assumed color model from a table. With videos in the dimensions from 720 to 1080 this is bt709. For smaller dimensions, e.\,g.\ DVD, bt601 is assumed and for 4k and above it is bt2020. Normally this is not a problem, but if you want to export a FullHD without color loss to a smaller size like 576 for example, you have to inform the encoder as well as the decoder of the player. This also applies if the videos are to be loaded on video platforms, where they are then converted into videos of different sizes. It is a security measure to prevent false colors, such as the color profiles in digital photos and the copies made from them. +be included in the video. \CGG{} or FFmpeg does not write it by +itself. Without this information the players (e.\,g.\ +\href{https://mpv.io/}{mpv}) stick to the dimensions of the video +and take the assumed color model from a table. With videos in the +dimensions from 720 to 1080 this is bt709. For smaller dimensions, +e.\,g.\ DVD, bt601 is assumed and for 4k and above it is +bt2020. Normally this is not a problem, but if you want to export a +FullHD without color loss to a smaller size like 576 for example, +you have to inform the encoder as well as the decoder of the +player. This also applies if the videos are to be loaded on video +platforms, where they are then converted into videos of different +sizes. 
It is a security measure to prevent false colors, such as the +color profiles in digital photos and the copies made from them. The HEVC tuning has not been considered here, because it is is rarely used and requires background knowledge. Further links: \begin{itemize} - \item \href{http://x265.readthedocs.org/en/default/}{x265 - Documentation} - \item \href{http://x265.readthedocs.org/en/latest/cli.html}{x265 - Command Line Options} - \item \href{http://x265.readthedocs.org/en/latest/presets.html}{x265 - Presets/Tuning} +\item \href{http://x265.readthedocs.org/en/default/}{x265 + Documentation} +\item \href{http://x265.readthedocs.org/en/latest/cli.html}{x265 + Command Line Options} +\item \href{http://x265.readthedocs.org/en/latest/presets.html}{x265 + Presets/Tuning} \end{itemize} \subsection{Piping Video to a Command Line}% \label{sub:piping_video_command_line} -You can pipe a video to any command line on the computer, such as ffmpeg. This can be especially useful with raw video files. Next is an example usage. +You can pipe a video to any command line on the computer, such as +ffmpeg. This can be especially useful with raw video files. Next +is an example usage. \begin{enumerate} - \item on a terminal window create a named pipe file, for example: - \begin{lstlisting}[style=sh] +\item on a terminal window create a named pipe file, for example: +\begin{lstlisting}[style=sh] mknod /tmp/piper.yuv p - \end{lstlisting} - load your video and do your editing - \item set up your Render (\texttt{Shift-R}), you can choose a raw format such as \textit{yuv} or \textit{rgb} - \item for the filename \textit{Select a file to render to}, use the named pipe as created in step 1 (\texttt{/tmp/piper.yuv}) - \item for \textit{Insertion Strategy}, you will want to make sure to select \textit{insert nothing} - \item click for OK on the green checkmark.(the \CGG{} gui will look like it is hanging while waiting for a command line to use the pipe.) 
- \item on the terminal window, keyin your command, for example: - \begin{lstlisting}[style=sh] -/mnt0/build5/cinelerra-5.1/thirdparty/ffmpeg-3.4.1/ffmpeg -f rawvideo -pixel_format yuv420p \ -video_size 1280x720 -framerate 30000/1001 -i /tmp/piper.yuv /tmp/pys.mov - \end{lstlisting} +\end{lstlisting} load your video and do your editing +\item set up your Render (\texttt{Shift-R}), you can choose a raw + format such as \textit{yuv} or \textit{rgb} +\item for the filename \textit{Select a file to render to}, use the + named pipe as created in step 1 (\texttt{/tmp/piper.yuv}) +\item for \textit{Insertion Strategy}, you will want to make sure to + select \textit{insert nothing} +\item click for OK on the green checkmark.(the \CGG{} gui will look + like it is hanging while waiting for a command line to use the + pipe.) +\item on the terminal window, keyin your command, for example: +\begin{lstlisting}[style=sh] +/mnt0/build5/cinelerra-5.1/thirdparty/ffmpeg-3.4.1/ffmpeg -f \ + rawvideo -pixel_format yuv420p -video_size 1280x720 \ + -framerate 30000/1001 -i /tmp/piper.yuv /tmp/pys.mov +\end{lstlisting} \end{enumerate} -A slightly different option can be used instead that may be more familiar to some. In the render menu after choosing the File Format of \textit{ffmpeg}, use the pulldown to choose \textit{y4m} as the file type. This choice results in putting a header on the rendered output with some pertinent information that can be used for ffmpeg processing thus alleviating the requirement for \textit{pixel\_format}, \textit{video\_size}, and \textit{framerate} on the ffmpeg command line. In this case the format is \textit{yuv4mpegpipe} instead of \textit{rawvideo}. An example command line would look as follows (assuming the created pipe is called \texttt{piper.y4m}): +A slightly different option can be used instead that may be more +familiar to some. 
In the render menu after choosing the File Format +of \textit{ffmpeg}, use the pulldown to choose \textit{y4m} as the +file type. This choice results in putting a header on the rendered +output with some pertinent information that can be used for ffmpeg +processing thus alleviating the requirement for +\textit{pixel\_format}, \textit{video\_size}, and \textit{framerate} +on the ffmpeg command line. In this case the format is +\textit{yuv4mpegpipe} instead of \textit{rawvideo}. An example +command line would look as follows (assuming the created pipe is +called \texttt{piper.y4m}): \begin{lstlisting}[style=sh] ffmpeg -f yuv4mpegpipe -i /tmp/piper.y4m -vcodec libx264 /tmp/test.mp4 \end{lstlisting} @@ -738,6 +1332,25 @@ ffmpeg -f yuv4mpegpipe -i /tmp/piper.y4m -vcodec libx264 /tmp/test.mp4 \subsection{Faststart Option for MOV type files}% \label{sub:faststart_option_mov0} -If you have mov video and want to be able to start playing without having to first load the entire video, \textit{-movflags=+faststart} is needed for ffmpeg to put the meta-data, known as the \textit{moov atom}, at the beginning of the file. Otherwise, ffmpeg puts this atom at the end of the video file which means you have to wait to play until the whole video is loaded. Or worse yet, if the file becomes damaged in the middle and you can not get to the end, you won’t be able to play anything. - -Now you can have the \textit{moov atom} put on the front of the file (automatically via a second pass). To do this, when rendering using ffmpeg \& either the mp4 or qt format/container, click on the video/audio wrenches and choose \textit{faststart\_h264}. With the \textit{qt} format, settings will just be the default whereas the \textit{mp4} format uses the highest quality and lowest file size as possible, but you can easily modify these options in the associated Video Preset textbox. 
+If you have mov video and want to be able to start playing without +having to first load the entire video, \textit{-movflags=+faststart} +is needed for ffmpeg to put the meta-data, known as the \textit{moov + atom}, at the beginning of the file. Otherwise, ffmpeg puts this +atom at the end of the video file which means you have to wait to +play until the whole video is loaded. Or worse yet, if the file +becomes damaged in the middle and you can not get to the end, you +won’t be able to play anything. + +Now you can have the \textit{moov atom} put on the front of the file +(automatically via a second pass). To do this, when rendering using +ffmpeg \& either the mp4 or qt format/container, click on the +video/audio wrenches and choose \textit{faststart\_h264}. With the +\textit{qt} format, settings will just be the default whereas the +\textit{mp4} format uses the highest quality and lowest file size as +possible, but you can easily modify these options in the associated +Video Preset textbox. + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "../CinelerraGG_Manual" +%%% End: diff --git a/parts/Shortcuts.tex b/parts/Shortcuts.tex index 2095f5c..7711a71 100644 --- a/parts/Shortcuts.tex +++ b/parts/Shortcuts.tex @@ -5,7 +5,7 @@ Almost every \CGGI{} command has its own keyboard and mouse shortcuts. Here they are listed organized by window and type. If a desktop window manager and operating system is already using a specific key for its own purpose then that key will not be available for use as a shortcut in \CGG{}. An example might be the Alt key. Some specific alternatives -are listed in \ref{ssub:key_alternatives} in the "Key Alternatives" paragraph. +are listed in~\ref{ssub:key_alternatives} in the "Key Alternatives" paragraph. \section{Main window }% \label{sec:main_window} @@ -58,7 +58,7 @@ The Main window (also called the program window) consists of pulldown menus, but & Shuffle Edits & & Randomly reorder track edits. 
\\ & Reverse Edits & & Reverse track edits. \\ & Edit Length\dots & & Change selected track given duration. \\ - & Align Edits & & Change selected corresp. track edits to start/end same. \\ + & Align Edits & & Change selected corresp.\ track edits to start/end same. \\ & Trans. Length\dots & & Change selected track transitions to given duration. \\ & Detach trans. & & Remove selected track transitions. \\ & Clear labels & & Delete selected timeline label markers. \\ @@ -105,7 +105,7 @@ The Main window (also called the program window) consists of pulldown menus, but & Align cursor\dots & Ctrl-a & Align cursor on frames. \\ & -- Edit labels & & Toggle labels follow edits. \\ & -- Edit effects & & Toggle plugins follow edits. \\ - & -- Keyfrs fol. edits & & Toggle keyframes follow edits. \\ + & -- Keyfrs fol.\ edits & & Toggle keyframes follow edits. \\ & -- Typeless keyfrs & & Toggle typeless keyframes mode. \\ & Save settings & Ctrl-s & Save \CGG{}\_rc. \\ & Loop Playback & Shift-L & Set loop playback region to selection/all. \\ @@ -266,9 +266,9 @@ The Main window (also called the program window) consists of pulldown menus, but & , (comma) & & Scroll window timeline display left (not insertion pt). \\ & , (comma) & Ctrl & Move cursor to label left of cursor (main shortcuts). \\ & , (comma) & Alt & Decrease auto curve limits (zoombar selected curve). \\ - & . (period) & & Scroll window timeline display right (not insertion pt). \\ - & . (period) & Ctrl & Move cursor to label right of cursor (main shortcuts). \\ - & . (period) & Alt & Move cursor to edit boundary right of cursor position. \\ + & .\ (period) & & Scroll window timeline display right (not insertion pt). \\ + & .\ (period) & Ctrl & Move cursor to label right of cursor (main shortcuts). \\ + & .\ (period) & Alt & Move cursor to edit boundary right of cursor position. \\ & $\uparrow$ (up arrow) & & Increase timeline duration (zoom out timeline). 
\\ & $\uparrow$ (up arrow) & Ctrl & Increase audio sample waveform scale. \\ & $\uparrow$ (up arrow) & Alt & Increase auto curve limits (zoombar selected curve). \\ @@ -360,7 +360,7 @@ The Main window (also called the program window) consists of pulldown menus, but & Show safe regs. & F10 & Draws safe regions in the video output. \\ & & p & Turn on/off Click to Play. \\ % FIXME: [] - & & Shift+w/Ctrl & With transport btn. loops play; all or with [\;]. \\ + & & Shift+w/Ctrl & With transport btn.\ loops play; all or with [\;]. \\ \midrule \textcolor{CinBlueText}{Transport} & (plus 3 below) & & Same as in Main window. \\ \midrule @@ -379,7 +379,7 @@ The Main window (also called the program window) consists of pulldown menus, but Identifier & \textbf{Key} & \textbf{Qualifier} & \textbf{Description}\\ \midrule \endhead - \textcolor{CinBlueText}{Keypresses} & Mid. click+drag & & Pans the view/moves the image. \\ + \textcolor{CinBlueText}{Keypresses} & Mid.\ click+drag & & Pans the view/moves the image. \\ & Middle mouse & Shift & Returns to Auto zoom. \\ & RMB & & Brings up zoom\%/auto;reset cam/proj;hide ctrls. \\ & LMB Zoom & & Zooms in. \\ @@ -431,18 +431,18 @@ The Main window (also called the program window) consists of pulldown menus, but \midrule \endhead \textcolor{CinBlueText}{Transport} & Rewind & home & Jump to beginning. \\ - & Fast reverse & 'KP +' /Alt-p & Toggle double speed reverse play. \\ - & Normal reverse & 'KP 6' / Alt-o & Toggle normal speed reverse play. \\ - & Frame reverse & 'KP 4' / Alt-u & Render previous frame. \\ - & Stop & 'KP 0' / Alt-m & Stop playback. \\ - & Frame forward & 'KP 1' / Alt-j & Render next frame. \\ - & Normal forward & 'KP 3' / Alt-l & Toggle normal speed forward play. \\ - & Fast forward & 'KP Enter'/A-; & Toggle double speed forward play. \\ + & Fast reverse & `KP +' /Alt-p & Toggle double speed reverse play. \\ + & Normal reverse & `KP 6' / Alt-o & Toggle normal speed reverse play. 
\\ + & Frame reverse & `KP 4' / Alt-u & Render previous frame. \\ + & Stop & `KP 0' / Alt-m & Stop playback. \\ + & Frame forward & `KP 1' / Alt-j & Render next frame. \\ + & Normal forward & `KP 3' / Alt-l & Toggle normal speed forward play. \\ + & Fast forward & `KP Enter'/A-; & Toggle double speed forward play. \\ & Jump to end & end & Jump to end. \\ - & (No button) & 'KP 5' / Alt-i & Toggle slow speed reverse play. \\ - & (No button) & 'KP 2' / Alt-k & Toggle slow speed forward play. \\ - & & 'KP\#-Shift’ & Shift added to KP \#, adds or subtracts audio. \\ - & & 'Ctrl-KP\#’ & If [ ] set, “KP2,3,5,6,+,enter” play between ptrs.. \\ + & (No button) & `KP 5' / Alt-i & Toggle slow speed reverse play. \\ + & (No button) & `KP 2' / Alt-k & Toggle slow speed forward play. \\ + & & `KP\#-Shift' & Shift added to KP \#, adds or subtracts audio. \\ + & & `Ctrl-KP\#' & If [ ] set, “KP2,3,5,6,+,enter” play between ptrs. \\ & & + Shift w/Alt & Shift with Alt+x above, adds/remove audio. \\ & & + Shift w/Ctrl & Loops play; all or between In/Out. \\ & & Left click & Start or stop play forward. \\ @@ -450,21 +450,21 @@ The Main window (also called the program window) consists of pulldown menus, but & & Middle wheel & Play forward or reverse 1 frame. \\ & & p & Turns on/off Click to Play button. \\ \midrule - \textcolor{CinBlueText}{Edit} & In point & '[' or ‘<’ & Toggle In point timeline marker\\ - & Out point & ']' or ‘>’ & Toggle Out point timeline marker. \\ + \textcolor{CinBlueText}{Edit} & In point & `[' or ‘<’ & Toggle In point timeline marker. \\ + & Out point & `]' or ‘>’ & Toggle Out point timeline marker. \\ & & Ctrl-t & Clear both In and Out pointers. \\ & Splice & v & Create splice. \\ & Overwrite & b & Overwrite. \\ & To clip & i & Copy selection and create clip. \\ & Copy & c & Copy selection to cut buffer. \\ - & Splice & 'v' + Shift & Create splice of the entire clip. \\ - & Overwrite & 'b' + Shift & Overwrite using the entire clip.
\\ - & To clip & 'i' + Shift & Copy all and create clip. \\ - & Copy & 'c' + Shift & Copy entire clip to cut buffer. \\ + & Splice & `v' + Shift & Create splice of the entire clip. \\ + & Overwrite & `b' + Shift & Overwrite using the entire clip. \\ + & To clip & `i' + Shift & Copy all and create clip. \\ + & Copy & `c' + Shift & Copy entire clip to cut buffer. \\ & Show meters & & Toggle show meters. \\ & Toggle label & l & Toggle label at current position. \\ - & Previous label & 'Ctrl $\leftarrow$' & Move to label before cursor. \\ - & Next label & 'Ctrl $\rightarrow$' & Move to label after cursor. \\ + & Previous label & `Ctrl $\leftarrow$' & Move to label before cursor. \\ + & Next label & `Ctrl $\rightarrow$' & Move to label after cursor. \\ & Manual Goto & g & Jump to time selected by popup. \\ \midrule \textcolor{CinBlueText}{Mixer} & & Double click & On mixer window, pastes into timeline\\ diff --git a/parts/Windows.tex b/parts/Windows.tex index 95bebf1..02eead2 100644 --- a/parts/Windows.tex +++ b/parts/Windows.tex @@ -18,12 +18,17 @@ It is the output of the rendering operations and this is what is saved when you Immediately to the left of the timeline is the patchbay. The patchbay contains options that affect each track. These options are described in great detail in the Editing section (\ref{sec:patchbay}). -The \textit{Window} pulldown on the main window contains options that affect the 4 main windows. The first 3 options are used to display each of the windows in case one was accidentally closed. You can -move or resize the windows as needed, save that particular layout, and revert to the default positions -to reposition all 4 windows to the original screen configuration. -On dual headed displays, the \textit{Default positions} operation only uses the one monitor to display the windows, but as you -can see in the \textit{Window} pulldown you have more options to change that. 
Usage with dual monitors is -explained in \ref{sec:multiscreen_playback_configuration}. +The \textit{Window} pulldown on the main window contains options +that affect the 4 main windows. The first 3 options are used to +display each of the windows in case one was accidentally closed. +You can move or resize the windows as needed, save that particular +layout, and revert to the default positions to reposition all 4 +windows to the original screen configuration. On dual headed +displays, the \textit{Default positions} operation only uses the one +monitor to display the windows, but as you can see in the +\textit{Window} pulldown you have more options to change that. Usage +with dual monitors is explained +in~\ref{sec:multiscreen_playback_configuration}. \subsection{Video and Audio Tracks and Navigation}% \label{sub:video_and_audio_tracks_and_navigation} @@ -68,7 +73,7 @@ In order of appearance in the zoom panel as rectangular boxes and either tumbler Then more details are provided in the next paragraphs. \vspace{2ex} -\begin{tabular}{ l l } +\begin{tabular}{ll} \hline Sample zoom & Duration visible on the timeline \\ Amplitude & Audio waveform scale \\ @@ -203,12 +208,12 @@ With the arrow highlighted for \emph{drag and drop mode}, a double click with th Then dragging in the timeline repositions that edit and this can be used for moving effects, changing the order of playlists, or moving video pieces around. There are numerous methods to cut and paste in \emph{drag and drop mode} by setting In/Out points to define -a selected region or using the Copy/Paste Behavior as outlined in \ref{sub:copy_paste_behavior}. +a selected region or using the Copy/Paste Behavior as outlined in~\ref{sub:copy_paste_behavior}. In this mode, clicking the LMB in the timeline does not reposition the \textit{Insertion Point}. When the I-beam is highlighted, you are in \emph{cut and paste mode}. 
In cut and paste mode, clicking the LMB in the timeline does reposition the \textit{Insertion Point}. -Double clicking in the timeline selects the entire edit the cursor is over, i.e. that column. +Double clicking in the timeline selects the entire edit the cursor is over, i.e.\ that column. Dragging in the timeline with the LMB pressed down, highlights a selected region and this is the region that is affected by cut and paste operations. It is also the playback range used for the subsequent playback operation. Holding down the Shift key while clicking in the timeline extends the highlighted region. @@ -503,7 +508,7 @@ Now you can clip/play/expand or edit the previous playback selection. The speed automation causes the playback sampling rate to increase or decrease to a period controlled by the speed automation curve. This can make playback speed-up or slow-down according to the scaled sampling rate, as \textit{time is multiplied by speed} (Speed $\times$ Unit\_rate). For more information on changing -the speed, read the section on Speed Automation \ref{sec:speed_fade_automation_gang}. +the speed, read the section on Speed Automation~\ref{sec:speed_fade_automation_gang}. \subsubsection*{Alternative to using Numeric Keypad for Playing}% \label{ssub:alternative_to_using_numeric_keypad_for_playing} @@ -565,8 +570,7 @@ On the bottom of the window, there are many of the same transport buttons and controls that are available in the Program window. They work the same as in the Program window and also have tooltips that are visible when you mouse over each of the icons so their use is fairly obvious. However, -of particular note is the button \textit{Click to play} which is described in -\ref{sub:click_to_play_in_viewer_and_compositor}. +of particular note is the button \textit{Click to play} which is described in~\ref{sub:click_to_play_in_viewer_and_compositor}. 
Next to all of these controls all the way to the right side, there is a \textit{zoom menu} and a \textit{tally light}. The \textit{zoom menu} has a pulldown with different settings that you can choose from or you can just use the tumbler arrows to the right. Generally when just getting started, you @@ -649,7 +653,7 @@ get to the desired spot - these X,Y coordinates will be displayed in the \textit box. Clicking the LMB creates Point 1 and then continue to hold down the LMB so that a ruler line is created between this Point 1 and the stopping Point 2. \textit{Deltas} is the X,Y difference between the 2 points; \textit{Distance} is the number of pixels between the 2 points; and \textit{Angle} is the angle in degrees of the ruler line. -In Figure ~\ref{fig:safe_regions} you can see the Ruler menu on the right side of the Compositor window. +In Figure~\ref{fig:safe_regions} you can see the Ruler menu on the right side of the Compositor window. Holding down the Ctrl key while dragging with the LMB on one of the points, will ensure that the line is always at a multiple of a 45 degree angle. Holding down the Alt key while @@ -657,13 +661,13 @@ dragging with the LMB on one of the points, will translate the ruler line to ano the video while maintaining its length and angle. If you dismiss the Ruler menu, click on \textit{Show tool info} to get the menu to popup again. \item[Adjust camera automation] the camera brings up the camera editing tool. Enable \textit{Show tool info} if the popup menu does not appear. More detail for usage is provided in the subsequent -paragraph \ref{sub:camera_and_projector}. +paragraph~\ref{sub:camera_and_projector}. \item[Adjust projector automation] the projector brings up the projector editing tool. Enable \textit{Show tool info} to get the menu to popup again. More detail for usage is provided in the -subsequent paragraph \ref{sub:camera_and_projector}. +subsequent paragraph~\ref{sub:camera_and_projector}. 
\item[Crop a layer or output] this is a cropping tool used to reduce the visible picture area. More detail for usage is provided in a subsequent paragraph (\ref{sub:cropping}). There is also a Crop \& Position plugin that provides -a different set of capabilities \ref{sub:crop_position}. +a different set of capabilities~\ref{sub:crop_position}. \item[Get color / eyedropper] brings up the eyedropper used to detect the color at a particular spot. Enable the \textit{Show tool info} if the Color popup menu does not come up automatically or if that menu was accidentally dismissed. Click on a specific color in the video @@ -687,7 +691,7 @@ no dialog popup menus. can see in Figure~\ref{fig:safe_regions}. On some particular TVs/monitors/displays, the borders of the image are cut off and that cut off section might not be as square as it appears in the compositor window. -These are especially useful if the device for the output display is an older model TV. +These are especially useful if the device for the output display is an older model TV\@. The outside largest outline is the \textit{action safe overlay}; whereas the inside smallest outline is the \textit{title safe overlay}. @@ -789,7 +793,7 @@ editing mode, click on the \textit{Adjust projector automation} icon in the Comp will then see red border lines surrounding the image and 2 diagonal lines criss-crossing in the middle, displayed in the video window. The red outline indicates the size of the frame that will be sent to the Output. You can easily drag the box with LMB, moving the frame in $x$ and $y$ directions. -When moving along the $z-axis$ (i.e. the zoom, with SHIFT+Drag) the box exactly follows the movement +When moving along the $z$-axis (i.e.\ the zoom, with SHIFT+Drag) the box exactly follows the movement and the size of the frame. After you position the video with the projector, you may next want to \textit{Adjust camera automation}.
@@ -826,7 +830,7 @@ The green box is the Viewport; at the beginning it coincides with the size of th \label{ssub:camera_and_projector_menu} The camera and projector have shortcut operations that do not appear in the popup menu and are not represented in video overlays. -These are accessed in the \emph{Show tool info} window . +These are accessed in the \emph{Show tool info} window. Most operations in the Compositor window have a tool window which is enabled by activating the question mark icon (figure~\ref{fig:camera_tool}). \begin{wrapfigure}[10]{O}{0.45\linewidth} @@ -867,7 +871,7 @@ In the compositing window, there is a popup menu of options for the camera and p \begin{enumerate} \item Start by shrinking the projector to $z=0,500$ ($\frac{1}{4}$ of the original frame). - \item The next step is to switch to the camera and note that the green box has assumed the size of the projector, i.e. the red box. The value of $z$ of the camera is always equal to $1,000$ (default) but the frame is $\frac{1}{4}$ of the original frame, i.e. it has the size of the projector that has $z=0,500$. This is the current viewport size. + \item The next step is to switch to the camera and note that the green box has assumed the size of the projector, i.e.\ the red box. The value of $z$ of the camera is always equal to $1,000$ (default) but the frame is $\frac{1}{4}$ of the original frame, i.e.\ it has the size of the projector that has $z=0,500$. This is the current viewport size. \item You enlarge the room bringing $z=2,000$. You can see that the dimensions of the viewport (green box) do not change, remaining the same as those of the projector. However, the frame has been enlarged and this variation is indicated by the enlargement of the yellow box. Let's remember that this follows the changes made with the camera tool. \item We can drag the room so that we can center the frame to our liking. The movement of the yellow box shows well the variation compared to the green box. 
\item Finally, if we want, we can switch to the projector tool to move the output frame to the position we want with respect to the size of the source. Of course, we can also work on the $z$, which in the example is at $z=0.500$, if we have decided to change the size of the output. @@ -958,7 +962,7 @@ The \textit{Enable} row of masks makes it so you can enable all or none of the m There are 4 shapes that are automatically available for usage as masks – square, circle, triangle, and oval. In addition, the next 3 symbols in this section are for the purpose of loading, saving, and deleting your own customized shapes. The first symbol, \textit{Load} preset, will bring up a list of your previously saved presets. Clicking on \textit{Save} preset brings up a popup window allowing you to provide a name used to identify the preset you want to save, along with a pulldown to see the names of your other saved presets. Clicking on \textit{Delete} preset also brings up a textbox with a pulldown to choose which one to delete. There is a file, called \texttt{mask\_rc}, in \texttt{\$HOME/.bcast5} that records your custom masks. -When you click \textit{Load} preset, keep in mind that it will write the mask number that you have selected so if you already have a mask at that location, it will write over it – just \textit{Undo mask} under the main window Edit pulldown (shortcut 'z') to revert to the previous if you made this mistake. +When you click \textit{Load} preset, keep in mind that it will write the mask number that you have selected so if you already have a mask at that location, it will write over it – just \textit{Undo mask} under the main window Edit pulldown (shortcut `z') to revert to the previous if you made this mistake. \subsubsection*{Position \& Scale section}% \label{ssub:position_scale_section} @@ -1032,7 +1036,7 @@ Note: Not all OpenGL software can support the current masking methods. 
If your The \textit{Help} checkbox can be enabled in order to see a list of the keys used to perform various operations. If you use Masking infrequently, these are a valuable reminder to which key combinations to use. Currently they are as follows: \vspace{2ex} -\begin{tabular}{ l l } +\begin{tabular}{ll} \hline Shift+LMB & move an end point \\ Ctrl+LMB & move a control point \\ @@ -1050,7 +1054,7 @@ The \textit{Help} checkbox can be enabled in order to see a list of the keys use \vspace{2ex} Note: For some desktop window managers, certain keys may already be in use by the operating system, so you will either have to redefine them in your desktop or use different key combinations. For example, at least some desktops used with \textit{UbuntuStudio 16.04} and \textit{Arch} field the \texttt{Alt} key, thus requiring alternative key combinations to be needed. Below are some of these alternatives. \vspace{2ex} -\begin{tabular}{ l p{11cm}} +\begin{tabular}{lp{11cm}} \hline LMB & move/create an end point (to move the end point the pointer must be above the point) \\ Shift+LMB & move an end point (the pointer may be near the point, not above it) \\ @@ -1066,7 +1070,7 @@ The \textit{Help} checkbox can be enabled in order to see a list of the keys use Focus checkbox = unchecked: \vspace{2ex} -\begin{tabular}{ l l } +\begin{tabular}{ll} \hline Wheel & rotate around Pivot Point \\ Shift+Wheel & scale around Pivot Point \\ @@ -1080,7 +1084,7 @@ Focus checkbox = unchecked: Focus checkbox = checked: \vspace{2ex} -\begin{tabular}{ l l } +\begin{tabular}{ll} \hline Wheel & rotate around Pivot Point (“Custom focus point”) \\ Shift+Wheel & scale around Pivot Point (“Custom focus point”) \\ @@ -1185,7 +1189,7 @@ are listed next. \item Switch to a fullscreen display by choosing \textit{Fullscreen}. To switch back, click with the RMB on the display again and choose \textit{Windowed}. 
\item Change the display size by choosing the \textit{Zoom} function to select a zoom level of -25\%, 33\%, ... 300\%, or 400\% of the original media size. +25\%, 33\%, \ldots\ 300\%, or 400\% of the original media size. \item To remove the current media from being displayed, choose \textit{Close source}. \end{enumerate} @@ -1194,7 +1198,7 @@ original asset's format. Operations performed in the Viewer affect a temporary E than the timeline. By default, the Viewer window is automatically available but if it gets accidentally closed you can open it again by using the pulldown \texttt{Window $\rightarrow$ Show Viewer} to bring it back up. More details for editing in the Viewer window with the Two Screen -Editing method is explained in \ref{sec:two_screen_editing}. +Editing method is explained in~\ref{sec:two_screen_editing}. \section{Options in both the Compositor and Viewer Windows}% \label{sec:options_in_both_the_compositor_and_viewer_windows} @@ -1261,7 +1265,7 @@ There are either a left or right resize pointer and you can click and drag in ei \begin{figure}[htpb] \centering \includegraphics[width=1.0\linewidth]{timebar2.png} - \caption{ A left-facing arrow on the right side of the blue slider bar is used to drag the bar.} + \caption{A left-facing arrow on the right side of the blue slider bar is used to drag the bar.} \label{fig:timebar2} \end{figure} @@ -1999,8 +2003,7 @@ Overloading by less than 3 dB is usually acceptable. While overloading is treated as positive numbers in \CGG{}, it is clipped to 0 when sent to a sound card or file. - - - - - +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "../CinelerraGG_Manual" +%%% End: