Commit 24947c36 authored by Philippe Gorley, committed by Sébastien Blin

doc: fix doxygen comments

Change-Id: I5e7a776221b2048692966b20ff270a2d8fea8bbf
parent 79354e7c
@@ -34,7 +34,7 @@ struct SwrContext;
namespace jami {
/**
* Wrapper class for libswresample
* @brief Wrapper class for libswresample
*/
class Resampler {
public:
@@ -42,36 +42,49 @@ public:
~Resampler();
/**
* @brief Resample a frame.
*
* Resample from @input format to @output format.
*
* NOTE: sample_rate, channel_layout, and format should be set on @output
*/
int resample(const AVFrame* input, AVFrame* output);
/**
* Wrappers around resample(AVFrame*, AVFrame*) for convenience.
* @brief Wrapper around resample(AVFrame*, AVFrame*) for convenience.
*/
void resample(const AudioBuffer& dataIn, AudioBuffer& dataOut);
/**
* @brief Wrapper around resample(AVFrame*, AVFrame*) for convenience.
*/
std::unique_ptr<AudioFrame> resample(std::unique_ptr<AudioFrame>&& in, const AudioFormat& out);
/**
* @brief Wrapper around resample(AVFrame*, AVFrame*) for convenience.
*/
std::shared_ptr<AudioFrame> resample(std::shared_ptr<AudioFrame>&& in, const AudioFormat& out);
private:
NON_COPYABLE(Resampler);
/**
* @brief Reinitializes filter according to new format.
*
* Reinitializes the resampler when new settings are detected. As long as both input and
* output formats don't change, this will only be called once.
*/
void reinit(const AVFrame* in, const AVFrame* out);
/**
* Libswresample resampler context.
* @brief Libswresample resampler context.
*
* NOTE SwrContext is an incomplete type and cannot be stored in a smart pointer.
*/
SwrContext* swrCtx_;
/**
* Number of times @swrCtx_ has been initialized with no successful audio resampling.
* @brief Number of times @swrCtx_ has been initialized with no successful audio resampling.
*
* 0: Uninitialized
* 1: Initialized
......
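To make the pre-configuration requirement concrete, here is a minimal usage sketch (not part of the commit): it assumes Resampler is default-constructible and that a negative return signals an error, neither of which is shown in this diff.

extern "C" {
#include <libavutil/channel_layout.h>
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>
}

// Hypothetical example: resample any input frame to 48 kHz stereo s16.
int resampleTo48kStereo(jami::Resampler& resampler, const AVFrame* in)
{
    AVFrame* out = av_frame_alloc();
    // Per the NOTE above, the caller sets the target parameters on @output.
    out->sample_rate    = 48000;
    out->channel_layout = AV_CH_LAYOUT_STEREO;
    out->format         = AV_SAMPLE_FMT_S16;
    int err = resampler.resample(in, out);   // assumed: negative on failure
    av_frame_free(&out);                     // a real caller would consume out first
    return err;
}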
@@ -37,7 +37,7 @@ struct AVFilterInOut;
namespace jami {
/**
* Provides access to libavfilter.
* @brief Provides access to libavfilter.
*
* Can be used for filters with an unlimited number of inputs.
* Multiple outputs are not supported. They add complexity for little gain.
@@ -64,36 +64,38 @@ class MediaFilter {
~MediaFilter();
/**
* Returns the current filter graph string.
* @brief Returns the current filter graph string.
*/
std::string getFilterDesc() const;
/**
* Initializes the filter graph with one or more inputs and one output. Returns a negative code on error.
* @brief Initializes the filter graph with one or more inputs and one output. Returns a negative code on error.
*/
int initialize(const std::string& filterDesc, std::vector<MediaStream> msps);
/**
* Returns a MediaStream object describing the input specified by @inputName.
* @brief Returns a MediaStream object describing the input specified by @inputName.
*/
MediaStream getInputParams(const std::string& inputName) const;
/**
* Returns a MediaStream struct describing the frames that will be output.
* @brief Returns a MediaStream struct describing the frames that will be output.
*
* When called in an invalid state, the returned format will be invalid (less than 0).
*/
MediaStream getOutputParams() const;
/**
* Give the specified source filter an input frame. Caller is responsible for freeing the frame.
* @brief Give the specified source filter an input frame.
*
* Caller is responsible for freeing the frame.
*
* NOTE Will fail if @inputName is not found in the graph.
*/
int feedInput(AVFrame* frame, const std::string& inputName);
/**
* Pull a frame from the filter graph. Caller owns the frame reference.
* @brief Pull a frame from the filter graph. Caller owns the frame reference.
*
* Returns AVERROR(EAGAIN) if filter graph requires more input.
*
@@ -102,7 +104,7 @@ class MediaFilter {
std::unique_ptr<MediaFrame> readOutput();
/**
* Flush filter to indicate EOF.
* @brief Flush filter to indicate EOF.
*/
void flush();
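For orientation, a hedged sketch of the call sequence these comments describe (initialize, feed, read, flush). The filter string and the input name "main" are placeholders, and treating an empty pointer from readOutput() as "needs more input" is an assumption based on the AVERROR(EAGAIN) note above.

// Hypothetical example: one named input, one output.
void filterOneFrame(jami::MediaFilter& filter,
                    const std::vector<jami::MediaStream>& inputs,
                    AVFrame* frame)
{
    if (filter.initialize("[main] negate", inputs) < 0)   // negative code on error
        return;
    if (filter.feedInput(frame, "main") < 0)               // name must exist in the graph
        return;
    while (auto out = filter.readOutput()) {                // caller owns the returned frame
        // ... use the filtered frame (handling omitted in this sketch) ...
    }
    filter.flush();                                         // signal EOF to the graph
}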
@@ -110,59 +112,67 @@ class MediaFilter {
NON_COPYABLE(MediaFilter);
/**
* Initializes output of filter graph.
* @brief Initializes output of filter graph.
*/
int initOutputFilter(AVFilterInOut* out);
/**
* Initializes an input of filter graph.
* @brief Initializes an input of filter graph.
*/
int initInputFilter(AVFilterInOut* in, MediaStream msp);
/**
* Reinitializes the filter graph with @inputParams_, which should be updated beforehand.
* @brief Reinitializes the filter graph.
*
* Reinitializes with @inputParams_, which should be updated beforehand.
*/
int reinitialize();
/**
* Convenience method that prints @msg and returns err.
* @brief Convenience method that prints @msg and returns err.
*
* NOTE @msg should not be null.
*/
int fail(std::string msg, int err) const;
/**
* Frees resources used by MediaFilter.
* @brief Frees resources used by MediaFilter.
*/
void clean();
/**
* Filter graph pointer.
* @brief Filter graph pointer.
*/
AVFilterGraph* graph_ = nullptr;
/**
* Filter graph output. Corresponds to a buffersink/abuffersink filter.
* @brief Filter graph output.
*
* Corresponds to a buffersink/abuffersink filter.
*/
AVFilterContext* output_ = nullptr;
/**
* List of filter graph inputs. Each corresponds to a buffer/abuffer filter.
* @brief List of filter graph inputs.
*
* Each corresponds to a buffer/abuffer filter.
*/
std::vector<AVFilterContext*> inputs_;
/**
* List of filter graph input parameters. Same order as @inputs_.
* @brief List of filter graph input parameters.
*
* Same order as @inputs_.
*/
std::vector<MediaStream> inputParams_;
/**
* Filter graph string.
* @brief Filter graph string.
*/
std::string desc_ {};
/**
* Flag to know whether or not the filter graph is initialized.
* @brief Flag to know whether or not the filter graph is initialized.
*/
bool initialized_ {false};
};
......
@@ -45,12 +45,14 @@ public:
~MediaRecorder();
/**
* Gets whether or not the recorder is active.
* @brief Gets whether or not the recorder is active.
*/
bool isRecording() const;
/**
* Get file path of file to be recorded. Same path as sent to @setPath, but with
* @brief Get file path of file to be recorded.
*
* Same path as sent to @setPath, but with
* file extension appended.
*
* NOTE @audioOnly must be called to have the right extension.
@@ -58,20 +60,24 @@ public:
std::string getPath() const;
/**
* @brief Resulting file will be audio or video.
*
* Sets whether or not the output file will have audio. Determines the extension of the
* output file (.ogg or .webm).
*/
void audioOnly(bool audioOnly);
/**
* Sets output file path.
* @brief Sets output file path.
*
* NOTE An empty path will put the output file in the working directory.
*/
void setPath(const std::string& path);
/**
* Sets title and description metadata for the file. Uses default if either is empty.
* @brief Sets title and description metadata for the file.
*
* Uses default if either is empty.
* Default title is "Conversation at %Y-%m-%d %H:%M:%S".
* Default description is "Recorded with Jami https://jami.net".
*
@@ -80,23 +86,30 @@ public:
void setMetadata(const std::string& title, const std::string& desc);
/**
* Adds a stream to the recorder. Caller must then attach this to the media source.
* @brief Adds a stream to the recorder.
*
* Caller must then attach this to the media source.
*/
Observer<std::shared_ptr<MediaFrame>>* addStream(const MediaStream& ms);
/**
* Gets the stream observer so the caller can detach it from the media source.
* @brief Gets the stream observer.
*
* This is so the caller can detach it from the media source.
*/
Observer<std::shared_ptr<MediaFrame>>* getStream(const std::string& name) const;
/**
* Starts the record. Streams must have been added using Observable::attach and
* @addStream.
* @brief Initializes the file.
*
* Streams must have been added using Observable::attach and @addStream.
*/
int startRecording();
/**
* Stops the record. Streams must be removed using Observable::detach afterwards.
* @brief Finalizes the file.
*
* Streams must be removed using Observable::detach afterwards.
*/
void stopRecording();
......
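Putting the comments above together, a hedged sketch of the attach/record/detach sequence (the Observable parameter type, the path, and the assumption that a negative startRecording() return means failure are not taken from this diff):

// Hypothetical example of the recording lifecycle described above.
void recordAudio(jami::MediaRecorder& rec,
                 jami::Observable<std::shared_ptr<jami::MediaFrame>>& source,
                 const jami::MediaStream& ms)
{
    rec.audioOnly(true);                 // resulting file gets the .ogg extension
    rec.setPath("/tmp/call-recording");  // recorder appends the extension itself
    rec.setMetadata("", "");             // empty strings fall back to the defaults
    auto* obs = rec.addStream(ms);       // register the stream with the recorder
    source.attach(obs);                  // then attach it to the media source
    if (rec.startRecording() >= 0) {     // assumed: negative on failure
        // ... frames flow through the observer while recording ...
        rec.stopRecording();
    }
    source.detach(obs);                  // detach after stopping, per the comment above
}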
@@ -34,69 +34,81 @@ extern "C" {
namespace jami { namespace video {
/**
* Provides an abstraction layer to the hardware acceleration APIs in FFmpeg.
* @brief Provides an abstraction layer to the hardware acceleration APIs in FFmpeg.
*/
class HardwareAccel {
public:
/**
* Static factory method for hardware decoding.
* @brief Static factory method for hardware decoding.
*/
static std::unique_ptr<HardwareAccel> setupDecoder(AVCodecID id, int width, int height);
/**
* Static factory method for hardware encoding.
* @brief Static factory method for hardware encoding.
*/
static std::unique_ptr<HardwareAccel> setupEncoder(AVCodecID id, int width, int height,
AVBufferRef* framesCtx = nullptr);
/**
* @brief Transfers hardware frame to main memory.
*
* Transfers a hardware decoded frame back to main memory. Should be called after
* the frame is decoded using avcodec_send_packet/avcodec_receive_frame.
*
* @frame: Reference to the decoded hardware frame.
* @returns: Software frame.
* If @frame is software, this is a no-op.
*
* @param frame Reference to the decoded hardware frame.
* @param desiredFormat Software pixel format that the hardware outputs.
* @returns Software frame.
*/
static std::unique_ptr<VideoFrame> transferToMainMemory(const VideoFrame& frame, AVPixelFormat desiredFormat);
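As a short illustration of the helper above (the NV12 target format and the using-directive are assumptions made for this sketch, not taken from the diff):

extern "C" {
#include <libavutil/pixfmt.h>
}

using namespace jami::video;

// Hypothetical: copy a hardware-decoded frame into main memory.
std::unique_ptr<VideoFrame> toSoftware(const VideoFrame& hwFrame)
{
    // If hwFrame is already a software frame this is effectively a no-op,
    // per the comment above; NV12 is only an illustrative desired format.
    return HardwareAccel::transferToMainMemory(hwFrame, AV_PIX_FMT_NV12);
}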
/**
* @brief Constructs a HardwareAccel object
*
* Made public so std::unique_ptr can access it. Should not be called.
*/
HardwareAccel(AVCodecID id, const std::string& name, AVHWDeviceType hwType, AVPixelFormat format, AVPixelFormat swFormat, CodecType type);
/**
* Dereferences hardware contexts.
* @brief Dereferences hardware contexts.
*/
~HardwareAccel();
/**
* Codec that is being accelerated.
* @brief Codec that is being accelerated.
*/
AVCodecID getCodecId() const { return id_; };
/**
* Name of the hardware layer/API being used.
* @brief Name of the hardware layer/API being used.
*/
std::string getName() const { return name_; };
/**
* Hardware format.
* @brief Hardware format.
*/
AVPixelFormat getFormat() const { return format_; };
/**
* Software format. For encoding it is the format expected by the hardware. For decoding
* @brief Software format.
*
* For encoding it is the format expected by the hardware. For decoding
* it is the format output by the hardware.
*/
AVPixelFormat getSoftwareFormat() const { return swFormat_; }
/**
* Gets the name of the codec.
* @brief Gets the name of the codec.
*
* Decoding: avcodec_get_name(id_)
* Encoding: avcodec_get_name(id_) + '_' + name_
*/
std::string getCodecName() const;
/**
* @brief If hardware decoder can feed hardware encoder directly.
*
* Returns whether or not the decoder is linked to an encoder or vice-versa. Being linked
* means an encoder can directly use the decoder's hardware frame, without first
* transferring it to main memory.
@@ -104,7 +116,9 @@ public:
bool isLinked() const { return linked_; }
/**
* Set some extra details in the codec context. Should be called after a successful
* @brief Set some extra details in the codec context.
*
* Should be called after a successful
* setup (setupDecoder or setupEncoder).
* For decoding, sets the hw_device_ctx and get_format callback. If the decoder has
* a frames context, mark as linked.
@@ -114,18 +128,21 @@ public:
void setDetails(AVCodecContext* codecCtx);
/**
* @brief Transfers a frame to/from the GPU memory.
*
* Transfers a hardware decoded frame back to main memory. Should be called after
* the frame is decoded using avcodec_send_packet/avcodec_receive_frame or before
* the frame is encoded using avcodec_send_frame/avcodec_receive_packet.
*
* @frame: Hardware frame when decoding, software frame when encoding.
* @returns: Software frame when decoding, hardware frame when encoding.
* @param frame Hardware frame when decoding, software frame when encoding.
* @returns Software frame when decoding, hardware frame when encoding.
*/
std::unique_ptr<VideoFrame> transfer(const VideoFrame& frame);
/**
* Links this HardwareAccel's frames context with the passed in context. This serves
* to skip transferring a decoded frame back to main memory before encoding.
* @brief Links this HardwareAccel's frames context with the passed in context.
*
* This serves to skip transferring a decoded frame back to main memory before encoding.
*/
bool linkHardware(AVBufferRef* framesCtx);
......
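Finally, a hedged sketch of the decoder-side flow these comments suggest (factory, setDetails, then transfer per frame). Ownership handling and the surrounding decode loop are glossed over, and nothing below is taken verbatim from the commit.

extern "C" {
#include <libavcodec/avcodec.h>
}

// Hypothetical example: enable hardware decoding on an existing codec context.
std::unique_ptr<jami::video::HardwareAccel> enableHwDecoding(AVCodecContext* codecCtx)
{
    auto accel = jami::video::HardwareAccel::setupDecoder(codecCtx->codec_id,
                                                          codecCtx->width,
                                                          codecCtx->height);
    if (!accel)
        return nullptr;          // assumed: empty pointer means fall back to software decoding
    accel->setDetails(codecCtx); // installs hw_device_ctx / get_format, per the comment above
    // After avcodec_receive_frame() yields a hardware frame, accel->transfer(frame)
    // brings it back to main memory unless the decoder is linked to an encoder.
    return accel;                // caller keeps it alive for the codec's lifetime
}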