Skip to content
Snippets Groups Projects
Commit f4c663b8 authored by Tristan Matthews's avatar Tristan Matthews
Browse files

* #18631: video: cleanup capture and encoding

parent 708c2de1
Branches
Tags
No related merge requests found
...@@ -38,6 +38,6 @@ ...@@ -38,6 +38,6 @@
#define atomic_decrement(x) __sync_fetch_and_sub(x, 1) #define atomic_decrement(x) __sync_fetch_and_sub(x, 1)
// If condition A is false, print the error message in M and exit thread // If condition A is false, print the error message in M and exit thread
#define EXIT_IF_FAIL(A, M, ...) if (!(A)) { ERROR(M, ##__VA_ARGS__); __sync_fetch_and_and(&threadRunning_, false); pthread_exit(NULL); } #define EXIT_IF_FAIL(A, M, ...) if (!(A)) { ERROR(M, ##__VA_ARGS__); set_false_atomic(&threadRunning_); pthread_exit(NULL); }
#endif // CHECK_H_ #endif // CHECK_H_
...@@ -259,7 +259,7 @@ void VideoSendThread::setup() ...@@ -259,7 +259,7 @@ void VideoSendThread::setup()
forcePresetX264(); forcePresetX264();
} }
scaledPicture_ = avcodec_alloc_frame(); scaledInput_ = avcodec_alloc_frame();
// open encoder // open encoder
...@@ -293,7 +293,6 @@ void VideoSendThread::setup() ...@@ -293,7 +293,6 @@ void VideoSendThread::setup()
av_dict_set(&outOptions, "payload_type", args_["payload_type"].c_str(), 0); av_dict_set(&outOptions, "payload_type", args_["payload_type"].c_str(), 0);
} }
ret = avformat_write_header(outputCtx_, outOptions ? &outOptions : NULL); ret = avformat_write_header(outputCtx_, outOptions ? &outOptions : NULL);
if (outOptions) if (outOptions)
av_dict_free(&outOptions); av_dict_free(&outOptions);
...@@ -302,36 +301,21 @@ void VideoSendThread::setup() ...@@ -302,36 +301,21 @@ void VideoSendThread::setup()
av_dump_format(outputCtx_, 0, outputCtx_->filename, 1); av_dump_format(outputCtx_, 0, outputCtx_->filename, 1);
print_sdp(); print_sdp();
// allocate video frame // allocate frame for raw input from camera
rawFrame_ = avcodec_alloc_frame(); rawFrame_ = avcodec_alloc_frame();
// alloc image and output buffer // allocate buffers for both scaled (pre-encoder) and encoded frames
outbufSize_ = avpicture_get_size(encoderCtx_->pix_fmt, encoderCtx_->width, encoderBufferSize_ = avpicture_get_size(encoderCtx_->pix_fmt, encoderCtx_->width,
encoderCtx_->height); encoderCtx_->height);
outbuf_ = reinterpret_cast<uint8_t*>(av_malloc(outbufSize_)); EXIT_IF_FAIL(encoderBufferSize_ > FF_MIN_BUFFER_SIZE, "Encoder buffer too small");
// allocate buffer that fits YUV 420 encoderBuffer_ = reinterpret_cast<uint8_t*>(av_malloc(encoderBufferSize_));
scaledPictureBuf_ = reinterpret_cast<uint8_t*>(av_malloc((outbufSize_ * 3) / 2)); const int scaledInputSize = avpicture_get_size(encoderCtx_->pix_fmt, encoderCtx_->width, encoderCtx_->height);
scaledInputBuffer_ = reinterpret_cast<uint8_t*>(av_malloc(scaledInputSize));
scaledPicture_->data[0] = reinterpret_cast<uint8_t*>(scaledPictureBuf_); avpicture_fill(reinterpret_cast<AVPicture *>(scaledInput_),
scaledPicture_->data[1] = scaledPicture_->data[0] + outbufSize_; static_cast<uint8_t *>(scaledInputBuffer_),
scaledPicture_->data[2] = scaledPicture_->data[1] + outbufSize_ / 4; encoderCtx_->pix_fmt,
scaledPicture_->linesize[0] = encoderCtx_->width;
scaledPicture_->linesize[1] = encoderCtx_->width / 2;
scaledPicture_->linesize[2] = encoderCtx_->width / 2;
}
void VideoSendThread::createScalingContext()
{
// Create scaling context
imgConvertCtx_ = sws_getCachedContext(imgConvertCtx_,
inputDecoderCtx_->width,
inputDecoderCtx_->height,
inputDecoderCtx_->pix_fmt,
encoderCtx_->width, encoderCtx_->width,
encoderCtx_->height, encoderCtx_->height);
encoderCtx_->pix_fmt, SWS_BICUBIC,
NULL, NULL, NULL);
EXIT_IF_FAIL(imgConvertCtx_, "Cannot init the conversion context");
} }
// This callback is used by libav internally to break out of blocking calls // This callback is used by libav internally to break out of blocking calls
...@@ -343,18 +327,19 @@ int VideoSendThread::interruptCb(void *ctx) ...@@ -343,18 +327,19 @@ int VideoSendThread::interruptCb(void *ctx)
VideoSendThread::VideoSendThread(const std::string &id, const std::map<string, string> &args) : VideoSendThread::VideoSendThread(const std::string &id, const std::map<string, string> &args) :
args_(args), args_(args),
scaledPictureBuf_(0), scaledInputBuffer_(0),
outbuf_(0), encoderBuffer_(0),
inputDecoderCtx_(0), inputDecoderCtx_(0),
rawFrame_(0), rawFrame_(0),
scaledPicture_(0), scaledInput_(0),
streamIndex_(-1), streamIndex_(-1),
outbufSize_(0), encoderBufferSize_(0),
encoderCtx_(0), encoderCtx_(0),
stream_(0), stream_(0),
inputCtx_(0), inputCtx_(0),
outputCtx_(0), outputCtx_(0),
imgConvertCtx_(0), previewConvertCtx_(0),
encoderConvertCtx_(0),
sdp_(), sdp_(),
sink_(), sink_(),
bufferSize_(0), bufferSize_(0),
...@@ -374,8 +359,10 @@ struct VideoTxContextHandle { ...@@ -374,8 +359,10 @@ struct VideoTxContextHandle {
~VideoTxContextHandle() ~VideoTxContextHandle()
{ {
if (tx_.imgConvertCtx_) if (tx_.encoderConvertCtx_)
sws_freeContext(tx_.imgConvertCtx_); sws_freeContext(tx_.encoderConvertCtx_);
if (tx_.previewConvertCtx_)
sws_freeContext(tx_.previewConvertCtx_);
// write the trailer, if any. the trailer must be written // write the trailer, if any. the trailer must be written
// before you close the CodecContexts open when you wrote the // before you close the CodecContexts open when you wrote the
...@@ -387,15 +374,15 @@ struct VideoTxContextHandle { ...@@ -387,15 +374,15 @@ struct VideoTxContextHandle {
avio_close(tx_.outputCtx_->pb); avio_close(tx_.outputCtx_->pb);
} }
if (tx_.scaledPictureBuf_) if (tx_.scaledInputBuffer_)
av_free(tx_.scaledPictureBuf_); av_free(tx_.scaledInputBuffer_);
if (tx_.outbuf_) if (tx_.encoderBuffer_)
av_free(tx_.outbuf_); av_free(tx_.encoderBuffer_);
// free the scaled frame // free the scaled frame
if (tx_.scaledPicture_) if (tx_.scaledInput_)
av_free(tx_.scaledPicture_); av_free(tx_.scaledInput_);
// free the YUV frame // free the YUV frame
if (tx_.rawFrame_) if (tx_.rawFrame_)
...@@ -443,7 +430,6 @@ void VideoSendThread::run() ...@@ -443,7 +430,6 @@ void VideoSendThread::run()
// We don't want setup() called in the main thread in case it exits or blocks // We don't want setup() called in the main thread in case it exits or blocks
VideoTxContextHandle handle(*this); VideoTxContextHandle handle(*this);
setup(); setup();
createScalingContext();
while (threadRunning_) while (threadRunning_)
if (captureFrame()) { if (captureFrame()) {
...@@ -462,7 +448,7 @@ void VideoSendThread::fillBuffer(void *data) ...@@ -462,7 +448,7 @@ void VideoSendThread::fillBuffer(void *data)
inputDecoderCtx_->width, inputDecoderCtx_->width,
inputDecoderCtx_->height); inputDecoderCtx_->height);
// Just need to convert colour space to BGRA // Just need to convert colour space to BGRA
imgConvertCtx_ = sws_getCachedContext(imgConvertCtx_, previewConvertCtx_ = sws_getCachedContext(previewConvertCtx_,
inputDecoderCtx_->width, inputDecoderCtx_->width,
inputDecoderCtx_->height, inputDecoderCtx_->height,
inputDecoderCtx_->pix_fmt, inputDecoderCtx_->pix_fmt,
...@@ -470,7 +456,8 @@ void VideoSendThread::fillBuffer(void *data) ...@@ -470,7 +456,8 @@ void VideoSendThread::fillBuffer(void *data)
inputDecoderCtx_->height, inputDecoderCtx_->height,
PIX_FMT_BGRA, SWS_BICUBIC, PIX_FMT_BGRA, SWS_BICUBIC,
NULL, NULL, NULL); NULL, NULL, NULL);
sws_scale(imgConvertCtx_, rawFrame_->data, rawFrame_->linesize, 0, EXIT_IF_FAIL(previewConvertCtx_, "Could not get preview context");
sws_scale(previewConvertCtx_, rawFrame_->data, rawFrame_->linesize, 0,
inputDecoderCtx_->height, preview.data, inputDecoderCtx_->height, preview.data,
preview.linesize); preview.linesize);
} }
...@@ -508,13 +495,21 @@ bool VideoSendThread::captureFrame() ...@@ -508,13 +495,21 @@ bool VideoSendThread::captureFrame()
if (!frameFinished) if (!frameFinished)
return false; return false;
createScalingContext(); encoderConvertCtx_ = sws_getCachedContext(encoderConvertCtx_,
sws_scale(imgConvertCtx_, rawFrame_->data, rawFrame_->linesize, 0, inputDecoderCtx_->width,
inputDecoderCtx_->height, scaledPicture_->data, inputDecoderCtx_->height,
scaledPicture_->linesize); inputDecoderCtx_->pix_fmt,
encoderCtx_->width,
encoderCtx_->height,
encoderCtx_->pix_fmt, SWS_BICUBIC,
NULL, NULL, NULL);
EXIT_IF_FAIL(encoderConvertCtx_, "Could not get encoder convert context");
sws_scale(encoderConvertCtx_, rawFrame_->data, rawFrame_->linesize, 0,
inputDecoderCtx_->height, scaledInput_->data,
scaledInput_->linesize);
// Set presentation timestamp on our scaled frame before encoding it // Set presentation timestamp on our scaled frame before encoding it
scaledPicture_->pts = frameNumber_++; scaledInput_->pts = frameNumber_++;
return true; return true;
} }
...@@ -524,15 +519,15 @@ void VideoSendThread::encodeAndSendVideo() ...@@ -524,15 +519,15 @@ void VideoSendThread::encodeAndSendVideo()
{ {
if (forceKeyFrame_ > 0) { if (forceKeyFrame_ > 0) {
#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(53, 20, 0) #if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(53, 20, 0)
scaledPicture_->pict_type = AV_PICTURE_TYPE_I; scaledInput_->pict_type = AV_PICTURE_TYPE_I;
#else #else
scaledPicture_->pict_type = FF_I_TYPE; scaledInput_->pict_type = FF_I_TYPE;
#endif #endif
atomic_decrement(&forceKeyFrame_); atomic_decrement(&forceKeyFrame_);
} }
const int encodedSize = avcodec_encode_video(encoderCtx_, outbuf_, const int encodedSize = avcodec_encode_video(encoderCtx_, encoderBuffer_,
outbufSize_, scaledPicture_); encoderBufferSize_, scaledInput_);
if (encodedSize <= 0) if (encodedSize <= 0)
return; return;
...@@ -541,7 +536,7 @@ void VideoSendThread::encodeAndSendVideo() ...@@ -541,7 +536,7 @@ void VideoSendThread::encodeAndSendVideo()
av_init_packet(&opkt); av_init_packet(&opkt);
PacketHandle opkt_handle(opkt); PacketHandle opkt_handle(opkt);
opkt.data = outbuf_; opkt.data = encoderBuffer_;
opkt.size = encodedSize; opkt.size = encodedSize;
// rescale pts from encoded video framerate to rtp // rescale pts from encoded video framerate to rtp
......
...@@ -57,7 +57,6 @@ class VideoSendThread : public VideoProvider { ...@@ -57,7 +57,6 @@ class VideoSendThread : public VideoProvider {
void print_sdp(); void print_sdp();
void setup(); void setup();
void prepareEncoderContext(AVCodec *encoder); void prepareEncoderContext(AVCodec *encoder);
void createScalingContext();
void fillBuffer(void *data); void fillBuffer(void *data);
static int interruptCb(void *ctx); static int interruptCb(void *ctx);
...@@ -65,18 +64,19 @@ class VideoSendThread : public VideoProvider { ...@@ -65,18 +64,19 @@ class VideoSendThread : public VideoProvider {
/*-------------------------------------------------------------*/ /*-------------------------------------------------------------*/
/* These variables should be used in thread (i.e. run()) only! */ /* These variables should be used in thread (i.e. run()) only! */
/*-------------------------------------------------------------*/ /*-------------------------------------------------------------*/
uint8_t *scaledPictureBuf_; uint8_t *scaledInputBuffer_;
uint8_t *outbuf_; uint8_t *encoderBuffer_;
AVCodecContext *inputDecoderCtx_; AVCodecContext *inputDecoderCtx_;
AVFrame *rawFrame_; AVFrame *rawFrame_;
AVFrame *scaledPicture_; AVFrame *scaledInput_;
int streamIndex_; int streamIndex_;
int outbufSize_; int encoderBufferSize_;
AVCodecContext *encoderCtx_; AVCodecContext *encoderCtx_;
AVStream *stream_; AVStream *stream_;
AVFormatContext *inputCtx_; AVFormatContext *inputCtx_;
AVFormatContext *outputCtx_; AVFormatContext *outputCtx_;
SwsContext *imgConvertCtx_; SwsContext *previewConvertCtx_;
SwsContext *encoderConvertCtx_;
std::string sdp_; std::string sdp_;
AVIOInterruptCB interruptCb_; AVIOInterruptCB interruptCb_;
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment