Commit dd96503a authored by Tristan Matthews's avatar Tristan Matthews

* #8940: removed video test source for now

parent 2909cf41
......@@ -11,7 +11,8 @@ libvideo_la_SOURCES = video_endpoint.cpp video_endpoint.h \
video_preview.h video_preview.cpp \
video_v4l2.cpp video_v4l2_list.cpp \
video_v4l2.h video_v4l2_list.h \
video_preferences.h video_preferences.cpp
video_preferences.h video_preferences.cpp \
packet_handle.h packet_handle.cpp
libvideo_la_LIBADD = @LIBAVCODEC_LIBS@ @LIBAVFORMAT_LIBS@ @LIBAVDEVICE_LIBS@ @LIBSWSCALE_LIBS@ @LIBAVUTIL_LIBS@ @CCRTP_LIBS@ @UDEV_LIBS@
......
/*
* Copyright (C) 2011 Savoir-Faire Linux Inc.
* Author: Tristan Matthews <tristan.matthews@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify this program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, Savoir-Faire Linux Inc.
* grants you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#include "packet_handle.h"
// libav includes
extern "C" {
#include <libavformat/avformat.h>
}
// RAII handle implementation: the wrapper takes a reference to a packet
// that the caller has already filled (e.g. via av_read_frame) and becomes
// responsible for releasing its payload.
PacketHandle::PacketHandle(AVPacket &inpacket) : inpacket_(inpacket)
{}

// Destructor releases the packet's payload exactly once, when the handle
// leaves scope — including early breaks and thread-exit paths.
PacketHandle::~PacketHandle()
{
    av_free_packet(&inpacket_);
}
/*
* Copyright (C) 2011 Savoir-Faire Linux Inc.
* Author: Tristan Matthews <tristan.matthews@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify this program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, Savoir-Faire Linux Inc.
* grants you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#ifndef PACKET_HANDLE_H_
#define PACKET_HANDLE_H_

// Forward declaration only — AVPacket is declared as a *struct* in
// libavcodec, so the tag must be "struct"; a "class" tag mismatches the
// real declaration and triggers -Wmismatched-tags on some compilers.
struct AVPacket;

/**
 * RAII wrapper around a libav AVPacket.
 *
 * Holds a reference to a caller-owned packet and calls av_free_packet()
 * on it in the destructor, guaranteeing the payload is released on every
 * exit path (early break, thread cancellation, exception).
 *
 * The wrapped AVPacket must outlive this handle.
 */
class PacketHandle {
    private:
        AVPacket &inpacket_;
        // Non-copyable: two handles over the same packet would call
        // av_free_packet() twice (double free). Declared but not defined.
        PacketHandle(const PacketHandle &);
        PacketHandle &operator=(const PacketHandle &);
    public:
        PacketHandle(AVPacket &inpacket);
        ~PacketHandle();
};

#endif // PACKET_HANDLE_H_
......@@ -30,6 +30,7 @@
*/
#include "video_receive_thread.h"
#include "packet_handle.h"
// libav includes
extern "C" {
......@@ -59,11 +60,13 @@ static const enum PixelFormat video_rgb_format = PIX_FMT_BGRA;
namespace sfl_video {
using std::map;
using std::string;
namespace { // anonymous namespace
#if _SEM_SEMUN_UNDEFINED
union semun
{
union semun {
int val; /* value for SETVAL */
struct semid_ds *buf; /* buffer for IPC_STAT & IPC_SET */
unsigned short int *array; /* array for GETALL & SETALL */
......@@ -119,9 +122,8 @@ int createShm(unsigned numBytes, int *shmKey)
uint8_t *attachShm(int shm_id)
{
/* attach to the segment and get a pointer to it */
uint8_t *data = reinterpret_cast<uint8_t*>(shmat(shm_id, (void *)0, 0));
if (data == reinterpret_cast<uint8_t *>(-1))
{
uint8_t *data = reinterpret_cast<uint8_t*>(shmat(shm_id, (void *) 0, 0));
if (data == reinterpret_cast<uint8_t *>(-1)) {
ERROR("%s:shmat:%m", __PRETTY_FUNCTION__);
data = NULL;
}
......@@ -156,52 +158,45 @@ int bufferSize(int width, int height, int format)
}
std::string openTemp(std::string path, std::ofstream& f)
string openTemp(string path, std::ofstream& f)
{
path += "/XXXXXX";
std::vector<char> dst_path(path.begin(), path.end());
dst_path.push_back('\0');
int fd = -1;
while (fd == -1)
{
while (fd == -1) {
fd = mkstemp(&dst_path[0]);
if (fd != -1)
{
if (fd != -1) {
path.assign(dst_path.begin(), dst_path.end() - 1);
f.open(path.c_str(),
std::ios_base::trunc | std::ios_base::out);
f.open(path.c_str(), std::ios_base::trunc | std::ios_base::out);
close(fd);
}
}
return path;
}
} // end anonymous namespace
int VideoReceiveThread::createSemSet(int shmKey, int *semKey)
{
/* this variable will contain the semaphore set. */
int sem_set_id;
/* semaphore value, for semctl(). */
union semun sem_val;
key_t key;
do
key = ftok(fileutils::get_program_dir(), rand());
while (key == shmKey);
*semKey = key;
/* first we create a semaphore set with a single semaphore, */
/* whose counter is initialized to '0'. */
sem_set_id = semget(key, 1, 0600 | IPC_CREAT);
/* first we create a semaphore set with a single semaphore,
whose counter is initialized to '0'. */
int sem_set_id = semget(key, 1, 0600 | IPC_CREAT);
if (sem_set_id == -1)
{
ERROR("%s:semget:%m", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
/* semaphore value, for semctl(). */
union semun sem_val;
sem_val.val = 0;
semctl(sem_set_id, 0, SETVAL, sem_val);
return sem_set_id;
......@@ -217,7 +212,7 @@ void VideoReceiveThread::loadSDP()
os << args_["receiving_sdp"];
DEBUG("%s:loaded SDP %s", __PRETTY_FUNCTION__,
args_["receiving_sdp"].c_str());
args_["receiving_sdp"].c_str());
os.close();
}
......@@ -229,107 +224,93 @@ void VideoReceiveThread::setup()
AVInputFormat *file_iformat = 0;
if (!test_source_)
{
if (args_["input"].empty())
{
loadSDP();
args_["input"] = sdpFilename_;
file_iformat = av_find_input_format("sdp");
if (!file_iformat)
{
ERROR("%s:Could not find format \"sdp\"", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
}
else if (args_["input"].substr(0, strlen("/dev/video")) == "/dev/video")
{
// it's a v4l device if starting with /dev/video
// FIXME: This is not the most robust way of checking if we mean to use a
// v4l device
DEBUG("Using v4l2 format");
file_iformat = av_find_input_format("video4linux2");
if (!file_iformat)
{
ERROR("%s:Could not find format!", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
if (args_["input"].empty()) {
loadSDP();
args_["input"] = sdpFilename_;
file_iformat = av_find_input_format("sdp");
if (!file_iformat) {
ERROR("%s:Could not find format \"sdp\"", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
AVDictionary *options = NULL;
if (!args_["framerate"].empty())
av_dict_set(&options, "framerate", args_["framerate"].c_str(), 0);
if (!args_["video_size"].empty())
av_dict_set(&options, "video_size", args_["video_size"].c_str(), 0);
if (!args_["channel"].empty())
av_dict_set(&options, "channel", args_["channel"].c_str(), 0);
// Open video file
if (avformat_open_input(&inputCtx_, args_["input"].c_str(),
file_iformat, &options) != 0)
{
ERROR("%s:Could not open input file \"%s\"", __PRETTY_FUNCTION__,
args_["input"].c_str());
} else if (args_["input"].substr(0, strlen("/dev/video")) == "/dev/video") {
// it's a v4l device if starting with /dev/video
// FIXME: This is not the most robust way of checking if we mean to use a
// v4l device
DEBUG("Using v4l2 format");
file_iformat = av_find_input_format("video4linux2");
if (!file_iformat) {
ERROR("%s:Could not find format!", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
}
// retrieve stream information
AVDictionary *options = NULL;
if (!args_["framerate"].empty())
av_dict_set(&options, "framerate", args_["framerate"].c_str(), 0);
if (!args_["video_size"].empty())
av_dict_set(&options, "video_size", args_["video_size"].c_str(), 0);
if (!args_["channel"].empty())
av_dict_set(&options, "channel", args_["channel"].c_str(), 0);
// Open video file
if (avformat_open_input(&inputCtx_, args_["input"].c_str(), file_iformat,
&options) != 0) {
ERROR("%s:Could not open input file \"%s\"", __PRETTY_FUNCTION__,
args_["input"].c_str());
ost::Thread::exit();
}
// retrieve stream information
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 8, 0)
if (av_find_stream_info(inputCtx_) < 0)
if (av_find_stream_info(inputCtx_) < 0)
#else
if (avformat_find_stream_info(inputCtx_, NULL) < 0)
if (avformat_find_stream_info(inputCtx_, NULL) < 0)
#endif
{
ERROR("%s:Could not find stream info!", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
{
ERROR("%s:Could not find stream info!", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
// find the first video stream from the input
for (unsigned i = 0; i < inputCtx_->nb_streams; i++)
{
if (inputCtx_->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
videoStreamIndex_ = i;
break;
}
}
if (videoStreamIndex_ == -1)
{
ERROR("%s:Could not find video stream!", __PRETTY_FUNCTION__);
ost::Thread::exit();
// find the first video stream from the input
for (unsigned i = 0; i < inputCtx_->nb_streams; i++) {
if (inputCtx_->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStreamIndex_ = i;
break;
}
}
// Get a pointer to the codec context for the video stream
decoderCtx_ = inputCtx_->streams[videoStreamIndex_]->codec;
if (videoStreamIndex_ == -1) {
ERROR("%s:Could not find video stream!", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
// find the decoder for the video stream
AVCodec *inputDecoder = avcodec_find_decoder(decoderCtx_->codec_id);
if (inputDecoder == NULL)
{
ERROR("%s:Unsupported codec!", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
// Get a pointer to the codec context for the video stream
decoderCtx_ = inputCtx_->streams[videoStreamIndex_]->codec;
// find the decoder for the video stream
AVCodec *inputDecoder = avcodec_find_decoder(decoderCtx_->codec_id);
if (inputDecoder == NULL) {
ERROR("%s:Unsupported codec!", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53, 6, 0)
if (avcodec_open(decoderCtx_, inputDecoder) < 0)
if (avcodec_open(decoderCtx_, inputDecoder) < 0)
#else
if (avcodec_open2(decoderCtx_, inputDecoder, NULL) < 0)
if (avcodec_open2(decoderCtx_, inputDecoder, NULL) < 0)
#endif
{
ERROR("%s:Could not open codec!", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
{
ERROR("%s:Could not open codec!", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
scaledPicture_ = avcodec_alloc_frame();
if (scaledPicture_ == 0)
{
if (scaledPicture_ == 0) {
ERROR("%s:Could not allocated output frame!", __PRETTY_FUNCTION__);
ost::Thread::exit();
}
if (dstWidth_ == 0 and dstHeight_ == 0)
{
if (dstWidth_ == 0 and dstHeight_ == 0) {
dstWidth_ = decoderCtx_->width;
dstHeight_ = decoderCtx_->height;
}
......@@ -347,8 +328,7 @@ void VideoReceiveThread::setup()
rawFrame_ = avcodec_alloc_frame();
// we're receiving RTP
if (args_["input"] == sdpFilename_)
{
if (args_["input"] == sdpFilename_) {
// publish our new video stream's existence
DEBUG("Publishing shm: %d sem: %d size: %d", shmKey_, semKey_,
videoBufferSize_);
......@@ -369,18 +349,17 @@ void VideoReceiveThread::createScalingContext()
{
// Create scaling context, no scaling done here
imgConvertCtx_ = sws_getCachedContext(imgConvertCtx_, decoderCtx_->width,
decoderCtx_->height, decoderCtx_->pix_fmt, dstWidth_,
dstHeight_, video_rgb_format, SWS_BICUBIC,
NULL, NULL, NULL);
if (imgConvertCtx_ == 0)
{
decoderCtx_->height,
decoderCtx_->pix_fmt, dstWidth_,
dstHeight_, video_rgb_format,
SWS_BICUBIC, NULL, NULL, NULL);
if (imgConvertCtx_ == 0) {
ERROR("Cannot init the conversion context!");
ost::Thread::exit();
}
}
VideoReceiveThread::VideoReceiveThread(const std::map<std::string, std::string> &args) : args_(args),
test_source_(args_["input"] == "SFLTEST"),
VideoReceiveThread::VideoReceiveThread(const map<string, string> &args) : args_(args),
frameNumber_(0),
shmBuffer_(0),
shmID_(-1),
......@@ -402,73 +381,36 @@ VideoReceiveThread::VideoReceiveThread(const std::map<std::string, std::string>
setCancel(cancelDeferred);
}
void VideoReceiveThread::runAsTestSource()
{
while (not testCancel())
{
// assign appropriate parts of buffer to image planes in scaledPicture
avpicture_fill(reinterpret_cast<AVPicture *>(scaledPicture_),
shmBuffer_, video_rgb_format, dstWidth_, dstHeight_);
const AVPixFmtDescriptor *pixdesc = &av_pix_fmt_descriptors[video_rgb_format];
int components = pixdesc->nb_components;
int planes = 0;
for (int i = 0; i < components; i++)
if (pixdesc->comp[i].plane > planes)
planes = pixdesc->comp[i].plane;
planes++;
int i = frameNumber_++;
const unsigned pitch = scaledPicture_->linesize[0];
for (int y = 0; y < dstHeight_; y++)
for (unsigned x=0; x < pitch; x++)
scaledPicture_->data[0][y * pitch + x] = x + y + i * planes;
/* signal the semaphore that a new frame is ready */
sem_signal(semSetID_);
}
}
void VideoReceiveThread::run()
{
setup();
if (test_source_)
runAsTestSource();
else
{
createScalingContext();
while (not testCancel())
{
AVPacket inpacket;
errno = av_read_frame(inputCtx_, &inpacket);
if (errno < 0)
{
ERROR("Couldn't read frame : %m\n");
break;
}
// is this a packet from the video stream?
if (inpacket.stream_index == videoStreamIndex_)
{
int frameFinished;
avcodec_decode_video2(decoderCtx_, rawFrame_, &frameFinished,
&inpacket);
if (frameFinished)
{
avpicture_fill(reinterpret_cast<AVPicture *>(scaledPicture_),
shmBuffer_, video_rgb_format, dstWidth_,
dstHeight_);
sws_scale(imgConvertCtx_, rawFrame_->data, rawFrame_->linesize,
0, decoderCtx_->height, scaledPicture_->data,
scaledPicture_->linesize);
// signal the semaphore that a new frame is ready
sem_signal(semSetID_);
}
createScalingContext();
while (not testCancel()) {
AVPacket inpacket;
errno = av_read_frame(inputCtx_, &inpacket);
if (errno < 0) {
ERROR("Couldn't read frame : %m\n");
break;
}
PacketHandle inpacket_handle(inpacket);
// is this a packet from the video stream?
if (inpacket.stream_index == videoStreamIndex_) {
int frameFinished;
avcodec_decode_video2(decoderCtx_, rawFrame_, &frameFinished, &inpacket);
if (frameFinished) {
avpicture_fill(reinterpret_cast<AVPicture *>(scaledPicture_),
shmBuffer_, video_rgb_format, dstWidth_,
dstHeight_);
sws_scale(imgConvertCtx_, rawFrame_->data, rawFrame_->linesize,
0, decoderCtx_->height, scaledPicture_->data,
scaledPicture_->linesize);
// signal the semaphore that a new frame is ready
sem_signal(semSetID_);
}
av_free_packet(&inpacket);
}
}
}
......
......@@ -48,7 +48,6 @@ class VideoReceiveThread : public ost::Thread {
private:
NON_COPYABLE(VideoReceiveThread);
std::map<std::string, std::string> args_;
bool test_source_;
unsigned frameNumber_;
/*-------------------------------------------------------------*/
......@@ -70,15 +69,13 @@ class VideoReceiveThread : public ost::Thread {
int dstWidth_;
int dstHeight_;
ost::Event shmReady_;
std::string sdpFilename_;
void setup();
void createScalingContext();
int createSemSet(int shmKey, int *semKey);
ost::Event shmReady_;
std::string sdpFilename_;
void loadSDP();
void runAsTestSource();
public:
explicit VideoReceiveThread(const std::map<std::string, std::string> &args);
......
......@@ -44,63 +44,58 @@
namespace sfl_video {
VideoRtpSession::VideoRtpSession(const std::map<std::string, std::string> &txArgs) : sendThread_(), receiveThread_(),
txArgs_(txArgs), rxArgs_(), sending_(true), receiving_(true)
using std::map;
using std::string;
VideoRtpSession::VideoRtpSession(const map<string, string> &txArgs) :
sendThread_(), receiveThread_(), txArgs_(txArgs), rxArgs_(),
sending_(true), receiving_(true)
{
txArgs_["bitrate"] = "500000";
}
VideoRtpSession::VideoRtpSession(const std::map<std::string, std::string> &txArgs,
const std::map<std::string, std::string> &rxArgs) :
VideoRtpSession::VideoRtpSession(const map<string, string> &txArgs,
const map<string, string> &rxArgs) :
sendThread_(), receiveThread_(), txArgs_(txArgs), rxArgs_(rxArgs),
sending_(true), receiving_(true)
{}
void VideoRtpSession::updateSDP(const Sdp &sdp)
{
std::vector<std::string> v(sdp.getActiveVideoDescription());
const std::string &desc = v[0];
const std::vector<string> v(sdp.getActiveVideoDescription());
const string &desc = v[0];
// if port has changed
if (desc != rxArgs_["receiving_sdp"])
{
if (desc != rxArgs_["receiving_sdp"]) {
rxArgs_["receiving_sdp"] = desc;
DEBUG("%s:Updated incoming SDP to:\n %s", __PRETTY_FUNCTION__,
rxArgs_["receiving_sdp"].c_str());
rxArgs_["receiving_sdp"].c_str());
}
if (desc.find("sendrecv") != std::string::npos)
{
if (desc.find("sendrecv") != string::npos) {
DEBUG("Sending and receiving video");
receiving_ = true;
sending_ = true;
}
else if (desc.find("inactive") != std::string::npos)
{
} else if (desc.find("inactive") != string::npos) {
DEBUG("Video is inactive");
receiving_ = false;
sending_ = false;
}
else if (desc.find("sendonly") != std::string::npos)
{
} else if (desc.find("sendonly") != string::npos) {
DEBUG("Receiving video disabled, video set to sendonly");
receiving_ = false;
sending_ = true;
}
else if (desc.find("recvonly") != std::string::npos)
{
} else if (desc.find("recvonly") != string::npos) {
DEBUG("Sending video disabled, video set to recvonly");
sending_ = false;
receiving_ = true;