Commit d36d4d6e authored by Philippe Gorley

media: simplify timestamp computation

Audio time bases are always the inverse of the sample rate, meaning the
pts increment is always the number of samples in a frame. Video usually
has a time base that is the inverse of its framerate, but this is not
always the case.

Change-Id: I50d2d84d073052f8b3a832e8b99725b9d66b12a8
parent e86600b6
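
For illustration only (not part of the commit): a minimal, self-contained sketch of the pts arithmetic described above. The sample rate, frame size, framerate and time base values are assumed example numbers, and a plain struct stands in for the project's rational<> helper.

// Standalone sketch of the timestamp logic (example values are assumptions).
#include <cstdint>
#include <iostream>

struct Rational { int64_t num, den; };   // stand-in for ring's rational<>

// General form used by the removed getNextTimestamp():
// pts = seq / (sampleFreq * timeBase)
static int64_t nextTimestamp(int64_t seq, Rational freq, Rational tb)
{
    return seq * freq.den * tb.den / (freq.num * tb.num);
}

int main()
{
    // Audio: the time base is 1/sample_rate, so the division collapses to
    // pts == number of samples already sent.
    Rational sampleRate {48000, 1}, audioTb {1, 48000};
    int64_t sentSamples = 3 * 1024;   // e.g. three 1024-sample frames
    std::cout << nextTimestamp(sentSamples, sampleRate, audioTb)   // prints 3072
              << " == " << sentSamples << '\n';

    // Video: 29.97 fps in a 90 kHz time base; here the time base is not the
    // inverse of the framerate, so the full division is still required.
    Rational framerate {30000, 1001}, videoTb {1, 90000};
    std::cout << nextTimestamp(2, framerate, videoTb) << '\n';   // prints 6006 (2 * 3003)
}
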
@@ -52,16 +52,6 @@ AudioInput::~AudioInput()
     loop_.join();
 }
 
-// seq: frame number for video, sent samples audio
-// sampleFreq: fps for video, sample rate for audio
-// clock: stream time base (packetization interval times)
-// FIXME duplicate code from media encoder
-int64_t
-getNextTimestamp(int64_t seq, rational<int64_t> sampleFreq, rational<int64_t> clock)
-{
-    return (seq / (sampleFreq * clock)).real<int64_t>();
-}
-
 void
 AudioInput::process()
 {
@@ -99,7 +89,7 @@ AudioInput::process()
     auto audioFrame = resampled.toAVFrame();
     auto frame = audioFrame->pointer();
     auto ms = MediaStream("a:local", format_);
-    frame->pts = getNextTimestamp(sent_samples, ms.sampleRate, static_cast<rational<int64_t>>(ms.timeBase));
+    frame->pts = sent_samples;
     sent_samples += frame->nb_samples;
 
     {
@@ -156,23 +156,13 @@ AudioSender::setup(SocketPair& socketPair)
     return true;
 }
 
-// seq: frame number for video, sent samples audio
-// sampleFreq: fps for video, sample rate for audio
-// clock: stream time base (packetization interval times)
-// FIXME duplicate code from media_encoder
-static int64_t
-getNextTimestamp(int64_t seq, rational<int64_t> sampleFreq, rational<int64_t> clock)
-{
-    return (seq / (sampleFreq * clock)).real<int64_t>();
-}
-
 void
 AudioSender::update(Observable<std::shared_ptr<ring::AudioFrame>>* /*obs*/, const std::shared_ptr<ring::AudioFrame>& framePtr)
 {
     auto frame = framePtr->pointer();
     auto ms = MediaStream("a:local", frame->format, rational<int>(1, frame->sample_rate),
                           frame->sample_rate, frame->channels);
-    frame->pts = getNextTimestamp(sent_samples, ms.sampleRate, static_cast<rational<int64_t>>(ms.timeBase));
+    frame->pts = sent_samples;
     ms.firstTimestamp = frame->pts;
     sent_samples += frame->nb_samples;
 
@@ -343,15 +343,6 @@ MediaEncoder::startIO()
 #endif
 }
 
-// seq: frame number for video, sent samples audio
-// sampleFreq: fps for video, sample rate for audio
-// clock: stream time base (packetization interval times)
-static int64_t
-getNextTimestamp(int64_t seq, rational<int64_t> sampleFreq, rational<int64_t> clock)
-{
-    return (seq / (sampleFreq * clock)).real<int64_t>();
-}
-
 #ifdef RING_VIDEO
 int
 MediaEncoder::encode(VideoFrame& input, bool is_keyframe,
@@ -366,7 +357,11 @@ MediaEncoder::encode(VideoFrame& input, bool is_keyframe,
     auto frame = scaledFrame_.pointer();
     AVCodecContext* enc = encoders_[currentStreamIdx_];
-    frame->pts = getNextTimestamp(frame_number, enc->framerate, enc->time_base);
+    // ideally, time base is the inverse of framerate, but this may not always be the case
+    if (enc->framerate.num == enc->time_base.den && enc->framerate.den == enc->time_base.num)
+        frame->pts = frame_number;
+    else
+        frame->pts = (frame_number / (rational<int64_t>(enc->framerate) * rational<int64_t>(enc->time_base))).real<int64_t>();
 
     if (is_keyframe) {
         frame->pict_type = AV_PICTURE_TYPE_I;
@@ -382,8 +377,7 @@ MediaEncoder::encode(VideoFrame& input, bool is_keyframe,
 
 int MediaEncoder::encodeAudio(AudioFrame& frame)
 {
     auto enc = encoders_[currentStreamIdx_];
-    frame.pointer()->pts = getNextTimestamp(sent_samples, enc->sample_rate, enc->time_base);
+    frame.pointer()->pts = sent_samples;
     sent_samples += frame.pointer()->nb_samples;
     encode(frame.pointer(), currentStreamIdx_);
     return 0;