From 912981fd0cd17413b9dfee14f33fdde63cd063d4 Mon Sep 17 00:00:00 2001 From: "stefan@webrtc.org" Date: Fri, 12 Oct 2012 07:04:52 +0000 Subject: [PATCH] Refactoring jitter_buffer.h/.cc to Google style. BUG= Review URL: https://webrtc-codereview.appspot.com/872006 git-svn-id: http://webrtc.googlecode.com/svn/trunk@2920 4adac7df-926f-26a2-2b94-8c16560cd09d --- .../video_coding/main/source/frame_buffer.cc | 2 +- .../video_coding/main/source/frame_buffer.h | 2 +- .../video_coding/main/source/jitter_buffer.cc | 2898 ++++++++--------- .../video_coding/main/source/jitter_buffer.h | 381 +-- .../main/source/jitter_buffer_unittest.cc | 11 +- .../video_coding/main/source/receiver.cc | 28 +- .../main/test/jitter_buffer_test.cc | 86 +- 7 files changed, 1540 insertions(+), 1868 deletions(-) diff --git a/src/modules/video_coding/main/source/frame_buffer.cc b/src/modules/video_coding/main/source/frame_buffer.cc index abaadffb3a..fe30bea358 100644 --- a/src/modules/video_coding/main/source/frame_buffer.cc +++ b/src/modules/video_coding/main/source/frame_buffer.cc @@ -199,7 +199,7 @@ VCMFrameBuffer::InsertPacket(const VCMPacket& packet, WebRtc_Word64 timeInMs, } WebRtc_Word64 -VCMFrameBuffer::LatestPacketTimeMs() +VCMFrameBuffer::LatestPacketTimeMs() const { return _latestPacketTimeMs; } diff --git a/src/modules/video_coding/main/source/frame_buffer.h b/src/modules/video_coding/main/source/frame_buffer.h index ea05754125..eeacfad9c3 100644 --- a/src/modules/video_coding/main/source/frame_buffer.h +++ b/src/modules/video_coding/main/source/frame_buffer.h @@ -74,7 +74,7 @@ public: void IncrementNackCount(); WebRtc_Word16 GetNackCount() const; - WebRtc_Word64 LatestPacketTimeMs(); + WebRtc_Word64 LatestPacketTimeMs() const; webrtc::FrameType FrameType() const; void SetPreviousFrameLoss(); diff --git a/src/modules/video_coding/main/source/jitter_buffer.cc b/src/modules/video_coding/main/source/jitter_buffer.cc index 0c4dd33fa9..405c05e8d3 100644 --- 
a/src/modules/video_coding/main/source/jitter_buffer.cc +++ b/src/modules/video_coding/main/source/jitter_buffer.cc @@ -28,10 +28,10 @@ namespace webrtc { // Predicates used when searching for frames in the frame buffer list class FrameSmallerTimestamp { public: - FrameSmallerTimestamp(uint32_t timestamp) : timestamp_(timestamp) {} + explicit FrameSmallerTimestamp(uint32_t timestamp) : timestamp_(timestamp) {} bool operator()(VCMFrameBuffer* frame) { return (LatestTimestamp(timestamp_, frame->TimeStamp(), NULL) == - timestamp_); + timestamp_); } private: @@ -40,7 +40,7 @@ class FrameSmallerTimestamp { class FrameEqualTimestamp { public: - FrameEqualTimestamp(uint32_t timestamp) : timestamp_(timestamp) {} + explicit FrameEqualTimestamp(uint32_t timestamp) : timestamp_(timestamp) {} bool operator()(VCMFrameBuffer* frame) { return (timestamp_ == frame->TimeStamp()); } @@ -58,521 +58,1183 @@ class CompleteDecodableKeyFrameCriteria { } }; -// Constructor VCMJitterBuffer::VCMJitterBuffer(TickTimeBase* clock, - WebRtc_Word32 vcmId, - WebRtc_Word32 receiverId, - bool master) : - _vcmId(vcmId), - _receiverId(receiverId), - _clock(clock), - _running(false), - _critSect(CriticalSectionWrapper::CreateCriticalSection()), - _master(master), - _frameEvent(), - _packetEvent(), - _maxNumberOfFrames(kStartNumberOfFrames), - _frameBuffers(), - _frameList(), - _lastDecodedState(), - _packetsNotDecodable(0), - _receiveStatistics(), - _incomingFrameRate(0), - _incomingFrameCount(0), - _timeLastIncomingFrameCount(0), - _incomingBitCount(0), - _incomingBitRate(0), - _dropCount(0), - _numConsecutiveOldFrames(0), - _numConsecutiveOldPackets(0), - _discardedPackets(0), - _jitterEstimate(vcmId, receiverId), - _delayEstimate(_clock->MillisecondTimestamp()), - _rttMs(0), - _nackMode(kNoNack), - _lowRttNackThresholdMs(-1), - _highRttNackThresholdMs(-1), - _NACKSeqNum(), - _NACKSeqNumLength(0), - _waitingForKeyFrame(false), - _firstPacket(true) -{ - memset(_frameBuffers, 0, 
sizeof(_frameBuffers)); - memset(_receiveStatistics, 0, sizeof(_receiveStatistics)); - memset(_NACKSeqNumInternal, -1, sizeof(_NACKSeqNumInternal)); + int vcm_id, + int receiver_id, + bool master) + : vcm_id_(vcm_id), + receiver_id_(receiver_id), + clock_(clock), + running_(false), + crit_sect_(CriticalSectionWrapper::CreateCriticalSection()), + master_(master), + frame_event_(), + packet_event_(), + max_number_of_frames_(kStartNumberOfFrames), + frame_buffers_(), + frame_list_(), + last_decoded_state_(), + first_packet_(true), + num_not_decodable_packets_(0), + receive_statistics_(), + incoming_frame_rate_(0), + incoming_frame_count_(0), + time_last_incoming_frame_count_(0), + incoming_bit_count_(0), + incoming_bit_rate_(0), + drop_count_(0), + num_consecutive_old_frames_(0), + num_consecutive_old_packets_(0), + num_discarded_packets_(0), + jitter_estimate_(vcm_id, receiver_id), + inter_frame_delay_(clock_->MillisecondTimestamp()), + rtt_ms_(0), + nack_mode_(kNoNack), + low_rtt_nack_threshold_ms_(-1), + high_rtt_nack_threshold_ms_(-1), + nack_seq_nums_(), + nack_seq_nums_length_(0), + waiting_for_key_frame_(false) { + memset(frame_buffers_, 0, sizeof(frame_buffers_)); + memset(receive_statistics_, 0, sizeof(receive_statistics_)); + memset(nack_seq_nums_internal_, -1, sizeof(nack_seq_nums_internal_)); - for (int i = 0; i< kStartNumberOfFrames; i++) - { - _frameBuffers[i] = new VCMFrameBuffer(); - } + for (int i = 0; i < kStartNumberOfFrames; i++) { + frame_buffers_[i] = new VCMFrameBuffer(); + } } -// Destructor -VCMJitterBuffer::~VCMJitterBuffer() -{ - Stop(); - for (int i = 0; i< kMaxNumberOfFrames; i++) - { - if (_frameBuffers[i]) - { - delete _frameBuffers[i]; - } +VCMJitterBuffer::~VCMJitterBuffer() { + Stop(); + for (int i = 0; i < kMaxNumberOfFrames; i++) { + if (frame_buffers_[i]) { + delete frame_buffers_[i]; } - delete _critSect; + } + delete crit_sect_; } -void -VCMJitterBuffer::CopyFrom(const VCMJitterBuffer& rhs) -{ - if (this != &rhs) - { - 
_critSect->Enter(); - rhs._critSect->Enter(); - _vcmId = rhs._vcmId; - _receiverId = rhs._receiverId; - _running = rhs._running; - _master = !rhs._master; - _maxNumberOfFrames = rhs._maxNumberOfFrames; - _incomingFrameRate = rhs._incomingFrameRate; - _incomingFrameCount = rhs._incomingFrameCount; - _timeLastIncomingFrameCount = rhs._timeLastIncomingFrameCount; - _incomingBitCount = rhs._incomingBitCount; - _incomingBitRate = rhs._incomingBitRate; - _dropCount = rhs._dropCount; - _numConsecutiveOldFrames = rhs._numConsecutiveOldFrames; - _numConsecutiveOldPackets = rhs._numConsecutiveOldPackets; - _discardedPackets = rhs._discardedPackets; - _jitterEstimate = rhs._jitterEstimate; - _delayEstimate = rhs._delayEstimate; - _waitingForCompletion = rhs._waitingForCompletion; - _rttMs = rhs._rttMs; - _NACKSeqNumLength = rhs._NACKSeqNumLength; - _waitingForKeyFrame = rhs._waitingForKeyFrame; - _firstPacket = rhs._firstPacket; - _lastDecodedState = rhs._lastDecodedState; - _packetsNotDecodable = rhs._packetsNotDecodable; - memcpy(_receiveStatistics, rhs._receiveStatistics, - sizeof(_receiveStatistics)); - memcpy(_NACKSeqNumInternal, rhs._NACKSeqNumInternal, - sizeof(_NACKSeqNumInternal)); - memcpy(_NACKSeqNum, rhs._NACKSeqNum, sizeof(_NACKSeqNum)); - for (int i = 0; i < kMaxNumberOfFrames; i++) - { - if (_frameBuffers[i] != NULL) - { - delete _frameBuffers[i]; - _frameBuffers[i] = NULL; - } - } - _frameList.clear(); - for (int i = 0; i < _maxNumberOfFrames; i++) - { - _frameBuffers[i] = new VCMFrameBuffer(*(rhs._frameBuffers[i])); - if (_frameBuffers[i]->Length() > 0) - { - FrameList::reverse_iterator rit = std::find_if( - _frameList.rbegin(), _frameList.rend(), - FrameSmallerTimestamp(_frameBuffers[i]->TimeStamp())); - _frameList.insert(rit.base(), _frameBuffers[i]); - } - } - rhs._critSect->Leave(); - _critSect->Leave(); +void VCMJitterBuffer::CopyFrom(const VCMJitterBuffer& rhs) { + if (this != &rhs) { + crit_sect_->Enter(); + rhs.crit_sect_->Enter(); + vcm_id_ = 
rhs.vcm_id_; + receiver_id_ = rhs.receiver_id_; + running_ = rhs.running_; + master_ = !rhs.master_; + max_number_of_frames_ = rhs.max_number_of_frames_; + incoming_frame_rate_ = rhs.incoming_frame_rate_; + incoming_frame_count_ = rhs.incoming_frame_count_; + time_last_incoming_frame_count_ = rhs.time_last_incoming_frame_count_; + incoming_bit_count_ = rhs.incoming_bit_count_; + incoming_bit_rate_ = rhs.incoming_bit_rate_; + drop_count_ = rhs.drop_count_; + num_consecutive_old_frames_ = rhs.num_consecutive_old_frames_; + num_consecutive_old_packets_ = rhs.num_consecutive_old_packets_; + num_discarded_packets_ = rhs.num_discarded_packets_; + jitter_estimate_ = rhs.jitter_estimate_; + inter_frame_delay_ = rhs.inter_frame_delay_; + waiting_for_completion_ = rhs.waiting_for_completion_; + rtt_ms_ = rhs.rtt_ms_; + nack_seq_nums_length_ = rhs.nack_seq_nums_length_; + waiting_for_key_frame_ = rhs.waiting_for_key_frame_; + first_packet_ = rhs.first_packet_; + last_decoded_state_ = rhs.last_decoded_state_; + num_not_decodable_packets_ = rhs.num_not_decodable_packets_; + memcpy(receive_statistics_, rhs.receive_statistics_, + sizeof(receive_statistics_)); + memcpy(nack_seq_nums_internal_, rhs.nack_seq_nums_internal_, + sizeof(nack_seq_nums_internal_)); + memcpy(nack_seq_nums_, rhs.nack_seq_nums_, sizeof(nack_seq_nums_)); + for (int i = 0; i < kMaxNumberOfFrames; i++) { + if (frame_buffers_[i] != NULL) { + delete frame_buffers_[i]; + frame_buffers_[i] = NULL; + } } + frame_list_.clear(); + for (int i = 0; i < max_number_of_frames_; i++) { + frame_buffers_[i] = new VCMFrameBuffer(*(rhs.frame_buffers_[i])); + if (frame_buffers_[i]->Length() > 0) { + FrameList::reverse_iterator rit = std::find_if( + frame_list_.rbegin(), frame_list_.rend(), + FrameSmallerTimestamp(frame_buffers_[i]->TimeStamp())); + frame_list_.insert(rit.base(), frame_buffers_[i]); + } + } + rhs.crit_sect_->Leave(); + crit_sect_->Leave(); + } } -// Start jitter buffer -void -VCMJitterBuffer::Start() -{ - 
CriticalSectionScoped cs(_critSect); - _running = true; - _incomingFrameCount = 0; - _incomingFrameRate = 0; - _incomingBitCount = 0; - _incomingBitRate = 0; - _timeLastIncomingFrameCount = _clock->MillisecondTimestamp(); - memset(_receiveStatistics, 0, sizeof(_receiveStatistics)); +void VCMJitterBuffer::Start() { + CriticalSectionScoped cs(crit_sect_); + running_ = true; + incoming_frame_count_ = 0; + incoming_frame_rate_ = 0; + incoming_bit_count_ = 0; + incoming_bit_rate_ = 0; + time_last_incoming_frame_count_ = clock_->MillisecondTimestamp(); + memset(receive_statistics_, 0, sizeof(receive_statistics_)); - _numConsecutiveOldFrames = 0; - _numConsecutiveOldPackets = 0; - _discardedPackets = 0; + num_consecutive_old_frames_ = 0; + num_consecutive_old_packets_ = 0; + num_discarded_packets_ = 0; - _frameEvent.Reset(); // start in a non-signaled state - _packetEvent.Reset(); // start in a non-signaled state - _waitingForCompletion.frameSize = 0; - _waitingForCompletion.timestamp = 0; - _waitingForCompletion.latestPacketTime = -1; - _firstPacket = true; - _NACKSeqNumLength = 0; - _waitingForKeyFrame = false; - _rttMs = 0; - _packetsNotDecodable = 0; + // Start in a non-signaled state. 
+ frame_event_.Reset(); + packet_event_.Reset(); + waiting_for_completion_.frame_size = 0; + waiting_for_completion_.timestamp = 0; + waiting_for_completion_.latest_packet_time = -1; + first_packet_ = true; + nack_seq_nums_length_ = 0; + waiting_for_key_frame_ = false; + rtt_ms_ = 0; + num_not_decodable_packets_ = 0; - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, - _receiverId), "JB(0x%x): Jitter buffer: start", this); + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), "JB(0x%x): Jitter buffer: start", + this); } - -// Stop jitter buffer -void -VCMJitterBuffer::Stop() -{ - _critSect->Enter(); - _running = false; - _lastDecodedState.Reset(); - _frameList.clear(); - for (int i = 0; i < kMaxNumberOfFrames; i++) - { - if (_frameBuffers[i] != NULL) - { - static_cast(_frameBuffers[i])->SetState(kStateFree); - } +void VCMJitterBuffer::Stop() { + crit_sect_->Enter(); + running_ = false; + last_decoded_state_.Reset(); + frame_list_.clear(); + for (int i = 0; i < kMaxNumberOfFrames; i++) { + if (frame_buffers_[i] != NULL) { + static_cast(frame_buffers_[i])->SetState(kStateFree); } + } - _critSect->Leave(); - _frameEvent.Set(); // Make sure we exit from trying to get a frame to decoder - _packetEvent.Set(); // Make sure we exit from trying to get a sequence number - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, - _receiverId), "JB(0x%x): Jitter buffer: stop", this); + crit_sect_->Leave(); + // Make sure we wake up any threads waiting on these events. 
+ frame_event_.Set(); + packet_event_.Set(); + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), "JB(0x%x): Jitter buffer: stop", + this); } -bool -VCMJitterBuffer::Running() const -{ - CriticalSectionScoped cs(_critSect); - return _running; +bool VCMJitterBuffer::Running() const { + CriticalSectionScoped cs(crit_sect_); + return running_; } -// Flush jitter buffer -void -VCMJitterBuffer::Flush() -{ - CriticalSectionScoped cs(_critSect); - FlushInternal(); -} - -// Must be called under the critical section _critSect -void -VCMJitterBuffer::FlushInternal() -{ - // Erase all frames from the sorted list and set their state to free. - _frameList.clear(); - for (WebRtc_Word32 i = 0; i < _maxNumberOfFrames; i++) - { - ReleaseFrameInternal(_frameBuffers[i]); - } - _lastDecodedState.Reset(); // TODO (mikhal): sync reset - _packetsNotDecodable = 0; - - _frameEvent.Reset(); - _packetEvent.Reset(); - - _numConsecutiveOldFrames = 0; - _numConsecutiveOldPackets = 0; - - // Also reset the jitter and delay estimates - _jitterEstimate.Reset(); - _delayEstimate.Reset(_clock->MillisecondTimestamp()); - - _waitingForCompletion.frameSize = 0; - _waitingForCompletion.timestamp = 0; - _waitingForCompletion.latestPacketTime = -1; - - _firstPacket = true; - - _NACKSeqNumLength = 0; - - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, - _receiverId), "JB(0x%x): Jitter buffer: flush", this); -} - -// Set the frame state to free and remove it from the sorted -// frame list. Must be called from inside the critical section _critSect. -void -VCMJitterBuffer::ReleaseFrameInternal(VCMFrameBuffer* frame) -{ - if (frame != NULL && frame->GetState() != kStateDecoding) - { - frame->SetState(kStateFree); - } -} - -// Update frame state (set as complete if conditions are met) -// Doing it here increases the degree of freedom for e.g. future -// reconstructability of separate layers. 
Must be called under the -// critical section _critSect. -VCMFrameBufferEnum -VCMJitterBuffer::UpdateFrameState(VCMFrameBuffer* frame) -{ - if (frame == NULL) - { - WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId), "JB(0x%x) FB(0x%x): " - "UpdateFrameState NULL frame pointer", this, frame); - return kNoError; - } - - int length = frame->Length(); - if (_master) - { - // Only trace the primary jitter buffer to make it possible to parse - // and plot the trace file. - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId), - "JB(0x%x) FB(0x%x): Complete frame added to jitter buffer," - " size:%d type %d", - this, frame,length,frame->FrameType()); - } - - if (length != 0 && !frame->GetCountedFrame()) - { - // ignore Ack frames - _incomingFrameCount++; - frame->SetCountedFrame(true); - } - - // Check if we should drop frame - // an old complete frame can arrive too late - if (_lastDecodedState.IsOldFrame(frame)) - { - // Frame is older than the latest decoded frame, drop it. Will be - // released by CleanUpOldFrames later. - frame->Reset(); - frame->SetState(kStateEmpty); - - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId), - "JB(0x%x) FB(0x%x): Dropping old frame in Jitter buffer", - this, frame); - _dropCount++; - WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId), - "Jitter buffer drop count: %d, consecutive drops: %u", - _dropCount, _numConsecutiveOldFrames); - // Flush() if this happens consistently. - _numConsecutiveOldFrames++; - if (_numConsecutiveOldFrames > kMaxConsecutiveOldFrames) { - FlushInternal(); - return kFlushIndicator; - } - return kNoError; - } - _numConsecutiveOldFrames = 0; - frame->SetState(kStateComplete); - - - // Update receive statistics. 
We count all layers, thus when you use layers - // adding all key and delta frames might differ from frame count - if (frame->IsSessionComplete()) - { - switch (frame->FrameType()) - { - case kVideoFrameKey: - { - _receiveStatistics[0]++; - break; - } - case kVideoFrameDelta: - { - _receiveStatistics[1]++; - break; - } - case kVideoFrameGolden: - { - _receiveStatistics[2]++; - break; - } - case kVideoFrameAltRef: - { - _receiveStatistics[3]++; - break; - } - default: - assert(false); - - } - } - const FrameList::iterator it = FindOldestCompleteContinuousFrame(false); - VCMFrameBuffer* oldFrame = NULL; - if (it != _frameList.end()) - { - oldFrame = *it; - } - - // Only signal if this is the oldest frame. - // Not necessary the case due to packet reordering or NACK. - if (!WaitForNack() || (oldFrame != NULL && oldFrame == frame)) - { - _frameEvent.Set(); - } - return kNoError; +void VCMJitterBuffer::Flush() { + CriticalSectionScoped cs(crit_sect_); + // Erase all frames from the sorted list and set their state to free. + frame_list_.clear(); + for (int i = 0; i < max_number_of_frames_; i++) { + ReleaseFrameIfNotDecoding(frame_buffers_[i]); + } + last_decoded_state_.Reset(); // TODO(mikhal): sync reset. 
+ num_not_decodable_packets_ = 0; + frame_event_.Reset(); + packet_event_.Reset(); + num_consecutive_old_frames_ = 0; + num_consecutive_old_packets_ = 0; + // Also reset the jitter and delay estimates + jitter_estimate_.Reset(); + inter_frame_delay_.Reset(clock_->MillisecondTimestamp()); + waiting_for_completion_.frame_size = 0; + waiting_for_completion_.timestamp = 0; + waiting_for_completion_.latest_packet_time = -1; + first_packet_ = true; + nack_seq_nums_length_ = 0; + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), "JB(0x%x): Jitter buffer: flush", + this); } // Get received key and delta frames -WebRtc_Word32 -VCMJitterBuffer::GetFrameStatistics(WebRtc_UWord32& receivedDeltaFrames, - WebRtc_UWord32& receivedKeyFrames) const -{ - { - CriticalSectionScoped cs(_critSect); - receivedDeltaFrames = _receiveStatistics[1] + _receiveStatistics[3]; - receivedKeyFrames = _receiveStatistics[0] + _receiveStatistics[2]; +void VCMJitterBuffer::FrameStatistics(uint32_t* received_delta_frames, + uint32_t* received_key_frames) const { + assert(received_delta_frames); + assert(received_key_frames); + CriticalSectionScoped cs(crit_sect_); + *received_delta_frames = receive_statistics_[1] + receive_statistics_[3]; + *received_key_frames = receive_statistics_[0] + receive_statistics_[2]; +} + +int VCMJitterBuffer::num_not_decodable_packets() const { + CriticalSectionScoped cs(crit_sect_); + return num_not_decodable_packets_; +} + +int VCMJitterBuffer::num_discarded_packets() const { + CriticalSectionScoped cs(crit_sect_); + return num_discarded_packets_; +} + +// Calculate framerate and bitrate. 
+void VCMJitterBuffer::IncomingRateStatistics(unsigned int* framerate, + unsigned int* bitrate) { + assert(framerate); + assert(bitrate); + CriticalSectionScoped cs(crit_sect_); + const int64_t now = clock_->MillisecondTimestamp(); + int64_t diff = now - time_last_incoming_frame_count_; + if (diff < 1000 && incoming_frame_rate_ > 0 && incoming_bit_rate_ > 0) { + // Make sure we report something even though less than + // 1 second has passed since last update. + *framerate = incoming_frame_rate_; + *bitrate = incoming_bit_rate_; + } else if (incoming_frame_count_ != 0) { + // We have received frame(s) since last call to this function + + // Prepare calculations + if (diff <= 0) { + diff = 1; } - return 0; + // we add 0.5f for rounding + float rate = 0.5f + ((incoming_frame_count_ * 1000.0f) / diff); + if (rate < 1.0f) { + rate = 1.0f; + } + + // Calculate frame rate + // Let r be rate. + // r(0) = 1000*framecount/delta_time. + // (I.e. frames per second since last calculation.) + // frame_rate = r(0)/2 + r(-1)/2 + // (I.e. fr/s average this and the previous calculation.) + *framerate = (incoming_frame_rate_ + static_cast(rate)) / 2; + incoming_frame_rate_ = static_cast(rate); + + // Calculate bit rate + if (incoming_bit_count_ == 0) { + *bitrate = 0; + } else { + *bitrate = 10 * ((100 * incoming_bit_count_) / + static_cast(diff)); + } + incoming_bit_rate_ = *bitrate; + + // Reset count + incoming_frame_count_ = 0; + incoming_bit_count_ = 0; + time_last_incoming_frame_count_ = now; + + } else { + // No frames since last call + time_last_incoming_frame_count_ = clock_->MillisecondTimestamp(); + *framerate = 0; + bitrate = 0; + incoming_bit_rate_ = 0; + } } -WebRtc_UWord32 VCMJitterBuffer::NumNotDecodablePackets() const { - CriticalSectionScoped cs(_critSect); - return _packetsNotDecodable; +// Wait for the first packet in the next frame to arrive. 
+int64_t VCMJitterBuffer::NextTimestamp(uint32_t max_wait_time_ms, + FrameType* incoming_frame_type, + int64_t* render_time_ms) { + assert(incoming_frame_type); + assert(render_time_ms); + if (!running_) { + return -1; + } + + crit_sect_->Enter(); + + // Finding oldest frame ready for decoder, check sequence number and size. + CleanUpOldFrames(); + + FrameList::iterator it = frame_list_.begin(); + + if (it == frame_list_.end()) { + packet_event_.Reset(); + crit_sect_->Leave(); + + if (packet_event_.Wait(max_wait_time_ms) == kEventSignaled) { + // are we closing down the Jitter buffer + if (!running_) { + return -1; + } + crit_sect_->Enter(); + + CleanUpOldFrames(); + it = frame_list_.begin(); + } else { + crit_sect_->Enter(); + } + } + + if (it == frame_list_.end()) { + crit_sect_->Leave(); + return -1; + } + // We have a frame. + *incoming_frame_type = (*it)->FrameType(); + *render_time_ms = (*it)->RenderTimeMs(); + const uint32_t timestamp = (*it)->TimeStamp(); + crit_sect_->Leave(); + + return timestamp; } -WebRtc_UWord32 VCMJitterBuffer::DiscardedPackets() const { - CriticalSectionScoped cs(_critSect); - return _discardedPackets; +// Answers the question: +// Will the packet sequence be complete if the next frame is grabbed for +// decoding right now? That is, have we lost a frame between the last decoded +// frame and the next, or is the next +// frame missing one or more packets? +bool VCMJitterBuffer::CompleteSequenceWithNextFrame() { + CriticalSectionScoped cs(crit_sect_); + // Finding oldest frame ready for decoder, check sequence number and size + CleanUpOldFrames(); + + if (frame_list_.empty()) + return true; + + VCMFrameBuffer* oldest_frame = frame_list_.front(); + if (frame_list_.size() <= 1 && + oldest_frame->GetState() != kStateComplete) { + // Frame not ready to be decoded. + return true; + } + if (!oldest_frame->Complete()) { + return false; + } + + // See if we have lost a frame before this one. 
+ if (last_decoded_state_.init()) { + // Following start, reset or flush -> check for key frame. + if (oldest_frame->FrameType() != kVideoFrameKey) { + return false; + } + } else if (oldest_frame->GetLowSeqNum() == -1) { + return false; + } else if (!last_decoded_state_.ContinuousFrame(oldest_frame)) { + return false; + } + return true; +} + +// Returns immediately or a |max_wait_time_ms| ms event hang waiting for a +// complete frame, |max_wait_time_ms| decided by caller. +VCMEncodedFrame* VCMJitterBuffer::GetCompleteFrameForDecoding( + uint32_t max_wait_time_ms) { + if (!running_) { + return NULL; + } + + crit_sect_->Enter(); + + CleanUpOldFrames(); + + if (last_decoded_state_.init() && WaitForRetransmissions()) { + waiting_for_key_frame_ = true; + } + + FrameList::iterator it = FindOldestCompleteContinuousFrame(false); + if (it == frame_list_.end()) { + if (max_wait_time_ms == 0) { + crit_sect_->Leave(); + return NULL; + } + const int64_t end_wait_time_ms = clock_->MillisecondTimestamp() + + max_wait_time_ms; + int64_t wait_time_ms = max_wait_time_ms; + while (wait_time_ms > 0) { + crit_sect_->Leave(); + const EventTypeWrapper ret = + frame_event_.Wait(static_cast(wait_time_ms)); + crit_sect_->Enter(); + if (ret == kEventSignaled) { + // are we closing down the Jitter buffer + if (!running_) { + crit_sect_->Leave(); + return NULL; + } + + // Finding oldest frame ready for decoder, but check + // sequence number and size + CleanUpOldFrames(); + it = FindOldestCompleteContinuousFrame(false); + if (it == frame_list_.end()) { + wait_time_ms = end_wait_time_ms - + clock_->MillisecondTimestamp(); + } else { + break; + } + } else { + crit_sect_->Leave(); + return NULL; + } + } + // Inside |crit_sect_|. + } else { + // We already have a frame reset the event. + frame_event_.Reset(); + } + + if (it == frame_list_.end()) { + // Even after signaling we're still missing a complete continuous frame. 
+ crit_sect_->Leave(); + return NULL; + } + + VCMFrameBuffer* oldest_frame = *it; + it = frame_list_.erase(it); + + // Update jitter estimate. + const bool retransmitted = (oldest_frame->GetNackCount() > 0); + if (retransmitted) { + jitter_estimate_.FrameNacked(); + } else if (oldest_frame->Length() > 0) { + // Ignore retransmitted and empty frames. + UpdateJitterEstimate(*oldest_frame, false); + } + + oldest_frame->SetState(kStateDecoding); + + CleanUpOldFrames(); + + if (oldest_frame->FrameType() == kVideoFrameKey) { + waiting_for_key_frame_ = false; + } + + // We have a frame - update decoded state with frame info. + last_decoded_state_.SetState(oldest_frame); + + crit_sect_->Leave(); + + return oldest_frame; +} + +VCMEncodedFrame* VCMJitterBuffer::GetFrameForDecoding() { + CriticalSectionScoped cs(crit_sect_); + if (!running_) { + return NULL; + } + + if (WaitForRetransmissions()) { + return GetFrameForDecodingNACK(); + } + + CleanUpOldFrames(); + + if (frame_list_.empty()) { + return NULL; + } + + VCMFrameBuffer* oldest_frame = frame_list_.front(); + if (frame_list_.size() <= 1 && + oldest_frame->GetState() != kStateComplete) { + return NULL; + } + + // Incomplete frame pulled out from jitter buffer, + // update the jitter estimate with what we currently know. + // This frame shouldn't have been retransmitted, but if we recently + // turned off NACK this might still happen. + const bool retransmitted = (oldest_frame->GetNackCount() > 0); + if (retransmitted) { + jitter_estimate_.FrameNacked(); + } else if (oldest_frame->Length() > 0) { + // Ignore retransmitted and empty frames. 
+ // Update with the previous incomplete frame first + if (waiting_for_completion_.latest_packet_time >= 0) { + UpdateJitterEstimate(waiting_for_completion_, true); + } + // Then wait for this one to get complete + waiting_for_completion_.frame_size = oldest_frame->Length(); + waiting_for_completion_.latest_packet_time = + oldest_frame->LatestPacketTimeMs(); + waiting_for_completion_.timestamp = oldest_frame->TimeStamp(); + } + frame_list_.erase(frame_list_.begin()); + + // Look for previous frame loss + VerifyAndSetPreviousFrameLost(oldest_frame); + + // The state must be changed to decoding before cleaning up zero sized + // frames to avoid empty frames being cleaned up and then given to the + // decoder. + // Set as decoding. Propagates the missing_frame bit. + oldest_frame->SetState(kStateDecoding); + + CleanUpOldFrames(); + + if (oldest_frame->FrameType() == kVideoFrameKey) { + waiting_for_key_frame_ = false; + } + + num_not_decodable_packets_ += oldest_frame->NotDecodablePackets(); + + // We have a frame - update decoded state with frame info. + last_decoded_state_.SetState(oldest_frame); + + return oldest_frame; +} + +// Release frame when done with decoding. Should never be used to release +// frames from within the jitter buffer. +void VCMJitterBuffer::ReleaseFrame(VCMEncodedFrame* frame) { + CriticalSectionScoped cs(crit_sect_); + VCMFrameBuffer* frame_buffer = static_cast(frame); + if (frame_buffer) + frame_buffer->SetState(kStateFree); } // Gets frame to use for this timestamp. If no match, get empty frame. -WebRtc_Word32 -VCMJitterBuffer::GetFrame(const VCMPacket& packet, VCMEncodedFrame*& frame) -{ - if (!_running) // don't accept incoming packets until we are started - { - return VCM_UNINITIALIZED; +int VCMJitterBuffer::GetFrame(const VCMPacket& packet, + VCMEncodedFrame*& frame) { + if (!running_) { // Don't accept incoming packets until we are started. 
+ return VCM_UNINITIALIZED; + } + + crit_sect_->Enter(); + // Does this packet belong to an old frame? + if (last_decoded_state_.IsOldPacket(&packet)) { + // Account only for media packets. + if (packet.sizeBytes > 0) { + num_discarded_packets_++; + num_consecutive_old_packets_++; } + // Update last decoded sequence number if the packet arrived late and + // belongs to a frame with a timestamp equal to the last decoded + // timestamp. + last_decoded_state_.UpdateOldPacket(&packet); - _critSect->Enter(); - // Does this packet belong to an old frame? - if (_lastDecodedState.IsOldPacket(&packet)) - { - // Account only for media packets - if (packet.sizeBytes > 0) - { - _discardedPackets++; - _numConsecutiveOldPackets++; - } - // Update last decoded sequence number if the packet arrived late and - // belongs to a frame with a timestamp equal to the last decoded - // timestamp. - _lastDecodedState.UpdateOldPacket(&packet); - - if (_numConsecutiveOldPackets > kMaxConsecutiveOldPackets) - { - FlushInternal(); - _critSect->Leave(); - return VCM_FLUSH_INDICATOR; - } - _critSect->Leave(); - return VCM_OLD_PACKET_ERROR; + if (num_consecutive_old_packets_ > kMaxConsecutiveOldPackets) { + Flush(); + crit_sect_->Leave(); + return VCM_FLUSH_INDICATOR; } - _numConsecutiveOldPackets = 0; + crit_sect_->Leave(); + return VCM_OLD_PACKET_ERROR; + } + num_consecutive_old_packets_ = 0; - FrameList::iterator it = std::find_if( - _frameList.begin(), - _frameList.end(), - FrameEqualTimestamp(packet.timestamp)); + FrameList::iterator it = std::find_if( + frame_list_.begin(), + frame_list_.end(), + FrameEqualTimestamp(packet.timestamp)); - if (it != _frameList.end()) { - frame = *it; - _critSect->Leave(); - return VCM_OK; - } + if (it != frame_list_.end()) { + frame = *it; + crit_sect_->Leave(); + return VCM_OK; + } - _critSect->Leave(); + crit_sect_->Leave(); - // No match, return empty frame - frame = GetEmptyFrame(); - if (frame != NULL) - { - return VCM_OK; - } - // No free frame! 
Try to reclaim some... - _critSect->Enter(); - RecycleFramesUntilKeyFrame(); - _critSect->Leave(); + // No match, return empty frame. + frame = GetEmptyFrame(); + if (frame != NULL) { + return VCM_OK; + } + // No free frame! Try to reclaim some... + crit_sect_->Enter(); + RecycleFramesUntilKeyFrame(); + crit_sect_->Leave(); - frame = GetEmptyFrame(); - if (frame != NULL) - { - return VCM_OK; - } - return VCM_JITTER_BUFFER_ERROR; + frame = GetEmptyFrame(); + if (frame != NULL) { + return VCM_OK; + } + return VCM_JITTER_BUFFER_ERROR; } // Deprecated! Kept for testing purposes. -VCMEncodedFrame* -VCMJitterBuffer::GetFrame(const VCMPacket& packet) -{ - VCMEncodedFrame* frame = NULL; - if (GetFrame(packet, frame) < 0) - { - return NULL; - } - return frame; -} - -// Get empty frame, creates new (i.e. increases JB size) if necessary -VCMFrameBuffer* -VCMJitterBuffer::GetEmptyFrame() -{ - if (!_running) // don't accept incoming packets until we are started - { - return NULL; - } - - _critSect->Enter(); - - for (int i = 0; i <_maxNumberOfFrames; ++i) - { - if (kStateFree == _frameBuffers[i]->GetState()) - { - // found a free buffer - _frameBuffers[i]->SetState(kStateEmpty); - _critSect->Leave(); - return _frameBuffers[i]; - } - } - - // Check if we can increase JB size - if (_maxNumberOfFrames < kMaxNumberOfFrames) - { - VCMFrameBuffer* ptrNewBuffer = new VCMFrameBuffer(); - ptrNewBuffer->SetState(kStateEmpty); - _frameBuffers[_maxNumberOfFrames] = ptrNewBuffer; - _maxNumberOfFrames++; - - _critSect->Leave(); - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId), "JB(0x%x) FB(0x%x): Jitter buffer " - "increased to:%d frames", this, ptrNewBuffer, _maxNumberOfFrames); - return ptrNewBuffer; - } - _critSect->Leave(); - - // We have reached max size, cannot increase JB size +VCMEncodedFrame* VCMJitterBuffer::GetFrame(const VCMPacket& packet) { + VCMEncodedFrame* frame = NULL; + if (GetFrame(packet, frame) < 0) { return NULL; + } + return 
frame;
+}
+int64_t VCMJitterBuffer::LastPacketTime(VCMEncodedFrame* frame,
+                                        bool* retransmitted) const {
+  assert(retransmitted);
+  CriticalSectionScoped cs(crit_sect_);
+  *retransmitted = (static_cast<VCMFrameBuffer*>(frame)->GetNackCount() > 0);
+  return static_cast<VCMFrameBuffer*>(frame)->LatestPacketTimeMs();
+}
+
+VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(VCMEncodedFrame* encoded_frame,
+                                                 const VCMPacket& packet) {
+  assert(encoded_frame);
+  CriticalSectionScoped cs(crit_sect_);
+  int64_t now_ms = clock_->MillisecondTimestamp();
+  VCMFrameBufferEnum buffer_return = kSizeError;
+  VCMFrameBufferEnum ret = kSizeError;
+  VCMFrameBuffer* frame = static_cast<VCMFrameBuffer*>(encoded_frame);
+
+  // We are keeping track of the first seq num, the latest seq num and
+  // the number of wraps to be able to calculate how many packets we expect.
+  if (first_packet_) {
+    // Now it's time to start estimating jitter
+    // reset the delay estimate.
+    inter_frame_delay_.Reset(clock_->MillisecondTimestamp());
+    first_packet_ = false;
+  }
+
+  // Empty packets may bias the jitter estimate (lacking size component),
+  // therefore don't let empty packet trigger the following updates:
+  if (packet.frameType != kFrameEmpty) {
+    if (waiting_for_completion_.timestamp == packet.timestamp) {
+      // This can get bad if we have a lot of duplicate packets,
+      // we will then count some packet multiple times.
+ waiting_for_completion_.frame_size += packet.sizeBytes; + waiting_for_completion_.latest_packet_time = now_ms; + } else if (waiting_for_completion_.latest_packet_time >= 0 && + waiting_for_completion_.latest_packet_time + 2000 <= now_ms) { + // A packet should never be more than two seconds late + UpdateJitterEstimate(waiting_for_completion_, true); + waiting_for_completion_.latest_packet_time = -1; + waiting_for_completion_.frame_size = 0; + waiting_for_completion_.timestamp = 0; + } + } + + VCMFrameBufferStateEnum state = frame->GetState(); + last_decoded_state_.UpdateOldPacket(&packet); + // Insert packet + // Check for first packet + // High sequence number will be -1 if neither an empty packet nor + // a media packet has been inserted. + bool first = (frame->GetHighSeqNum() == -1); + // When in Hybrid mode, we allow for a decodable state + // Note: Under current version, a decodable frame will never be + // triggered, as the body of the function is empty. + // TODO(mikhal): Update when decodable is enabled. + buffer_return = frame->InsertPacket(packet, now_ms, + nack_mode_ == kNackHybrid, + rtt_ms_); + ret = buffer_return; + if (buffer_return > 0) { + incoming_bit_count_ += packet.sizeBytes << 3; + + // Has this packet been nacked or is it about to be nacked? + if (IsPacketRetransmitted(packet)) { + frame->IncrementNackCount(); + } + + // Insert each frame once on the arrival of the first packet + // belonging to that frame (media or empty). + if (state == kStateEmpty && first) { + ret = kFirstPacket; + FrameList::reverse_iterator rit = std::find_if( + frame_list_.rbegin(), + frame_list_.rend(), + FrameSmallerTimestamp(frame->TimeStamp())); + frame_list_.insert(rit.base(), frame); + } + } + switch (buffer_return) { + case kStateError: + case kTimeStampError: + case kSizeError: { + if (frame != NULL) { + // Will be released when it gets old. 
+ frame->Reset(); + frame->SetState(kStateEmpty); + } + break; + } + case kCompleteSession: { + // Only update return value for a JB flush indicator. + if (UpdateFrameState(frame) == kFlushIndicator) + ret = kFlushIndicator; + // Signal that we have a received packet. + packet_event_.Set(); + break; + } + case kDecodableSession: + case kIncomplete: { + // Signal that we have a received packet. + packet_event_.Set(); + break; + } + case kNoError: + case kDuplicatePacket: { + break; + } + default: { + assert(false && "JitterBuffer::InsertPacket: Undefined value"); + } + } + return ret; +} + +uint32_t VCMJitterBuffer::EstimatedJitterMs() { + CriticalSectionScoped cs(crit_sect_); + uint32_t estimate = VCMJitterEstimator::OPERATING_SYSTEM_JITTER; + + // Compute RTT multiplier for estimation + // low_rtt_nackThresholdMs_ == -1 means no FEC. + double rtt_mult = 1.0f; + if (nack_mode_ == kNackHybrid && (low_rtt_nack_threshold_ms_ >= 0 && + static_cast(rtt_ms_) > low_rtt_nack_threshold_ms_)) { + // from here we count on FEC + rtt_mult = 0.0f; + } + estimate += static_cast + (jitter_estimate_.GetJitterEstimate(rtt_mult) + 0.5); + return estimate; +} + +void VCMJitterBuffer::UpdateRtt(uint32_t rtt_ms) { + CriticalSectionScoped cs(crit_sect_); + rtt_ms_ = rtt_ms; + jitter_estimate_.UpdateRtt(rtt_ms); +} + +void VCMJitterBuffer::SetNackMode(VCMNackMode mode, + int low_rtt_nack_threshold_ms, + int high_rtt_nack_threshold_ms) { + CriticalSectionScoped cs(crit_sect_); + nack_mode_ = mode; + assert(low_rtt_nack_threshold_ms >= -1 && high_rtt_nack_threshold_ms >= -1); + assert(high_rtt_nack_threshold_ms == -1 || + low_rtt_nack_threshold_ms <= high_rtt_nack_threshold_ms); + assert(low_rtt_nack_threshold_ms > -1 || high_rtt_nack_threshold_ms == -1); + low_rtt_nack_threshold_ms_ = low_rtt_nack_threshold_ms; + high_rtt_nack_threshold_ms_ = high_rtt_nack_threshold_ms; + if (nack_mode_ == kNoNack) { + jitter_estimate_.ResetNackCount(); + } +} + +VCMNackMode VCMJitterBuffer::nack_mode() 
const { + CriticalSectionScoped cs(crit_sect_); + return nack_mode_; +} + +uint16_t* VCMJitterBuffer::CreateNackList(uint16_t* nack_list_size, + bool* list_extended) { + assert(nack_list_size); + assert(list_extended); + // TODO(mikhal/stefan): Refactor to use last_decoded_state. + CriticalSectionScoped cs(crit_sect_); + int i = 0; + int32_t low_seq_num = -1; + int32_t high_seq_num = -1; + *list_extended = false; + + // Don't create a NACK list if we won't wait for the retransmitted packets. + if (!WaitForRetransmissions()) { + *nack_list_size = 0; + return NULL; + } + + // Find the lowest (last decoded) sequence number and + // the highest (highest sequence number of the newest frame) + // sequence number. The NACK list is a subset of the range + // between those two numbers. + GetLowHighSequenceNumbers(&low_seq_num, &high_seq_num); + + // Build a list of all sequence numbers we have. + if (low_seq_num == -1 || high_seq_num == -1) { + // This happens if we lose the first packet, nothing is popped. + if (high_seq_num == -1) { + // We have not received any packets yet. + *nack_list_size = 0; + } else { + // Signal that we want a key frame request to be sent. + *nack_list_size = 0xffff; + } + return NULL; + } + + int number_of_seq_num = 0; + if (low_seq_num > high_seq_num) { + if (low_seq_num - high_seq_num > 0x00ff) { + // Wrap. + number_of_seq_num = (0xffff - low_seq_num) + high_seq_num + 1; + } + } else { + number_of_seq_num = high_seq_num - low_seq_num; + } + + if (number_of_seq_num > kNackHistoryLength) { + // NACK list has grown too big, flush and try to restart. + WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), + "Nack list too large, try to find a key frame and restart " + "from seq: %d. Lowest seq in jb %d", + high_seq_num, low_seq_num); + + // This NACK size will trigger a key frame request. 
+ bool found_key_frame = false; + + while (number_of_seq_num > kNackHistoryLength) { + found_key_frame = RecycleFramesUntilKeyFrame(); + + if (!found_key_frame) { + break; + } + + // Check if we still have too many packets in the jitter buffer. + low_seq_num = -1; + high_seq_num = -1; + GetLowHighSequenceNumbers(&low_seq_num, &high_seq_num); + + if (high_seq_num == -1) { + assert(low_seq_num != -1); // This should never happen. + // We can't calculate the NACK list length. + return NULL; + } + + number_of_seq_num = 0; + if (low_seq_num > high_seq_num) { + if (low_seq_num - high_seq_num > 0x00ff) { + // wrap + number_of_seq_num = (0xffff - low_seq_num) + high_seq_num + 1; + high_seq_num = low_seq_num; + } + } else { + number_of_seq_num = high_seq_num - low_seq_num; + } + } + + if (!found_key_frame) { + // Set the last decoded sequence number to current high. + // This is to not get a large nack list again right away. + last_decoded_state_.SetSeqNum(static_cast(high_seq_num)); + // Set to trigger key frame signal. + *nack_list_size = 0xffff; + *list_extended = true; + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, -1, + "\tNo key frame found, request one. last_decoded_seq_num_ " + "%d", last_decoded_state_.sequence_num()); + } else { + // We have cleaned up the jitter buffer and found a key frame. + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, -1, + "\tKey frame found. last_decoded_seq_num_ %d", + last_decoded_state_.sequence_num()); + *nack_list_size = 0; + } + + return NULL; + } + + uint16_t seq_number_iterator = static_cast(low_seq_num + 1); + for (i = 0; i < number_of_seq_num; i++) { + nack_seq_nums_internal_[i] = seq_number_iterator; + seq_number_iterator++; + } + // Now we have a list of all sequence numbers that could have been sent. + // Zero out the ones we have received. 
+ for (i = 0; i < max_number_of_frames_; i++) { + // We don't need to check if frame is decoding since low_seq_num is based + // on the last decoded sequence number. + VCMFrameBufferStateEnum state = frame_buffers_[i]->GetState(); + if ((kStateFree != state) && + (kStateEmpty != state)) { + // Reaching thus far means we are going to update the NACK list + // When in hybrid mode, we use the soft NACKing feature. + if (nack_mode_ == kNackHybrid) { + frame_buffers_[i]->BuildSoftNackList(nack_seq_nums_internal_, + number_of_seq_num, + rtt_ms_); + } else { + // Used when the frame is being processed by the decoding thread + // don't need to use that info in this loop. + frame_buffers_[i]->BuildHardNackList(nack_seq_nums_internal_, + number_of_seq_num); + } + } + } + + // Compress the list. + int empty_index = -1; + for (i = 0; i < number_of_seq_num; i++) { + if (nack_seq_nums_internal_[i] == -1 || nack_seq_nums_internal_[i] == -2) { + // This is empty. + if (empty_index == -1) { + // No empty index before, remember this position. + empty_index = i; + } + } else { + // This is not empty. + if (empty_index == -1) { + // No empty index, continue. + } else { + nack_seq_nums_internal_[empty_index] = nack_seq_nums_internal_[i]; + nack_seq_nums_internal_[i] = -1; + empty_index++; + } + } + } + + if (empty_index == -1) { + // No empty. + *nack_list_size = number_of_seq_num; + } else { + *nack_list_size = empty_index; + } + + if (*nack_list_size > nack_seq_nums_length_) { + // Larger list: NACK list was extended since the last call. + *list_extended = true; + } + + for (unsigned int j = 0; j < *nack_list_size; j++) { + // Check if the list has been extended since it was last created, i.e, + // new items have been added. + if (nack_seq_nums_length_ > j && !*list_extended) { + unsigned int k = 0; + for (k = j; k < nack_seq_nums_length_; k++) { + // Found the item in the last list, i.e, no new items found yet. 
+        if (nack_seq_nums_[k] ==
+            static_cast<uint16_t>(nack_seq_nums_internal_[j])) {
+          break;
+        }
+      }
+      if (k == nack_seq_nums_length_) {  // New item not found in last list.
+        *list_extended = true;
+      }
+    } else {
+      *list_extended = true;
+    }
+    nack_seq_nums_[j] = static_cast<uint16_t>(nack_seq_nums_internal_[j]);
+  }
+
+  nack_seq_nums_length_ = *nack_list_size;
+
+  return nack_seq_nums_;
+}
+
+int64_t VCMJitterBuffer::LastDecodedTimestamp() const {
+  CriticalSectionScoped cs(crit_sect_);
+  return last_decoded_state_.time_stamp();
+}
+
+VCMEncodedFrame* VCMJitterBuffer::GetFrameForDecodingNACK() {
+  CleanUpOldFrames();
+  // First look for a complete _continuous_ frame.
+  // When waiting for nack, wait for a key frame, if a continuous frame cannot
+  // be determined (i.e. initial decoding state).
+  if (last_decoded_state_.init()) {
+    waiting_for_key_frame_ = true;
+  }
+  // Allow for a decodable frame when in Hybrid mode.
+  bool enable_decodable = nack_mode_ == kNackHybrid ? true : false;
+  FrameList::iterator it = FindOldestCompleteContinuousFrame(enable_decodable);
+  if (it == frame_list_.end()) {
+    // If we didn't find one we're good with a complete key/decodable frame.
+    it = find_if(frame_list_.begin(), frame_list_.end(),
+                 CompleteDecodableKeyFrameCriteria());
+    if (it == frame_list_.end()) {
+      return NULL;
+    }
+  }
+  VCMFrameBuffer* oldest_frame = *it;
+  // Update jitter estimate
+  const bool retransmitted = (oldest_frame->GetNackCount() > 0);
+  if (retransmitted) {
+    jitter_estimate_.FrameNacked();
+  } else if (oldest_frame->Length() > 0) {
+    // Ignore retransmitted and empty frames.
+    UpdateJitterEstimate(*oldest_frame, false);
+  }
+  it = frame_list_.erase(it);
+
+  // Look for previous frame loss.
+  VerifyAndSetPreviousFrameLost(oldest_frame);
+
+  // The state must be changed to decoding before cleaning up zero sized
+  // frames to avoid empty frames being cleaned up and then given to the
+  // decoder.
+ oldest_frame->SetState(kStateDecoding); + + // Clean up old frames and empty frames. + CleanUpOldFrames(); + + if (oldest_frame->FrameType() == kVideoFrameKey) { + waiting_for_key_frame_ = false; + } + + // We have a frame - update decoded state with frame info. + last_decoded_state_.SetState(oldest_frame); + + return oldest_frame; +} + +// Set the frame state to free and remove it from the sorted +// frame list. Must be called from inside the critical section crit_sect_. +void VCMJitterBuffer::ReleaseFrameIfNotDecoding(VCMFrameBuffer* frame) { + if (frame != NULL && frame->GetState() != kStateDecoding) { + frame->SetState(kStateFree); + } +} + +VCMFrameBuffer* VCMJitterBuffer::GetEmptyFrame() { + if (!running_) { + return NULL; + } + + crit_sect_->Enter(); + + for (int i = 0; i < max_number_of_frames_; ++i) { + if (kStateFree == frame_buffers_[i]->GetState()) { + // found a free buffer + frame_buffers_[i]->SetState(kStateEmpty); + crit_sect_->Leave(); + return frame_buffers_[i]; + } + } + + // Check if we can increase JB size + if (max_number_of_frames_ < kMaxNumberOfFrames) { + VCMFrameBuffer* ptr_new_buffer = new VCMFrameBuffer(); + ptr_new_buffer->SetState(kStateEmpty); + frame_buffers_[max_number_of_frames_] = ptr_new_buffer; + max_number_of_frames_++; + + crit_sect_->Leave(); + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), + "JB(0x%x) FB(0x%x): Jitter buffer increased to:%d frames", + this, ptr_new_buffer, max_number_of_frames_); + return ptr_new_buffer; + } + crit_sect_->Leave(); + + // We have reached max size, cannot increase JB size + return NULL; +} + +// Recycle oldest frames up to a key frame, used if jitter buffer is completely +// full. +bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() { + // Remove up to oldest key frame + while (frame_list_.size() > 0) { + // Throw at least one frame. 
+ drop_count_++; + FrameList::iterator it = frame_list_.begin(); + WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), + "Jitter buffer drop count:%d, low_seq %d", drop_count_, + (*it)->GetLowSeqNum()); + ReleaseFrameIfNotDecoding(*it); + it = frame_list_.erase(it); + if (it != frame_list_.end() && (*it)->FrameType() == kVideoFrameKey) { + // Fake the last_decoded_state to match this key frame. + last_decoded_state_.SetStateOneBack(*it); + return true; + } + } + waiting_for_key_frame_ = true; + last_decoded_state_.Reset(); // TODO(mikhal): No sync. + return false; +} + +// Must be called under the critical section |crit_sect_|. +VCMFrameBufferEnum VCMJitterBuffer::UpdateFrameState(VCMFrameBuffer* frame) { + if (frame == NULL) { + WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), "JB(0x%x) FB(0x%x): " + "UpdateFrameState NULL frame pointer", this, frame); + return kNoError; + } + + int length = frame->Length(); + if (master_) { + // Only trace the primary jitter buffer to make it possible to parse + // and plot the trace file. + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), + "JB(0x%x) FB(0x%x): Complete frame added to jitter buffer," + " size:%d type %d", + this, frame, length, frame->FrameType()); + } + + if (length != 0 && !frame->GetCountedFrame()) { + // Ignore ACK frames. + incoming_frame_count_++; + frame->SetCountedFrame(true); + } + + // Check if we should drop the frame. A complete frame can arrive too late. + if (last_decoded_state_.IsOldFrame(frame)) { + // Frame is older than the latest decoded frame, drop it. Will be + // released by CleanUpOldFrames later. 
+ frame->Reset(); + frame->SetState(kStateEmpty); + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), + "JB(0x%x) FB(0x%x): Dropping old frame in Jitter buffer", + this, frame); + drop_count_++; + WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), + "Jitter buffer drop count: %d, consecutive drops: %u", + drop_count_, num_consecutive_old_frames_); + // Flush() if this happens consistently. + num_consecutive_old_frames_++; + if (num_consecutive_old_frames_ > kMaxConsecutiveOldFrames) { + Flush(); + return kFlushIndicator; + } + return kNoError; + } + num_consecutive_old_frames_ = 0; + frame->SetState(kStateComplete); + + // Update receive statistics. We count all layers, thus when you use layers + // adding all key and delta frames might differ from frame count. + if (frame->IsSessionComplete()) { + switch (frame->FrameType()) { + case kVideoFrameKey: { + receive_statistics_[0]++; + break; + } + case kVideoFrameDelta: { + receive_statistics_[1]++; + break; + } + case kVideoFrameGolden: { + receive_statistics_[2]++; + break; + } + case kVideoFrameAltRef: { + receive_statistics_[3]++; + break; + } + default: + assert(false); + } + } + const FrameList::iterator it = FindOldestCompleteContinuousFrame(false); + VCMFrameBuffer* old_frame = NULL; + if (it != frame_list_.end()) { + old_frame = *it; + } + + // Only signal if this is the oldest frame. + // Not necessarily the case due to packet reordering or NACK. 
+ if (!WaitForRetransmissions() || (old_frame != NULL && old_frame == frame)) { + frame_event_.Set(); + } + return kNoError; +} // Find oldest complete frame used for getting next frame to decode // Must be called under critical section -FrameList::iterator -VCMJitterBuffer::FindOldestCompleteContinuousFrame(bool enable_decodable) { +FrameList::iterator VCMJitterBuffer::FindOldestCompleteContinuousFrame( + bool enable_decodable) { // If we have more than one frame done since last time, pick oldest. VCMFrameBuffer* oldest_frame = NULL; - FrameList::iterator it = _frameList.begin(); + FrameList::iterator it = frame_list_.begin(); // When temporal layers are available, we search for a complete or decodable // frame until we hit one of the following: // 1. Continuous base or sync layer. // 2. The end of the list was reached. - for (; it != _frameList.end(); ++it) { + for (; it != frame_list_.end(); ++it) { oldest_frame = *it; VCMFrameBufferStateEnum state = oldest_frame->GetState(); // Is this frame complete or decodable and continuous? if ((state == kStateComplete || - (enable_decodable && state == kStateDecodable)) && - _lastDecodedState.ContinuousFrame(oldest_frame)) { + (enable_decodable && state == kStateDecodable)) && + last_decoded_state_.ContinuousFrame(oldest_frame)) { break; } else { int temporal_id = oldest_frame->TemporalId(); @@ -587,1184 +1249,184 @@ VCMJitterBuffer::FindOldestCompleteContinuousFrame(bool enable_decodable) { if (oldest_frame == NULL) { // No complete frame no point to continue. - return _frameList.end(); - } else if (_waitingForKeyFrame && + return frame_list_.end(); + } else if (waiting_for_key_frame_ && oldest_frame->FrameType() != kVideoFrameKey) { // We are waiting for a key frame. - return _frameList.end(); + return frame_list_.end(); } - // We have a complete continuous frame. 
return it; } -// Call from inside the critical section _critSect -void -VCMJitterBuffer::RecycleFrame(VCMFrameBuffer* frame) -{ - if (frame == NULL) - { - return; - } - - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId), - "JB(0x%x) FB(0x%x): RecycleFrame, size:%d", - this, frame, frame->Length()); - - ReleaseFrameInternal(frame); -} - -// Calculate frame and bit rates -WebRtc_Word32 -VCMJitterBuffer::GetUpdate(WebRtc_UWord32& frameRate, WebRtc_UWord32& bitRate) -{ - CriticalSectionScoped cs(_critSect); - const WebRtc_Word64 now = _clock->MillisecondTimestamp(); - WebRtc_Word64 diff = now - _timeLastIncomingFrameCount; - if (diff < 1000 && _incomingFrameRate > 0 && _incomingBitRate > 0) - { - // Make sure we report something even though less than - // 1 second has passed since last update. - frameRate = _incomingFrameRate; - bitRate = _incomingBitRate; - } - else if (_incomingFrameCount != 0) - { - // We have received frame(s) since last call to this function - - // Prepare calculations - if (diff <= 0) - { - diff = 1; - } - // we add 0.5f for rounding - float rate = 0.5f + ((_incomingFrameCount * 1000.0f) / diff); - if (rate < 1.0f) // don't go below 1, can crash - { - rate = 1.0f; - } - - // Calculate frame rate - // Let r be rate. - // r(0) = 1000*framecount/delta_time. - // (I.e. frames per second since last calculation.) - // frameRate = r(0)/2 + r(-1)/2 - // (I.e. fr/s average this and the previous calculation.) 
- frameRate = (_incomingFrameRate + (WebRtc_Word32)rate) >> 1; - _incomingFrameRate = (WebRtc_UWord8)rate; - - // Calculate bit rate - if (_incomingBitCount == 0) - { - bitRate = 0; - } - else - { - bitRate = 10 * ((100 * _incomingBitCount) / - static_cast(diff)); - } - _incomingBitRate = bitRate; - - // Reset count - _incomingFrameCount = 0; - _incomingBitCount = 0; - _timeLastIncomingFrameCount = now; - - } - else - { - // No frames since last call - _timeLastIncomingFrameCount = _clock->MillisecondTimestamp(); - frameRate = 0; - bitRate = 0; - _incomingBitRate = 0; - } - - return 0; -} - -// Returns immediately or a X ms event hang waiting for a complete frame, -// X decided by caller -VCMEncodedFrame* -VCMJitterBuffer::GetCompleteFrameForDecoding(WebRtc_UWord32 maxWaitTimeMS) -{ - if (!_running) - { - return NULL; - } - - _critSect->Enter(); - - CleanUpOldFrames(); - - if (_lastDecodedState.init() && WaitForNack()) { - _waitingForKeyFrame = true; - } - - FrameList::iterator it = FindOldestCompleteContinuousFrame(false); - if (it == _frameList.end()) - { - if (maxWaitTimeMS == 0) - { - _critSect->Leave(); - return NULL; - } - const WebRtc_Word64 endWaitTimeMs = _clock->MillisecondTimestamp() - + maxWaitTimeMS; - WebRtc_Word64 waitTimeMs = maxWaitTimeMS; - while (waitTimeMs > 0) - { - _critSect->Leave(); - const EventTypeWrapper ret = - _frameEvent.Wait(static_cast(waitTimeMs)); - _critSect->Enter(); - if (ret == kEventSignaled) - { - // are we closing down the Jitter buffer - if (!_running) - { - _critSect->Leave(); - return NULL; - } - - // Finding oldest frame ready for decoder, but check - // sequence number and size - CleanUpOldFrames(); - it = FindOldestCompleteContinuousFrame(false); - if (it == _frameList.end()) - { - waitTimeMs = endWaitTimeMs - - _clock->MillisecondTimestamp(); - } - else - { - break; - } - } - else - { - _critSect->Leave(); - return NULL; - } - } - // Inside critSect - } - else - { - // we already have a frame reset the event - 
_frameEvent.Reset(); - } - - if (it == _frameList.end()) - { - // Even after signaling we're still missing a complete continuous frame - _critSect->Leave(); - return NULL; - } - - VCMFrameBuffer* oldestFrame = *it; - it = _frameList.erase(it); - - // Update jitter estimate - const bool retransmitted = (oldestFrame->GetNackCount() > 0); - if (retransmitted) - { - _jitterEstimate.FrameNacked(); - } - else if (oldestFrame->Length() > 0) - { - // Ignore retransmitted and empty frames. - UpdateJitterAndDelayEstimates(*oldestFrame, false); - } - - oldestFrame->SetState(kStateDecoding); - - CleanUpOldFrames(); - - if (oldestFrame->FrameType() == kVideoFrameKey) - { - _waitingForKeyFrame = false; - } - - // We have a frame - update decoded state with frame info. - _lastDecodedState.SetState(oldestFrame); - - _critSect->Leave(); - - return oldestFrame; -} - -WebRtc_UWord32 -VCMJitterBuffer::GetEstimatedJitterMS() -{ - CriticalSectionScoped cs(_critSect); - return GetEstimatedJitterMsInternal(); -} - -WebRtc_UWord32 -VCMJitterBuffer::GetEstimatedJitterMsInternal() -{ - WebRtc_UWord32 estimate = VCMJitterEstimator::OPERATING_SYSTEM_JITTER; - - // Compute RTT multiplier for estimation - // _lowRttNackThresholdMs == -1 means no FEC. 
- double rttMult = 1.0f; - if (_nackMode == kNackHybrid && (_lowRttNackThresholdMs >= 0 && - static_cast(_rttMs) > _lowRttNackThresholdMs)) - { - // from here we count on FEC - rttMult = 0.0f; - } - estimate += static_cast - (_jitterEstimate.GetJitterEstimate(rttMult) + 0.5); - return estimate; -} - -void -VCMJitterBuffer::UpdateRtt(WebRtc_UWord32 rttMs) -{ - CriticalSectionScoped cs(_critSect); - _rttMs = rttMs; - _jitterEstimate.UpdateRtt(rttMs); -} - -// wait for the first packet in the next frame to arrive -WebRtc_Word64 -VCMJitterBuffer::GetNextTimeStamp(WebRtc_UWord32 maxWaitTimeMS, - FrameType& incomingFrameType, - WebRtc_Word64& renderTimeMs) -{ - if (!_running) - { - return -1; - } - - _critSect->Enter(); - - // Finding oldest frame ready for decoder, check sequence number and size - CleanUpOldFrames(); - - FrameList::iterator it = _frameList.begin(); - - if (it == _frameList.end()) - { - _packetEvent.Reset(); - _critSect->Leave(); - - if (_packetEvent.Wait(maxWaitTimeMS) == kEventSignaled) - { - // are we closing down the Jitter buffer - if (!_running) - { - return -1; - } - _critSect->Enter(); - - CleanUpOldFrames(); - it = _frameList.begin(); - } - else - { - _critSect->Enter(); - } - } - - if (it == _frameList.end()) - { - _critSect->Leave(); - return -1; - } - // we have a frame - - // return frame type - // All layers are assumed to have the same type - incomingFrameType = (*it)->FrameType(); - - renderTimeMs = (*it)->RenderTimeMs(); - - const WebRtc_UWord32 timestamp = (*it)->TimeStamp(); - - _critSect->Leave(); - - // return current time - return timestamp; -} - -// Answers the question: -// Will the packet sequence be complete if the next frame is grabbed for -// decoding right now? That is, have we lost a frame between the last decoded -// frame and the next, or is the next -// frame missing one or more packets? 
-bool -VCMJitterBuffer::CompleteSequenceWithNextFrame() -{ - CriticalSectionScoped cs(_critSect); - // Finding oldest frame ready for decoder, check sequence number and size - CleanUpOldFrames(); - - if (_frameList.empty()) - return true; - - VCMFrameBuffer* oldestFrame = _frameList.front(); - if (_frameList.size() <= 1 && - oldestFrame->GetState() != kStateComplete) - { - // Frame not ready to be decoded. - return true; - } - if (!oldestFrame->Complete()) - { - return false; - } - - // See if we have lost a frame before this one. - if (_lastDecodedState.init()) - { - // Following start, reset or flush -> check for key frame. - if (oldestFrame->FrameType() != kVideoFrameKey) - { - return false; - } - } - else if (oldestFrame->GetLowSeqNum() == -1) - { - return false; - } - else if (!_lastDecodedState.ContinuousFrame(oldestFrame)) - { - return false; - } - return true; -} - -// Returns immediately -VCMEncodedFrame* -VCMJitterBuffer::GetFrameForDecoding() -{ - CriticalSectionScoped cs(_critSect); - if (!_running) - { - return NULL; - } - - if (WaitForNack()) - { - return GetFrameForDecodingNACK(); - } - - CleanUpOldFrames(); - - if (_frameList.empty()) { - return NULL; - } - - VCMFrameBuffer* oldestFrame = _frameList.front(); - if (_frameList.size() <= 1 && - oldestFrame->GetState() != kStateComplete) { - return NULL; - } - - // Incomplete frame pulled out from jitter buffer, - // update the jitter estimate with what we currently know. - // This frame shouldn't have been retransmitted, but if we recently - // turned off NACK this might still happen. - const bool retransmitted = (oldestFrame->GetNackCount() > 0); - if (retransmitted) - { - _jitterEstimate.FrameNacked(); - } - else if (oldestFrame->Length() > 0) - { - // Ignore retransmitted and empty frames. 
- // Update with the previous incomplete frame first - if (_waitingForCompletion.latestPacketTime >= 0) - { - UpdateJitterAndDelayEstimates(_waitingForCompletion, true); - } - // Then wait for this one to get complete - _waitingForCompletion.frameSize = oldestFrame->Length(); - _waitingForCompletion.latestPacketTime = - oldestFrame->LatestPacketTimeMs(); - _waitingForCompletion.timestamp = oldestFrame->TimeStamp(); - } - _frameList.erase(_frameList.begin()); - - // Look for previous frame loss - VerifyAndSetPreviousFrameLost(*oldestFrame); - - // The state must be changed to decoding before cleaning up zero sized - // frames to avoid empty frames being cleaned up and then given to the - // decoder. - // Set as decoding. Propagates the missingFrame bit. - oldestFrame->SetState(kStateDecoding); - - CleanUpOldFrames(); - - if (oldestFrame->FrameType() == kVideoFrameKey) - { - _waitingForKeyFrame = false; - } - - _packetsNotDecodable += oldestFrame->NotDecodablePackets(); - - // We have a frame - update decoded state with frame info. - _lastDecodedState.SetState(oldestFrame); - - return oldestFrame; -} - -VCMEncodedFrame* -VCMJitterBuffer::GetFrameForDecodingNACK() -{ - // when we use NACK we don't release non complete frames - // unless we have a complete key frame. - // In hybrid mode, we may release decodable frames (non-complete) - - // Clean up old frames and empty frames - CleanUpOldFrames(); - - // First look for a complete _continuous_ frame. - // When waiting for nack, wait for a key frame, if a continuous frame cannot - // be determined (i.e. initial decoding state). - if (_lastDecodedState.init()) { - _waitingForKeyFrame = true; - } - - // Allow for a decodable frame when in Hybrid mode. - bool enableDecodable = _nackMode == kNackHybrid ? true : false; - FrameList::iterator it = FindOldestCompleteContinuousFrame(enableDecodable); - if (it == _frameList.end()) - { - // If we didn't find one we're good with a complete key/decodable frame. 
- it = find_if(_frameList.begin(), _frameList.end(), - CompleteDecodableKeyFrameCriteria()); - if (it == _frameList.end()) - { - return NULL; - } - } - VCMFrameBuffer* oldestFrame = *it; - // Update jitter estimate - const bool retransmitted = (oldestFrame->GetNackCount() > 0); - if (retransmitted) - { - _jitterEstimate.FrameNacked(); - } - else if (oldestFrame->Length() > 0) - { - // Ignore retransmitted and empty frames. - UpdateJitterAndDelayEstimates(*oldestFrame, false); - } - it = _frameList.erase(it); - - // Look for previous frame loss - VerifyAndSetPreviousFrameLost(*oldestFrame); - - // The state must be changed to decoding before cleaning up zero sized - // frames to avoid empty frames being cleaned up and then given to the - // decoder. - oldestFrame->SetState(kStateDecoding); - - // Clean up old frames and empty frames - CleanUpOldFrames(); - - if (oldestFrame->FrameType() == kVideoFrameKey) - { - _waitingForKeyFrame = false; - } - - // We have a frame - update decoded state with frame info. - _lastDecodedState.SetState(oldestFrame); - - return oldestFrame; -} - -// Must be called under the critical section _critSect. Should never be called -// with retransmitted frames, they must be filtered out before this function is -// called. 
-void -VCMJitterBuffer::UpdateJitterAndDelayEstimates(VCMJitterSample& sample, - bool incompleteFrame) -{ - if (sample.latestPacketTime == -1) - { - return; - } - if (incompleteFrame) - { - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId), "Received incomplete frame " - "timestamp %u frame size %u at time %u", - sample.timestamp, sample.frameSize, - MaskWord64ToUWord32(sample.latestPacketTime)); - } - else - { - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId), "Received complete frame " - "timestamp %u frame size %u at time %u", - sample.timestamp, sample.frameSize, - MaskWord64ToUWord32(sample.latestPacketTime)); - } - UpdateJitterAndDelayEstimates(sample.latestPacketTime, - sample.timestamp, - sample.frameSize, - incompleteFrame); -} - -// Must be called under the critical section _critSect. Should never be -// called with retransmitted frames, they must be filtered out before this -// function is called. -void -VCMJitterBuffer::UpdateJitterAndDelayEstimates(VCMFrameBuffer& frame, - bool incompleteFrame) -{ - if (frame.LatestPacketTimeMs() == -1) - { - return; - } - // No retransmitted frames should be a part of the jitter - // estimate. 
- if (incompleteFrame) - { - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId), - "Received incomplete frame timestamp %u frame type %d " - "frame size %u at time %u, jitter estimate was %u", - frame.TimeStamp(), frame.FrameType(), frame.Length(), - MaskWord64ToUWord32(frame.LatestPacketTimeMs()), - GetEstimatedJitterMsInternal()); - } - else - { - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId),"Received complete frame " - "timestamp %u frame type %d frame size %u at time %u, " - "jitter estimate was %u", - frame.TimeStamp(), frame.FrameType(), frame.Length(), - MaskWord64ToUWord32(frame.LatestPacketTimeMs()), - GetEstimatedJitterMsInternal()); - } - UpdateJitterAndDelayEstimates(frame.LatestPacketTimeMs(), frame.TimeStamp(), - frame.Length(), incompleteFrame); -} - -// Must be called under the critical section _critSect. Should never be called -// with retransmitted frames, they must be filtered out before this function -// is called. 
-void -VCMJitterBuffer::UpdateJitterAndDelayEstimates(WebRtc_Word64 latestPacketTimeMs, - WebRtc_UWord32 timestamp, - WebRtc_UWord32 frameSize, - bool incompleteFrame) -{ - if (latestPacketTimeMs == -1) - { - return; - } - WebRtc_Word64 frameDelay; - // Calculate the delay estimate - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId), - "Packet received and sent to jitter estimate with: " - "timestamp=%u wallClock=%u", timestamp, - MaskWord64ToUWord32(latestPacketTimeMs)); - bool notReordered = _delayEstimate.CalculateDelay(timestamp, - &frameDelay, - latestPacketTimeMs); - // Filter out frames which have been reordered in time by the network - if (notReordered) - { - // Update the jitter estimate with the new samples - _jitterEstimate.UpdateEstimate(frameDelay, frameSize, incompleteFrame); - } -} - -WebRtc_UWord16* -VCMJitterBuffer::GetNackList(WebRtc_UWord16& nackSize,bool& listExtended) -{ - return CreateNackList(nackSize,listExtended); -} - -// Assume called internally with critsect -WebRtc_Word32 -VCMJitterBuffer::GetLowHighSequenceNumbers(WebRtc_Word32& lowSeqNum, - WebRtc_Word32& highSeqNum) const -{ - // TODO (mikhal/stefan): refactor to use lastDecodedState - WebRtc_Word32 i = 0; - WebRtc_Word32 seqNum = -1; - - highSeqNum = -1; - lowSeqNum = -1; - if (!_lastDecodedState.init()) - lowSeqNum = _lastDecodedState.sequence_num(); - - // find highest seq numbers - for (i = 0; i < _maxNumberOfFrames; ++i) - { - seqNum = _frameBuffers[i]->GetHighSeqNum(); - - // Ignore free / empty frames - VCMFrameBufferStateEnum state = _frameBuffers[i]->GetState(); - - if ((kStateFree != state) && - (kStateEmpty != state) && - (kStateDecoding != state) && - seqNum != -1) - { - bool wrap; - highSeqNum = LatestSequenceNumber(seqNum, highSeqNum, &wrap); - } - } // for - return 0; -} - - -WebRtc_UWord16* -VCMJitterBuffer::CreateNackList(WebRtc_UWord16& nackSize, bool& listExtended) -{ - // TODO (mikhal/stefan): Refactor to use 
lastDecodedState. - CriticalSectionScoped cs(_critSect); - int i = 0; - WebRtc_Word32 lowSeqNum = -1; - WebRtc_Word32 highSeqNum = -1; - listExtended = false; - - // Don't create list, if we won't wait for it - if (!WaitForNack()) - { - nackSize = 0; - return NULL; - } - - // Find the lowest (last decoded) sequence number and - // the highest (highest sequence number of the newest frame) - // sequence number. The nack list is a subset of the range - // between those two numbers. - GetLowHighSequenceNumbers(lowSeqNum, highSeqNum); - - // write a list of all seq num we have - if (lowSeqNum == -1 || highSeqNum == -1) - { - // This happens if we lose the first packet, nothing is popped - if (highSeqNum == -1) - { - // we have not received any packets yet - nackSize = 0; - } - else - { - // signal that we want a key frame request to be sent - nackSize = 0xffff; - } - return NULL; - } - - int numberOfSeqNum = 0; - if (lowSeqNum > highSeqNum) - { - if (lowSeqNum - highSeqNum > 0x00ff) - { - // wrap - numberOfSeqNum = (0xffff-lowSeqNum) + highSeqNum + 1; - } - } - else - { - numberOfSeqNum = highSeqNum - lowSeqNum; - } - - if (numberOfSeqNum > kNackHistoryLength) - { - // Nack list is too big, flush and try to restart. - WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId), - "Nack list too large, try to find a key frame and restart " - "from seq: %d. Lowest seq in jb %d", highSeqNum,lowSeqNum); - - // This nack size will trigger a key request... - bool foundKeyFrame = false; - - while (numberOfSeqNum > kNackHistoryLength) - { - foundKeyFrame = RecycleFramesUntilKeyFrame(); - - if (!foundKeyFrame) - { - break; - } - - // Check if we still have too many packets in JB - lowSeqNum = -1; - highSeqNum = -1; - GetLowHighSequenceNumbers(lowSeqNum, highSeqNum); - - if (highSeqNum == -1) - { - assert(lowSeqNum != -1); // This should never happen - // We can't calculate the nack list length... 
- return NULL; - } - - numberOfSeqNum = 0; - if (lowSeqNum > highSeqNum) - { - if (lowSeqNum - highSeqNum > 0x00ff) - { - // wrap - numberOfSeqNum = (0xffff-lowSeqNum) + highSeqNum + 1; - highSeqNum=lowSeqNum; - } - } - else - { - numberOfSeqNum = highSeqNum - lowSeqNum; - } - - } // end while - - if (!foundKeyFrame) - { - // No key frame in JB. - - // Set the last decoded sequence number to current high. - // This is to not get a large nack list again right away - _lastDecodedState.SetSeqNum(static_cast(highSeqNum)); - // Set to trigger key frame signal - nackSize = 0xffff; - listExtended = true; - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, -1, - "\tNo key frame found, request one. _lastDecodedSeqNum[0] " - "%d", _lastDecodedState.sequence_num()); - } - else - { - // We have cleaned up the jb and found a key frame - // The function itself has set last decoded seq. - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, -1, - "\tKey frame found. _lastDecodedSeqNum[0] %d", - _lastDecodedState.sequence_num()); - nackSize = 0; - } - - return NULL; - } - - WebRtc_UWord16 seqNumberIterator = (WebRtc_UWord16)(lowSeqNum + 1); - for (i = 0; i < numberOfSeqNum; i++) - { - _NACKSeqNumInternal[i] = seqNumberIterator; - seqNumberIterator++; - } - - // now we have a list of all sequence numbers that could have been sent - - // zero out the ones we have received - for (i = 0; i < _maxNumberOfFrames; i++) - { - // loop all created frames - // We don't need to check if frame is decoding since lowSeqNum is based - // on _lastDecodedSeqNum - // Ignore free frames - VCMFrameBufferStateEnum state = _frameBuffers[i]->GetState(); - - if ((kStateFree != state) && - (kStateEmpty != state) && - (kStateDecoding != state)) - { - // Reaching thus far means we are going to update the nack list - // When in hybrid mode, we use the soft NACKing feature. 
- if (_nackMode == kNackHybrid) - { - _frameBuffers[i]->BuildSoftNackList(_NACKSeqNumInternal, - numberOfSeqNum, - _rttMs); - } - else - { - // Used when the frame is being processed by the decoding thread - // don't need to use that info in this loop. - _frameBuffers[i]->BuildHardNackList(_NACKSeqNumInternal, - numberOfSeqNum); - } - } - } - - // compress list - int emptyIndex = -1; - for (i = 0; i < numberOfSeqNum; i++) - { - if (_NACKSeqNumInternal[i] == -1 || _NACKSeqNumInternal[i] == -2 ) - { - // this is empty - if (emptyIndex == -1) - { - // no empty index before, remember this position - emptyIndex = i; - } - } - else - { - // this is not empty - if (emptyIndex == -1) - { - // no empty index, continue - } - else - { - _NACKSeqNumInternal[emptyIndex] = _NACKSeqNumInternal[i]; - _NACKSeqNumInternal[i] = -1; - emptyIndex++; - } - } - } // for - - if (emptyIndex == -1) - { - // no empty - nackSize = numberOfSeqNum; - } - else - { - nackSize = emptyIndex; - } - - if (nackSize > _NACKSeqNumLength) - { - // Larger list: nack list was extended since the last call. - listExtended = true; - } - - for (WebRtc_UWord32 j = 0; j < nackSize; j++) - { - // Check if the list has been extended since it was last created. I.e, - // new items have been added - if (_NACKSeqNumLength > j && !listExtended) - { - WebRtc_UWord32 k = 0; - for (k = j; k < _NACKSeqNumLength; k++) - { - // Found the item in the last list, i.e, no new items found yet. - if (_NACKSeqNum[k] == (WebRtc_UWord16)_NACKSeqNumInternal[j]) - { - break; - } - } - if (k == _NACKSeqNumLength) // New item not found in last list. - { - listExtended = true; - } - } - else - { - listExtended = true; - } - _NACKSeqNum[j] = (WebRtc_UWord16)_NACKSeqNumInternal[j]; - } - - _NACKSeqNumLength = nackSize; - - return _NACKSeqNum; -} - -// Release frame when done with decoding. Should never be used to release -// frames from within the jitter buffer. 
-void -VCMJitterBuffer::ReleaseFrame(VCMEncodedFrame* frame) -{ - CriticalSectionScoped cs(_critSect); - VCMFrameBuffer* frameBuffer = static_cast(frame); - if (frameBuffer != NULL) - frameBuffer->SetState(kStateFree); -} - -WebRtc_Word64 -VCMJitterBuffer::LastPacketTime(VCMEncodedFrame* frame, - bool& retransmitted) const -{ - CriticalSectionScoped cs(_critSect); - retransmitted = (static_cast(frame)->GetNackCount() > 0); - return static_cast(frame)->LatestPacketTimeMs(); -} - -WebRtc_Word64 -VCMJitterBuffer::LastDecodedTimestamp() const -{ - CriticalSectionScoped cs(_critSect); - return _lastDecodedState.time_stamp(); -} - -// Insert packet -// Takes crit sect, and inserts packet in frame buffer, possibly does logging -VCMFrameBufferEnum -VCMJitterBuffer::InsertPacket(VCMEncodedFrame* buffer, const VCMPacket& packet) -{ - CriticalSectionScoped cs(_critSect); - WebRtc_Word64 nowMs = _clock->MillisecondTimestamp(); - VCMFrameBufferEnum bufferReturn = kSizeError; - VCMFrameBufferEnum ret = kSizeError; - VCMFrameBuffer* frame = static_cast(buffer); - - // We are keeping track of the first seq num, the latest seq num and - // the number of wraps to be able to calculate how many packets we expect. - if (_firstPacket) - { - // Now it's time to start estimating jitter - // reset the delay estimate. - _delayEstimate.Reset(_clock->MillisecondTimestamp()); - _firstPacket = false; - } - - // Empty packets may bias the jitter estimate (lacking size component), - // therefore don't let empty packet trigger the following updates: - if (packet.frameType != kFrameEmpty) - { - if (_waitingForCompletion.timestamp == packet.timestamp) - { - // This can get bad if we have a lot of duplicate packets, - // we will then count some packet multiple times. 
- _waitingForCompletion.frameSize += packet.sizeBytes; - _waitingForCompletion.latestPacketTime = nowMs; - } - else if (_waitingForCompletion.latestPacketTime >= 0 && - _waitingForCompletion.latestPacketTime + 2000 <= nowMs) - { - // A packet should never be more than two seconds late - UpdateJitterAndDelayEstimates(_waitingForCompletion, true); - _waitingForCompletion.latestPacketTime = -1; - _waitingForCompletion.frameSize = 0; - _waitingForCompletion.timestamp = 0; - } - } - - if (frame != NULL) - { - VCMFrameBufferStateEnum state = frame->GetState(); - _lastDecodedState.UpdateOldPacket(&packet); - // Insert packet - // Check for first packet - // High sequence number will be -1 if neither an empty packet nor - // a media packet has been inserted. - bool first = (frame->GetHighSeqNum() == -1); - // When in Hybrid mode, we allow for a decodable state - // Note: Under current version, a decodable frame will never be - // triggered, as the body of the function is empty. - // TODO (mikhal): Update when decodable is enabled. - bufferReturn = frame->InsertPacket(packet, nowMs, - _nackMode == kNackHybrid, - _rttMs); - ret = bufferReturn; - - if (bufferReturn > 0) - { - _incomingBitCount += packet.sizeBytes << 3; - - // Has this packet been nacked or is it about to be nacked? - if (IsPacketRetransmitted(packet)) - { - frame->IncrementNackCount(); - } - - // Insert each frame once on the arrival of the first packet - // belonging to that frame (media or empty) - if (state == kStateEmpty && first) - { - ret = kFirstPacket; - FrameList::reverse_iterator rit = std::find_if( - _frameList.rbegin(), _frameList.rend(), - FrameSmallerTimestamp(frame->TimeStamp())); - _frameList.insert(rit.base(), frame); - } - } - } - switch(bufferReturn) - { - case kStateError: - case kTimeStampError: - case kSizeError: - { - if (frame != NULL) - { - // Will be released when it gets old. 
- frame->Reset(); - frame->SetState(kStateEmpty); - } - break; - } - case kCompleteSession: - { - // Only update return value for a JB flush indicator. - if (UpdateFrameState(frame) == kFlushIndicator) - ret = kFlushIndicator; - // Signal that we have a received packet - _packetEvent.Set(); - break; - } - case kDecodableSession: - case kIncomplete: - { - // Signal that we have a received packet - _packetEvent.Set(); - break; - } - case kNoError: - case kDuplicatePacket: - { - break; - } - default: - { - assert(false && "JitterBuffer::InsertPacket: Undefined value"); - } - } - return ret; -} - -// Must be called from within _critSect -void -VCMJitterBuffer::UpdateOldJitterSample(const VCMPacket& packet) -{ - if (_waitingForCompletion.timestamp != packet.timestamp && - LatestTimestamp(_waitingForCompletion.timestamp, packet.timestamp, - NULL) == packet.timestamp) - { - // This is a newer frame than the one waiting for completion. - _waitingForCompletion.frameSize = packet.sizeBytes; - _waitingForCompletion.timestamp = packet.timestamp; - } - else - { - // This can get bad if we have a lot of duplicate packets, - // we will then count some packet multiple times. 
- _waitingForCompletion.frameSize += packet.sizeBytes; - _jitterEstimate.UpdateMaxFrameSize(_waitingForCompletion.frameSize); - } -} - -// Must be called from within _critSect -bool -VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const -{ - if (_NACKSeqNumLength > 0) - { - for (WebRtc_UWord16 i = 0; i < _NACKSeqNumLength; i++) - { - if (packet.seqNum == _NACKSeqNum[i]) - { - return true; - } - } - } - return false; -} - -// Get nack status (enabled/disabled) -VCMNackMode -VCMJitterBuffer::GetNackMode() const -{ - CriticalSectionScoped cs(_critSect); - return _nackMode; -} - -// Set NACK mode -void -VCMJitterBuffer::SetNackMode(VCMNackMode mode, - int lowRttNackThresholdMs, - int highRttNackThresholdMs) -{ - CriticalSectionScoped cs(_critSect); - _nackMode = mode; - assert(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1); - assert(highRttNackThresholdMs == -1 || - lowRttNackThresholdMs <= highRttNackThresholdMs); - assert(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1); - _lowRttNackThresholdMs = lowRttNackThresholdMs; - _highRttNackThresholdMs = highRttNackThresholdMs; - if (_nackMode == kNoNack) - { - _jitterEstimate.ResetNackCount(); - } -} - - -// Recycle oldest frames up to a key frame, used if JB is completely full -bool -VCMJitterBuffer::RecycleFramesUntilKeyFrame() -{ - // Remove up to oldest key frame - while (_frameList.size() > 0) - { - // Throw at least one frame. - _dropCount++; - FrameList::iterator it = _frameList.begin(); - WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding, - VCMId(_vcmId, _receiverId), - "Jitter buffer drop count:%d, lowSeq %d", _dropCount, - (*it)->GetLowSeqNum()); - RecycleFrame(*it); - it = _frameList.erase(it); - if (it != _frameList.end() && (*it)->FrameType() == kVideoFrameKey) - { - // Fake the lastDecodedState to match this key frame. 
- _lastDecodedState.SetStateOneBack(*it); - return true; - } - } - _waitingForKeyFrame = true; - _lastDecodedState.Reset(); // TODO (mikhal): no sync - return false; -} - -// Must be called under the critical section _critSect. +// Must be called under the critical section |crit_sect_|. void VCMJitterBuffer::CleanUpOldFrames() { - while (_frameList.size() > 0) { - VCMFrameBuffer* oldestFrame = _frameList.front(); - bool nextFrameEmpty = (_lastDecodedState.ContinuousFrame(oldestFrame) && - oldestFrame->GetState() == kStateEmpty); - if (_lastDecodedState.IsOldFrame(oldestFrame) || - (nextFrameEmpty && _frameList.size() > 1)) { - ReleaseFrameInternal(_frameList.front()); - _frameList.erase(_frameList.begin()); + while (frame_list_.size() > 0) { + VCMFrameBuffer* oldest_frame = frame_list_.front(); + bool next_frame_empty = + (last_decoded_state_.ContinuousFrame(oldest_frame) && + oldest_frame->GetState() == kStateEmpty); + if (last_decoded_state_.IsOldFrame(oldest_frame) || + (next_frame_empty && frame_list_.size() > 1)) { + ReleaseFrameIfNotDecoding(frame_list_.front()); + frame_list_.erase(frame_list_.begin()); } else { break; } } } -// Used in GetFrameForDecoding -void VCMJitterBuffer::VerifyAndSetPreviousFrameLost(VCMFrameBuffer& frame) { - frame.MakeSessionDecodable(); // Make sure the session can be decoded. - if (frame.FrameType() == kVideoFrameKey) +void VCMJitterBuffer::VerifyAndSetPreviousFrameLost(VCMFrameBuffer* frame) { + assert(frame); + frame->MakeSessionDecodable(); // Make sure the session can be decoded. 
+ if (frame->FrameType() == kVideoFrameKey) return; - if (!_lastDecodedState.ContinuousFrame(&frame)) - frame.SetPreviousFrameLoss(); + if (!last_decoded_state_.ContinuousFrame(frame)) + frame->SetPreviousFrameLoss(); } -bool -VCMJitterBuffer::WaitForNack() -{ - // NACK disabled -> can't wait - if (_nackMode == kNoNack) - { - return false; - } - // NACK only -> always wait - else if (_nackMode == kNackInfinite) - { - return true; - } - // else: hybrid mode, evaluate - // RTT high, don't wait - if (_highRttNackThresholdMs >= 0 && - _rttMs >= static_cast(_highRttNackThresholdMs)) - { - return false; - } - // Either NACK only or hybrid - return true; +// Must be called from within |crit_sect_|. +bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const { + if (nack_seq_nums_length_ > 0) { + for (unsigned int i = 0; i < nack_seq_nums_length_; i++) { + if (packet.seqNum == nack_seq_nums_[i]) { + return true; + } + } + } + return false; } +// Must be called under the critical section |crit_sect_|. Should never be +// called with retransmitted frames, they must be filtered out before this +// function is called. 
+void VCMJitterBuffer::UpdateJitterEstimate(const VCMJitterSample& sample, + bool incomplete_frame) { + if (sample.latest_packet_time == -1) { + return; + } + if (incomplete_frame) { + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), "Received incomplete frame " + "timestamp %u frame size %u at time %u", + sample.timestamp, sample.frame_size, + MaskWord64ToUWord32(sample.latest_packet_time)); + } else { + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), "Received complete frame " + "timestamp %u frame size %u at time %u", + sample.timestamp, sample.frame_size, + MaskWord64ToUWord32(sample.latest_packet_time)); + } + UpdateJitterEstimate(sample.latest_packet_time, sample.timestamp, + sample.frame_size, incomplete_frame); +} + +// Must be called under the critical section crit_sect_. Should never be +// called with retransmitted frames, they must be filtered out before this +// function is called. +void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame, + bool incomplete_frame) { + if (frame.LatestPacketTimeMs() == -1) { + return; + } + // No retransmitted frames should be a part of the jitter + // estimate. 
+ if (incomplete_frame) { + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), + "Received incomplete frame timestamp %u frame type %d " + "frame size %u at time %u, jitter estimate was %u", + frame.TimeStamp(), frame.FrameType(), frame.Length(), + MaskWord64ToUWord32(frame.LatestPacketTimeMs()), + EstimatedJitterMs()); + } else { + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), "Received complete frame " + "timestamp %u frame type %d frame size %u at time %u, " + "jitter estimate was %u", + frame.TimeStamp(), frame.FrameType(), frame.Length(), + MaskWord64ToUWord32(frame.LatestPacketTimeMs()), + EstimatedJitterMs()); + } + UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.TimeStamp(), + frame.Length(), incomplete_frame); +} + +// Must be called under the critical section |crit_sect_|. Should never be +// called with retransmitted frames, they must be filtered out before this +// function is called. +void VCMJitterBuffer::UpdateJitterEstimate( + int64_t latest_packet_time_ms, + uint32_t timestamp, + unsigned int frame_size, + bool incomplete_frame) { + if (latest_packet_time_ms == -1) { + return; + } + int64_t frame_delay; + // Calculate the delay estimate + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), + "Packet received and sent to jitter estimate with: " + "timestamp=%u wall_clock=%u", timestamp, + MaskWord64ToUWord32(latest_packet_time_ms)); + bool not_reordered = inter_frame_delay_.CalculateDelay(timestamp, + &frame_delay, + latest_packet_time_ms); + // Filter out frames which have been reordered in time by the network + if (not_reordered) { + // Update the jitter estimate with the new samples + jitter_estimate_.UpdateEstimate(frame_delay, frame_size, incomplete_frame); + } +} + +// Assumed to be called internally from inside a critical section. 
+void VCMJitterBuffer::GetLowHighSequenceNumbers( + int32_t* low_seq_num, int32_t* high_seq_num) const { + assert(low_seq_num); + assert(high_seq_num); + // TODO(mikhal/stefan): Refactor to use last_decoded_state. + int i = 0; + int32_t seq_num = -1; + + *high_seq_num = -1; + *low_seq_num = -1; + if (!last_decoded_state_.init()) + *low_seq_num = last_decoded_state_.sequence_num(); + + // find highest seq numbers + for (i = 0; i < max_number_of_frames_; ++i) { + seq_num = frame_buffers_[i]->GetHighSeqNum(); + + // Ignore free / empty frames + VCMFrameBufferStateEnum state = frame_buffers_[i]->GetState(); + + if ((kStateFree != state) && + (kStateEmpty != state) && + (kStateDecoding != state) && + seq_num != -1) { + bool wrap; + *high_seq_num = LatestSequenceNumber(seq_num, *high_seq_num, &wrap); + } + } +} + +bool VCMJitterBuffer::WaitForRetransmissions() { + if (nack_mode_ == kNoNack) { + // NACK disabled -> don't wait for retransmissions. + return false; + } else if (nack_mode_ == kNackInfinite) { + // NACK only -> always wait for retransmissions. + return true; + } + // Hybrid mode. Evaluate if the RTT is high, and in that case we don't wait + // for retransmissions. + if (high_rtt_nack_threshold_ms_ >= 0 && + rtt_ms_ >= static_cast(high_rtt_nack_threshold_ms_)) { + return false; + } + return true; +} } // namespace webrtc diff --git a/src/modules/video_coding/main/source/jitter_buffer.h b/src/modules/video_coding/main/source/jitter_buffer.h index d9511870d1..5b30fb8ce9 100644 --- a/src/modules/video_coding/main/source/jitter_buffer.h +++ b/src/modules/video_coding/main/source/jitter_buffer.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_ -#define WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_ +#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_ +#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_ #include @@ -24,14 +24,12 @@ #include "system_wrappers/interface/critical_section_wrapper.h" #include "typedefs.h" -namespace webrtc -{ +namespace webrtc { -enum VCMNackMode -{ - kNackInfinite, - kNackHybrid, - kNoNack +enum VCMNackMode { + kNackInfinite, + kNackHybrid, + kNoNack }; typedef std::list FrameList; @@ -42,218 +40,227 @@ class VCMFrameBuffer; class VCMPacket; class VCMEncodedFrame; -class VCMJitterSample -{ -public: - VCMJitterSample() : timestamp(0), frameSize(0), latestPacketTime(-1) {} - WebRtc_UWord32 timestamp; - WebRtc_UWord32 frameSize; - WebRtc_Word64 latestPacketTime; +struct VCMJitterSample { + VCMJitterSample() : timestamp(0), frame_size(0), latest_packet_time(-1) {} + uint32_t timestamp; + uint32_t frame_size; + int64_t latest_packet_time; }; -class VCMJitterBuffer -{ -public: - VCMJitterBuffer(TickTimeBase* clock, - WebRtc_Word32 vcmId = -1, - WebRtc_Word32 receiverId = -1, - bool master = true); - virtual ~VCMJitterBuffer(); +class VCMJitterBuffer { + public: + VCMJitterBuffer(TickTimeBase* clock, int vcm_id = -1, int receiver_id = -1, + bool master = true); + virtual ~VCMJitterBuffer(); - void CopyFrom(const VCMJitterBuffer& rhs); + // Makes |this| a deep copy of |rhs|. + void CopyFrom(const VCMJitterBuffer& rhs); - // We need a start and stop to break out of the wait event - // used in GetCompleteFrameForDecoding - void Start(); - void Stop(); - bool Running() const; + // Initializes and starts jitter buffer. + void Start(); - // Empty the Jitter buffer of all its data - void Flush(); + // Signals all internal events and stops the jitter buffer. 
+ void Stop(); - // Statistics, Get received key and delta frames - WebRtc_Word32 GetFrameStatistics(WebRtc_UWord32& receivedDeltaFrames, - WebRtc_UWord32& receivedKeyFrames) const; + // Returns true if the jitter buffer is running. + bool Running() const; - // The number of packets discarded by the jitter buffer because the decoder - // won't be able to decode them. - WebRtc_UWord32 NumNotDecodablePackets() const; - // Get number of packets discarded by the jitter buffer - WebRtc_UWord32 DiscardedPackets() const; + // Empty the jitter buffer of all its data. + void Flush(); - // Statistics, Calculate frame and bit rates - WebRtc_Word32 GetUpdate(WebRtc_UWord32& frameRate, WebRtc_UWord32& bitRate); + // Get the number of received key and delta frames since the jitter buffer + // was started. + void FrameStatistics(uint32_t* received_delta_frames, + uint32_t* received_key_frames) const; - // Wait for the first packet in the next frame to arrive, blocks - // for <= maxWaitTimeMS ms - WebRtc_Word64 GetNextTimeStamp(WebRtc_UWord32 maxWaitTimeMS, - FrameType& incomingFrameType, - WebRtc_Word64& renderTimeMs); + // The number of packets discarded by the jitter buffer because the decoder + // won't be able to decode them. + int num_not_decodable_packets() const; - // Will the packet sequence be complete if the next frame is grabbed - // for decoding right now? That is, have we lost a frame between the - // last decoded frame and the next, or is the next frame missing one - // or more packets? - bool CompleteSequenceWithNextFrame(); + // Gets number of packets discarded by the jitter buffer. + int num_discarded_packets() const; - // TODO (mikhal/stefan): Merge all GetFrameForDecoding into one. - // Wait maxWaitTimeMS for a complete frame to arrive. After timeout NULL - // is returned. - VCMEncodedFrame* GetCompleteFrameForDecoding(WebRtc_UWord32 maxWaitTimeMS); + // Statistics, Calculate frame and bit rates. 
+ void IncomingRateStatistics(unsigned int* framerate, + unsigned int* bitrate); - // Get a frame for decoding (even an incomplete) without delay. - VCMEncodedFrame* GetFrameForDecoding(); + // Waits for the first packet in the next frame to arrive and then returns + // the timestamp of that frame. |incoming_frame_type| and |render_time_ms| are + // set to the frame type and render time of the next frame. + // Blocks for up to |max_wait_time_ms| ms. Returns -1 if no packet has arrived + // after |max_wait_time_ms| ms. + int64_t NextTimestamp(uint32_t max_wait_time_ms, + FrameType* incoming_frame_type, + int64_t* render_time_ms); - VCMEncodedFrame* GetFrameForDecodingNACK(); + // Checks if the packet sequence will be complete if the next frame would be + // grabbed for decoding. That is, if a frame has been lost between the + // last decoded frame and the next, or if the next frame is missing one + // or more packets. + bool CompleteSequenceWithNextFrame(); - // Release frame (when done with decoding) - void ReleaseFrame(VCMEncodedFrame* frame); + // TODO(mikhal/stefan): Merge all GetFrameForDecoding into one. + // Wait |max_wait_time_ms| for a complete frame to arrive. After timeout NULL + // is returned. + VCMEncodedFrame* GetCompleteFrameForDecoding(uint32_t max_wait_time_ms); - // Get frame to use for this timestamp - WebRtc_Word32 GetFrame(const VCMPacket& packet, VCMEncodedFrame*&); - VCMEncodedFrame* GetFrame(const VCMPacket& packet); // deprecated + // Get a frame for decoding (even an incomplete) without delay. + VCMEncodedFrame* GetFrameForDecoding(); - // Returns the time in ms when the latest packet was inserted into the frame. - // Retransmitted is set to true if any of the packets belonging to the frame - // has been retransmitted. - WebRtc_Word64 LastPacketTime(VCMEncodedFrame* frame, - bool& retransmitted) const; + // Releases a frame returned from the jitter buffer, should be called when + // done with decoding. 
+ void ReleaseFrame(VCMEncodedFrame* frame); - // Insert a packet into a frame - VCMFrameBufferEnum InsertPacket(VCMEncodedFrame* frame, - const VCMPacket& packet); + // Returns the frame assigned to this timestamp. + int GetFrame(const VCMPacket& packet, VCMEncodedFrame*&); + VCMEncodedFrame* GetFrame(const VCMPacket& packet); // Deprecated. - // Sync - WebRtc_UWord32 GetEstimatedJitterMS(); - void UpdateRtt(WebRtc_UWord32 rttMs); + // Returns the time in ms when the latest packet was inserted into the frame. + // Retransmitted is set to true if any of the packets belonging to the frame + // has been retransmitted. + int64_t LastPacketTime(VCMEncodedFrame* frame, bool* retransmitted) const; - // NACK - // Set the NACK mode. "highRttNackThreshold" is an RTT threshold in ms above - // which NACK will be disabled if the NACK mode is "kNackHybrid", - // -1 meaning that NACK is always enabled in the Hybrid mode. - // "lowRttNackThreshold" is an RTT threshold in ms below which we expect to - // rely on NACK only, and therefore are using larger buffers to have time to - // wait for retransmissions. - void SetNackMode(VCMNackMode mode, - int lowRttNackThresholdMs, - int highRttNackThresholdMs); - VCMNackMode GetNackMode() const; // Get nack mode - // Get list of missing sequence numbers (size in number of elements) - WebRtc_UWord16* GetNackList(WebRtc_UWord16& nackSize, - bool& listExtended); + // Inserts a packet into a frame returned from GetFrame(). + VCMFrameBufferEnum InsertPacket(VCMEncodedFrame* frame, + const VCMPacket& packet); - WebRtc_Word64 LastDecodedTimestamp() const; + // Returns the estimated jitter in milliseconds. + uint32_t EstimatedJitterMs(); -private: - // Misc help functions - // Recycle (release) frame, used if we didn't receive whole frame - void RecycleFrame(VCMFrameBuffer* frame); - void ReleaseFrameInternal(VCMFrameBuffer* frame); - // Flush and reset the jitter buffer. Call under critical section. 
- void FlushInternal(); + // Updates the round-trip time estimate. + void UpdateRtt(uint32_t rtt_ms); - // Help functions for insert packet - // Get empty frame, creates new (i.e. increases JB size) if necessary - VCMFrameBuffer* GetEmptyFrame(); - // Recycle oldest frames up to a key frame, used if JB is completely full - bool RecycleFramesUntilKeyFrame(); - // Update frame state - // (set as complete or reconstructable if conditions are met) - VCMFrameBufferEnum UpdateFrameState(VCMFrameBuffer* frameListItem); + // Set the NACK mode. |highRttNackThreshold| is an RTT threshold in ms above + // which NACK will be disabled if the NACK mode is |kNackHybrid|, -1 meaning + // that NACK is always enabled in the hybrid mode. + // |lowRttNackThreshold| is an RTT threshold in ms below which we expect to + // rely on NACK only, and therefore are using larger buffers to have time to + // wait for retransmissions. + void SetNackMode(VCMNackMode mode, int low_rtt_nack_threshold_ms, + int high_rtt_nack_threshold_ms); - // Help functions for getting a frame - // Find oldest complete frame, used for getting next frame to decode - // When enabled, will return a decodable frame - FrameList::iterator FindOldestCompleteContinuousFrame(bool enableDecodable); + // Returns the current NACK mode. + VCMNackMode nack_mode() const; - void CleanUpOldFrames(); + // Creates a list of missing sequence numbers. 
+ uint16_t* CreateNackList(uint16_t* nack_list_size, bool* list_extended); - void VerifyAndSetPreviousFrameLost(VCMFrameBuffer& frame); - bool IsPacketRetransmitted(const VCMPacket& packet) const; + int64_t LastDecodedTimestamp() const; - void UpdateJitterAndDelayEstimates(VCMJitterSample& sample, - bool incompleteFrame); - void UpdateJitterAndDelayEstimates(VCMFrameBuffer& frame, - bool incompleteFrame); - void UpdateJitterAndDelayEstimates(WebRtc_Word64 latestPacketTimeMs, - WebRtc_UWord32 timestamp, - WebRtc_UWord32 frameSize, - bool incompleteFrame); - void UpdateOldJitterSample(const VCMPacket& packet); - WebRtc_UWord32 GetEstimatedJitterMsInternal(); + private: + // In NACK-only mode this function doesn't return or release non-complete + // frames unless we have a complete key frame. In hybrid mode, we may release + // "decodable", incomplete frames. + VCMEncodedFrame* GetFrameForDecodingNACK(); - // NACK help - WebRtc_UWord16* CreateNackList(WebRtc_UWord16& nackSize, - bool& listExtended); - WebRtc_Word32 GetLowHighSequenceNumbers(WebRtc_Word32& lowSeqNum, - WebRtc_Word32& highSeqNum) const; + void ReleaseFrameIfNotDecoding(VCMFrameBuffer* frame); - // Decide whether should wait for NACK (mainly relevant for hybrid mode) - bool WaitForNack(); + // Gets an empty frame, creating a new frame if necessary (i.e. increases + // jitter buffer size). 
+ VCMFrameBuffer* GetEmptyFrame(); - WebRtc_Word32 _vcmId; - WebRtc_Word32 _receiverId; - TickTimeBase* _clock; - // If we are running (have started) or not - bool _running; - CriticalSectionWrapper* _critSect; - bool _master; - // Event to signal when we have a frame ready for decoder - VCMEvent _frameEvent; - // Event to signal when we have received a packet - VCMEvent _packetEvent; - // Number of allocated frames - WebRtc_Word32 _maxNumberOfFrames; - // Array of pointers to the frames in JB - VCMFrameBuffer* _frameBuffers[kMaxNumberOfFrames]; - FrameList _frameList; + // Recycles oldest frames until a key frame is found. Used if jitter buffer is + // completely full. Returns true if a key frame was found. + bool RecycleFramesUntilKeyFrame(); - // timing - VCMDecodingState _lastDecodedState; - WebRtc_UWord32 _packetsNotDecodable; + // Sets the state of |frame| to complete if it's not too old to be decoded. + // Also updates the frame statistics. Signals the |frame_event| if this is + // the next frame to be decoded. 
+ VCMFrameBufferEnum UpdateFrameState(VCMFrameBuffer* frame); - // Statistics - // Frame counter for each type (key, delta, golden, key-delta) - WebRtc_UWord8 _receiveStatistics[4]; - // Latest calculated frame rates of incoming stream - WebRtc_UWord8 _incomingFrameRate; - // Frame counter, reset in GetUpdate - WebRtc_UWord32 _incomingFrameCount; - // Real time for last _frameCount reset - WebRtc_Word64 _timeLastIncomingFrameCount; - // Received bits counter, reset in GetUpdate - WebRtc_UWord32 _incomingBitCount; - WebRtc_UWord32 _incomingBitRate; - WebRtc_UWord32 _dropCount; // Frame drop counter - // Number of frames in a row that have been too old - WebRtc_UWord32 _numConsecutiveOldFrames; - // Number of packets in a row that have been too old - WebRtc_UWord32 _numConsecutiveOldPackets; - // Number of packets discarded by the jitter buffer - WebRtc_UWord32 _discardedPackets; + // Finds the oldest complete frame, used for getting next frame to decode. + // Can return a decodable, incomplete frame if |enable_decodable| is true. + FrameList::iterator FindOldestCompleteContinuousFrame(bool enable_decodable); - // Filters for estimating jitter - VCMJitterEstimator _jitterEstimate; - // Calculates network delays used for jitter calculations - VCMInterFrameDelay _delayEstimate; - VCMJitterSample _waitingForCompletion; - WebRtc_UWord32 _rttMs; + void CleanUpOldFrames(); - // NACK - VCMNackMode _nackMode; - int _lowRttNackThresholdMs; - int _highRttNackThresholdMs; - // Holds the internal nack list (the missing sequence numbers) - WebRtc_Word32 _NACKSeqNumInternal[kNackHistoryLength]; - WebRtc_UWord16 _NACKSeqNum[kNackHistoryLength]; - WebRtc_UWord32 _NACKSeqNumLength; - bool _waitingForKeyFrame; + // Sets the "decodable" and "frame loss" flags of a frame depending on which + // packets have been received and which are missing. + // A frame is "decodable" if enough packets of that frame has been received + // for it to be usable by the decoder. 
+ // A frame has the "frame loss" flag set if packets are missing after the + // last decoded frame and before |frame|. + void VerifyAndSetPreviousFrameLost(VCMFrameBuffer* frame); - bool _firstPacket; + // Returns true if |packet| is likely to have been retransmitted. + bool IsPacketRetransmitted(const VCMPacket& packet) const; - DISALLOW_COPY_AND_ASSIGN(VCMJitterBuffer); + // The following three functions update the jitter estimate with the + // payload size, receive time and RTP timestamp of a frame. + void UpdateJitterEstimate(const VCMJitterSample& sample, + bool incomplete_frame); + void UpdateJitterEstimate(const VCMFrameBuffer& frame, bool incomplete_frame); + void UpdateJitterEstimate(int64_t latest_packet_time_ms, + uint32_t timestamp, + unsigned int frame_size, + bool incomplete_frame); + + // Returns the lowest and highest known sequence numbers, where the lowest is + // the last decoded sequence number if a frame has been decoded. + // -1 is returned if a sequence number cannot be determined. + void GetLowHighSequenceNumbers(int32_t* low_seq_num, + int32_t* high_seq_num) const; + + // Returns true if we should wait for retransmissions, false otherwise. + bool WaitForRetransmissions(); + + int vcm_id_; + int receiver_id_; + TickTimeBase* clock_; + // If we are running (have started) or not. + bool running_; + CriticalSectionWrapper* crit_sect_; + bool master_; + // Event to signal when we have a frame ready for decoder. + VCMEvent frame_event_; + // Event to signal when we have received a packet. + VCMEvent packet_event_; + // Number of allocated frames. + int max_number_of_frames_; + // Array of pointers to the frames in jitter buffer. + VCMFrameBuffer* frame_buffers_[kMaxNumberOfFrames]; + FrameList frame_list_; + VCMDecodingState last_decoded_state_; + bool first_packet_; + + // Statistics. + int num_not_decodable_packets_; + // Frame counter for each type (key, delta, golden, key-delta). 
+ unsigned int receive_statistics_[4]; + // Latest calculated frame rates of incoming stream. + unsigned int incoming_frame_rate_; + unsigned int incoming_frame_count_; + int64_t time_last_incoming_frame_count_; + unsigned int incoming_bit_count_; + unsigned int incoming_bit_rate_; + unsigned int drop_count_; // Frame drop counter. + // Number of frames in a row that have been too old. + int num_consecutive_old_frames_; + // Number of packets in a row that have been too old. + int num_consecutive_old_packets_; + // Number of packets discarded by the jitter buffer. + int num_discarded_packets_; + + // Jitter estimation. + // Filter for estimating jitter. + VCMJitterEstimator jitter_estimate_; + // Calculates network delays used for jitter calculations. + VCMInterFrameDelay inter_frame_delay_; + VCMJitterSample waiting_for_completion_; + WebRtc_UWord32 rtt_ms_; + + // NACK and retransmissions. + VCMNackMode nack_mode_; + int low_rtt_nack_threshold_ms_; + int high_rtt_nack_threshold_ms_; + // Holds the internal NACK list (the missing sequence numbers). 
+ int32_t nack_seq_nums_internal_[kNackHistoryLength]; + uint16_t nack_seq_nums_[kNackHistoryLength]; + unsigned int nack_seq_nums_length_; + bool waiting_for_key_frame_; + + DISALLOW_COPY_AND_ASSIGN(VCMJitterBuffer); }; +} // namespace webrtc -} // namespace webrtc - -#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_ +#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_ diff --git a/src/modules/video_coding/main/source/jitter_buffer_unittest.cc b/src/modules/video_coding/main/source/jitter_buffer_unittest.cc index 6e48ea7b02..88ca597cef 100644 --- a/src/modules/video_coding/main/source/jitter_buffer_unittest.cc +++ b/src/modules/video_coding/main/source/jitter_buffer_unittest.cc @@ -287,7 +287,8 @@ TEST_F(TestJitterBufferNack, TestNackListFull) { uint16_t nack_list_length = kNackHistoryLength; bool extended; - uint16_t* nack_list = jitter_buffer_->GetNackList(nack_list_length, extended); + uint16_t* nack_list = jitter_buffer_->CreateNackList(&nack_list_length, + &extended); // Verify that the jitter buffer requests a key frame. EXPECT_TRUE(nack_list_length == 0xffff && nack_list == NULL); @@ -302,14 +303,14 @@ TEST_F(TestJitterBufferNack, TestNackBeforeDecode) { InsertFrame(kVideoFrameDelta); uint16_t nack_list_size = 0; bool extended = false; - uint16_t* list = jitter_buffer_->GetNackList(nack_list_size, extended); + uint16_t* list = jitter_buffer_->CreateNackList(&nack_list_size, &extended); // No list generated, and a key frame request is signaled. 
EXPECT_TRUE(list == NULL); EXPECT_EQ(0xFFFF, nack_list_size); } TEST_F(TestJitterBufferNack, TestNormalOperation) { - EXPECT_EQ(kNackInfinite, jitter_buffer_->GetNackMode()); + EXPECT_EQ(kNackInfinite, jitter_buffer_->nack_mode()); InsertFrame(kVideoFrameKey); EXPECT_TRUE(DecodeFrame()); @@ -335,7 +336,7 @@ TEST_F(TestJitterBufferNack, TestNormalOperation) { EXPECT_FALSE(DecodeFrame()); uint16_t nack_list_size = 0; bool extended = false; - uint16_t* list = jitter_buffer_->GetNackList(nack_list_size, extended); + uint16_t* list = jitter_buffer_->CreateNackList(&nack_list_size, &extended); // Verify the NACK list. const int kExpectedNackSize = 9; ASSERT_EQ(kExpectedNackSize, nack_list_size); @@ -365,7 +366,7 @@ TEST_F(TestJitterBufferNack, TestNormalOperationWrap) { EXPECT_FALSE(DecodeCompleteFrame()); uint16_t nack_list_size = 0; bool extended = false; - uint16_t* list = jitter_buffer_->GetNackList(nack_list_size, extended); + uint16_t* list = jitter_buffer_->CreateNackList(&nack_list_size, &extended); // Verify the NACK list. 
const int kExpectedNackSize = 10; ASSERT_EQ(kExpectedNackSize, nack_list_size); diff --git a/src/modules/video_coding/main/source/receiver.cc b/src/modules/video_coding/main/source/receiver.cc index 6be53361c2..f9b81dc34f 100644 --- a/src/modules/video_coding/main/source/receiver.cc +++ b/src/modules/video_coding/main/source/receiver.cc @@ -200,9 +200,9 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs, FrameType incomingFrameType = kVideoFrameDelta; nextRenderTimeMs = -1; const WebRtc_Word64 startTimeMs = _clock->MillisecondTimestamp(); - WebRtc_Word64 ret = _jitterBuffer.GetNextTimeStamp(maxWaitTimeMs, - incomingFrameType, - nextRenderTimeMs); + WebRtc_Word64 ret = _jitterBuffer.NextTimestamp(maxWaitTimeMs, + &incomingFrameType, + &nextRenderTimeMs); if (ret < 0) { // No timestamp in jitter buffer at the moment @@ -211,7 +211,7 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs, const WebRtc_UWord32 timeStamp = static_cast(ret); // Update the timing - _timing.SetRequiredDelay(_jitterBuffer.GetEstimatedJitterMS()); + _timing.SetRequiredDelay(_jitterBuffer.EstimatedJitterMs()); _timing.UpdateCurrentDelay(timeStamp); const WebRtc_Word32 tempWaitTime = maxWaitTimeMs - @@ -233,7 +233,7 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(WebRtc_UWord16 maxWaitTimeMs, { bool retransmitted = false; const WebRtc_Word64 lastPacketTimeMs = - _jitterBuffer.LastPacketTime(frame, retransmitted); + _jitterBuffer.LastPacketTime(frame, &retransmitted); if (lastPacketTimeMs >= 0 && !retransmitted) { // We don't want to include timestamps which have suffered from retransmission @@ -367,20 +367,21 @@ VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame) WebRtc_Word32 VCMReceiver::ReceiveStatistics(WebRtc_UWord32& bitRate, WebRtc_UWord32& frameRate) { - const WebRtc_Word32 ret = _jitterBuffer.GetUpdate(frameRate, bitRate); + _jitterBuffer.IncomingRateStatistics(&frameRate, &bitRate); bitRate /= 1000; // Should be in kbps - return 
ret; + return 0; } WebRtc_Word32 VCMReceiver::ReceivedFrameCount(VCMFrameCount& frameCount) const { - return _jitterBuffer.GetFrameStatistics(frameCount.numDeltaFrames, - frameCount.numKeyFrames); + _jitterBuffer.FrameStatistics(&frameCount.numDeltaFrames, + &frameCount.numKeyFrames); + return 0; } WebRtc_UWord32 VCMReceiver::DiscardedPackets() const { - return _jitterBuffer.DiscardedPackets(); + return _jitterBuffer.num_discarded_packets(); } void @@ -399,7 +400,7 @@ VCMNackMode VCMReceiver::NackMode() const { CriticalSectionScoped cs(_critSect); - return _jitterBuffer.GetNackMode(); + return _jitterBuffer.nack_mode(); } VCMNackStatus @@ -407,7 +408,8 @@ VCMReceiver::NackList(WebRtc_UWord16* nackList, WebRtc_UWord16& size) { bool extended = false; WebRtc_UWord16 nackListSize = 0; - WebRtc_UWord16* internalNackList = _jitterBuffer.GetNackList(nackListSize, extended); + WebRtc_UWord16* internalNackList = _jitterBuffer.CreateNackList( + &nackListSize, &extended); if (internalNackList == NULL && nackListSize == 0xffff) { // This combination is used to trigger key frame requests. @@ -468,7 +470,7 @@ VCMReceiver::UpdateState(VCMReceiverState newState) void VCMReceiver::UpdateState(VCMEncodedFrame& frame) { - if (_jitterBuffer.GetNackMode() == kNoNack) + if (_jitterBuffer.nack_mode() == kNoNack) { // Dual decoder mode has not been enabled. 
return; diff --git a/src/modules/video_coding/main/test/jitter_buffer_test.cc b/src/modules/video_coding/main/test/jitter_buffer_test.cc index 2066983635..99d8ac554b 100644 --- a/src/modules/video_coding/main/test/jitter_buffer_test.cc +++ b/src/modules/video_coding/main/test/jitter_buffer_test.cc @@ -135,13 +135,9 @@ int JitterBufferTest(CmdArgs& args) } } - // Test out of range inputs - TEST(kSizeError == jb.InsertPacket(0, packet)); - jb.ReleaseFrame(0); - // Not started TEST(0 == jb.GetFrame(packet)); - TEST(-1 == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(-1 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); TEST(0 == jb.GetCompleteFrameForDecoding(10)); TEST(0 == jb.GetFrameForDecoding()); @@ -179,7 +175,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -220,7 +216,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -279,7 +275,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameKey); @@ -355,7 +351,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - 
TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -432,7 +428,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -509,7 +505,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -550,7 +546,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -624,7 +620,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -686,7 +682,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, 
&renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -728,14 +724,14 @@ int JitterBufferTest(CmdArgs& args) // WebRtc_UWord32 numDeltaFrames = 0; WebRtc_UWord32 numKeyFrames = 0; - TEST(jb.GetFrameStatistics(numDeltaFrames, numKeyFrames) == 0); + jb.FrameStatistics(&numDeltaFrames, &numKeyFrames); TEST(numDeltaFrames == 8); TEST(numKeyFrames == 1); WebRtc_UWord32 frameRate; WebRtc_UWord32 bitRate; - TEST(jb.GetUpdate(frameRate, bitRate) == 0); + jb.IncomingRateStatistics(&frameRate, &bitRate); // these depend on CPU speed works on a T61 TEST(frameRate > 30); @@ -786,8 +782,8 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // Get packet notification - TEST(timeStamp - 33 * 90 == jb.GetNextTimeStamp(10, incomingFrameType, - renderTimeMs)); + TEST(timeStamp - 33 * 90 == jb.NextTimestamp(10, &incomingFrameType, + &renderTimeMs)); // Check incoming frame type if (i == 0) @@ -858,7 +854,7 @@ int JitterBufferTest(CmdArgs& args) jb.ReleaseFrame(frameOut); } - TEST(jb.NumNotDecodablePackets() == 10); + TEST(jb.num_not_decodable_packets() == 10); // Insert 3 old packets and verify that we have 3 discarded packets // Match value to actual latest timestamp decoded @@ -875,12 +871,12 @@ int JitterBufferTest(CmdArgs& args) frameIn = jb.GetFrame(packet); TEST(frameIn == NULL); - TEST(jb.DiscardedPackets() == 3); + TEST(jb.num_discarded_packets() == 3); jb.Flush(); // This statistic shouldn't be reset by a flush. 
- TEST(jb.DiscardedPackets() == 3); + TEST(jb.num_discarded_packets() == 3); //printf("DONE Statistics\n"); @@ -916,7 +912,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -943,7 +939,8 @@ int JitterBufferTest(CmdArgs& args) TEST(kIncomplete == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(2, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(2, &incomingFrameType, + &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -1009,7 +1006,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -1036,7 +1033,8 @@ int JitterBufferTest(CmdArgs& args) TEST(kIncomplete == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(2, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(2, &incomingFrameType, + &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -1101,7 +1099,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -1125,7 +1123,7 @@ int 
JitterBufferTest(CmdArgs& args) TEST(kIncomplete == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -1186,7 +1184,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(3000 == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(3000 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); TEST(kVideoFrameDelta == incomingFrameType); // Get the frame @@ -1240,7 +1238,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); TEST(kVideoFrameDelta == incomingFrameType); // Get the frame @@ -1291,7 +1289,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -1334,7 +1332,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -1394,7 +1392,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // Get packet notification - TEST(0xffffff00 == jb.GetNextTimeStamp(10, 
incomingFrameType, renderTimeMs)); + TEST(0xffffff00 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); TEST(kVideoFrameDelta == incomingFrameType); // Insert next frame @@ -1413,7 +1411,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // Get packet notification - TEST(0xffffff00 == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(0xffffff00 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); TEST(kVideoFrameDelta == incomingFrameType); // Get frame @@ -1426,7 +1424,7 @@ int JitterBufferTest(CmdArgs& args) TEST(frameOut->FrameType() == kVideoFrameDelta); // Get packet notification - TEST(2700 == jb.GetNextTimeStamp(0, incomingFrameType, renderTimeMs)); + TEST(2700 == jb.NextTimestamp(0, &incomingFrameType, &renderTimeMs)); TEST(kVideoFrameDelta == incomingFrameType); // Get frame @@ -1469,7 +1467,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // Get packet notification - TEST(2700 == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(2700 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); TEST(kVideoFrameDelta == incomingFrameType); // Insert second frame @@ -1488,7 +1486,7 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // Get packet notification - TEST(0xffffff00 == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(0xffffff00 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs)); TEST(kVideoFrameDelta == incomingFrameType); // Get frame @@ -1501,7 +1499,7 @@ int JitterBufferTest(CmdArgs& args) TEST(frameOut->FrameType() == kVideoFrameDelta); // get packet notification - TEST(2700 == jb.GetNextTimeStamp(0, incomingFrameType, renderTimeMs)); + TEST(2700 == jb.NextTimestamp(0, &incomingFrameType, &renderTimeMs)); TEST(kVideoFrameDelta == incomingFrameType); // Get frame @@ -1551,7 +1549,8 @@ int JitterBufferTest(CmdArgs& args) } // get packet 
notification - TEST(packet.timestamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(packet.timestamp == jb.NextTimestamp(10, &incomingFrameType, + &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -1622,8 +1621,8 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // Get packet notification, should be first inserted frame - TEST(timeStampStart == jb.GetNextTimeStamp(10, incomingFrameType, - renderTimeMs)); + TEST(timeStampStart == jb.NextTimestamp(10, &incomingFrameType, + &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameDelta); @@ -1650,8 +1649,8 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // First inserted key frame should be oldest in buffer - TEST(timeStampFirstKey == jb.GetNextTimeStamp(10, incomingFrameType, - renderTimeMs)); + TEST(timeStampFirstKey == jb.NextTimestamp(10, &incomingFrameType, + &renderTimeMs)); // check incoming frame type TEST(incomingFrameType == kVideoFrameKey); @@ -1764,7 +1763,8 @@ int JitterBufferTest(CmdArgs& args) TEST(kFirstPacket == jb.InsertPacket(frameIn, packet)); // Get packet notification - TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs)); + TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, + &renderTimeMs)); frameOut = jb.GetFrameForDecoding(); // We can decode everything from a NALU until a packet has been lost.