diff --git a/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc b/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc index 5f01edbc36..d1c5569cef 100644 --- a/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc +++ b/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc @@ -32,8 +32,6 @@ TEST(TestDecodingState, FrameContinuity) { // Check that makes decision based on correct method. VCMFrameBuffer frame; VCMFrameBuffer frame_key; - frame.SetState(kStateEmpty); - frame_key.SetState(kStateEmpty); VCMPacket* packet = new VCMPacket(); packet->isFirstPacket = 1; packet->timestamp = 1; @@ -56,7 +54,6 @@ TEST(TestDecodingState, FrameContinuity) { frame.InsertPacket(*packet, 0, false, 0); EXPECT_FALSE(dec_state.ContinuousFrame(&frame)); frame.Reset(); - frame.SetState(kStateEmpty); packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0; packet->seqNum = 10; frame.InsertPacket(*packet, 0, false, 0); @@ -65,12 +62,10 @@ TEST(TestDecodingState, FrameContinuity) { // Use sequence numbers. packet->codecSpecificHeader.codecHeader.VP8.pictureId = kNoPictureId; frame.Reset(); - frame.SetState(kStateEmpty); packet->seqNum = dec_state.sequence_num() - 1u; frame.InsertPacket(*packet, 0, false, 0); EXPECT_FALSE(dec_state.ContinuousFrame(&frame)); frame.Reset(); - frame.SetState(kStateEmpty); packet->seqNum = dec_state.sequence_num() + 1u; frame.InsertPacket(*packet, 0, false, 0); // Insert another packet to this frame @@ -84,7 +79,6 @@ TEST(TestDecodingState, FrameContinuity) { // Insert packet with temporal info. 
dec_state.Reset(); frame.Reset(); - frame.SetState(kStateEmpty); packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0; packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0; packet->codecSpecificHeader.codecHeader.VP8.pictureId = 0; @@ -95,7 +89,6 @@ TEST(TestDecodingState, FrameContinuity) { dec_state.SetState(&frame); EXPECT_TRUE(dec_state.full_sync()); frame.Reset(); - frame.SetState(kStateEmpty); // 1 layer up - still good. packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0; packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 1; @@ -107,7 +100,6 @@ TEST(TestDecodingState, FrameContinuity) { dec_state.SetState(&frame); EXPECT_TRUE(dec_state.full_sync()); frame.Reset(); - frame.SetState(kStateEmpty); // Lost non-base layer packet => should update sync parameter. packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0; packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 3; @@ -118,7 +110,6 @@ TEST(TestDecodingState, FrameContinuity) { EXPECT_FALSE(dec_state.ContinuousFrame(&frame)); // Now insert the next non-base layer (belonging to a next tl0PicId). frame.Reset(); - frame.SetState(kStateEmpty); packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1; packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 2; packet->codecSpecificHeader.codecHeader.VP8.pictureId = 4; @@ -131,7 +122,6 @@ TEST(TestDecodingState, FrameContinuity) { EXPECT_TRUE(dec_state.full_sync()); // Next base layer (dropped interim non-base layers) - should update sync. frame.Reset(); - frame.SetState(kStateEmpty); packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1; packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0; packet->codecSpecificHeader.codecHeader.VP8.pictureId = 5; @@ -144,7 +134,6 @@ TEST(TestDecodingState, FrameContinuity) { // Check wrap for temporal layers. 
frame.Reset(); - frame.SetState(kStateEmpty); packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0x00FF; packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0; packet->codecSpecificHeader.codecHeader.VP8.pictureId = 6; @@ -154,7 +143,6 @@ TEST(TestDecodingState, FrameContinuity) { dec_state.SetState(&frame); EXPECT_FALSE(dec_state.full_sync()); frame.Reset(); - frame.SetState(kStateEmpty); packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0x0000; packet->codecSpecificHeader.codecHeader.VP8.temporalIdx = 0; packet->codecSpecificHeader.codecHeader.VP8.pictureId = 7; @@ -173,7 +161,6 @@ TEST(TestDecodingState, UpdateOldPacket) { // Update only if zero size and newer than previous. // Should only update if the timeStamp match. VCMFrameBuffer frame; - frame.SetState(kStateEmpty); VCMPacket* packet = new VCMPacket(); packet->timestamp = 1; packet->seqNum = 1; @@ -222,7 +209,6 @@ TEST(TestDecodingState, MultiLayerBehavior) { VCMPacket* packet = new VCMPacket(); packet->frameType = kVideoFrameDelta; packet->codecSpecificHeader.codec = kRTPVideoVP8; - frame.SetState(kStateEmpty); packet->timestamp = 0; packet->seqNum = 0; packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0; @@ -232,7 +218,6 @@ TEST(TestDecodingState, MultiLayerBehavior) { dec_state.SetState(&frame); // tl0PicIdx 0, temporal id 1. frame.Reset(); - frame.SetState(kStateEmpty); packet->timestamp = 1; packet->seqNum = 1; packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0; @@ -245,7 +230,6 @@ TEST(TestDecodingState, MultiLayerBehavior) { // Lost tl0PicIdx 0, temporal id 2. // Insert tl0PicIdx 0, temporal id 3. 
frame.Reset(); - frame.SetState(kStateEmpty); packet->timestamp = 3; packet->seqNum = 3; packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0; @@ -257,7 +241,6 @@ TEST(TestDecodingState, MultiLayerBehavior) { EXPECT_FALSE(dec_state.full_sync()); // Insert next base layer frame.Reset(); - frame.SetState(kStateEmpty); packet->timestamp = 4; packet->seqNum = 4; packet->codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1; @@ -270,7 +253,6 @@ TEST(TestDecodingState, MultiLayerBehavior) { // Insert key frame - should update sync value. // A key frame is always a base layer. frame.Reset(); - frame.SetState(kStateEmpty); packet->frameType = kVideoFrameKey; packet->isFirstPacket = 1; packet->timestamp = 5; @@ -285,7 +267,6 @@ TEST(TestDecodingState, MultiLayerBehavior) { // After sync, a continuous PictureId is required // (continuous base layer is not enough ) frame.Reset(); - frame.SetState(kStateEmpty); packet->frameType = kVideoFrameDelta; packet->timestamp = 6; packet->seqNum = 6; @@ -296,7 +277,6 @@ TEST(TestDecodingState, MultiLayerBehavior) { EXPECT_TRUE(dec_state.ContinuousFrame(&frame)); EXPECT_TRUE(dec_state.full_sync()); frame.Reset(); - frame.SetState(kStateEmpty); packet->frameType = kVideoFrameDelta; packet->isFirstPacket = 1; packet->timestamp = 8; @@ -312,7 +292,6 @@ TEST(TestDecodingState, MultiLayerBehavior) { // Insert a non-ref frame - should update sync value. frame.Reset(); - frame.SetState(kStateEmpty); packet->frameType = kVideoFrameDelta; packet->isFirstPacket = 1; packet->timestamp = 9; @@ -333,7 +312,6 @@ TEST(TestDecodingState, MultiLayerBehavior) { // Base layer. frame.Reset(); dec_state.Reset(); - frame.SetState(kStateEmpty); packet->frameType = kVideoFrameDelta; packet->isFirstPacket = 1; packet->markerBit = 1; @@ -348,7 +326,6 @@ TEST(TestDecodingState, MultiLayerBehavior) { EXPECT_TRUE(dec_state.full_sync()); // Layer 2 - 2 packets (insert one, lose one). 
frame.Reset(); - frame.SetState(kStateEmpty); packet->frameType = kVideoFrameDelta; packet->isFirstPacket = 1; packet->markerBit = 0; @@ -362,7 +339,6 @@ TEST(TestDecodingState, MultiLayerBehavior) { EXPECT_TRUE(dec_state.ContinuousFrame(&frame)); // Layer 1 frame.Reset(); - frame.SetState(kStateEmpty); packet->frameType = kVideoFrameDelta; packet->isFirstPacket = 1; packet->markerBit = 1; @@ -384,7 +360,6 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) { VCMFrameBuffer frame; VCMPacket packet; frame.Reset(); - frame.SetState(kStateEmpty); packet.frameType = kVideoFrameKey; packet.codecSpecificHeader.codec = kRTPVideoVP8; packet.timestamp = 0; @@ -399,7 +374,6 @@ TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) { // Continuous sequence number but discontinuous picture id. This implies a // a loss and we have to fall back to only decoding the base layer. frame.Reset(); - frame.SetState(kStateEmpty); packet.frameType = kVideoFrameDelta; packet.timestamp += 3000; ++packet.seqNum; @@ -416,7 +390,6 @@ TEST(TestDecodingState, OldInput) { // Identify packets belonging to old frames/packets. // Set state for current frames. 
VCMFrameBuffer frame; - frame.SetState(kStateEmpty); VCMPacket* packet = new VCMPacket(); packet->timestamp = 10; packet->seqNum = 1; diff --git a/webrtc/modules/video_coding/main/source/frame_buffer.cc b/webrtc/modules/video_coding/main/source/frame_buffer.cc index 5226ecbd52..a88ff977bf 100644 --- a/webrtc/modules/video_coding/main/source/frame_buffer.cc +++ b/webrtc/modules/video_coding/main/source/frame_buffer.cc @@ -18,7 +18,7 @@ namespace webrtc { VCMFrameBuffer::VCMFrameBuffer() : - _state(kStateFree), + _state(kStateEmpty), _frameCounted(false), _nackCount(0), _latestPacketTimeMs(-1) { @@ -46,12 +46,6 @@ VCMFrameBuffer::FrameType() const return _sessionInfo.FrameType(); } -void -VCMFrameBuffer::SetPreviousFrameLoss() -{ - _sessionInfo.SetPreviousFrameLoss(); -} - int32_t VCMFrameBuffer::GetLowSeqNum() const { @@ -95,18 +89,6 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(const VCMPacket& packet, int64_t timeInMs, bool enableDecodableState, uint32_t rttMS) { - if (_state == kStateDecoding) - { - // Do not insert packet - return kNoError; - } - - // Sanity to check if the frame has been freed. 
(Too old for example) - if (_state == kStateFree) - { - return kGeneralError; - } - // is this packet part of this frame if (TimeStamp() && (TimeStamp() != packet.timestamp)) { @@ -188,17 +170,11 @@ VCMFrameBuffer::InsertPacket(const VCMPacket& packet, int64_t timeInMs, _latestPacketTimeMs = timeInMs; if (_sessionInfo.complete()) { + SetState(kStateComplete); return kCompleteSession; } else if (_sessionInfo.decodable()) { SetState(kStateDecodable); return kDecodableSession; - } else { - // this layer is not complete - if (_state == kStateComplete) { - // we already have a complete layer - // wait for all independent layers belonging to the same frame - _state = kStateIncomplete; - } } return kIncomplete; } @@ -243,26 +219,10 @@ VCMFrameBuffer::Reset() _payloadType = 0; _nackCount = 0; _latestPacketTimeMs = -1; - _state = kStateFree; + _state = kStateEmpty; VCMEncodedFrame::Reset(); } -// Makes sure the session contains a decodable stream. -void -VCMFrameBuffer::MakeSessionDecodable() -{ - uint32_t retVal; -#ifdef INDEPENDENT_PARTITIONS - if (_codec != kVideoCodecVP8) { - retVal = _sessionInfo.MakeDecodable(); - _length -= retVal; - } -#else - retVal = _sessionInfo.MakeDecodable(); - _length -= retVal; -#endif -} - // Set state of frame void VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) @@ -273,19 +233,9 @@ VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) } switch (state) { - case kStateFree: - // Reset everything - // We can go to this state from all other states. - // The one setting the state to free must ensure - // that the frame is removed from the timestamp - // ordered frame list in the jb. 
- Reset(); - break; - case kStateIncomplete: // we can go to this state from state kStateEmpty - assert(_state == kStateEmpty || - _state == kStateDecoding); + assert(_state == kStateEmpty); // Do nothing, we received a packet break; @@ -298,20 +248,8 @@ VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) break; case kStateEmpty: - assert(_state == kStateFree); - // Do nothing - break; - - case kStateDecoding: - // A frame might have received empty packets, or media packets might - // have been removed when making the frame decodable. The frame can - // still be set to decodable since it can be used to inform the - // decoder of a frame loss. - assert(_state == kStateComplete || _state == kStateIncomplete || - _state == kStateDecodable || _state == kStateEmpty); - // Transfer frame information to EncodedFrame and create any codec - // specific information - RestructureFrameInformation(); + // Should only be set to empty through Reset(). + assert(false); break; case kStateDecodable: @@ -322,15 +260,6 @@ VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) _state = state; } -void -VCMFrameBuffer::RestructureFrameInformation() -{ - PrepareForDecode(); - _frameType = ConvertFrameType(_sessionInfo.FrameType()); - _completeFrame = _sessionInfo.complete(); - _missingFrame = _sessionInfo.PreviousFrameLoss(); -} - int32_t VCMFrameBuffer::ExtractFromStorage(const EncodedVideoData& frameFromStorage) { @@ -391,7 +320,7 @@ VCMFrameBuffer::IsRetransmitted() const } void -VCMFrameBuffer::PrepareForDecode() +VCMFrameBuffer::PrepareForDecode(bool continuous) { #ifdef INDEPENDENT_PARTITIONS if (_codec == kVideoCodecVP8) @@ -399,8 +328,19 @@ VCMFrameBuffer::PrepareForDecode() _length = _sessionInfo.BuildVP8FragmentationHeader(_buffer, _length, &_fragmentation); + } else { + int bytes_removed = _sessionInfo.MakeDecodable(); + _length -= bytes_removed; } +#else + int bytes_removed = _sessionInfo.MakeDecodable(); + _length -= bytes_removed; #endif + // Transfer frame information 
to EncodedFrame and create any codec + // specific information. + _frameType = ConvertFrameType(_sessionInfo.FrameType()); + _completeFrame = _sessionInfo.complete(); + _missingFrame = !continuous; } } diff --git a/webrtc/modules/video_coding/main/source/frame_buffer.h b/webrtc/modules/video_coding/main/source/frame_buffer.h index 81400a277e..245302d5c2 100644 --- a/webrtc/modules/video_coding/main/source/frame_buffer.h +++ b/webrtc/modules/video_coding/main/source/frame_buffer.h @@ -40,7 +40,7 @@ public: VCMFrameBufferStateEnum GetState() const; // Get current state and timestamp of frame VCMFrameBufferStateEnum GetState(uint32_t& timeStamp) const; - void SetState(VCMFrameBufferStateEnum state); // Set state of frame + void PrepareForDecode(bool continuous); bool IsRetransmitted() const; bool IsSessionComplete() const; @@ -83,11 +83,9 @@ public: // them. int NotDecodablePackets() const; -protected: - void RestructureFrameInformation(); - void PrepareForDecode(); - private: + void SetState(VCMFrameBufferStateEnum state); // Set state of frame + VCMFrameBufferStateEnum _state; // Current state of the frame bool _frameCounted; // Was this frame counted by JB? 
VCMSessionInfo _sessionInfo; diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer.cc b/webrtc/modules/video_coding/main/source/jitter_buffer.cc index 6dcdf4fd8e..21eb489cb0 100644 --- a/webrtc/modules/video_coding/main/source/jitter_buffer.cc +++ b/webrtc/modules/video_coding/main/source/jitter_buffer.cc @@ -69,7 +69,8 @@ VCMFrameBuffer* FrameList::Back() const { return rbegin()->second; } -int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it) { +int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it, + UnorderedFrameList* free_frames) { int drop_count = 0; FrameList::iterator it = begin(); while (!empty()) { @@ -78,9 +79,8 @@ int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it) { "Recycling: type=%s, low seqnum=%u", it->second->FrameType() == kVideoFrameKey ? "key" : "delta", it->second->GetLowSeqNum()); - if (it->second->GetState() != kStateDecoding) { - it->second->SetState(kStateFree); - } + it->second->Reset(); + free_frames->push_back(it->second); erase(it++); ++drop_count; if (it != end() && it->second->FrameType() == kVideoFrameKey) { @@ -92,7 +92,8 @@ int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it) { return drop_count; } -int FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state) { +int FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state, + UnorderedFrameList* free_frames) { int drop_count = 0; while (!empty()) { VCMFrameBuffer* oldest_frame = Front(); @@ -107,9 +108,7 @@ int FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state) { if (!remove_frame) { break; } - if (oldest_frame->GetState() != kStateDecoding) { - oldest_frame->SetState(kStateFree); - } + free_frames->push_back(oldest_frame); ++drop_count; TRACE_EVENT_INSTANT1("webrtc", "JB::OldOrEmptyFrameDropped", "timestamp", oldest_frame->TimeStamp()); @@ -122,6 +121,14 @@ int FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state) { 
return drop_count; } +void FrameList::Reset(UnorderedFrameList* free_frames) { + while (!empty()) { + begin()->second->Reset(); + free_frames->push_back(begin()->second); + erase(begin()); + } +} + VCMJitterBuffer::VCMJitterBuffer(Clock* clock, EventFactory* event_factory, int vcm_id, @@ -137,6 +144,7 @@ VCMJitterBuffer::VCMJitterBuffer(Clock* clock, packet_event_(event_factory->CreateEvent()), max_number_of_frames_(kStartNumberOfFrames), frame_buffers_(), + free_frames_(), decodable_frames_(), incomplete_frames_(), last_decoded_state_(), @@ -169,6 +177,7 @@ VCMJitterBuffer::VCMJitterBuffer(Clock* clock, for (int i = 0; i < kStartNumberOfFrames; i++) { frame_buffers_[i] = new VCMFrameBuffer(); + free_frames_.push_back(frame_buffers_[i]); } } @@ -222,26 +231,32 @@ void VCMJitterBuffer::CopyFrom(const VCMJitterBuffer& rhs) { frame_buffers_[i] = NULL; } } + free_frames_.clear(); decodable_frames_.clear(); - int i = 0; - for (FrameList::const_iterator it = rhs.decodable_frames_.begin(); - it != rhs.decodable_frames_.end(); ++it, ++i) { - frame_buffers_[i] = new VCMFrameBuffer(*it->second); - decodable_frames_.insert(decodable_frames_.rbegin().base(), - FrameListPair(frame_buffers_[i]->TimeStamp(), frame_buffers_[i])); - } incomplete_frames_.clear(); - for (FrameList::const_iterator it = rhs.incomplete_frames_.begin(); - it != rhs.incomplete_frames_.end(); ++it, ++i) { - frame_buffers_[i] = new VCMFrameBuffer(*it->second); - incomplete_frames_.insert(incomplete_frames_.rbegin().base(), - FrameListPair(frame_buffers_[i]->TimeStamp(), frame_buffers_[i])); + int i = 0; + for (UnorderedFrameList::const_iterator it = rhs.free_frames_.begin(); + it != rhs.free_frames_.end(); ++it, ++i) { + frame_buffers_[i] = new VCMFrameBuffer; + free_frames_.push_back(frame_buffers_[i]); } + CopyFrames(&decodable_frames_, rhs.decodable_frames_, &i); + CopyFrames(&incomplete_frames_, rhs.incomplete_frames_, &i); rhs.crit_sect_->Leave(); crit_sect_->Leave(); } } +void 
VCMJitterBuffer::CopyFrames(FrameList* to_list, + const FrameList& from_list, int* index) { + to_list->clear(); + for (FrameList::const_iterator it = from_list.begin(); + it != from_list.end(); ++it, ++*index) { + frame_buffers_[*index] = new VCMFrameBuffer(*it->second); + to_list->InsertFrame(frame_buffers_[*index]); + } +} + void VCMJitterBuffer::Start() { CriticalSectionScoped cs(crit_sect_); running_ = true; @@ -276,15 +291,17 @@ void VCMJitterBuffer::Stop() { crit_sect_->Enter(); running_ = false; last_decoded_state_.Reset(); + free_frames_.clear(); decodable_frames_.clear(); incomplete_frames_.clear(); - TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied", "type", "Stop"); + // Make sure all frames are reset and free. for (int i = 0; i < kMaxNumberOfFrames; i++) { if (frame_buffers_[i] != NULL) { - static_cast<VCMFrameBuffer*>(frame_buffers_[i])->SetState(kStateFree); + static_cast<VCMFrameBuffer*>(frame_buffers_[i])->Reset(); + free_frames_.push_back(frame_buffers_[i]); } } - + TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied", "type", "Stop"); crit_sect_->Leave(); // Make sure we wake up any threads waiting on these events. frame_event_->Set(); @@ -301,14 +318,10 @@ bool VCMJitterBuffer::Running() const { void VCMJitterBuffer::Flush() { CriticalSectionScoped cs(crit_sect_); - // Erase all frames from the sorted list and set their state to free. - decodable_frames_.clear(); - incomplete_frames_.clear(); + decodable_frames_.Reset(&free_frames_); + incomplete_frames_.Reset(&free_frames_); TRACE_EVENT_INSTANT2("webrtc", "JB::FrameListEmptied", "type", "Flush", "frames", max_number_of_frames_); - for (int i = 0; i < max_number_of_frames_; i++) { - ReleaseFrameIfNotDecoding(frame_buffers_[i]); - } last_decoded_state_.Reset(); // TODO(mikhal): sync reset. 
num_not_decodable_packets_ = 0; frame_event_->Reset(); @@ -454,8 +467,7 @@ bool VCMJitterBuffer::NextCompleteTimestamp( crit_sect_->Leave(); return false; } - // Finding oldest frame ready for decoder, but check - // sequence number and size + // Finding oldest frame ready for decoder. CleanUpOldOrEmptyFrames(); if (decodable_frames_.empty()) { wait_time_ms = end_wait_time_ms - clock_->TimeInMilliseconds(); @@ -522,9 +534,12 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) { } // Extract the frame with the desired timestamp. VCMFrameBuffer* frame = decodable_frames_.PopFrame(timestamp); + bool continuous = true; if (!frame) { frame = incomplete_frames_.PopFrame(timestamp); + if (frame) + continuous = last_decoded_state_.ContinuousFrame(frame); + else return NULL; } if (!NextFrame()) { @@ -550,13 +565,11 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) { waiting_for_completion_.timestamp = frame->TimeStamp(); } } - // Look for previous frame loss. - VerifyAndSetPreviousFrameLost(frame); // The state must be changed to decoding before cleaning up zero sized // frames to avoid empty frames being cleaned up and then given to the // decoder. Propagates the missing_frame bit. - frame->SetState(kStateDecoding); + frame->PrepareForDecode(continuous); num_not_decodable_packets_ += frame->NotDecodablePackets(); @@ -571,8 +584,9 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) { void VCMJitterBuffer::ReleaseFrame(VCMEncodedFrame* frame) { CriticalSectionScoped cs(crit_sect_); VCMFrameBuffer* frame_buffer = static_cast<VCMFrameBuffer*>(frame); - if (frame_buffer) - frame_buffer->SetState(kStateFree); + if (frame_buffer) { + free_frames_.push_back(frame_buffer); + } } // Gets frame to use for this timestamp. If no match, get empty frame. 
@@ -604,27 +618,26 @@ VCMFrameBufferEnum VCMJitterBuffer::GetFrame(const VCMPacket& packet, num_consecutive_old_packets_ = 0; *frame = incomplete_frames_.FindFrame(packet.timestamp); - if (*frame) { + if (*frame) return kNoError; - } *frame = decodable_frames_.FindFrame(packet.timestamp); - if (*frame) { + if (*frame) return kNoError; - } // No match, return empty frame. *frame = GetEmptyFrame(); - if (*frame != NULL) { - return kNoError; + VCMFrameBufferEnum ret = kNoError; + if (!*frame) { + // No free frame! Try to reclaim some... + LOG_F(LS_INFO) << "Unable to get empty frame; Recycling."; + bool found_key_frame = RecycleFramesUntilKeyFrame(); + *frame = GetEmptyFrame(); + if (!*frame) + return kGeneralError; + else if (!found_key_frame) + ret = kFlushIndicator; } - // No free frame! Try to reclaim some... - LOG_F(LS_INFO) << "Unable to get empty frame; Recycling."; - RecycleFramesUntilKeyFrame(); - - *frame = GetEmptyFrame(); - if (*frame != NULL) { - return kNoError; - } - return kGeneralError; + (*frame)->Reset(); + return ret; } int64_t VCMJitterBuffer::LastPacketTime(const VCMEncodedFrame* frame, @@ -643,9 +656,10 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet, int64_t now_ms = clock_->TimeInMilliseconds(); VCMFrameBufferEnum buffer_return = kSizeError; VCMFrameBufferEnum ret = kSizeError; + VCMFrameBuffer* frame = NULL; const VCMFrameBufferEnum error = GetFrame(packet, &frame); - if (error != kNoError) { + if (error != kNoError && frame == NULL) { return error; } @@ -660,7 +674,6 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet, // This packet belongs to an old, already decoded frame, we want to update // the last decoded sequence number. 
last_decoded_state_.UpdateOldPacket(&packet); - frame->SetState(kStateFree); TRACE_EVENT_INSTANT1("webrtc", "JB::DropLateFrame", "timestamp", frame->TimeStamp()); drop_count_++; @@ -726,17 +739,26 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet, case kGeneralError: case kTimeStampError: case kSizeError: { - if (frame != NULL) { - frame->Reset(); - frame->SetState(kStateFree); - } + // This frame will be cleaned up later from the frame lists. + frame->Reset(); break; } case kCompleteSession: { + if (master_) { + // Only trace the primary jitter buffer to make it possible to parse + // and plot the trace file. + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), + "JB(0x%x) FB(0x%x): Complete frame added to jitter buffer," + " size:%d type %d", + this, frame, frame->Length(), frame->FrameType()); + } // Don't let the first packet be overridden by a complete session. ret = kCompleteSession; // Only update return value for a JB flush indicator. - UpdateFrameState(frame); + *retransmitted = (frame->GetNackCount() > 0); + CountFrame(*frame); + frame->SetCountedFrame(true); *retransmitted = (frame->GetNackCount() > 0); if (IsContinuous(*frame) && previous_state != kStateComplete) { if (!first) { @@ -758,14 +780,16 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet, // No point in storing empty continuous frames. if (frame->GetState() == kStateEmpty && last_decoded_state_.UpdateEmptyFrame(frame)) { - frame->SetState(kStateFree); + free_frames_.push_back(frame); + frame->Reset(); + frame = NULL; ret = kNoError; } else if (first) { ret = kFirstPacket; incomplete_frames_.InsertFrame(frame); - // Signal that we have received a packet. - packet_event_->Set(); } + // Signal that we have received a packet. 
+ packet_event_->Set(); break; } case kNoError: @@ -946,9 +970,8 @@ uint16_t* VCMJitterBuffer::GetNackList(uint16_t* nack_list_size, incomplete_frames_.begin(), incomplete_frames_.end(), HasNonEmptyState); } - if (have_non_empty_frame) { + if (have_non_empty_frame) LOG_F(LS_INFO) << "First frame is not key; Recycling."; - } bool found_key_frame = RecycleFramesUntilKeyFrame(); if (!found_key_frame) { *request_key_frame = have_non_empty_frame; @@ -1106,40 +1129,30 @@ void VCMJitterBuffer::RenderBufferSize(uint32_t* timestamp_start, *timestamp_end = decodable_frames_.Back()->TimeStamp(); } -// Set the frame state to free and remove it from the sorted -// frame list. Must be called from inside the critical section crit_sect_. -void VCMJitterBuffer::ReleaseFrameIfNotDecoding(VCMFrameBuffer* frame) { - if (frame != NULL && frame->GetState() != kStateDecoding) { - frame->SetState(kStateFree); - } -} - VCMFrameBuffer* VCMJitterBuffer::GetEmptyFrame() { - for (int i = 0; i < max_number_of_frames_; ++i) { - if (kStateFree == frame_buffers_[i]->GetState()) { - // found a free buffer - frame_buffers_[i]->SetState(kStateEmpty); - return frame_buffers_[i]; + if (free_frames_.empty()) { + if (!TryToIncreaseJitterBufferSize()) { + return NULL; } } + VCMFrameBuffer* frame = free_frames_.front(); + free_frames_.pop_front(); + return frame; +} - // Check if we can increase JB size - if (max_number_of_frames_ < kMaxNumberOfFrames) { - VCMFrameBuffer* ptr_new_buffer = new VCMFrameBuffer(); - ptr_new_buffer->SetState(kStateEmpty); - frame_buffers_[max_number_of_frames_] = ptr_new_buffer; - max_number_of_frames_++; - - WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, - VCMId(vcm_id_, receiver_id_), - "JB(0x%x) FB(0x%x): Jitter buffer increased to:%d frames", - this, ptr_new_buffer, max_number_of_frames_); - TRACE_COUNTER1("webrtc", "JBMaxFrames", max_number_of_frames_); - return ptr_new_buffer; - } - - // We have reached max size, cannot increase JB size - return NULL; +bool 
VCMJitterBuffer::TryToIncreaseJitterBufferSize() { + if (max_number_of_frames_ >= kMaxNumberOfFrames) + return false; + VCMFrameBuffer* new_frame = new VCMFrameBuffer(); + frame_buffers_[max_number_of_frames_] = new_frame; + free_frames_.push_back(new_frame); + ++max_number_of_frames_; + WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, + VCMId(vcm_id_, receiver_id_), + "JB(0x%x) FB(0x%x): Jitter buffer increased to:%d frames", + this, new_frame, max_number_of_frames_); + TRACE_COUNTER1("webrtc", "JBMaxFrames", max_number_of_frames_); + return true; } // Recycle oldest frames up to a key frame, used if jitter buffer is completely @@ -1151,11 +1164,11 @@ bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() { bool key_frame_found = false; int dropped_frames = 0; dropped_frames += incomplete_frames_.RecycleFramesUntilKeyFrame( - &key_frame_it); + &key_frame_it, &free_frames_); key_frame_found = key_frame_it != incomplete_frames_.end(); if (dropped_frames == 0) { dropped_frames += decodable_frames_.RecycleFramesUntilKeyFrame( - &key_frame_it); + &key_frame_it, &free_frames_); key_frame_found = key_frame_it != decodable_frames_.end(); if (!key_frame_found) { TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied", "type", @@ -1173,47 +1186,37 @@ bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() { last_decoded_state_.Reset(); DropPacketsFromNackList(EstimatedLowSequenceNumber(*key_frame_it->second)); } else if (decodable_frames_.empty()) { - last_decoded_state_.Reset(); // TODO(mikhal): No sync. + // All frames dropped. Reset the decoding state and clear missing sequence + // numbers as we're starting fresh. + last_decoded_state_.Reset(); missing_sequence_numbers_.clear(); } return key_frame_found; } // Must be called under the critical section |crit_sect_|. -void VCMJitterBuffer::UpdateFrameState(VCMFrameBuffer* frame) { - if (master_) { - // Only trace the primary jitter buffer to make it possible to parse - // and plot the trace file. 
- WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, - VCMId(vcm_id_, receiver_id_), - "JB(0x%x) FB(0x%x): Complete frame added to jitter buffer," - " size:%d type %d", - this, frame, frame->Length(), frame->FrameType()); - } - +void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) { bool frame_counted = false; - if (!frame->GetCountedFrame()) { + if (!frame.GetCountedFrame()) { // Ignore ACK frames. incoming_frame_count_++; - frame->SetCountedFrame(true); frame_counted = true; } - frame->SetState(kStateComplete); - if (frame->FrameType() == kVideoFrameKey) { + if (frame.FrameType() == kVideoFrameKey) { TRACE_EVENT_INSTANT2("webrtc", "JB::AddKeyFrame", - "timestamp", frame->TimeStamp(), + "timestamp", frame.TimeStamp(), "retransmit", !frame_counted); } else { TRACE_EVENT_INSTANT2("webrtc", "JB::AddFrame", - "timestamp", frame->TimeStamp(), + "timestamp", frame.TimeStamp(), "retransmit", !frame_counted); } // Update receive statistics. We count all layers, thus when you use layers // adding all key and delta frames might differ from frame count. - if (frame->IsSessionComplete()) { - switch (frame->FrameType()) { + if (frame.IsSessionComplete()) { + switch (frame.FrameType()) { case kVideoFrameKey: { receive_statistics_[0]++; break; @@ -1239,25 +1242,17 @@ void VCMJitterBuffer::UpdateFrameState(VCMFrameBuffer* frame) { // Must be called under the critical section |crit_sect_|. 
void VCMJitterBuffer::CleanUpOldOrEmptyFrames() { drop_count_ += - decodable_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_); + decodable_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_, + &free_frames_); drop_count_ += - incomplete_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_); + incomplete_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_, + &free_frames_); TRACE_COUNTER1("webrtc", "JBDroppedLateFrames", drop_count_); if (!last_decoded_state_.in_initial_state()) { DropPacketsFromNackList(last_decoded_state_.sequence_num()); } } -void VCMJitterBuffer::VerifyAndSetPreviousFrameLost(VCMFrameBuffer* frame) { - assert(frame); - frame->MakeSessionDecodable(); // Make sure the session can be decoded. - if (frame->FrameType() == kVideoFrameKey) - return; - - if (!last_decoded_state_.ContinuousFrame(frame)) - frame->SetPreviousFrameLoss(); -} - // Must be called from within |crit_sect_|. bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const { return missing_sequence_numbers_.find(packet.seqNum) != diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer.h b/webrtc/modules/video_coding/main/source/jitter_buffer.h index 2dfcac771f..dff428c0b1 100644 --- a/webrtc/modules/video_coding/main/source/jitter_buffer.h +++ b/webrtc/modules/video_coding/main/source/jitter_buffer.h @@ -11,6 +11,7 @@ #ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_ #define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_ +#include #include #include #include @@ -40,6 +41,8 @@ class VCMFrameBuffer; class VCMPacket; class VCMEncodedFrame; +typedef std::list UnorderedFrameList; + struct VCMJitterSample { VCMJitterSample() : timestamp(0), frame_size(0), latest_packet_time(-1) {} uint32_t timestamp; @@ -63,8 +66,11 @@ class FrameList : VCMFrameBuffer* PopFrame(uint32_t timestamp); VCMFrameBuffer* Front() const; VCMFrameBuffer* Back() const; - int RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it); - int 
CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state); + int RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it, + UnorderedFrameList* free_frames); + int CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state, + UnorderedFrameList* free_frames); + void Reset(UnorderedFrameList* free_frames); }; class VCMJitterBuffer { @@ -191,6 +197,8 @@ class VCMJitterBuffer { // existing frames if no free frames are available. Returns an error code if // failing, or kNoError on success. VCMFrameBufferEnum GetFrame(const VCMPacket& packet, VCMFrameBuffer** frame); + void CopyFrames(FrameList* to_list, const FrameList& from_list); + void CopyFrames(FrameList* to_list, const FrameList& from_list, int* index); // Returns true if |frame| is continuous in |decoding_state|, not taking // decodable frames into account. bool IsContinuousInState(const VCMFrameBuffer& frame, @@ -225,26 +233,21 @@ class VCMJitterBuffer { // jitter buffer size). VCMFrameBuffer* GetEmptyFrame(); + // Attempts to increase the size of the jitter buffer. Returns true on + // success, false otherwise. + bool TryToIncreaseJitterBufferSize(); + // Recycles oldest frames until a key frame is found. Used if jitter buffer is // completely full. Returns true if a key frame was found. bool RecycleFramesUntilKeyFrame(); - // Sets the state of |frame| to complete if it's not too old to be decoded. - // Also updates the frame statistics. - void UpdateFrameState(VCMFrameBuffer* frame); + // Updates the frame statistics. + void CountFrame(const VCMFrameBuffer& frame); // Cleans the frame list in the JB from old/empty frames. // Should only be called prior to actual use. void CleanUpOldOrEmptyFrames(); - // Sets the "decodable" and "frame loss" flags of a frame depending on which - // packets have been received and which are missing. - // A frame is "decodable" if enough packets of that frame has been received - // for it to be usable by the decoder. 
- // A frame has the "frame loss" flag set if packets are missing after the - // last decoded frame and before |frame|. - void VerifyAndSetPreviousFrameLost(VCMFrameBuffer* frame); - // Returns true if |packet| is likely to have been retransmitted. bool IsPacketRetransmitted(const VCMPacket& packet) const; @@ -280,6 +283,7 @@ class VCMJitterBuffer { int max_number_of_frames_; // Array of pointers to the frames in jitter buffer. VCMFrameBuffer* frame_buffers_[kMaxNumberOfFrames]; + UnorderedFrameList free_frames_; FrameList decodable_frames_; FrameList incomplete_frames_; VCMDecodingState last_decoded_state_; diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer_common.h b/webrtc/modules/video_coding/main/source/jitter_buffer_common.h index 9ecd9273cd..f20843749d 100644 --- a/webrtc/modules/video_coding/main/source/jitter_buffer_common.h +++ b/webrtc/modules/video_coding/main/source/jitter_buffer_common.h @@ -43,11 +43,9 @@ enum VCMFrameBufferEnum { }; enum VCMFrameBufferStateEnum { - kStateFree, // Unused frame in the JB kStateEmpty, // frame popped by the RTP receiver kStateIncomplete, // frame that have one or more packet(s) stored kStateComplete, // frame that have all packets - kStateDecoding, // frame popped by the decoding thread kStateDecodable // Hybrid mode - frame can be decoded }; diff --git a/webrtc/modules/video_coding/main/source/session_info.cc b/webrtc/modules/video_coding/main/source/session_info.cc index 6db57f2d22..24f8b162d2 100644 --- a/webrtc/modules/video_coding/main/source/session_info.cc +++ b/webrtc/modules/video_coding/main/source/session_info.cc @@ -19,7 +19,6 @@ VCMSessionInfo::VCMSessionInfo() complete_(false), decodable_(false), frame_type_(kVideoFrameDelta), - previous_frame_loss_(false), packets_(), empty_seq_num_low_(-1), empty_seq_num_high_(-1), @@ -89,7 +88,6 @@ void VCMSessionInfo::Reset() { complete_ = false; decodable_ = false; frame_type_ = kVideoFrameDelta; - previous_frame_loss_ = false; packets_.clear(); 
empty_seq_num_low_ = -1; empty_seq_num_high_ = -1; diff --git a/webrtc/modules/video_coding/main/source/session_info.h b/webrtc/modules/video_coding/main/source/session_info.h index 18ee7a4999..bfd8bdedcf 100644 --- a/webrtc/modules/video_coding/main/source/session_info.h +++ b/webrtc/modules/video_coding/main/source/session_info.h @@ -72,8 +72,6 @@ class VCMSessionInfo { bool LayerSync() const; int Tl0PicId() const; bool NonReference() const; - void SetPreviousFrameLoss() { previous_frame_loss_ = true; } - bool PreviousFrameLoss() const { return previous_frame_loss_; } // The number of packets discarded because the decoder can't make use of // them.