diff --git a/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoEncoderH264.mm b/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoEncoderH264.mm
index f84998efa6..86855a293c 100644
--- a/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoEncoderH264.mm
+++ b/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoEncoderH264.mm
@@ -502,11 +502,10 @@ CFStringRef ExtractProfile(webrtc::SdpVideoFormat videoFormat) {
 
   // If we're capturing native frames in another pixel format than the compression session is
   // configured with, make sure the compression session is reset using the correct pixel format.
+  // If we're capturing non-native frames and the compression session is configured with a non-NV12
+  // format, reset it to NV12.
   OSType framePixelFormat = kNV12PixelFormat;
-  if (pixelBufferPool && [frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
-    RTCCVPixelBuffer *rtcPixelBuffer = (RTCCVPixelBuffer *)frame.buffer;
-    framePixelFormat = CVPixelBufferGetPixelFormatType(rtcPixelBuffer.pixelBuffer);
-
+  if (pixelBufferPool) {
     // The pool attribute `kCVPixelBufferPixelFormatTypeKey` can contain either an array of pixel
     // formats or a single pixel format.
     NSDictionary *poolAttributes =
@@ -520,6 +519,11 @@ CFStringRef ExtractProfile(webrtc::SdpVideoFormat videoFormat) {
       compressionSessionPixelFormats = @[ (NSNumber *)pixelFormats ];
     }
 
+    if ([frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
+      RTCCVPixelBuffer *rtcPixelBuffer = (RTCCVPixelBuffer *)frame.buffer;
+      framePixelFormat = CVPixelBufferGetPixelFormatType(rtcPixelBuffer.pixelBuffer);
+    }
+
     if (![compressionSessionPixelFormats
             containsObject:[NSNumber numberWithLong:framePixelFormat]]) {
       resetCompressionSession = YES;
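
The net effect of the patch, read as a single decision, is: query the compression session's pixel buffer pool for the pixel formats it was configured with, determine the incoming frame's format (the native buffer's format for RTCCVPixelBuffer frames, NV12 otherwise), and reset the session when the two do not match. The sketch below restates that logic; it is not code from the tree. ShouldResetCompressionSession is a hypothetical helper name, the WebRTC types (RTCVideoFrame, RTCCVPixelBuffer) are assumed to come from this file's existing imports, and kNV12PixelFormat is assumed to be the file's alias for kCVPixelFormatType_420YpCbCr8BiPlanarFullRange.

#import <Foundation/Foundation.h>
#import <CoreVideo/CoreVideo.h>
// RTCVideoFrame / RTCCVPixelBuffer come from the SDK headers already imported by this file.

static BOOL ShouldResetCompressionSession(CVPixelBufferPoolRef pool, RTCVideoFrame *frame) {
  if (!pool) {
    return NO;  // No pool to compare against, so no format mismatch to detect.
  }

  // Non-native (e.g. I420) frames will be converted to NV12 before encoding; native
  // CVPixelBuffer frames keep whatever format the capturer produced.
  OSType framePixelFormat = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
  if ([frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
    RTCCVPixelBuffer *rtcPixelBuffer = (RTCCVPixelBuffer *)frame.buffer;
    framePixelFormat = CVPixelBufferGetPixelFormatType(rtcPixelBuffer.pixelBuffer);
  }

  // The pool attribute `kCVPixelBufferPixelFormatTypeKey` can hold a single pixel
  // format or an array of them, so normalize to an array before comparing.
  NSDictionary *poolAttributes =
      (__bridge NSDictionary *)CVPixelBufferPoolGetPixelBufferAttributes(pool);
  id pixelFormats =
      [poolAttributes objectForKey:(__bridge NSString *)kCVPixelBufferPixelFormatTypeKey];
  NSArray<NSNumber *> *sessionPixelFormats = nil;
  if ([pixelFormats isKindOfClass:[NSArray class]]) {
    sessionPixelFormats = (NSArray *)pixelFormats;
  } else if ([pixelFormats isKindOfClass:[NSNumber class]]) {
    sessionPixelFormats = @[ (NSNumber *)pixelFormats ];
  }

  // Reset when the session is not configured for the format we are about to feed it.
  // If no formats could be read (sessionPixelFormats is nil), this also returns YES,
  // which matches the patched code's behavior of messaging nil.
  return ![sessionPixelFormats containsObject:[NSNumber numberWithLong:framePixelFormat]];
}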