Format files that use LOG macros.

To prepare a clean follow-up CL that switches to the RTC_-prefixed LOG
macros, this CL runs `git cl format --full` on the files that use LOG
macros in the following directories:
- modules/audio_device
- modules/media_file
- modules/video_capture
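(The follow-up rename only adds an RTC_ prefix to each macro; for example,
LOG(LS_INFO) << "foo"; would become RTC_LOG(LS_INFO) << "foo";.)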

This CL has been automatically generated with:

for m in PLOG \
  LOG_TAG \
  LOG_GLEM \
  LOG_GLE_EX \
  LOG_GLE \
  LAST_SYSTEM_ERROR \
  LOG_ERRNO_EX \
  LOG_ERRNO \
  LOG_ERR_EX \
  LOG_ERR \
  LOG_V \
  LOG_F \
  LOG_T_F \
  LOG_E \
  LOG_T \
  LOG_CHECK_LEVEL_V \
  LOG_CHECK_LEVEL \
  LOG
do
  for d in media_file video_capture audio_device; do
    cd modules/$d
    git grep -l $m | grep -E "\.(cc|h|m|mm)$" | xargs sed -i "1 s/$/ /"
    cd ../..
  done
done
git cl format --full
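
For reference, the sed invocation above only appends a trailing space to the
first line of each matching file; that is enough for `git cl format --full`
to treat the file as modified and reformat it in full. A minimal single-pass
sketch of the same touch-and-format idea (grepping for the bare string LOG
also covers PLOG and every LOG_* macro above; assumes a WebRTC checkout with
depot_tools' `git cl` available):

# Touch every .cc/.h/.m/.mm file in the three directories that mentions a
# LOG macro (or LAST_SYSTEM_ERROR), then reformat the touched files.
git grep -l -E "LOG|LAST_SYSTEM_ERROR" -- \
  modules/audio_device modules/media_file modules/video_capture \
  | grep -E "\.(cc|h|m|mm)$" \
  | xargs sed -i "1 s/$/ /"
git cl format --full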

Bug: webrtc:8452
Change-Id: I2858b6928e6bd79957f2e5e0b07028eb68a304b2
Reviewed-on: https://webrtc-review.googlesource.com/21322
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Reviewed-by: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20613}
Mirko Bonadei 2017-11-09 09:33:23 +01:00 committed by Commit Bot
parent b52a4d90c2
commit 72c4250cab
30 changed files with 9736 additions and 11716 deletions


@ -100,18 +100,16 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
return 1;
}
int32_t PlayoutDeviceName(
uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override {
int32_t PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override {
FATAL() << "Should never be called";
return -1;
}
int32_t RecordingDeviceName(
uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override {
int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override {
FATAL() << "Should never be called";
return -1;
}
@ -215,9 +213,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
return err;
}
bool Recording() const override {
return input_.Recording() ;
}
bool Recording() const override { return input_.Recording(); }
int32_t SetAGC(bool enable) override {
if (enable) {
@ -276,7 +272,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
return output_.MinSpeakerVolume(minVolume);
}
int32_t MicrophoneVolumeIsAvailable(bool& available) override{
int32_t MicrophoneVolumeIsAvailable(bool& available) override {
available = false;
return -1;
}


@ -107,9 +107,9 @@ void AudioManager::SetActiveAudioLayer(
// that the user explicitly selects the high-latency audio path, hence we use
// the selected |audio_layer| here to set the delay estimate.
delay_estimate_in_milliseconds_ =
(audio_layer == AudioDeviceModule::kAndroidJavaAudio) ?
kHighLatencyModeDelayEstimateInMilliseconds :
kLowLatencyModeDelayEstimateInMilliseconds;
(audio_layer == AudioDeviceModule::kAndroidJavaAudio)
? kHighLatencyModeDelayEstimateInMilliseconds
: kLowLatencyModeDelayEstimateInMilliseconds;
ALOGD("delay_estimate_in_milliseconds: %d", delay_estimate_in_milliseconds_);
}
@ -201,8 +201,9 @@ bool AudioManager::IsLowLatencyPlayoutSupported() const {
ALOGD("IsLowLatencyPlayoutSupported()");
// Some devices are blacklisted for usage of OpenSL ES even if they report
// that low-latency playout is supported. See b/21485703 for details.
return j_audio_manager_->IsDeviceBlacklistedForOpenSLESUsage() ?
false : low_latency_playout_;
return j_audio_manager_->IsDeviceBlacklistedForOpenSLESUsage()
? false
: low_latency_playout_;
}
bool AudioManager::IsLowLatencyRecordSupported() const {


@ -41,8 +41,8 @@ AudioRecordJni::JavaAudioRecord::JavaAudioRecord(
AudioRecordJni::JavaAudioRecord::~JavaAudioRecord() {}
int AudioRecordJni::JavaAudioRecord::InitRecording(
int sample_rate, size_t channels) {
int AudioRecordJni::JavaAudioRecord::InitRecording(int sample_rate,
size_t channels) {
return audio_record_->CallIntMethod(init_recording_,
static_cast<jint>(sample_rate),
static_cast<jint>(channels));
@ -83,10 +83,10 @@ AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
RTC_CHECK(j_environment_);
JNINativeMethod native_methods[] = {
{"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
reinterpret_cast<void*>(
&webrtc::AudioRecordJni::CacheDirectBufferAddress)},
reinterpret_cast<void*>(
&webrtc::AudioRecordJni::CacheDirectBufferAddress)},
{"nativeDataIsRecorded", "(IJ)V",
reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
j_native_registration_ = j_environment_->RegisterNatives(
"org/webrtc/voiceengine/WebRtcAudioRecord", native_methods,
arraysize(native_methods));
@ -168,7 +168,7 @@ int32_t AudioRecordJni::StopRecording() {
thread_checker_java_.DetachFromThread();
initialized_ = false;
recording_ = false;
direct_buffer_address_= nullptr;
direct_buffer_address_ = nullptr;
return 0;
}
@ -206,29 +206,32 @@ int32_t AudioRecordJni::EnableBuiltInNS(bool enable) {
return j_audio_record_->EnableBuiltInNS(enable) ? 0 : -1;
}
void JNICALL AudioRecordJni::CacheDirectBufferAddress(
JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioRecord) {
void JNICALL AudioRecordJni::CacheDirectBufferAddress(JNIEnv* env,
jobject obj,
jobject byte_buffer,
jlong nativeAudioRecord) {
webrtc::AudioRecordJni* this_object =
reinterpret_cast<webrtc::AudioRecordJni*> (nativeAudioRecord);
reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
this_object->OnCacheDirectBufferAddress(env, byte_buffer);
}
void AudioRecordJni::OnCacheDirectBufferAddress(
JNIEnv* env, jobject byte_buffer) {
void AudioRecordJni::OnCacheDirectBufferAddress(JNIEnv* env,
jobject byte_buffer) {
ALOGD("OnCacheDirectBufferAddress");
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTC_DCHECK(!direct_buffer_address_);
direct_buffer_address_ =
env->GetDirectBufferAddress(byte_buffer);
direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
ALOGD("direct buffer capacity: %lld", capacity);
direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
}
void JNICALL AudioRecordJni::DataIsRecorded(
JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord) {
void JNICALL AudioRecordJni::DataIsRecorded(JNIEnv* env,
jobject obj,
jint length,
jlong nativeAudioRecord) {
webrtc::AudioRecordJni* this_object =
reinterpret_cast<webrtc::AudioRecordJni*> (nativeAudioRecord);
reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
this_object->OnDataIsRecorded(length);
}


@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/android/audio_track_jni.h"
#include "modules/audio_device/android/audio_manager.h"
#include <utility>
@ -82,10 +82,10 @@ AudioTrackJni::AudioTrackJni(AudioManager* audio_manager)
RTC_CHECK(j_environment_);
JNINativeMethod native_methods[] = {
{"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
reinterpret_cast<void*>(
&webrtc::AudioTrackJni::CacheDirectBufferAddress)},
reinterpret_cast<void*>(
&webrtc::AudioTrackJni::CacheDirectBufferAddress)},
{"nativeGetPlayoutData", "(IJ)V",
reinterpret_cast<void*>(&webrtc::AudioTrackJni::GetPlayoutData)}};
reinterpret_cast<void*>(&webrtc::AudioTrackJni::GetPlayoutData)}};
j_native_registration_ = j_environment_->RegisterNatives(
"org/webrtc/voiceengine/WebRtcAudioTrack", native_methods,
arraysize(native_methods));
@ -122,8 +122,8 @@ int32_t AudioTrackJni::InitPlayout() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTC_DCHECK(!initialized_);
RTC_DCHECK(!playing_);
if (!j_audio_track_->InitPlayout(
audio_parameters_.sample_rate(), audio_parameters_.channels())) {
if (!j_audio_track_->InitPlayout(audio_parameters_.sample_rate(),
audio_parameters_.channels())) {
ALOGE("InitPlayout failed!");
return -1;
}
@ -209,20 +209,21 @@ void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
audio_device_buffer_->SetPlayoutChannels(channels);
}
void JNICALL AudioTrackJni::CacheDirectBufferAddress(
JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioTrack) {
void JNICALL AudioTrackJni::CacheDirectBufferAddress(JNIEnv* env,
jobject obj,
jobject byte_buffer,
jlong nativeAudioTrack) {
webrtc::AudioTrackJni* this_object =
reinterpret_cast<webrtc::AudioTrackJni*> (nativeAudioTrack);
reinterpret_cast<webrtc::AudioTrackJni*>(nativeAudioTrack);
this_object->OnCacheDirectBufferAddress(env, byte_buffer);
}
void AudioTrackJni::OnCacheDirectBufferAddress(
JNIEnv* env, jobject byte_buffer) {
void AudioTrackJni::OnCacheDirectBufferAddress(JNIEnv* env,
jobject byte_buffer) {
ALOGD("OnCacheDirectBufferAddress");
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTC_DCHECK(!direct_buffer_address_);
direct_buffer_address_ =
env->GetDirectBufferAddress(byte_buffer);
direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
ALOGD("direct buffer capacity: %lld", capacity);
direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
@ -231,10 +232,12 @@ void AudioTrackJni::OnCacheDirectBufferAddress(
ALOGD("frames_per_buffer: %" PRIuS, frames_per_buffer_);
}
void JNICALL AudioTrackJni::GetPlayoutData(
JNIEnv* env, jobject obj, jint length, jlong nativeAudioTrack) {
void JNICALL AudioTrackJni::GetPlayoutData(JNIEnv* env,
jobject obj,
jint length,
jlong nativeAudioTrack) {
webrtc::AudioTrackJni* this_object =
reinterpret_cast<webrtc::AudioTrackJni*> (nativeAudioTrack);
reinterpret_cast<webrtc::AudioTrackJni*>(nativeAudioTrack);
this_object->OnGetPlayoutData(static_cast<size_t>(length));
}


@ -289,10 +289,10 @@ bool OpenSLESPlayer::CreateAudioPlayer() {
SLDataSink audio_sink = {&locator_output_mix, nullptr};
// Define interfaces that we indend to use and realize.
const SLInterfaceID interface_ids[] = {
SL_IID_ANDROIDCONFIGURATION, SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
const SLboolean interface_required[] = {
SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
const SLInterfaceID interface_ids[] = {SL_IID_ANDROIDCONFIGURATION,
SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
SL_BOOLEAN_TRUE};
// Create the audio player on the engine interface.
RETURN_ON_ERROR(


@ -197,7 +197,7 @@ class AudioDeviceBuffer {
// dynamically.
rtc::BufferT<int16_t> rec_buffer_ RTC_ACCESS_ON(recording_thread_checker_);
// AGC parameters.
// AGC parameters.
#if !defined(WEBRTC_WIN)
uint32_t current_mic_level_ RTC_ACCESS_ON(recording_thread_checker_);
#else


@ -386,7 +386,7 @@ int32_t AudioDeviceModuleImpl::MinSpeakerVolume(uint32_t* minVolume) const {
int32_t AudioDeviceModuleImpl::SpeakerMuteIsAvailable(bool* available) {
LOG(INFO) << __FUNCTION__;
CHECKinitialized_();
bool isAvailable = false;
bool isAvailable = false;
if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) {
return -1;
}


@ -26,24 +26,23 @@ const size_t kRecordingBufferSize =
kRecordingFixedSampleRate / 100 * kRecordingNumChannels * 2;
FileAudioDevice::FileAudioDevice(const char* inputFilename,
const char* outputFilename):
_ptrAudioBuffer(NULL),
_recordingBuffer(NULL),
_playoutBuffer(NULL),
_recordingFramesLeft(0),
_playoutFramesLeft(0),
_recordingBufferSizeIn10MS(0),
_recordingFramesIn10MS(0),
_playoutFramesIn10MS(0),
_playing(false),
_recording(false),
_lastCallPlayoutMillis(0),
_lastCallRecordMillis(0),
_outputFile(*FileWrapper::Create()),
_inputFile(*FileWrapper::Create()),
_outputFilename(outputFilename),
_inputFilename(inputFilename) {
}
const char* outputFilename)
: _ptrAudioBuffer(NULL),
_recordingBuffer(NULL),
_playoutBuffer(NULL),
_recordingFramesLeft(0),
_playoutFramesLeft(0),
_recordingBufferSizeIn10MS(0),
_recordingFramesIn10MS(0),
_playoutFramesIn10MS(0),
_playing(false),
_recording(false),
_lastCallPlayoutMillis(0),
_lastCallRecordMillis(0),
_outputFile(*FileWrapper::Create()),
_inputFile(*FileWrapper::Create()),
_outputFilename(outputFilename),
_inputFilename(inputFilename) {}
FileAudioDevice::~FileAudioDevice() {
delete &_outputFile;
@ -59,9 +58,13 @@ AudioDeviceGeneric::InitStatus FileAudioDevice::Init() {
return InitStatus::OK;
}
int32_t FileAudioDevice::Terminate() { return 0; }
int32_t FileAudioDevice::Terminate() {
return 0;
}
bool FileAudioDevice::Initialized() const { return true; }
bool FileAudioDevice::Initialized() const {
return true;
}
int16_t FileAudioDevice::PlayoutDevices() {
return 1;
@ -72,8 +75,8 @@ int16_t FileAudioDevice::RecordingDevices() {
}
int32_t FileAudioDevice::PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) {
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) {
const char* kName = "dummy_device";
const char* kGuid = "dummy_device_unique_id";
if (index < 1) {
@ -87,8 +90,8 @@ int32_t FileAudioDevice::PlayoutDeviceName(uint16_t index,
}
int32_t FileAudioDevice::RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) {
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) {
const char* kName = "dummy_device";
const char* kGuid = "dummy_device_unique_id";
if (index < 1) {
@ -138,9 +141,9 @@ int32_t FileAudioDevice::PlayoutIsAvailable(bool& available) {
int32_t FileAudioDevice::InitPlayout() {
if (_ptrAudioBuffer) {
// Update webrtc audio buffer with the selected parameters
_ptrAudioBuffer->SetPlayoutSampleRate(kPlayoutFixedSampleRate);
_ptrAudioBuffer->SetPlayoutChannels(kPlayoutNumChannels);
// Update webrtc audio buffer with the selected parameters
_ptrAudioBuffer->SetPlayoutSampleRate(kPlayoutFixedSampleRate);
_ptrAudioBuffer->SetPlayoutChannels(kPlayoutNumChannels);
}
return 0;
}
@ -180,7 +183,7 @@ bool FileAudioDevice::RecordingIsInitialized() const {
int32_t FileAudioDevice::StartPlayout() {
if (_playing) {
return 0;
return 0;
}
_playoutFramesIn10MS = static_cast<size_t>(kPlayoutFixedSampleRate / 100);
@ -188,7 +191,7 @@ int32_t FileAudioDevice::StartPlayout() {
_playoutFramesLeft = 0;
if (!_playoutBuffer) {
_playoutBuffer = new int8_t[kPlayoutBufferSize];
_playoutBuffer = new int8_t[kPlayoutBufferSize];
}
if (!_playoutBuffer) {
_playing = false;
@ -200,7 +203,7 @@ int32_t FileAudioDevice::StartPlayout() {
!_outputFile.OpenFile(_outputFilename.c_str(), false)) {
LOG(LS_ERROR) << "Failed to open playout file: " << _outputFilename;
_playing = false;
delete [] _playoutBuffer;
delete[] _playoutBuffer;
_playoutBuffer = NULL;
return -1;
}
@ -210,32 +213,30 @@ int32_t FileAudioDevice::StartPlayout() {
_ptrThreadPlay->Start();
_ptrThreadPlay->SetPriority(rtc::kRealtimePriority);
LOG(LS_INFO) << "Started playout capture to output file: "
<< _outputFilename;
LOG(LS_INFO) << "Started playout capture to output file: " << _outputFilename;
return 0;
}
int32_t FileAudioDevice::StopPlayout() {
{
rtc::CritScope lock(&_critSect);
_playing = false;
rtc::CritScope lock(&_critSect);
_playing = false;
}
// stop playout thread first
if (_ptrThreadPlay) {
_ptrThreadPlay->Stop();
_ptrThreadPlay.reset();
_ptrThreadPlay->Stop();
_ptrThreadPlay.reset();
}
rtc::CritScope lock(&_critSect);
_playoutFramesLeft = 0;
delete [] _playoutBuffer;
delete[] _playoutBuffer;
_playoutBuffer = NULL;
_outputFile.CloseFile();
LOG(LS_INFO) << "Stopped playout capture to output file: "
<< _outputFilename;
LOG(LS_INFO) << "Stopped playout capture to output file: " << _outputFilename;
return 0;
}
@ -247,11 +248,10 @@ int32_t FileAudioDevice::StartRecording() {
_recording = true;
// Make sure we only create the buffer once.
_recordingBufferSizeIn10MS = _recordingFramesIn10MS *
kRecordingNumChannels *
2;
_recordingBufferSizeIn10MS =
_recordingFramesIn10MS * kRecordingNumChannels * 2;
if (!_recordingBuffer) {
_recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
_recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
}
if (!_inputFilename.empty() &&
@ -269,13 +269,11 @@ int32_t FileAudioDevice::StartRecording() {
_ptrThreadRec->Start();
_ptrThreadRec->SetPriority(rtc::kRealtimePriority);
LOG(LS_INFO) << "Started recording from input file: "
<< _inputFilename;
LOG(LS_INFO) << "Started recording from input file: " << _inputFilename;
return 0;
}
int32_t FileAudioDevice::StopRecording() {
{
rtc::CritScope lock(&_critSect);
@ -283,20 +281,19 @@ int32_t FileAudioDevice::StopRecording() {
}
if (_ptrThreadRec) {
_ptrThreadRec->Stop();
_ptrThreadRec.reset();
_ptrThreadRec->Stop();
_ptrThreadRec.reset();
}
rtc::CritScope lock(&_critSect);
_recordingFramesLeft = 0;
if (_recordingBuffer) {
delete [] _recordingBuffer;
_recordingBuffer = NULL;
delete[] _recordingBuffer;
_recordingBuffer = NULL;
}
_inputFile.CloseFile();
LOG(LS_INFO) << "Stopped recording from input file: "
<< _inputFilename;
LOG(LS_INFO) << "Stopped recording from input file: " << _inputFilename;
return 0;
}
@ -304,25 +301,41 @@ bool FileAudioDevice::Recording() const {
return _recording;
}
int32_t FileAudioDevice::SetAGC(bool enable) { return -1; }
int32_t FileAudioDevice::SetAGC(bool enable) {
return -1;
}
bool FileAudioDevice::AGC() const { return false; }
bool FileAudioDevice::AGC() const {
return false;
}
int32_t FileAudioDevice::InitSpeaker() { return -1; }
int32_t FileAudioDevice::InitSpeaker() {
return -1;
}
bool FileAudioDevice::SpeakerIsInitialized() const { return false; }
bool FileAudioDevice::SpeakerIsInitialized() const {
return false;
}
int32_t FileAudioDevice::InitMicrophone() { return 0; }
int32_t FileAudioDevice::InitMicrophone() {
return 0;
}
bool FileAudioDevice::MicrophoneIsInitialized() const { return true; }
bool FileAudioDevice::MicrophoneIsInitialized() const {
return true;
}
int32_t FileAudioDevice::SpeakerVolumeIsAvailable(bool& available) {
return -1;
}
int32_t FileAudioDevice::SetSpeakerVolume(uint32_t volume) { return -1; }
int32_t FileAudioDevice::SetSpeakerVolume(uint32_t volume) {
return -1;
}
int32_t FileAudioDevice::SpeakerVolume(uint32_t& volume) const { return -1; }
int32_t FileAudioDevice::SpeakerVolume(uint32_t& volume) const {
return -1;
}
int32_t FileAudioDevice::MaxSpeakerVolume(uint32_t& maxVolume) const {
return -1;
@ -336,7 +349,9 @@ int32_t FileAudioDevice::MicrophoneVolumeIsAvailable(bool& available) {
return -1;
}
int32_t FileAudioDevice::SetMicrophoneVolume(uint32_t volume) { return -1; }
int32_t FileAudioDevice::SetMicrophoneVolume(uint32_t volume) {
return -1;
}
int32_t FileAudioDevice::MicrophoneVolume(uint32_t& volume) const {
return -1;
@ -350,19 +365,29 @@ int32_t FileAudioDevice::MinMicrophoneVolume(uint32_t& minVolume) const {
return -1;
}
int32_t FileAudioDevice::SpeakerMuteIsAvailable(bool& available) { return -1; }
int32_t FileAudioDevice::SpeakerMuteIsAvailable(bool& available) {
return -1;
}
int32_t FileAudioDevice::SetSpeakerMute(bool enable) { return -1; }
int32_t FileAudioDevice::SetSpeakerMute(bool enable) {
return -1;
}
int32_t FileAudioDevice::SpeakerMute(bool& enabled) const { return -1; }
int32_t FileAudioDevice::SpeakerMute(bool& enabled) const {
return -1;
}
int32_t FileAudioDevice::MicrophoneMuteIsAvailable(bool& available) {
return -1;
}
int32_t FileAudioDevice::SetMicrophoneMute(bool enable) { return -1; }
int32_t FileAudioDevice::SetMicrophoneMute(bool enable) {
return -1;
}
int32_t FileAudioDevice::MicrophoneMute(bool& enabled) const { return -1; }
int32_t FileAudioDevice::MicrophoneMute(bool& enabled) const {
return -1;
}
int32_t FileAudioDevice::StereoPlayoutIsAvailable(bool& available) {
available = true;
@ -409,81 +434,76 @@ void FileAudioDevice::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
_ptrAudioBuffer->SetPlayoutChannels(0);
}
bool FileAudioDevice::PlayThreadFunc(void* pThis)
{
return (static_cast<FileAudioDevice*>(pThis)->PlayThreadProcess());
bool FileAudioDevice::PlayThreadFunc(void* pThis) {
return (static_cast<FileAudioDevice*>(pThis)->PlayThreadProcess());
}
bool FileAudioDevice::RecThreadFunc(void* pThis)
{
return (static_cast<FileAudioDevice*>(pThis)->RecThreadProcess());
bool FileAudioDevice::RecThreadFunc(void* pThis) {
return (static_cast<FileAudioDevice*>(pThis)->RecThreadProcess());
}
bool FileAudioDevice::PlayThreadProcess()
{
if (!_playing) {
return false;
}
int64_t currentTime = rtc::TimeMillis();
_critSect.Enter();
bool FileAudioDevice::PlayThreadProcess() {
if (!_playing) {
return false;
}
int64_t currentTime = rtc::TimeMillis();
_critSect.Enter();
if (_lastCallPlayoutMillis == 0 ||
currentTime - _lastCallPlayoutMillis >= 10) {
_critSect.Leave();
_ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
_critSect.Enter();
_playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
RTC_DCHECK_EQ(_playoutFramesIn10MS, _playoutFramesLeft);
if (_outputFile.is_open()) {
_outputFile.Write(_playoutBuffer, kPlayoutBufferSize);
}
_lastCallPlayoutMillis = currentTime;
}
_playoutFramesLeft = 0;
if (_lastCallPlayoutMillis == 0 ||
currentTime - _lastCallPlayoutMillis >= 10) {
_critSect.Leave();
int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
if (deltaTimeMillis < 10) {
SleepMs(10 - deltaTimeMillis);
}
return true;
}
bool FileAudioDevice::RecThreadProcess()
{
if (!_recording) {
return false;
}
int64_t currentTime = rtc::TimeMillis();
_ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
_critSect.Enter();
if (_lastCallRecordMillis == 0 ||
currentTime - _lastCallRecordMillis >= 10) {
if (_inputFile.is_open()) {
if (_inputFile.Read(_recordingBuffer, kRecordingBufferSize) > 0) {
_ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
_recordingFramesIn10MS);
} else {
_inputFile.Rewind();
}
_lastCallRecordMillis = currentTime;
_critSect.Leave();
_ptrAudioBuffer->DeliverRecordedData();
_critSect.Enter();
_playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
RTC_DCHECK_EQ(_playoutFramesIn10MS, _playoutFramesLeft);
if (_outputFile.is_open()) {
_outputFile.Write(_playoutBuffer, kPlayoutBufferSize);
}
_lastCallPlayoutMillis = currentTime;
}
_playoutFramesLeft = 0;
_critSect.Leave();
int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
if (deltaTimeMillis < 10) {
SleepMs(10 - deltaTimeMillis);
}
return true;
}
bool FileAudioDevice::RecThreadProcess() {
if (!_recording) {
return false;
}
int64_t currentTime = rtc::TimeMillis();
_critSect.Enter();
if (_lastCallRecordMillis == 0 || currentTime - _lastCallRecordMillis >= 10) {
if (_inputFile.is_open()) {
if (_inputFile.Read(_recordingBuffer, kRecordingBufferSize) > 0) {
_ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
_recordingFramesIn10MS);
} else {
_inputFile.Rewind();
}
_lastCallRecordMillis = currentTime;
_critSect.Leave();
_ptrAudioBuffer->DeliverRecordedData();
_critSect.Enter();
}
}
_critSect.Leave();
_critSect.Leave();
int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
if (deltaTimeMillis < 10) {
SleepMs(10 - deltaTimeMillis);
}
int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime;
if (deltaTimeMillis < 10) {
SleepMs(10 - deltaTimeMillis);
}
return true;
return true;
}
} // namespace webrtc


@ -36,7 +36,8 @@ FileAudioDevice* FileAudioDeviceFactory::CreateFileAudioDevice() {
}
void FileAudioDeviceFactory::SetFilenamesToUse(
const char* inputAudioFilename, const char* outputAudioFilename) {
const char* inputAudioFilename,
const char* outputAudioFilename) {
#ifdef WEBRTC_DUMMY_FILE_DEVICES
RTC_DCHECK_LT(strlen(inputAudioFilename), MAX_FILENAME_LEN);
RTC_DCHECK_LT(strlen(outputAudioFilename), MAX_FILENAME_LEN);
@ -47,8 +48,9 @@ void FileAudioDeviceFactory::SetFilenamesToUse(
_isConfigured = true;
#else
// Sanity: must be compiled with the right define to run this.
printf("Trying to use dummy file devices, but is not compiled "
"with WEBRTC_DUMMY_FILE_DEVICES. Bailing out.\n");
printf(
"Trying to use dummy file devices, but is not compiled "
"with WEBRTC_DUMMY_FILE_DEVICES. Bailing out.\n");
std::exit(1);
#endif
}


@ -34,7 +34,6 @@
#import "sdk/objc/Framework/Headers/WebRTC/RTCAudioSession.h"
#import "sdk/objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h"
namespace webrtc {
#define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"
@ -56,7 +55,6 @@ namespace webrtc {
} \
} while (0)
// Hardcoded delay estimates based on real measurements.
// TODO(henrika): these value is not used in combination with built-in AEC.
// Can most likely be removed.
@ -93,8 +91,8 @@ static void LogDeviceInfo() {
LOG(LS_INFO) << " process ID: " << ios::GetProcessID();
LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString();
LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount();
#if defined(__IPHONE_9_0) && defined(__IPHONE_OS_VERSION_MAX_ALLOWED) \
&& __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
#if defined(__IPHONE_9_0) && defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && \
__IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
#endif
#if TARGET_IPHONE_SIMULATOR
@ -121,8 +119,7 @@ AudioDeviceIOS::AudioDeviceIOS()
LOGI() << "ctor" << ios::GetCurrentThreadDescription();
io_thread_checker_.DetachFromThread();
thread_ = rtc::Thread::Current();
audio_session_observer_ =
[[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this];
audio_session_observer_ = [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this];
}
AudioDeviceIOS::~AudioDeviceIOS() {
@ -152,12 +149,9 @@ AudioDeviceGeneric::InitStatus AudioDeviceIOS::Init() {
// here. They have not been set and confirmed yet since configureForWebRTC
// is not called until audio is about to start. However, it makes sense to
// store the parameters now and then verify at a later stage.
RTCAudioSessionConfiguration* config =
[RTCAudioSessionConfiguration webRTCConfiguration];
playout_parameters_.reset(config.sampleRate,
config.outputNumberOfChannels);
record_parameters_.reset(config.sampleRate,
config.inputNumberOfChannels);
RTCAudioSessionConfiguration* config = [RTCAudioSessionConfiguration webRTCConfiguration];
playout_parameters_.reset(config.sampleRate, config.outputNumberOfChannels);
record_parameters_.reset(config.sampleRate, config.inputNumberOfChannels);
// Ensure that the audio device buffer (ADB) knows about the internal audio
// parameters. Note that, even if we are unable to get a mono audio session,
// we will always tell the I/O audio unit to do a channel format conversion
@ -235,8 +229,7 @@ int32_t AudioDeviceIOS::StartPlayout() {
if (fine_audio_buffer_) {
fine_audio_buffer_->ResetPlayout();
}
if (!recording_ &&
audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
if (!recording_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
if (!audio_unit_->Start()) {
RTCLogError(@"StartPlayout failed to start audio unit.");
return -1;
@ -269,9 +262,8 @@ int32_t AudioDeviceIOS::StopPlayout() {
average_number_of_playout_callbacks_between_glitches =
num_playout_callbacks_ / num_detected_playout_glitches_;
}
RTC_HISTOGRAM_COUNTS_100000(
"WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches",
average_number_of_playout_callbacks_between_glitches);
RTC_HISTOGRAM_COUNTS_100000("WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches",
average_number_of_playout_callbacks_between_glitches);
RTCLog(@"Average number of playout callbacks between glitches: %d",
average_number_of_playout_callbacks_between_glitches);
return 0;
@ -286,8 +278,7 @@ int32_t AudioDeviceIOS::StartRecording() {
if (fine_audio_buffer_) {
fine_audio_buffer_->ResetRecord();
}
if (!playing_ &&
audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
if (!playing_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
if (!audio_unit_->Start()) {
RTCLogError(@"StartRecording failed to start audio unit.");
return -1;
@ -333,9 +324,8 @@ int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
options = AVAudioSessionCategoryOptionDefaultToSpeaker;
}
NSError* error = nil;
BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
withOptions:options
error:&error];
BOOL success =
[session setCategory:AVAudioSessionCategoryPlayAndRecord withOptions:options error:&error];
ios::CheckAndLogError(success, error);
[session unlockForConfiguration];
return (error == nil) ? 0 : -1;
@ -389,7 +379,9 @@ void AudioDeviceIOS::OnValidRouteChange() {
void AudioDeviceIOS::OnCanPlayOrRecordChange(bool can_play_or_record) {
RTC_DCHECK(thread_);
thread_->Post(RTC_FROM_HERE, this, kMessageTypeCanPlayOrRecordChange,
thread_->Post(RTC_FROM_HERE,
this,
kMessageTypeCanPlayOrRecordChange,
new rtc::TypedMessageData<bool>(can_play_or_record));
}
@ -406,11 +398,9 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
RTC_DCHECK_RUN_ON(&io_thread_checker_);
OSStatus result = noErr;
// Simply return if recording is not enabled.
if (!rtc::AtomicOps::AcquireLoad(&recording_))
return result;
if (!rtc::AtomicOps::AcquireLoad(&recording_)) return result;
const size_t num_bytes =
num_frames * VoiceProcessingAudioUnit::kBytesPerSample;
const size_t num_bytes = num_frames * VoiceProcessingAudioUnit::kBytesPerSample;
// Set the size of our own audio buffer and clear it first to avoid copying
// in combination with potential reallocations.
// On real iOS devices, the size will only be set once (at first callback).
@ -435,8 +425,7 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
// We can make the audio unit provide a buffer instead in io_data, but we
// currently just use our own.
// TODO(henrika): should error handling be improved?
result = audio_unit_->Render(
flags, time_stamp, bus_number, num_frames, &audio_buffer_list);
result = audio_unit_->Render(flags, time_stamp, bus_number, num_frames, &audio_buffer_list);
if (result != noErr) {
RTCLogError(@"Failed to render audio.");
return result;
@ -445,9 +434,8 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
// Get a pointer to the recorded audio and send it to the WebRTC ADB.
// Use the FineAudioBuffer instance to convert between native buffer size
// and the 10ms buffer size used by WebRTC.
fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_,
kFixedPlayoutDelayEstimate,
kFixedRecordDelayEstimate);
fine_audio_buffer_->DeliverRecordedData(
record_audio_buffer_, kFixedPlayoutDelayEstimate, kFixedRecordDelayEstimate);
return noErr;
}
@ -465,8 +453,7 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
// Get pointer to internal audio buffer to which new audio data shall be
// written.
const size_t size_in_bytes = audio_buffer->mDataByteSize;
RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample,
num_frames);
RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames);
int8_t* destination = reinterpret_cast<int8_t*>(audio_buffer->mData);
// Produce silence and give audio unit a hint about it if playout is not
// activated.
@ -508,12 +495,11 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
// Read decoded 16-bit PCM samples from WebRTC (using a size that matches
// the native I/O audio unit) and copy the result to the audio buffer in the
// |io_data| destination.
fine_audio_buffer_->GetPlayoutData(
rtc::ArrayView<int8_t>(destination, size_in_bytes));
fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<int8_t>(destination, size_in_bytes));
return noErr;
}
void AudioDeviceIOS::OnMessage(rtc::Message *msg) {
void AudioDeviceIOS::OnMessage(rtc::Message* msg) {
switch (msg->message_id) {
case kMessageTypeInterruptionBegin:
HandleInterruptionBegin();
@ -525,8 +511,7 @@ void AudioDeviceIOS::OnMessage(rtc::Message *msg) {
HandleValidRouteChange();
break;
case kMessageTypeCanPlayOrRecordChange: {
rtc::TypedMessageData<bool>* data =
static_cast<rtc::TypedMessageData<bool>*>(msg->pdata);
rtc::TypedMessageData<bool>* data = static_cast<rtc::TypedMessageData<bool>*>(msg->pdata);
HandleCanPlayOrRecordChange(data->data());
delete data;
break;
@ -542,10 +527,8 @@ void AudioDeviceIOS::OnMessage(rtc::Message *msg) {
void AudioDeviceIOS::HandleInterruptionBegin() {
RTC_DCHECK_RUN_ON(&thread_checker_);
RTCLog(@"Interruption begin. IsInterrupted changed from %d to 1.",
is_interrupted_);
if (audio_unit_ &&
audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
RTCLog(@"Interruption begin. IsInterrupted changed from %d to 1.", is_interrupted_);
if (audio_unit_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
RTCLog(@"Stopping the audio unit due to interruption begin.");
if (!audio_unit_->Stop()) {
RTCLogError(@"Failed to stop the audio unit for interruption begin.");
@ -566,7 +549,8 @@ void AudioDeviceIOS::HandleInterruptionBegin() {
void AudioDeviceIOS::HandleInterruptionEnd() {
RTC_DCHECK_RUN_ON(&thread_checker_);
RTCLog(@"Interruption ended. IsInterrupted changed from %d to 0. "
"Updating audio unit state.", is_interrupted_);
"Updating audio unit state.",
is_interrupted_);
is_interrupted_ = false;
UpdateAudioUnit([RTCAudioSession sharedInstance].canPlayOrRecord);
}
@ -589,15 +573,13 @@ void AudioDeviceIOS::HandleSampleRateChange(float sample_rate) {
// Don't do anything if we're interrupted.
if (is_interrupted_) {
RTCLog(@"Ignoring sample rate change to %f due to interruption.",
sample_rate);
RTCLog(@"Ignoring sample rate change to %f due to interruption.", sample_rate);
return;
}
// If we don't have an audio unit yet, or the audio unit is uninitialized,
// there is no work to do.
if (!audio_unit_ ||
audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
if (!audio_unit_ || audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
return;
}
@ -609,8 +591,7 @@ void AudioDeviceIOS::HandleSampleRateChange(float sample_rate) {
const size_t session_frames_per_buffer =
static_cast<size_t>(session_sample_rate * session_buffer_duration + .5);
const double current_sample_rate = playout_parameters_.sample_rate();
const size_t current_frames_per_buffer =
playout_parameters_.frames_per_buffer();
const size_t current_frames_per_buffer = playout_parameters_.frames_per_buffer();
RTCLog(@"Handling playout sample rate change to: %f\n"
" Session sample rate: %f frames_per_buffer: %lu\n"
" ADM sample rate: %f frames_per_buffer: %lu",
@ -652,15 +633,13 @@ void AudioDeviceIOS::HandleSampleRateChange(float sample_rate) {
// Initialize the audio unit again with the new sample rate.
RTC_DCHECK_EQ(playout_parameters_.sample_rate(), session_sample_rate);
if (!audio_unit_->Initialize(session_sample_rate)) {
RTCLogError(@"Failed to initialize the audio unit with sample rate: %f",
session_sample_rate);
RTCLogError(@"Failed to initialize the audio unit with sample rate: %f", session_sample_rate);
return;
}
// Restart the audio unit if it was already running.
if (restart_audio_unit && !audio_unit_->Start()) {
RTCLogError(@"Failed to start audio unit with sample rate: %f",
session_sample_rate);
RTCLogError(@"Failed to start audio unit with sample rate: %f", session_sample_rate);
return;
}
RTCLog(@"Successfully handled sample rate change.");
@ -682,8 +661,7 @@ void AudioDeviceIOS::HandlePlayoutGlitchDetected() {
return;
}
num_detected_playout_glitches_++;
RTCLog(@"Number of detected playout glitches: %lld",
num_detected_playout_glitches_);
RTCLog(@"Number of detected playout glitches: %lld", num_detected_playout_glitches_);
int64_t glitch_count = num_detected_playout_glitches_;
dispatch_async(dispatch_get_main_queue(), ^{
@ -712,8 +690,7 @@ void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
// Inform the audio device buffer (ADB) about the new audio format.
audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
audio_device_buffer_->SetRecordingSampleRate(
record_parameters_.sample_rate());
audio_device_buffer_->SetRecordingSampleRate(record_parameters_.sample_rate());
audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
}
@ -729,8 +706,7 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
// hardware sample rate but continue and use the non-ideal sample rate after
// reinitializing the audio parameters. Most BT headsets only support 8kHz or
// 16kHz.
RTCAudioSessionConfiguration* webRTCConfig =
[RTCAudioSessionConfiguration webRTCConfiguration];
RTCAudioSessionConfiguration* webRTCConfig = [RTCAudioSessionConfiguration webRTCConfiguration];
if (sample_rate != webRTCConfig.sampleRate) {
LOG(LS_WARNING) << "Unable to set the preferred sample rate";
}
@ -740,18 +716,13 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
// number of audio frames.
// Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
// Hence, 128 is the size we expect to see in upcoming render callbacks.
playout_parameters_.reset(sample_rate, playout_parameters_.channels(),
io_buffer_duration);
playout_parameters_.reset(sample_rate, playout_parameters_.channels(), io_buffer_duration);
RTC_DCHECK(playout_parameters_.is_complete());
record_parameters_.reset(sample_rate, record_parameters_.channels(),
io_buffer_duration);
record_parameters_.reset(sample_rate, record_parameters_.channels(), io_buffer_duration);
RTC_DCHECK(record_parameters_.is_complete());
LOG(LS_INFO) << " frames per I/O buffer: "
<< playout_parameters_.frames_per_buffer();
LOG(LS_INFO) << " bytes per I/O buffer: "
<< playout_parameters_.GetBytesPerBuffer();
RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(),
record_parameters_.GetBytesPerBuffer());
LOG(LS_INFO) << " frames per I/O buffer: " << playout_parameters_.frames_per_buffer();
LOG(LS_INFO) << " bytes per I/O buffer: " << playout_parameters_.GetBytesPerBuffer();
RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(), record_parameters_.GetBytesPerBuffer());
// Update the ADB parameters since the sample rate might have changed.
UpdateAudioDeviceBuffer();
@ -781,7 +752,8 @@ bool AudioDeviceIOS::CreateAudioUnit() {
void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) {
RTC_DCHECK_RUN_ON(&thread_checker_);
RTCLog(@"Updating audio unit state. CanPlayOrRecord=%d IsInterrupted=%d",
can_play_or_record, is_interrupted_);
can_play_or_record,
is_interrupted_);
if (is_interrupted_) {
RTCLog(@"Ignoring audio unit update due to interruption.");
@ -790,8 +762,7 @@ void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) {
// If we're not initialized we don't need to do anything. Audio unit will
// be initialized on initialization.
if (!audio_is_initialized_)
return;
if (!audio_is_initialized_) return;
// If we're initialized, we must have an audio unit.
RTC_DCHECK(audio_unit_);
@ -809,13 +780,11 @@ void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) {
case VoiceProcessingAudioUnit::kUninitialized:
RTCLog(@"VPAU state: Uninitialized");
should_initialize_audio_unit = can_play_or_record;
should_start_audio_unit = should_initialize_audio_unit &&
(playing_ || recording_);
should_start_audio_unit = should_initialize_audio_unit && (playing_ || recording_);
break;
case VoiceProcessingAudioUnit::kInitialized:
RTCLog(@"VPAU state: Initialized");
should_start_audio_unit =
can_play_or_record && (playing_ || recording_);
should_start_audio_unit = can_play_or_record && (playing_ || recording_);
should_uninitialize_audio_unit = !can_play_or_record;
break;
case VoiceProcessingAudioUnit::kStarted:
@ -916,8 +885,7 @@ bool AudioDeviceIOS::InitPlayOrRecord() {
NSError* error = nil;
if (![session beginWebRTCSession:&error]) {
[session unlockForConfiguration];
RTCLogError(@"Failed to begin WebRTC session: %@",
error.localizedDescription);
RTCLogError(@"Failed to begin WebRTC session: %@", error.localizedDescription);
return false;
}


@ -15,8 +15,7 @@
namespace webrtc {
int32_t AudioDeviceIOS::ActiveAudioLayer(
AudioDeviceModule::AudioLayer& audioLayer) const {
int32_t AudioDeviceIOS::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const {
audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
return 0;
}
@ -199,8 +198,7 @@ int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
return 0;
}
int32_t AudioDeviceIOS::SetRecordingDevice(
AudioDeviceModule::WindowsDeviceType) {
int32_t AudioDeviceIOS::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType) {
RTC_NOTREACHED() << "Not implemented";
return -1;
}

File diff suppressed because it is too large.


@ -2136,8 +2136,7 @@ bool AudioDeviceLinuxPulse::PlayThreadProcess() {
NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
_writeErrors++;
if (_writeErrors > 10) {
LOG(LS_ERROR) << "Playout error: _writeErrors="
<< _writeErrors
LOG(LS_ERROR) << "Playout error: _writeErrors=" << _writeErrors
<< ", error=" << LATE(pa_context_errno)(_paContext);
_writeErrors = 0;
}
@ -2180,8 +2179,7 @@ bool AudioDeviceLinuxPulse::PlayThreadProcess() {
NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
_writeErrors++;
if (_writeErrors > 10) {
LOG(LS_ERROR) << "Playout error: _writeErrors="
<< _writeErrors
LOG(LS_ERROR) << "Playout error: _writeErrors=" << _writeErrors
<< ", error=" << LATE(pa_context_errno)(_paContext);
_writeErrors = 0;
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -19,9 +19,9 @@
namespace webrtc {
namespace adm_linux {
inline static const char *GetDllError() {
inline static const char* GetDllError() {
#ifdef WEBRTC_LINUX
char *err = dlerror();
char* err = dlerror();
if (err) {
return err;
} else {
@ -64,11 +64,11 @@ void InternalUnloadDll(DllHandle handle) {
}
static bool LoadSymbol(DllHandle handle,
const char *symbol_name,
void **symbol) {
const char* symbol_name,
void** symbol) {
#ifdef WEBRTC_LINUX
*symbol = dlsym(handle, symbol_name);
char *err = dlerror();
char* err = dlerror();
if (err) {
LOG(LS_ERROR) << "Error loading symbol " << symbol_name << " : " << err;
return false;
@ -87,8 +87,8 @@ static bool LoadSymbol(DllHandle handle,
// caller may later interpret as a valid address.
bool InternalLoadSymbols(DllHandle handle,
int num_symbols,
const char *const symbol_names[],
void *symbols[]) {
const char* const symbol_names[],
void* symbols[]) {
#ifdef WEBRTC_LINUX
// Clear any old errors.
dlerror();


@ -23,34 +23,31 @@
namespace webrtc {
#define WEBRTC_CA_RETURN_ON_ERR(expr) \
#define WEBRTC_CA_RETURN_ON_ERR(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
return -1; \
} \
} while (0)
#define WEBRTC_CA_LOG_ERR(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
} \
} while (0)
#define WEBRTC_CA_LOG_WARN(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(rtc::LS_ERROR, "Error in " #expr, \
(const char*) & err); \
return -1; \
logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
} \
} while (0)
#define WEBRTC_CA_LOG_ERR(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(rtc::LS_ERROR, "Error in " #expr, \
(const char*) & err); \
} \
} while (0)
#define WEBRTC_CA_LOG_WARN(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(rtc::LS_WARNING, "Error in " #expr, \
(const char*) & err); \
} \
} while (0)
enum { MaxNumberDevices = 64 };
void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) {
@ -94,7 +91,7 @@ void AudioDeviceMac::logCAMsg(const rtc::LoggingSeverity sev,
}
#else
// We need to flip the characters in this case.
switch (sev) {
switch (sev) {
case rtc::LS_ERROR:
LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
break;
@ -373,8 +370,8 @@ int32_t AudioDeviceMac::Terminate() {
err = AudioHardwareUnload();
if (err != noErr) {
logCAMsg(rtc::LS_ERROR,
"Error in AudioHardwareUnload()", (const char*)&err);
logCAMsg(rtc::LS_ERROR, "Error in AudioHardwareUnload()",
(const char*)&err);
retVal = -1;
}
@ -1038,8 +1035,7 @@ int32_t AudioDeviceMac::InitPlayout() {
_outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat));
if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) {
logCAMsg(rtc::LS_ERROR,
"Unacceptable output stream format -> mFormatID",
logCAMsg(rtc::LS_ERROR, "Unacceptable output stream format -> mFormatID",
(const char*)&_outStreamFormat.mFormatID);
return -1;
}
@ -1146,8 +1142,7 @@ int32_t AudioDeviceMac::InitRecording() {
_inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat));
if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) {
logCAMsg(rtc::LS_ERROR,
"Unacceptable input stream format -> mFormatID",
logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
(const char*)&_inStreamFormat.mFormatID);
return -1;
}
@ -1348,12 +1343,11 @@ int32_t AudioDeviceMac::StopRecording() {
_critSect.Leave(); // Cannot be under lock, risk of deadlock
if (kEventTimeout == _stopEventRec.Wait(2000)) {
rtc::CritScope critScoped(&_critSect);
LOG(LS_WARNING)
<< "Timed out stopping the capture IOProc."
<< "We may have failed to detect a device removal.";
LOG(LS_WARNING) << "Timed out stopping the capture IOProc."
<< "We may have failed to detect a device removal.";
WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
WEBRTC_CA_LOG_WARN(
AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
}
_critSect.Enter();
_doStopRec = false;
@ -1377,9 +1371,8 @@ int32_t AudioDeviceMac::StopRecording() {
_critSect.Leave(); // Cannot be under lock, risk of deadlock
if (kEventTimeout == _stopEvent.Wait(2000)) {
rtc::CritScope critScoped(&_critSect);
LOG(LS_WARNING)
<< "Timed out stopping the shared IOProc."
<< "We may have failed to detect a device removal.";
LOG(LS_WARNING) << "Timed out stopping the shared IOProc."
<< "We may have failed to detect a device removal.";
// We assume rendering on a shared device has stopped as well if
// the IOProc times out.
WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
@ -1391,7 +1384,7 @@ int32_t AudioDeviceMac::StopRecording() {
LOG(LS_INFO) << "Recording stopped (shared device)";
} else if (_recIsInitialized && !_playing && !_playIsInitialized) {
WEBRTC_CA_LOG_WARN(
AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
LOG(LS_INFO) << "Recording uninitialized (shared device)";
}
}
@ -1486,9 +1479,8 @@ int32_t AudioDeviceMac::StopPlayout() {
_critSect.Leave(); // Cannot be under lock, risk of deadlock
if (kEventTimeout == _stopEvent.Wait(2000)) {
rtc::CritScope critScoped(&_critSect);
LOG(LS_WARNING)
<< "Timed out stopping the render IOProc."
<< "We may have failed to detect a device removal.";
LOG(LS_WARNING) << "Timed out stopping the render IOProc."
<< "We may have failed to detect a device removal.";
// We assume capturing on a shared device has stopped as well if the
// IOProc times out.
@ -1501,11 +1493,11 @@ int32_t AudioDeviceMac::StopPlayout() {
LOG(LS_INFO) << "Playout stopped";
} else if (_twoDevices && _playIsInitialized) {
WEBRTC_CA_LOG_WARN(
AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
LOG(LS_INFO) << "Playout uninitialized (output device)";
} else if (!_twoDevices && _playIsInitialized && !_recIsInitialized) {
WEBRTC_CA_LOG_WARN(
AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
LOG(LS_INFO) << "Playout uninitialized (shared device)";
}
@ -1829,8 +1821,8 @@ OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() {
_ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
}
_renderDelayOffsetSamples = _renderBufSizeSamples -
N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
_renderDelayOffsetSamples =
_renderBufSizeSamples - N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
_outDesiredFormat.mChannelsPerFrame;
_outDesiredFormat.mBytesPerPacket =
@ -1909,9 +1901,9 @@ OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() {
static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
LOG(LS_VERBOSE) << "initial playout status: _renderDelayOffsetSamples="
<< _renderDelayOffsetSamples << ", _renderDelayUs="
<< _renderDelayUs << ", _renderLatencyUs="
<< _renderLatencyUs;
<< _renderDelayOffsetSamples
<< ", _renderDelayUs=" << _renderDelayUs
<< ", _renderLatencyUs=" << _renderLatencyUs;
return 0;
}
@ -1970,8 +1962,8 @@ int32_t AudioDeviceMac::HandleDeviceChange() {
AtomicSet32(&_captureDeviceIsAlive, 0);
_mixerManager.CloseMicrophone();
} else if (err != noErr) {
logCAMsg(rtc::LS_ERROR,
"Error in AudioDeviceGetProperty()", (const char*)&err);
logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()",
(const char*)&err);
return -1;
}
}
@ -1989,8 +1981,8 @@ int32_t AudioDeviceMac::HandleDeviceChange() {
AtomicSet32(&_renderDeviceIsAlive, 0);
_mixerManager.CloseSpeaker();
} else if (err != noErr) {
logCAMsg(rtc::LS_ERROR,
"Error in AudioDeviceGetProperty()", (const char*)&err);
logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()",
(const char*)&err);
return -1;
}
}
@ -2016,8 +2008,7 @@ int32_t AudioDeviceMac::HandleStreamFormatChange(
objectId, &propertyAddress, 0, NULL, &size, &streamFormat));
if (streamFormat.mFormatID != kAudioFormatLinearPCM) {
logCAMsg(rtc::LS_ERROR,
"Unacceptable input stream format -> mFormatID",
logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
(const char*)&streamFormat.mFormatID);
return -1;
}
@ -2042,8 +2033,7 @@ int32_t AudioDeviceMac::HandleStreamFormatChange(
LOG(LS_VERBOSE) << "mBytesPerFrame = " << streamFormat.mBytesPerFrame
<< ", mBitsPerChannel = " << streamFormat.mBitsPerChannel;
LOG(LS_VERBOSE) << "mFormatFlags = " << streamFormat.mFormatFlags;
logCAMsg(rtc::LS_VERBOSE, "mFormatID",
(const char*)&streamFormat.mFormatID);
logCAMsg(rtc::LS_VERBOSE, "mFormatID", (const char*)&streamFormat.mFormatID);
if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
const int io_block_size_samples = streamFormat.mChannelsPerFrame *
@ -2247,8 +2237,8 @@ OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData,
LOG(LS_ERROR) << "Error in AudioConverterFillComplexBuffer()";
return 1;
} else {
logCAMsg(rtc::LS_ERROR,
"Error in AudioConverterFillComplexBuffer()", (const char*)&err);
logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
(const char*)&err);
return 1;
}
}
@ -2485,8 +2475,8 @@ bool AudioDeviceMac::CaptureWorkerThread() {
// This is our own error.
return false;
} else {
logCAMsg(rtc::LS_ERROR,
"Error in AudioConverterFillComplexBuffer()", (const char*)&err);
logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
(const char*)&err);
return false;
}
}


@ -14,34 +14,31 @@
namespace webrtc {
#define WEBRTC_CA_RETURN_ON_ERR(expr) \
#define WEBRTC_CA_RETURN_ON_ERR(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
return -1; \
} \
} while (0)
#define WEBRTC_CA_LOG_ERR(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
} \
} while (0)
#define WEBRTC_CA_LOG_WARN(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(rtc::LS_ERROR, "Error in " #expr, \
(const char*) & err); \
return -1; \
logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
} \
} while (0)
#define WEBRTC_CA_LOG_ERR(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(rtc::LS_ERROR, "Error in " #expr, \
(const char*) & err); \
} \
} while (0)
#define WEBRTC_CA_LOG_WARN(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(rtc::LS_WARNING, "Error in " #expr, \
(const char*) & err); \
} \
} while (0)
AudioMixerManagerMac::AudioMixerManagerMac()
: _inputDeviceID(kAudioObjectUnknown),
_outputDeviceID(kAudioObjectUnknown),
@ -876,8 +873,8 @@ int32_t AudioMixerManagerMac::MinMicrophoneVolume(uint32_t& minVolume) const {
// CoreAudio errors are best interpreted as four character strings.
void AudioMixerManagerMac::logCAMsg(const rtc::LoggingSeverity sev,
const char* msg,
const char* err) {
const char* msg,
const char* err) {
RTC_DCHECK(msg != NULL);
RTC_DCHECK(err != NULL);
RTC_DCHECK(sev == rtc::LS_ERROR || sev == rtc::LS_WARNING);
@ -895,7 +892,7 @@ void AudioMixerManagerMac::logCAMsg(const rtc::LoggingSeverity sev,
}
#else
// We need to flip the characters in this case.
switch (sev) {
switch (sev) {
case rtc::LS_ERROR:
LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
break;

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -16,248 +16,231 @@
#include "rtc_base/logging.h"
#ifndef abs
#define abs(a) (a>=0?a:-a)
#define abs(a) (a >= 0 ? a : -a)
#endif
namespace webrtc
{
namespace videocapturemodule
{
namespace webrtc {
namespace videocapturemodule {
DeviceInfoImpl::DeviceInfoImpl()
: _apiLock(*RWLockWrapper::CreateRWLock()), _lastUsedDeviceName(NULL),
_lastUsedDeviceNameLength(0)
{
: _apiLock(*RWLockWrapper::CreateRWLock()),
_lastUsedDeviceName(NULL),
_lastUsedDeviceNameLength(0) {}
DeviceInfoImpl::~DeviceInfoImpl(void) {
_apiLock.AcquireLockExclusive();
free(_lastUsedDeviceName);
_apiLock.ReleaseLockExclusive();
delete &_apiLock;
}
int32_t DeviceInfoImpl::NumberOfCapabilities(const char* deviceUniqueIdUTF8) {
if (!deviceUniqueIdUTF8)
return -1;
DeviceInfoImpl::~DeviceInfoImpl(void)
{
_apiLock.AcquireLockExclusive();
free(_lastUsedDeviceName);
_apiLock.ReleaseLockExclusive();
_apiLock.AcquireLockShared();
delete &_apiLock;
}
int32_t DeviceInfoImpl::NumberOfCapabilities(
const char* deviceUniqueIdUTF8)
{
if (!deviceUniqueIdUTF8)
return -1;
_apiLock.AcquireLockShared();
if (_lastUsedDeviceNameLength == strlen((char*) deviceUniqueIdUTF8))
{
// Is it the same device that is asked for again.
if (_lastUsedDeviceNameLength == strlen((char*)deviceUniqueIdUTF8)) {
// Is it the same device that is asked for again.
#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
if(strncasecmp((char*)_lastUsedDeviceName,
(char*) deviceUniqueIdUTF8,
_lastUsedDeviceNameLength)==0)
if (strncasecmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
_lastUsedDeviceNameLength) == 0)
#else
if (_strnicmp((char*) _lastUsedDeviceName,
(char*) deviceUniqueIdUTF8,
_lastUsedDeviceNameLength) == 0)
if (_strnicmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
_lastUsedDeviceNameLength) == 0)
#endif
{
//yes
_apiLock.ReleaseLockShared();
return static_cast<int32_t>(_captureCapabilities.size());
}
{
// yes
_apiLock.ReleaseLockShared();
return static_cast<int32_t>(_captureCapabilities.size());
}
// Need to get exclusive rights to create the new capability map.
_apiLock.ReleaseLockShared();
WriteLockScoped cs2(_apiLock);
}
// Need to get exclusive rights to create the new capability map.
_apiLock.ReleaseLockShared();
WriteLockScoped cs2(_apiLock);
int32_t ret = CreateCapabilityMap(deviceUniqueIdUTF8);
return ret;
int32_t ret = CreateCapabilityMap(deviceUniqueIdUTF8);
return ret;
}
int32_t DeviceInfoImpl::GetCapability(const char* deviceUniqueIdUTF8,
const uint32_t deviceCapabilityNumber,
VideoCaptureCapability& capability)
{
assert(deviceUniqueIdUTF8 != NULL);
VideoCaptureCapability& capability) {
assert(deviceUniqueIdUTF8 != NULL);
ReadLockScoped cs(_apiLock);
ReadLockScoped cs(_apiLock);
if ((_lastUsedDeviceNameLength != strlen((char*) deviceUniqueIdUTF8))
if ((_lastUsedDeviceNameLength != strlen((char*)deviceUniqueIdUTF8))
#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
|| (strncasecmp((char*)_lastUsedDeviceName,
(char*) deviceUniqueIdUTF8,
_lastUsedDeviceNameLength)!=0))
#else
|| (_strnicmp((char*) _lastUsedDeviceName,
(char*) deviceUniqueIdUTF8,
|| (strncasecmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
_lastUsedDeviceNameLength) != 0))
#else
|| (_strnicmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
_lastUsedDeviceNameLength) != 0))
#endif
{
_apiLock.ReleaseLockShared();
_apiLock.AcquireLockExclusive();
if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8))
{
_apiLock.ReleaseLockExclusive();
_apiLock.AcquireLockShared();
return -1;
}
_apiLock.ReleaseLockExclusive();
_apiLock.AcquireLockShared();
{
_apiLock.ReleaseLockShared();
_apiLock.AcquireLockExclusive();
if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8)) {
_apiLock.ReleaseLockExclusive();
_apiLock.AcquireLockShared();
return -1;
}
_apiLock.ReleaseLockExclusive();
_apiLock.AcquireLockShared();
}
// Make sure the number is valid
if (deviceCapabilityNumber >= (unsigned int) _captureCapabilities.size())
{
LOG(LS_ERROR) << "Invalid deviceCapabilityNumber "
<< deviceCapabilityNumber << ">= number of capabilities ("
<< _captureCapabilities.size() << ").";
return -1;
}
// Make sure the number is valid
if (deviceCapabilityNumber >= (unsigned int)_captureCapabilities.size()) {
LOG(LS_ERROR) << "Invalid deviceCapabilityNumber " << deviceCapabilityNumber
<< ">= number of capabilities ("
<< _captureCapabilities.size() << ").";
return -1;
}
capability = _captureCapabilities[deviceCapabilityNumber];
return 0;
capability = _captureCapabilities[deviceCapabilityNumber];
return 0;
}
int32_t DeviceInfoImpl::GetBestMatchedCapability(
const char*deviceUniqueIdUTF8,
const VideoCaptureCapability& requested,
VideoCaptureCapability& resulting)
{
const char* deviceUniqueIdUTF8,
const VideoCaptureCapability& requested,
VideoCaptureCapability& resulting) {
if (!deviceUniqueIdUTF8)
return -1;
if (!deviceUniqueIdUTF8)
return -1;
ReadLockScoped cs(_apiLock);
if ((_lastUsedDeviceNameLength != strlen((char*) deviceUniqueIdUTF8))
ReadLockScoped cs(_apiLock);
if ((_lastUsedDeviceNameLength != strlen((char*)deviceUniqueIdUTF8))
#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
|| (strncasecmp((char*)_lastUsedDeviceName,
(char*) deviceUniqueIdUTF8,
_lastUsedDeviceNameLength)!=0))
#else
|| (_strnicmp((char*) _lastUsedDeviceName,
(char*) deviceUniqueIdUTF8,
|| (strncasecmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
_lastUsedDeviceNameLength) != 0))
#else
|| (_strnicmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
_lastUsedDeviceNameLength) != 0))
#endif
{
_apiLock.ReleaseLockShared();
_apiLock.AcquireLockExclusive();
if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8))
{
return -1;
}
_apiLock.ReleaseLockExclusive();
_apiLock.AcquireLockShared();
{
_apiLock.ReleaseLockShared();
_apiLock.AcquireLockExclusive();
if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8)) {
return -1;
}
_apiLock.ReleaseLockExclusive();
_apiLock.AcquireLockShared();
}
int32_t bestformatIndex = -1;
int32_t bestWidth = 0;
int32_t bestHeight = 0;
int32_t bestFrameRate = 0;
VideoType bestVideoType = VideoType::kUnknown;
int32_t bestformatIndex = -1;
int32_t bestWidth = 0;
int32_t bestHeight = 0;
int32_t bestFrameRate = 0;
VideoType bestVideoType = VideoType::kUnknown;
const int32_t numberOfCapabilies =
static_cast<int32_t>(_captureCapabilities.size());
const int32_t numberOfCapabilies =
static_cast<int32_t>(_captureCapabilities.size());
for (int32_t tmp = 0; tmp < numberOfCapabilies; ++tmp) // Loop through all capabilities
{
VideoCaptureCapability& capability = _captureCapabilities[tmp];
for (int32_t tmp = 0; tmp < numberOfCapabilies;
++tmp) // Loop through all capabilities
{
VideoCaptureCapability& capability = _captureCapabilities[tmp];
const int32_t diffWidth = capability.width - requested.width;
const int32_t diffHeight = capability.height - requested.height;
const int32_t diffFrameRate = capability.maxFPS - requested.maxFPS;
const int32_t diffWidth = capability.width - requested.width;
const int32_t diffHeight = capability.height - requested.height;
const int32_t diffFrameRate = capability.maxFPS - requested.maxFPS;
const int32_t currentbestDiffWith = bestWidth - requested.width;
const int32_t currentbestDiffHeight = bestHeight - requested.height;
const int32_t currentbestDiffFrameRate = bestFrameRate - requested.maxFPS;
const int32_t currentbestDiffWith = bestWidth - requested.width;
const int32_t currentbestDiffHeight = bestHeight - requested.height;
const int32_t currentbestDiffFrameRate = bestFrameRate - requested.maxFPS;
if ((diffHeight >= 0 && diffHeight <= abs(currentbestDiffHeight)) // Height better or equal to that of previous.
|| (currentbestDiffHeight < 0 && diffHeight >= currentbestDiffHeight))
{
if (diffHeight == currentbestDiffHeight) // Found best height. Care about the width.
{
if ((diffWidth >= 0 && diffWidth <= abs(currentbestDiffWith)) // Width better or equal
|| (currentbestDiffWith < 0 && diffWidth >= currentbestDiffWith))
{
if (diffWidth == currentbestDiffWith && diffHeight
== currentbestDiffHeight) // Same size as previously
{
// Also check the best frame rate if the diff is the same as previous
if (((diffFrameRate >= 0 &&
diffFrameRate <= currentbestDiffFrameRate) // Frame rate too high but better match than previous and we have not selected IUV
||
(currentbestDiffFrameRate < 0 &&
diffFrameRate >= currentbestDiffFrameRate)) // Current frame rate is lower than requested. This is better.
)
{
if ((currentbestDiffFrameRate == diffFrameRate) // Same frame rate as previous or frame rate already good enough
|| (currentbestDiffFrameRate >= 0))
{
if (bestVideoType != requested.videoType &&
requested.videoType != VideoType::kUnknown &&
(capability.videoType ==
requested.videoType ||
capability.videoType == VideoType::kI420 ||
capability.videoType == VideoType::kYUY2 ||
capability.videoType == VideoType::kYV12)) {
bestVideoType = capability.videoType;
bestformatIndex = tmp;
}
// If width, height and frame rate are fulfilled we can use the camera for encoding if it is supported.
if (capability.height == requested.height
&& capability.width == requested.width
&& capability.maxFPS >= requested.maxFPS)
{
bestformatIndex = tmp;
}
}
else // Better frame rate
{
bestWidth = capability.width;
bestHeight = capability.height;
bestFrameRate = capability.maxFPS;
bestVideoType = capability.videoType;
bestformatIndex = tmp;
}
}
}
else // Better width than previously
{
bestWidth = capability.width;
bestHeight = capability.height;
bestFrameRate = capability.maxFPS;
bestVideoType = capability.videoType;
bestformatIndex = tmp;
}
}// else width no good
}
else // Better height
{
if ((diffHeight >= 0 &&
diffHeight <= abs(currentbestDiffHeight)) // Height better or equal to
// that of previous.
|| (currentbestDiffHeight < 0 && diffHeight >= currentbestDiffHeight)) {
if (diffHeight ==
currentbestDiffHeight) // Found best height. Care about the width.
{
if ((diffWidth >= 0 &&
diffWidth <= abs(currentbestDiffWith)) // Width better or equal
|| (currentbestDiffWith < 0 && diffWidth >= currentbestDiffWith)) {
if (diffWidth == currentbestDiffWith &&
diffHeight == currentbestDiffHeight) // Same size as previously
{
// Also check the best frame rate if the diff is the same as
// previous
if (((diffFrameRate >= 0 &&
diffFrameRate <=
currentbestDiffFrameRate) // Frame rate too high but
// better match than previous
// and we have not selected IUV
|| (currentbestDiffFrameRate < 0 &&
diffFrameRate >=
currentbestDiffFrameRate)) // Current frame rate is
// lower than requested.
// This is better.
) {
if ((currentbestDiffFrameRate ==
diffFrameRate) // Same frame rate as previous or frame rate
// already good enough
|| (currentbestDiffFrameRate >= 0)) {
if (bestVideoType != requested.videoType &&
requested.videoType != VideoType::kUnknown &&
(capability.videoType == requested.videoType ||
capability.videoType == VideoType::kI420 ||
capability.videoType == VideoType::kYUY2 ||
capability.videoType == VideoType::kYV12)) {
bestVideoType = capability.videoType;
bestformatIndex = tmp;
}
// If width, height and frame rate are fulfilled we can use the
// camera for encoding if it is supported.
if (capability.height == requested.height &&
capability.width == requested.width &&
capability.maxFPS >= requested.maxFPS) {
bestformatIndex = tmp;
}
} else // Better frame rate
{
bestWidth = capability.width;
bestHeight = capability.height;
bestFrameRate = capability.maxFPS;
bestVideoType = capability.videoType;
bestformatIndex = tmp;
}
}
}// else height not good
}//end for
} else // Better width than previously
{
bestWidth = capability.width;
bestHeight = capability.height;
bestFrameRate = capability.maxFPS;
bestVideoType = capability.videoType;
bestformatIndex = tmp;
}
} // else width no good
} else // Better height
{
bestWidth = capability.width;
bestHeight = capability.height;
bestFrameRate = capability.maxFPS;
bestVideoType = capability.videoType;
bestformatIndex = tmp;
}
} // else height not good
} // end for
LOG(LS_VERBOSE) << "Best camera format: " << bestWidth << "x" << bestHeight
<< "@" << bestFrameRate
<< "fps, color format: " << static_cast<int>(bestVideoType);
LOG(LS_VERBOSE) << "Best camera format: " << bestWidth << "x" << bestHeight
<< "@" << bestFrameRate
<< "fps, color format: " << static_cast<int>(bestVideoType);
// Copy the capability
if (bestformatIndex < 0)
return -1;
resulting = _captureCapabilities[bestformatIndex];
return bestformatIndex;
// Copy the capability
if (bestformatIndex < 0)
return -1;
resulting = _captureCapabilities[bestformatIndex];
return bestformatIndex;
}
//Default implementation. This should be overridden by Mobile implementations.
// Default implementation. This should be overridden by Mobile implementations.
int32_t DeviceInfoImpl::GetOrientation(const char* deviceUniqueIdUTF8,
VideoRotation& orientation) {
orientation = kVideoRotation_0;
return -1;
return -1;
}
} // namespace videocapturemodule
} // namespace webrtc

View File

@ -17,304 +17,250 @@
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
//v4l includes
// v4l includes
#include <linux/videodev2.h>
#include "rtc_base/logging.h"
namespace webrtc
{
namespace videocapturemodule
{
VideoCaptureModule::DeviceInfo*
VideoCaptureImpl::CreateDeviceInfo()
{
return new videocapturemodule::DeviceInfoLinux();
namespace webrtc {
namespace videocapturemodule {
VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo() {
return new videocapturemodule::DeviceInfoLinux();
}
DeviceInfoLinux::DeviceInfoLinux()
: DeviceInfoImpl()
{
DeviceInfoLinux::DeviceInfoLinux() : DeviceInfoImpl() {}
int32_t DeviceInfoLinux::Init() {
return 0;
}
int32_t DeviceInfoLinux::Init()
{
return 0;
}
DeviceInfoLinux::~DeviceInfoLinux() {}
DeviceInfoLinux::~DeviceInfoLinux()
{
}
uint32_t DeviceInfoLinux::NumberOfDevices() {
LOG(LS_INFO) << __FUNCTION__;
uint32_t DeviceInfoLinux::NumberOfDevices()
{
LOG(LS_INFO) << __FUNCTION__;
uint32_t count = 0;
char device[20];
int fd = -1;
uint32_t count = 0;
char device[20];
int fd = -1;
/* detect /dev/video [0-63] entries */
for (int n = 0; n < 64; n++)
{
sprintf(device, "/dev/video%d", n);
if ((fd = open(device, O_RDONLY)) != -1)
{
close(fd);
count++;
}
/* detect /dev/video [0-63] entries */
for (int n = 0; n < 64; n++) {
sprintf(device, "/dev/video%d", n);
if ((fd = open(device, O_RDONLY)) != -1) {
close(fd);
count++;
}
}
return count;
return count;
}
int32_t DeviceInfoLinux::GetDeviceName(
uint32_t deviceNumber,
char* deviceNameUTF8,
uint32_t deviceNameLength,
char* deviceUniqueIdUTF8,
uint32_t deviceUniqueIdUTF8Length,
char* /*productUniqueIdUTF8*/,
uint32_t /*productUniqueIdUTF8Length*/)
{
LOG(LS_INFO) << __FUNCTION__;
int32_t DeviceInfoLinux::GetDeviceName(uint32_t deviceNumber,
char* deviceNameUTF8,
uint32_t deviceNameLength,
char* deviceUniqueIdUTF8,
uint32_t deviceUniqueIdUTF8Length,
char* /*productUniqueIdUTF8*/,
uint32_t /*productUniqueIdUTF8Length*/) {
LOG(LS_INFO) << __FUNCTION__;
// Travel through /dev/video [0-63]
uint32_t count = 0;
char device[20];
int fd = -1;
bool found = false;
for (int n = 0; n < 64; n++)
{
sprintf(device, "/dev/video%d", n);
if ((fd = open(device, O_RDONLY)) != -1)
{
if (count == deviceNumber) {
// Found the device
found = true;
break;
} else {
close(fd);
count++;
}
}
// Travel through /dev/video [0-63]
uint32_t count = 0;
char device[20];
int fd = -1;
bool found = false;
for (int n = 0; n < 64; n++) {
sprintf(device, "/dev/video%d", n);
if ((fd = open(device, O_RDONLY)) != -1) {
if (count == deviceNumber) {
// Found the device
found = true;
break;
} else {
close(fd);
count++;
}
}
}
if (!found)
return -1;
if (!found)
return -1;
// query device capabilities
struct v4l2_capability cap;
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
LOG(LS_INFO) << "error in querying the device capability for device "
<< device << ". errno = " << errno;
close(fd);
return -1;
}
close(fd);
char cameraName[64];
memset(deviceNameUTF8, 0, deviceNameLength);
memcpy(cameraName, cap.card, sizeof(cap.card));
if (deviceNameLength >= strlen(cameraName)) {
memcpy(deviceNameUTF8, cameraName, strlen(cameraName));
} else {
LOG(LS_INFO) << "buffer passed is too small";
return -1;
}
if (cap.bus_info[0] != 0) // may not be available in all drivers
{
// copy device id
if (deviceUniqueIdUTF8Length >= strlen((const char*)cap.bus_info)) {
memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Length);
memcpy(deviceUniqueIdUTF8, cap.bus_info,
strlen((const char*)cap.bus_info));
} else {
LOG(LS_INFO) << "buffer passed is too small";
return -1;
}
}
return 0;
}
int32_t DeviceInfoLinux::CreateCapabilityMap(const char* deviceUniqueIdUTF8) {
int fd;
char device[32];
bool found = false;
const int32_t deviceUniqueIdUTF8Length =
(int32_t)strlen((char*)deviceUniqueIdUTF8);
if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) {
LOG(LS_INFO) << "Device name too long";
return -1;
}
LOG(LS_INFO) << "CreateCapabilityMap called for device "
<< deviceUniqueIdUTF8;
/* detect /dev/video [0-63] entries */
for (int n = 0; n < 64; ++n) {
sprintf(device, "/dev/video%d", n);
fd = open(device, O_RDONLY);
if (fd == -1)
continue;
// query device capabilities
struct v4l2_capability cap;
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0)
{
LOG(LS_INFO) << "error in querying the device capability for device "
<< device << ". errno = " << errno;
close(fd);
return -1;
}
close(fd);
char cameraName[64];
memset(deviceNameUTF8, 0, deviceNameLength);
memcpy(cameraName, cap.card, sizeof(cap.card));
if (deviceNameLength >= strlen(cameraName))
{
memcpy(deviceNameUTF8, cameraName, strlen(cameraName));
}
else
{
LOG(LS_INFO) << "buffer passed is too small";
return -1;
}
if (cap.bus_info[0] != 0) // may not be available in all drivers
{
// copy device id
if (deviceUniqueIdUTF8Length >= strlen((const char*) cap.bus_info))
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
if (cap.bus_info[0] != 0) {
if (strncmp((const char*)cap.bus_info, (const char*)deviceUniqueIdUTF8,
strlen((const char*)deviceUniqueIdUTF8)) ==
0) // match with device id
{
memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Length);
memcpy(deviceUniqueIdUTF8, cap.bus_info,
strlen((const char*) cap.bus_info));
found = true;
break; // fd matches with device unique id supplied
}
else
{
LOG(LS_INFO) << "buffer passed is too small";
return -1;
} else // match for device name
{
if (IsDeviceNameMatches((const char*)cap.card,
(const char*)deviceUniqueIdUTF8)) {
found = true;
break;
}
}
}
close(fd); // close since this is not the matching device
}
return 0;
}
if (!found) {
LOG(LS_INFO) << "no matching device found";
return -1;
}
int32_t DeviceInfoLinux::CreateCapabilityMap(
const char* deviceUniqueIdUTF8)
{
int fd;
char device[32];
bool found = false;
// now fd will point to the matching device
// reset old capability list.
_captureCapabilities.clear();
const int32_t deviceUniqueIdUTF8Length =
(int32_t) strlen((char*) deviceUniqueIdUTF8);
if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength)
{
LOG(LS_INFO) << "Device name too long";
return -1;
}
LOG(LS_INFO) << "CreateCapabilityMap called for device "
<< deviceUniqueIdUTF8;
int size = FillCapabilities(fd);
close(fd);
/* detect /dev/video [0-63] entries */
for (int n = 0; n < 64; ++n)
{
sprintf(device, "/dev/video%d", n);
fd = open(device, O_RDONLY);
if (fd == -1)
continue;
// Store the new used device name
_lastUsedDeviceNameLength = deviceUniqueIdUTF8Length;
_lastUsedDeviceName =
(char*)realloc(_lastUsedDeviceName, _lastUsedDeviceNameLength + 1);
memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8,
_lastUsedDeviceNameLength + 1);
// query device capabilities
struct v4l2_capability cap;
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
{
if (cap.bus_info[0] != 0)
{
if (strncmp((const char*) cap.bus_info,
(const char*) deviceUniqueIdUTF8,
strlen((const char*) deviceUniqueIdUTF8)) == 0) //match with device id
{
found = true;
break; // fd matches with device unique id supplied
}
}
else //match for device name
{
if (IsDeviceNameMatches((const char*) cap.card,
(const char*) deviceUniqueIdUTF8))
{
found = true;
break;
}
}
}
close(fd); // close since this is not the matching device
}
LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
if (!found)
{
LOG(LS_INFO) << "no matching device found";
return -1;
}
// now fd will point to the matching device
// reset old capability list.
_captureCapabilities.clear();
int size = FillCapabilities(fd);
close(fd);
// Store the new used device name
_lastUsedDeviceNameLength = deviceUniqueIdUTF8Length;
_lastUsedDeviceName = (char*) realloc(_lastUsedDeviceName,
_lastUsedDeviceNameLength + 1);
memcpy(_lastUsedDeviceName, deviceUniqueIdUTF8, _lastUsedDeviceNameLength + 1);
LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
return size;
return size;
}
bool DeviceInfoLinux::IsDeviceNameMatches(const char* name,
const char* deviceUniqueIdUTF8)
{
if (strncmp(deviceUniqueIdUTF8, name, strlen(name)) == 0)
return true;
return false;
const char* deviceUniqueIdUTF8) {
if (strncmp(deviceUniqueIdUTF8, name, strlen(name)) == 0)
return true;
return false;
}
int32_t DeviceInfoLinux::FillCapabilities(int fd)
{
int32_t DeviceInfoLinux::FillCapabilities(int fd) {
// set image format
struct v4l2_format video_fmt;
memset(&video_fmt, 0, sizeof(struct v4l2_format));
// set image format
struct v4l2_format video_fmt;
memset(&video_fmt, 0, sizeof(struct v4l2_format));
video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
video_fmt.fmt.pix.sizeimage = 0;
video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
video_fmt.fmt.pix.sizeimage = 0;
int totalFmts = 4;
unsigned int videoFormats[] = {V4L2_PIX_FMT_MJPEG, V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_UYVY};
int totalFmts = 4;
unsigned int videoFormats[] = {
V4L2_PIX_FMT_MJPEG,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YUYV,
V4L2_PIX_FMT_UYVY };
int sizes = 13;
unsigned int size[][2] = {{128, 96}, {160, 120}, {176, 144}, {320, 240},
{352, 288}, {640, 480}, {704, 576}, {800, 600},
{960, 720}, {1280, 720}, {1024, 768}, {1440, 1080},
{1920, 1080}};
int sizes = 13;
unsigned int size[][2] = { { 128, 96 }, { 160, 120 }, { 176, 144 },
{ 320, 240 }, { 352, 288 }, { 640, 480 },
{ 704, 576 }, { 800, 600 }, { 960, 720 },
{ 1280, 720 }, { 1024, 768 }, { 1440, 1080 },
{ 1920, 1080 } };
int index = 0;
for (int fmts = 0; fmts < totalFmts; fmts++) {
for (int i = 0; i < sizes; i++) {
video_fmt.fmt.pix.pixelformat = videoFormats[fmts];
video_fmt.fmt.pix.width = size[i][0];
video_fmt.fmt.pix.height = size[i][1];
int index = 0;
for (int fmts = 0; fmts < totalFmts; fmts++)
{
for (int i = 0; i < sizes; i++)
{
video_fmt.fmt.pix.pixelformat = videoFormats[fmts];
video_fmt.fmt.pix.width = size[i][0];
video_fmt.fmt.pix.height = size[i][1];
if (ioctl(fd, VIDIOC_TRY_FMT, &video_fmt) >= 0) {
if ((video_fmt.fmt.pix.width == size[i][0]) &&
(video_fmt.fmt.pix.height == size[i][1])) {
VideoCaptureCapability cap;
cap.width = video_fmt.fmt.pix.width;
cap.height = video_fmt.fmt.pix.height;
if (videoFormats[fmts] == V4L2_PIX_FMT_YUYV) {
cap.videoType = VideoType::kYUY2;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_YUV420) {
cap.videoType = VideoType::kI420;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_MJPEG) {
cap.videoType = VideoType::kMJPEG;
} else if (videoFormats[fmts] == V4L2_PIX_FMT_UYVY) {
cap.videoType = VideoType::kUYVY;
}
if (ioctl(fd, VIDIOC_TRY_FMT, &video_fmt) >= 0)
{
if ((video_fmt.fmt.pix.width == size[i][0])
&& (video_fmt.fmt.pix.height == size[i][1]))
{
VideoCaptureCapability cap;
cap.width = video_fmt.fmt.pix.width;
cap.height = video_fmt.fmt.pix.height;
if (videoFormats[fmts] == V4L2_PIX_FMT_YUYV)
{
cap.videoType = VideoType::kYUY2;
}
else if (videoFormats[fmts] == V4L2_PIX_FMT_YUV420)
{
cap.videoType = VideoType::kI420;
}
else if (videoFormats[fmts] == V4L2_PIX_FMT_MJPEG)
{
cap.videoType = VideoType::kMJPEG;
}
else if (videoFormats[fmts] == V4L2_PIX_FMT_UYVY)
{
cap.videoType = VideoType::kUYVY;
}
// get fps of current camera mode
// V4l2 does not have a stable method of knowing so we just guess.
if (cap.width >= 800 && cap.videoType != VideoType::kMJPEG) {
cap.maxFPS = 15;
} else {
cap.maxFPS = 30;
}
// get fps of current camera mode
// V4l2 does not have a stable method of knowing so we just guess.
if (cap.width >= 800 &&
cap.videoType != VideoType::kMJPEG) {
cap.maxFPS = 15;
}
else
{
cap.maxFPS = 30;
}
_captureCapabilities.push_back(cap);
index++;
LOG(LS_VERBOSE) << "Camera capability, width:" << cap.width
<< " height:" << cap.height << " type:"
<< static_cast<int32_t>(cap.videoType)
<< " fps:" << cap.maxFPS;
}
}
_captureCapabilities.push_back(cap);
index++;
LOG(LS_VERBOSE) << "Camera capability, width:" << cap.width
<< " height:" << cap.height
<< " type:" << static_cast<int32_t>(cap.videoType)
<< " fps:" << cap.maxFPS;
}
}
}
}
LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
return _captureCapabilities.size();
LOG(LS_INFO) << "CreateCapabilityMap " << _captureCapabilities.size();
return _captureCapabilities.size();
}
} // namespace videocapturemodule

View File

@ -24,22 +24,22 @@
#include <new>
#include "media/base/videocommon.h"
#include "rtc_base/logging.h"
#include "rtc_base/refcount.h"
#include "rtc_base/refcountedobject.h"
#include "rtc_base/scoped_ref_ptr.h"
#include "rtc_base/logging.h"
namespace webrtc {
namespace videocapturemodule {
rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
const char* deviceUniqueId) {
rtc::scoped_refptr<VideoCaptureModuleV4L2> implementation(
new rtc::RefCountedObject<VideoCaptureModuleV4L2>());
rtc::scoped_refptr<VideoCaptureModuleV4L2> implementation(
new rtc::RefCountedObject<VideoCaptureModuleV4L2>());
if (implementation->Init(deviceUniqueId) != 0)
return nullptr;
if (implementation->Init(deviceUniqueId) != 0)
return nullptr;
return implementation;
return implementation;
}
VideoCaptureModuleV4L2::VideoCaptureModuleV4L2()
@ -54,407 +54,369 @@ VideoCaptureModuleV4L2::VideoCaptureModuleV4L2()
_captureVideoType(VideoType::kI420),
_pool(NULL) {}
int32_t VideoCaptureModuleV4L2::Init(const char* deviceUniqueIdUTF8)
{
int len = strlen((const char*) deviceUniqueIdUTF8);
_deviceUniqueId = new (std::nothrow) char[len + 1];
if (_deviceUniqueId)
{
memcpy(_deviceUniqueId, deviceUniqueIdUTF8, len + 1);
}
int32_t VideoCaptureModuleV4L2::Init(const char* deviceUniqueIdUTF8) {
int len = strlen((const char*)deviceUniqueIdUTF8);
_deviceUniqueId = new (std::nothrow) char[len + 1];
if (_deviceUniqueId) {
memcpy(_deviceUniqueId, deviceUniqueIdUTF8, len + 1);
}
int fd;
char device[32];
bool found = false;
int fd;
char device[32];
bool found = false;
/* detect /dev/video [0-63] entries */
int n;
for (n = 0; n < 64; n++)
{
sprintf(device, "/dev/video%d", n);
if ((fd = open(device, O_RDONLY)) != -1)
{
// query device capabilities
struct v4l2_capability cap;
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
{
if (cap.bus_info[0] != 0)
{
if (strncmp((const char*) cap.bus_info,
(const char*) deviceUniqueIdUTF8,
strlen((const char*) deviceUniqueIdUTF8)) == 0) //match with device id
{
close(fd);
found = true;
break; // fd matches with device unique id supplied
}
}
}
close(fd); // close since this is not the matching device
/* detect /dev/video [0-63] entries */
int n;
for (n = 0; n < 64; n++) {
sprintf(device, "/dev/video%d", n);
if ((fd = open(device, O_RDONLY)) != -1) {
// query device capabilities
struct v4l2_capability cap;
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
if (cap.bus_info[0] != 0) {
if (strncmp((const char*)cap.bus_info,
(const char*)deviceUniqueIdUTF8,
strlen((const char*)deviceUniqueIdUTF8)) ==
0) // match with device id
{
close(fd);
found = true;
break; // fd matches with device unique id supplied
}
}
}
close(fd); // close since this is not the matching device
}
if (!found)
{
LOG(LS_INFO) << "no matching device found";
return -1;
}
_deviceId = n; //store the device id
return 0;
}
if (!found) {
LOG(LS_INFO) << "no matching device found";
return -1;
}
_deviceId = n; // store the device id
return 0;
}
VideoCaptureModuleV4L2::~VideoCaptureModuleV4L2()
{
StopCapture();
if (_deviceFd != -1)
close(_deviceFd);
VideoCaptureModuleV4L2::~VideoCaptureModuleV4L2() {
StopCapture();
if (_deviceFd != -1)
close(_deviceFd);
}
int32_t VideoCaptureModuleV4L2::StartCapture(
const VideoCaptureCapability& capability)
{
if (_captureStarted)
{
if (capability.width == _currentWidth &&
capability.height == _currentHeight &&
_captureVideoType == capability.videoType) {
return 0;
}
else
{
StopCapture();
}
}
rtc::CritScope cs(&_captureCritSect);
//first open /dev/video device
char device[20];
sprintf(device, "/dev/video%d", (int) _deviceId);
if ((_deviceFd = open(device, O_RDWR | O_NONBLOCK, 0)) < 0)
{
LOG(LS_INFO) << "error in opening " << device << " errono = " << errno;
return -1;
}
// Supported video formats in preferred order.
// If the requested resolution is larger than VGA, we prefer MJPEG. Go for
// I420 otherwise.
const int nFormats = 5;
unsigned int fmts[nFormats];
if (capability.width > 640 || capability.height > 480) {
fmts[0] = V4L2_PIX_FMT_MJPEG;
fmts[1] = V4L2_PIX_FMT_YUV420;
fmts[2] = V4L2_PIX_FMT_YUYV;
fmts[3] = V4L2_PIX_FMT_UYVY;
fmts[4] = V4L2_PIX_FMT_JPEG;
const VideoCaptureCapability& capability) {
if (_captureStarted) {
if (capability.width == _currentWidth &&
capability.height == _currentHeight &&
_captureVideoType == capability.videoType) {
return 0;
} else {
fmts[0] = V4L2_PIX_FMT_YUV420;
fmts[1] = V4L2_PIX_FMT_YUYV;
fmts[2] = V4L2_PIX_FMT_UYVY;
fmts[3] = V4L2_PIX_FMT_MJPEG;
fmts[4] = V4L2_PIX_FMT_JPEG;
StopCapture();
}
}
// Enumerate image formats.
struct v4l2_fmtdesc fmt;
int fmtsIdx = nFormats;
memset(&fmt, 0, sizeof(fmt));
fmt.index = 0;
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
LOG(LS_INFO) << "Video Capture enumerats supported image formats:";
while (ioctl(_deviceFd, VIDIOC_ENUM_FMT, &fmt) == 0) {
LOG(LS_INFO) << " { pixelformat = "
<< cricket::GetFourccName(fmt.pixelformat)
<< ", description = '" << fmt.description << "' }";
// Match the preferred order.
for (int i = 0; i < nFormats; i++) {
if (fmt.pixelformat == fmts[i] && i < fmtsIdx)
fmtsIdx = i;
}
// Keep enumerating.
fmt.index++;
rtc::CritScope cs(&_captureCritSect);
// first open /dev/video device
char device[20];
sprintf(device, "/dev/video%d", (int)_deviceId);
if ((_deviceFd = open(device, O_RDWR | O_NONBLOCK, 0)) < 0) {
LOG(LS_INFO) << "error in opening " << device << " errono = " << errno;
return -1;
}
// Supported video formats in preferred order.
// If the requested resolution is larger than VGA, we prefer MJPEG. Go for
// I420 otherwise.
const int nFormats = 5;
unsigned int fmts[nFormats];
if (capability.width > 640 || capability.height > 480) {
fmts[0] = V4L2_PIX_FMT_MJPEG;
fmts[1] = V4L2_PIX_FMT_YUV420;
fmts[2] = V4L2_PIX_FMT_YUYV;
fmts[3] = V4L2_PIX_FMT_UYVY;
fmts[4] = V4L2_PIX_FMT_JPEG;
} else {
fmts[0] = V4L2_PIX_FMT_YUV420;
fmts[1] = V4L2_PIX_FMT_YUYV;
fmts[2] = V4L2_PIX_FMT_UYVY;
fmts[3] = V4L2_PIX_FMT_MJPEG;
fmts[4] = V4L2_PIX_FMT_JPEG;
}
// Enumerate image formats.
struct v4l2_fmtdesc fmt;
int fmtsIdx = nFormats;
memset(&fmt, 0, sizeof(fmt));
fmt.index = 0;
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
LOG(LS_INFO) << "Video Capture enumerats supported image formats:";
while (ioctl(_deviceFd, VIDIOC_ENUM_FMT, &fmt) == 0) {
LOG(LS_INFO) << " { pixelformat = "
<< cricket::GetFourccName(fmt.pixelformat)
<< ", description = '" << fmt.description << "' }";
// Match the preferred order.
for (int i = 0; i < nFormats; i++) {
if (fmt.pixelformat == fmts[i] && i < fmtsIdx)
fmtsIdx = i;
}
// Keep enumerating.
fmt.index++;
}
if (fmtsIdx == nFormats)
{
LOG(LS_INFO) << "no supporting video formats found";
return -1;
} else {
LOG(LS_INFO) << "We prefer format "
<< cricket::GetFourccName(fmts[fmtsIdx]);
}
if (fmtsIdx == nFormats) {
LOG(LS_INFO) << "no supporting video formats found";
return -1;
} else {
LOG(LS_INFO) << "We prefer format "
<< cricket::GetFourccName(fmts[fmtsIdx]);
}
struct v4l2_format video_fmt;
memset(&video_fmt, 0, sizeof(struct v4l2_format));
video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
video_fmt.fmt.pix.sizeimage = 0;
video_fmt.fmt.pix.width = capability.width;
video_fmt.fmt.pix.height = capability.height;
video_fmt.fmt.pix.pixelformat = fmts[fmtsIdx];
struct v4l2_format video_fmt;
memset(&video_fmt, 0, sizeof(struct v4l2_format));
video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
video_fmt.fmt.pix.sizeimage = 0;
video_fmt.fmt.pix.width = capability.width;
video_fmt.fmt.pix.height = capability.height;
video_fmt.fmt.pix.pixelformat = fmts[fmtsIdx];
if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
_captureVideoType = VideoType::kYUY2;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
_captureVideoType = VideoType::kI420;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
_captureVideoType = VideoType::kUYVY;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG ||
video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
_captureVideoType = VideoType::kMJPEG;
if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
_captureVideoType = VideoType::kYUY2;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
_captureVideoType = VideoType::kI420;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
_captureVideoType = VideoType::kUYVY;
else if (video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG ||
video_fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
_captureVideoType = VideoType::kMJPEG;
//set format and frame size now
if (ioctl(_deviceFd, VIDIOC_S_FMT, &video_fmt) < 0)
{
LOG(LS_INFO) << "error in VIDIOC_S_FMT, errno = " << errno;
return -1;
}
// set format and frame size now
if (ioctl(_deviceFd, VIDIOC_S_FMT, &video_fmt) < 0) {
LOG(LS_INFO) << "error in VIDIOC_S_FMT, errno = " << errno;
return -1;
}
// initialize current width and height
_currentWidth = video_fmt.fmt.pix.width;
_currentHeight = video_fmt.fmt.pix.height;
// initialize current width and height
_currentWidth = video_fmt.fmt.pix.width;
_currentHeight = video_fmt.fmt.pix.height;
// Try to set the frame rate, but first check the driver capability.
bool driver_framerate_support = true;
struct v4l2_streamparm streamparms;
memset(&streamparms, 0, sizeof(streamparms));
streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(_deviceFd, VIDIOC_G_PARM, &streamparms) < 0) {
LOG(LS_INFO) << "error in VIDIOC_G_PARM errno = " << errno;
// Try to set the frame rate, but first check the driver capability.
bool driver_framerate_support = true;
struct v4l2_streamparm streamparms;
memset(&streamparms, 0, sizeof(streamparms));
streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(_deviceFd, VIDIOC_G_PARM, &streamparms) < 0) {
LOG(LS_INFO) << "error in VIDIOC_G_PARM errno = " << errno;
driver_framerate_support = false;
// continue
} else {
// check the capability flag is set to V4L2_CAP_TIMEPERFRAME.
if (streamparms.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
// driver supports the feature. Set required framerate.
memset(&streamparms, 0, sizeof(streamparms));
streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
streamparms.parm.capture.timeperframe.numerator = 1;
streamparms.parm.capture.timeperframe.denominator = capability.maxFPS;
if (ioctl(_deviceFd, VIDIOC_S_PARM, &streamparms) < 0) {
LOG(LS_INFO) << "Failed to set the framerate. errno=" << errno;
driver_framerate_support = false;
// continue
} else {
// check the capability flag is set to V4L2_CAP_TIMEPERFRAME.
if (streamparms.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
// driver supports the feature. Set required framerate.
memset(&streamparms, 0, sizeof(streamparms));
streamparms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
streamparms.parm.capture.timeperframe.numerator = 1;
streamparms.parm.capture.timeperframe.denominator = capability.maxFPS;
if (ioctl(_deviceFd, VIDIOC_S_PARM, &streamparms) < 0) {
LOG(LS_INFO) << "Failed to set the framerate. errno=" << errno;
driver_framerate_support = false;
} else {
_currentFrameRate = capability.maxFPS;
}
}
}
// If driver doesn't support framerate control, need to hardcode.
// Hardcoding the value based on the frame size.
if (!driver_framerate_support) {
if (_currentWidth >= 800 && _captureVideoType != VideoType::kMJPEG) {
_currentFrameRate = 15;
} else {
_currentFrameRate = 30;
_currentFrameRate = capability.maxFPS;
}
}
if (!AllocateVideoBuffers())
{
LOG(LS_INFO) << "failed to allocate video capture buffers";
return -1;
}
// If driver doesn't support framerate control, need to hardcode.
// Hardcoding the value based on the frame size.
if (!driver_framerate_support) {
if (_currentWidth >= 800 && _captureVideoType != VideoType::kMJPEG) {
_currentFrameRate = 15;
} else {
_currentFrameRate = 30;
}
}
//start capture thread;
if (!_captureThread)
{
_captureThread.reset(new rtc::PlatformThread(
VideoCaptureModuleV4L2::CaptureThread, this, "CaptureThread"));
_captureThread->Start();
_captureThread->SetPriority(rtc::kHighPriority);
}
if (!AllocateVideoBuffers()) {
LOG(LS_INFO) << "failed to allocate video capture buffers";
return -1;
}
// Needed to start UVC camera - from the uvcview application
enum v4l2_buf_type type;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(_deviceFd, VIDIOC_STREAMON, &type) == -1)
{
LOG(LS_INFO) << "Failed to turn on stream";
return -1;
}
// start capture thread;
if (!_captureThread) {
_captureThread.reset(new rtc::PlatformThread(
VideoCaptureModuleV4L2::CaptureThread, this, "CaptureThread"));
_captureThread->Start();
_captureThread->SetPriority(rtc::kHighPriority);
}
_captureStarted = true;
return 0;
// Needed to start UVC camera - from the uvcview application
enum v4l2_buf_type type;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(_deviceFd, VIDIOC_STREAMON, &type) == -1) {
LOG(LS_INFO) << "Failed to turn on stream";
return -1;
}
_captureStarted = true;
return 0;
}
int32_t VideoCaptureModuleV4L2::StopCapture()
{
if (_captureThread) {
// Make sure the capture thread stops using the critsect.
_captureThread->Stop();
_captureThread.reset();
}
int32_t VideoCaptureModuleV4L2::StopCapture() {
if (_captureThread) {
// Make sure the capture thread stops using the critsect.
_captureThread->Stop();
_captureThread.reset();
}
rtc::CritScope cs(&_captureCritSect);
if (_captureStarted)
{
_captureStarted = false;
rtc::CritScope cs(&_captureCritSect);
if (_captureStarted) {
_captureStarted = false;
DeAllocateVideoBuffers();
close(_deviceFd);
_deviceFd = -1;
}
DeAllocateVideoBuffers();
close(_deviceFd);
_deviceFd = -1;
}
return 0;
return 0;
}
//critical section protected by the caller
// critical section protected by the caller
bool VideoCaptureModuleV4L2::AllocateVideoBuffers()
{
struct v4l2_requestbuffers rbuffer;
memset(&rbuffer, 0, sizeof(v4l2_requestbuffers));
bool VideoCaptureModuleV4L2::AllocateVideoBuffers() {
struct v4l2_requestbuffers rbuffer;
memset(&rbuffer, 0, sizeof(v4l2_requestbuffers));
rbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
rbuffer.memory = V4L2_MEMORY_MMAP;
rbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
rbuffer.memory = V4L2_MEMORY_MMAP;
rbuffer.count = kNoOfV4L2Bufffers;
if (ioctl(_deviceFd, VIDIOC_REQBUFS, &rbuffer) < 0) {
LOG(LS_INFO) << "Could not get buffers from device. errno = " << errno;
return false;
}
if (rbuffer.count > kNoOfV4L2Bufffers)
rbuffer.count = kNoOfV4L2Bufffers;
if (ioctl(_deviceFd, VIDIOC_REQBUFS, &rbuffer) < 0)
{
LOG(LS_INFO) << "Could not get buffers from device. errno = " << errno;
return false;
_buffersAllocatedByDevice = rbuffer.count;
// Map the buffers
_pool = new Buffer[rbuffer.count];
for (unsigned int i = 0; i < rbuffer.count; i++) {
struct v4l2_buffer buffer;
memset(&buffer, 0, sizeof(v4l2_buffer));
buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buffer.memory = V4L2_MEMORY_MMAP;
buffer.index = i;
if (ioctl(_deviceFd, VIDIOC_QUERYBUF, &buffer) < 0) {
return false;
}
if (rbuffer.count > kNoOfV4L2Bufffers)
rbuffer.count = kNoOfV4L2Bufffers;
_pool[i].start = mmap(NULL, buffer.length, PROT_READ | PROT_WRITE,
MAP_SHARED, _deviceFd, buffer.m.offset);
_buffersAllocatedByDevice = rbuffer.count;
//Map the buffers
_pool = new Buffer[rbuffer.count];
for (unsigned int i = 0; i < rbuffer.count; i++)
{
struct v4l2_buffer buffer;
memset(&buffer, 0, sizeof(v4l2_buffer));
buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buffer.memory = V4L2_MEMORY_MMAP;
buffer.index = i;
if (ioctl(_deviceFd, VIDIOC_QUERYBUF, &buffer) < 0)
{
return false;
}
_pool[i].start = mmap(NULL, buffer.length, PROT_READ | PROT_WRITE, MAP_SHARED,
_deviceFd, buffer.m.offset);
if (MAP_FAILED == _pool[i].start)
{
for (unsigned int j = 0; j < i; j++)
munmap(_pool[j].start, _pool[j].length);
return false;
}
_pool[i].length = buffer.length;
if (ioctl(_deviceFd, VIDIOC_QBUF, &buffer) < 0)
{
return false;
}
if (MAP_FAILED == _pool[i].start) {
for (unsigned int j = 0; j < i; j++)
munmap(_pool[j].start, _pool[j].length);
return false;
}
_pool[i].length = buffer.length;
if (ioctl(_deviceFd, VIDIOC_QBUF, &buffer) < 0) {
return false;
}
}
return true;
}
bool VideoCaptureModuleV4L2::DeAllocateVideoBuffers() {
// unmap buffers
for (int i = 0; i < _buffersAllocatedByDevice; i++)
munmap(_pool[i].start, _pool[i].length);
delete[] _pool;
// turn off stream
enum v4l2_buf_type type;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(_deviceFd, VIDIOC_STREAMOFF, &type) < 0) {
LOG(LS_INFO) << "VIDIOC_STREAMOFF error. errno: " << errno;
}
return true;
}
bool VideoCaptureModuleV4L2::CaptureStarted() {
return _captureStarted;
}
bool VideoCaptureModuleV4L2::CaptureThread(void* obj) {
return static_cast<VideoCaptureModuleV4L2*>(obj)->CaptureProcess();
}
bool VideoCaptureModuleV4L2::CaptureProcess() {
int retVal = 0;
fd_set rSet;
struct timeval timeout;
rtc::CritScope cs(&_captureCritSect);
FD_ZERO(&rSet);
FD_SET(_deviceFd, &rSet);
timeout.tv_sec = 1;
timeout.tv_usec = 0;
retVal = select(_deviceFd + 1, &rSet, NULL, NULL, &timeout);
if (retVal < 0 && errno != EINTR) // continue if interrupted
{
// select failed
return false;
} else if (retVal == 0) {
// select timed out
return true;
}
bool VideoCaptureModuleV4L2::DeAllocateVideoBuffers()
{
// unmap buffers
for (int i = 0; i < _buffersAllocatedByDevice; i++)
munmap(_pool[i].start, _pool[i].length);
delete[] _pool;
// turn off stream
enum v4l2_buf_type type;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(_deviceFd, VIDIOC_STREAMOFF, &type) < 0)
{
LOG(LS_INFO) << "VIDIOC_STREAMOFF error. errno: " << errno;
}
} else if (!FD_ISSET(_deviceFd, &rSet)) {
// no event on camera handle
return true;
}
}
bool VideoCaptureModuleV4L2::CaptureStarted()
{
return _captureStarted;
}
bool VideoCaptureModuleV4L2::CaptureThread(void* obj)
{
return static_cast<VideoCaptureModuleV4L2*> (obj)->CaptureProcess();
}
bool VideoCaptureModuleV4L2::CaptureProcess()
{
int retVal = 0;
fd_set rSet;
struct timeval timeout;
rtc::CritScope cs(&_captureCritSect);
FD_ZERO(&rSet);
FD_SET(_deviceFd, &rSet);
timeout.tv_sec = 1;
timeout.tv_usec = 0;
retVal = select(_deviceFd + 1, &rSet, NULL, NULL, &timeout);
if (retVal < 0 && errno != EINTR) // continue if interrupted
{
// select failed
return false;
}
else if (retVal == 0)
{
// select timed out
return true;
}
else if (!FD_ISSET(_deviceFd, &rSet))
{
// no event on camera handle
if (_captureStarted) {
struct v4l2_buffer buf;
memset(&buf, 0, sizeof(struct v4l2_buffer));
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
// dequeue a buffer - repeat until dequeued properly!
while (ioctl(_deviceFd, VIDIOC_DQBUF, &buf) < 0) {
if (errno != EINTR) {
LOG(LS_INFO) << "could not sync on a buffer on device "
<< strerror(errno);
return true;
}
}
VideoCaptureCapability frameInfo;
frameInfo.width = _currentWidth;
frameInfo.height = _currentHeight;
frameInfo.videoType = _captureVideoType;
if (_captureStarted)
{
struct v4l2_buffer buf;
memset(&buf, 0, sizeof(struct v4l2_buffer));
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
// dequeue a buffer - repeat until dequeued properly!
while (ioctl(_deviceFd, VIDIOC_DQBUF, &buf) < 0)
{
if (errno != EINTR)
{
LOG(LS_INFO) << "could not sync on a buffer on device "
<< strerror(errno);
return true;
}
}
VideoCaptureCapability frameInfo;
frameInfo.width = _currentWidth;
frameInfo.height = _currentHeight;
frameInfo.videoType = _captureVideoType;
// convert to I420 if needed
IncomingFrame((unsigned char*) _pool[buf.index].start,
buf.bytesused, frameInfo);
// enqueue the buffer again
if (ioctl(_deviceFd, VIDIOC_QBUF, &buf) == -1)
{
LOG(LS_INFO) << "Failed to enqueue capture buffer";
}
// convert to I420 if needed
IncomingFrame((unsigned char*)_pool[buf.index].start, buf.bytesused,
frameInfo);
// enqueue the buffer again
if (ioctl(_deviceFd, VIDIOC_QBUF, &buf) == -1) {
LOG(LS_INFO) << "Failed to enqueue capture buffer";
}
usleep(0);
return true;
}
usleep(0);
return true;
}
int32_t VideoCaptureModuleV4L2::CaptureSettings(VideoCaptureCapability& settings)
{
settings.width = _currentWidth;
settings.height = _currentHeight;
settings.maxFPS = _currentFrameRate;
settings.videoType = _captureVideoType;
int32_t VideoCaptureModuleV4L2::CaptureSettings(
VideoCaptureCapability& settings) {
settings.width = _currentWidth;
settings.height = _currentHeight;
settings.maxFPS = _currentFrameRate;
settings.videoType = _captureVideoType;
return 0;
return 0;
}
} // namespace videocapturemodule
} // namespace webrtc

View File

@ -25,11 +25,12 @@ using namespace webrtc;
using namespace videocapturemodule;
static NSArray* camera_presets = @[
AVCaptureSessionPreset352x288, AVCaptureSessionPreset640x480,
AVCaptureSessionPreset352x288,
AVCaptureSessionPreset640x480,
AVCaptureSessionPreset1280x720
];
#define IOS_UNSUPPORTED() \
#define IOS_UNSUPPORTED() \
LOG(LS_ERROR) << __FUNCTION__ << " is not supported on the iOS platform."; \
return -1;
@ -55,8 +56,7 @@ int32_t DeviceInfoIos::Init() {
for (NSString* preset in camera_presets) {
BOOL support = [avDevice supportsAVCaptureSessionPreset:preset];
if (support) {
VideoCaptureCapability capability =
[DeviceInfoIosObjC capabilityForPreset:preset];
VideoCaptureCapability capability = [DeviceInfoIosObjC capabilityForPreset:preset];
capabilityVector.push_back(capability);
}
}
@ -66,8 +66,7 @@ int32_t DeviceInfoIos::Init() {
this->GetDeviceName(i, deviceNameUTF8, 256, deviceId, 256);
std::string deviceIdCopy(deviceId);
std::pair<std::string, VideoCaptureCapabilities> mapPair =
std::pair<std::string, VideoCaptureCapabilities>(deviceIdCopy,
capabilityVector);
std::pair<std::string, VideoCaptureCapabilities>(deviceIdCopy, capabilityVector);
_capabilitiesMap.insert(mapPair);
}
@ -87,8 +86,7 @@ int32_t DeviceInfoIos::GetDeviceName(uint32_t deviceNumber,
uint32_t productUniqueIdUTF8Length) {
NSString* deviceName = [DeviceInfoIosObjC deviceNameForIndex:deviceNumber];
NSString* deviceUniqueId =
[DeviceInfoIosObjC deviceUniqueIdForIndex:deviceNumber];
NSString* deviceUniqueId = [DeviceInfoIosObjC deviceUniqueIdForIndex:deviceNumber];
strncpy(deviceNameUTF8, [deviceName UTF8String], deviceNameUTF8Length);
deviceNameUTF8[deviceNameUTF8Length - 1] = '\0';
@ -136,17 +134,15 @@ int32_t DeviceInfoIos::GetCapability(const char* deviceUniqueIdUTF8,
return -1;
}
int32_t DeviceInfoIos::DisplayCaptureSettingsDialogBox(
const char* deviceUniqueIdUTF8,
const char* dialogTitleUTF8,
void* parentWindow,
uint32_t positionX,
uint32_t positionY) {
int32_t DeviceInfoIos::DisplayCaptureSettingsDialogBox(const char* deviceUniqueIdUTF8,
const char* dialogTitleUTF8,
void* parentWindow,
uint32_t positionX,
uint32_t positionY) {
IOS_UNSUPPORTED();
}
int32_t DeviceInfoIos::GetOrientation(const char* deviceUniqueIdUTF8,
VideoRotation& orientation) {
int32_t DeviceInfoIos::GetOrientation(const char* deviceUniqueIdUTF8, VideoRotation& orientation) {
if (strcmp(deviceUniqueIdUTF8, "Front Camera") == 0) {
orientation = kVideoRotation_0;
} else {

View File

@ -56,22 +56,18 @@ using namespace webrtc::videocapturemodule;
}
// create and configure a new output (using callbacks)
AVCaptureVideoDataOutput* captureOutput =
[[AVCaptureVideoDataOutput alloc] init];
AVCaptureVideoDataOutput* captureOutput = [[AVCaptureVideoDataOutput alloc] init];
NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
NSNumber* val = [NSNumber
numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange];
NSDictionary* videoSettings =
[NSDictionary dictionaryWithObject:val forKey:key];
NSNumber* val = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange];
NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:val forKey:key];
captureOutput.videoSettings = videoSettings;
// add new output
if ([_captureSession canAddOutput:captureOutput]) {
[_captureSession addOutput:captureOutput];
} else {
LOG(LS_ERROR) << __FUNCTION__
<< ": Could not add output to AVCaptureSession";
LOG(LS_ERROR) << __FUNCTION__ << ": Could not add output to AVCaptureSession";
}
#ifdef WEBRTC_IOS
@ -95,8 +91,7 @@ using namespace webrtc::videocapturemodule;
- (void)directOutputToSelf {
[[self currentOutput]
setSampleBufferDelegate:self
queue:dispatch_get_global_queue(
DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)];
queue:dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)];
}
- (void)directOutputToNil {
@ -143,13 +138,11 @@ using namespace webrtc::videocapturemodule;
if (capability.width > 1280 || capability.height > 720) {
return NO;
}
} else if ([_captureSession
canSetSessionPreset:AVCaptureSessionPreset640x480]) {
} else if ([_captureSession canSetSessionPreset:AVCaptureSessionPreset640x480]) {
if (capability.width > 640 || capability.height > 480) {
return NO;
}
} else if ([_captureSession
canSetSessionPreset:AVCaptureSessionPreset352x288]) {
} else if ([_captureSession canSetSessionPreset:AVCaptureSessionPreset352x288]) {
if (capability.width > 352 || capability.height > 288) {
return NO;
}
@ -160,17 +153,15 @@ using namespace webrtc::videocapturemodule;
_capability = capability;
AVCaptureVideoDataOutput* currentOutput = [self currentOutput];
if (!currentOutput)
return NO;
if (!currentOutput) return NO;
[self directOutputToSelf];
_orientationHasChanged = NO;
_captureChanging = YES;
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
^{
[self startCaptureInBackgroundWithOutput:currentOutput];
});
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
[self startCaptureInBackgroundWithOutput:currentOutput];
});
return YES;
}
@ -178,10 +169,8 @@ using namespace webrtc::videocapturemodule;
return [[_captureSession outputs] firstObject];
}
- (void)startCaptureInBackgroundWithOutput:
(AVCaptureVideoDataOutput*)currentOutput {
NSString* captureQuality =
[NSString stringWithString:AVCaptureSessionPresetLow];
- (void)startCaptureInBackgroundWithOutput:(AVCaptureVideoDataOutput*)currentOutput {
NSString* captureQuality = [NSString stringWithString:AVCaptureSessionPresetLow];
if (_capability.width >= 1280 || _capability.height >= 720) {
captureQuality = [NSString stringWithString:AVCaptureSessionPreset1280x720];
} else if (_capability.width >= 640 || _capability.height >= 480) {
@ -219,8 +208,7 @@ using namespace webrtc::videocapturemodule;
_connection.videoOrientation = AVCaptureVideoOrientationPortrait;
break;
case UIDeviceOrientationPortraitUpsideDown:
_connection.videoOrientation =
AVCaptureVideoOrientationPortraitUpsideDown;
_connection.videoOrientation = AVCaptureVideoOrientationPortraitUpsideDown;
break;
case UIDeviceOrientationLandscapeLeft:
_connection.videoOrientation = AVCaptureVideoOrientationLandscapeRight;
@ -258,10 +246,9 @@ using namespace webrtc::videocapturemodule;
}
_captureChanging = YES;
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
^(void) {
[self stopCaptureInBackground];
});
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void) {
[self stopCaptureInBackground];
});
return YES;
}
@ -275,8 +262,7 @@ using namespace webrtc::videocapturemodule;
NSArray* currentInputs = [_captureSession inputs];
// remove current input
if ([currentInputs count] > 0) {
AVCaptureInput* currentInput =
(AVCaptureInput*)[currentInputs objectAtIndex:0];
AVCaptureInput* currentInput = (AVCaptureInput*)[currentInputs objectAtIndex:0];
[_captureSession removeInput:currentInput];
}
@ -288,8 +274,7 @@ using namespace webrtc::videocapturemodule;
return NO;
}
AVCaptureDevice* captureDevice =
[DeviceInfoIosObjC captureDeviceForUniqueId:uniqueId];
AVCaptureDevice* captureDevice = [DeviceInfoIosObjC captureDeviceForUniqueId:uniqueId];
if (!captureDevice) {
return NO;
@ -298,14 +283,12 @@ using namespace webrtc::videocapturemodule;
// now create capture session input out of AVCaptureDevice
NSError* deviceError = nil;
AVCaptureDeviceInput* newCaptureInput =
[AVCaptureDeviceInput deviceInputWithDevice:captureDevice
error:&deviceError];
[AVCaptureDeviceInput deviceInputWithDevice:captureDevice error:&deviceError];
if (!newCaptureInput) {
const char* errorMessage = [[deviceError localizedDescription] UTF8String];
LOG(LS_ERROR) << __FUNCTION__ << ": deviceInputWithDevice error:"
<< errorMessage;
LOG(LS_ERROR) << __FUNCTION__ << ": deviceInputWithDevice error:" << errorMessage;
return NO;
}
@ -339,17 +322,12 @@ using namespace webrtc::videocapturemodule;
const int kYPlaneIndex = 0;
const int kUVPlaneIndex = 1;
uint8_t* baseAddress =
(uint8_t*)CVPixelBufferGetBaseAddressOfPlane(videoFrame, kYPlaneIndex);
size_t yPlaneBytesPerRow =
CVPixelBufferGetBytesPerRowOfPlane(videoFrame, kYPlaneIndex);
uint8_t* baseAddress = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(videoFrame, kYPlaneIndex);
size_t yPlaneBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(videoFrame, kYPlaneIndex);
size_t yPlaneHeight = CVPixelBufferGetHeightOfPlane(videoFrame, kYPlaneIndex);
size_t uvPlaneBytesPerRow =
CVPixelBufferGetBytesPerRowOfPlane(videoFrame, kUVPlaneIndex);
size_t uvPlaneHeight =
CVPixelBufferGetHeightOfPlane(videoFrame, kUVPlaneIndex);
size_t frameSize =
yPlaneBytesPerRow * yPlaneHeight + uvPlaneBytesPerRow * uvPlaneHeight;
size_t uvPlaneBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(videoFrame, kUVPlaneIndex);
size_t uvPlaneHeight = CVPixelBufferGetHeightOfPlane(videoFrame, kUVPlaneIndex);
size_t frameSize = yPlaneBytesPerRow * yPlaneHeight + uvPlaneBytesPerRow * uvPlaneHeight;
VideoCaptureCapability tempCaptureCapability;
tempCaptureCapability.width = CVPixelBufferGetWidth(videoFrame);

View File

@ -54,7 +54,8 @@ int32_t VideoCaptureImpl::RotationFromDegrees(int degrees,
*rotation = kVideoRotation_270;
return 0;
default:
return -1;;
return -1;
;
}
}
@ -87,29 +88,28 @@ VideoCaptureImpl::VideoCaptureImpl()
_lastProcessFrameTimeNanos(rtc::TimeNanos()),
_rotateFrame(kVideoRotation_0),
apply_rotation_(false) {
_requestedCapability.width = kDefaultWidth;
_requestedCapability.height = kDefaultHeight;
_requestedCapability.maxFPS = 30;
_requestedCapability.videoType = VideoType::kI420;
memset(_incomingFrameTimesNanos, 0, sizeof(_incomingFrameTimesNanos));
_requestedCapability.width = kDefaultWidth;
_requestedCapability.height = kDefaultHeight;
_requestedCapability.maxFPS = 30;
_requestedCapability.videoType = VideoType::kI420;
memset(_incomingFrameTimesNanos, 0, sizeof(_incomingFrameTimesNanos));
}
VideoCaptureImpl::~VideoCaptureImpl()
{
DeRegisterCaptureDataCallback();
if (_deviceUniqueId)
delete[] _deviceUniqueId;
VideoCaptureImpl::~VideoCaptureImpl() {
DeRegisterCaptureDataCallback();
if (_deviceUniqueId)
delete[] _deviceUniqueId;
}
void VideoCaptureImpl::RegisterCaptureDataCallback(
rtc::VideoSinkInterface<VideoFrame>* dataCallBack) {
rtc::CritScope cs(&_apiCs);
_dataCallBack = dataCallBack;
rtc::CritScope cs(&_apiCs);
_dataCallBack = dataCallBack;
}
void VideoCaptureImpl::DeRegisterCaptureDataCallback() {
rtc::CritScope cs(&_apiCs);
_dataCallBack = NULL;
rtc::CritScope cs(&_apiCs);
_dataCallBack = NULL;
}
int32_t VideoCaptureImpl::DeliverCapturedFrame(VideoFrame& captureFrame) {
UpdateFrameCount(); // frame count used for local frame rate callback.
@ -121,68 +121,66 @@ int32_t VideoCaptureImpl::DeliverCapturedFrame(VideoFrame& captureFrame) {
return 0;
}
int32_t VideoCaptureImpl::IncomingFrame(
uint8_t* videoFrame,
size_t videoFrameLength,
const VideoCaptureCapability& frameInfo,
int64_t captureTime/*=0*/)
{
rtc::CritScope cs(&_apiCs);
int32_t VideoCaptureImpl::IncomingFrame(uint8_t* videoFrame,
size_t videoFrameLength,
const VideoCaptureCapability& frameInfo,
int64_t captureTime /*=0*/) {
rtc::CritScope cs(&_apiCs);
const int32_t width = frameInfo.width;
const int32_t height = frameInfo.height;
const int32_t width = frameInfo.width;
const int32_t height = frameInfo.height;
TRACE_EVENT1("webrtc", "VC::IncomingFrame", "capture_time", captureTime);
TRACE_EVENT1("webrtc", "VC::IncomingFrame", "capture_time", captureTime);
// Not encoded, convert to I420.
if (frameInfo.videoType != VideoType::kMJPEG &&
CalcBufferSize(frameInfo.videoType, width, abs(height)) !=
videoFrameLength) {
LOG(LS_ERROR) << "Wrong incoming frame length.";
return -1;
// Not encoded, convert to I420.
if (frameInfo.videoType != VideoType::kMJPEG &&
CalcBufferSize(frameInfo.videoType, width, abs(height)) !=
videoFrameLength) {
LOG(LS_ERROR) << "Wrong incoming frame length.";
return -1;
}
int stride_y = width;
int stride_uv = (width + 1) / 2;
int target_width = width;
int target_height = height;
// SetApplyRotation doesn't take any lock. Make a local copy here.
bool apply_rotation = apply_rotation_;
if (apply_rotation) {
// Rotating resolution when for 90/270 degree rotations.
if (_rotateFrame == kVideoRotation_90 ||
_rotateFrame == kVideoRotation_270) {
target_width = abs(height);
target_height = width;
}
}
int stride_y = width;
int stride_uv = (width + 1) / 2;
int target_width = width;
int target_height = height;
// Setting absolute height (in case it was negative).
// In Windows, the image starts bottom left, instead of top left.
// Setting a negative source height, inverts the image (within LibYuv).
// SetApplyRotation doesn't take any lock. Make a local copy here.
bool apply_rotation = apply_rotation_;
// TODO(nisse): Use a pool?
rtc::scoped_refptr<I420Buffer> buffer = I420Buffer::Create(
target_width, abs(target_height), stride_y, stride_uv, stride_uv);
const int conversionResult = ConvertToI420(
frameInfo.videoType, videoFrame, 0, 0, // No cropping
width, height, videoFrameLength,
apply_rotation ? _rotateFrame : kVideoRotation_0, buffer.get());
if (conversionResult < 0) {
LOG(LS_ERROR) << "Failed to convert capture frame from type "
<< static_cast<int>(frameInfo.videoType) << "to I420.";
return -1;
}
if (apply_rotation) {
// Rotating resolution when for 90/270 degree rotations.
if (_rotateFrame == kVideoRotation_90 ||
_rotateFrame == kVideoRotation_270) {
target_width = abs(height);
target_height = width;
}
}
VideoFrame captureFrame(buffer, 0, rtc::TimeMillis(),
!apply_rotation ? _rotateFrame : kVideoRotation_0);
captureFrame.set_ntp_time_ms(captureTime);
// Setting absolute height (in case it was negative).
// In Windows, the image starts bottom left, instead of top left.
// Setting a negative source height, inverts the image (within LibYuv).
DeliverCapturedFrame(captureFrame);
// TODO(nisse): Use a pool?
rtc::scoped_refptr<I420Buffer> buffer = I420Buffer::Create(
target_width, abs(target_height), stride_y, stride_uv, stride_uv);
const int conversionResult = ConvertToI420(
frameInfo.videoType, videoFrame, 0, 0, // No cropping
width, height, videoFrameLength,
apply_rotation ? _rotateFrame : kVideoRotation_0, buffer.get());
if (conversionResult < 0) {
LOG(LS_ERROR) << "Failed to convert capture frame from type "
<< static_cast<int>(frameInfo.videoType) << "to I420.";
return -1;
}
VideoFrame captureFrame(buffer, 0, rtc::TimeMillis(),
!apply_rotation ? _rotateFrame : kVideoRotation_0);
captureFrame.set_ntp_time_ms(captureTime);
DeliverCapturedFrame(captureFrame);
return 0;
return 0;
}
int32_t VideoCaptureImpl::SetCaptureRotation(VideoRotation rotation) {

File diff suppressed because it is too large

View File

@ -17,495 +17,429 @@
#include "rtc_base/logging.h"
#include "rtc_base/platform_thread.h"
#include <Dvdmedia.h> // VIDEOINFOHEADER2
#include <Dvdmedia.h> // VIDEOINFOHEADER2
#include <initguid.h>
#define DELETE_RESET(p) { delete (p) ; (p) = NULL ;}
#define DELETE_RESET(p) \
{ \
delete (p); \
(p) = NULL; \
}
DEFINE_GUID(CLSID_SINKFILTER, 0x88cdbbdc, 0xa73b, 0x4afa, 0xac, 0xbf, 0x15, 0xd5,
0xe2, 0xce, 0x12, 0xc3);
DEFINE_GUID(CLSID_SINKFILTER,
0x88cdbbdc,
0xa73b,
0x4afa,
0xac,
0xbf,
0x15,
0xd5,
0xe2,
0xce,
0x12,
0xc3);
namespace webrtc
{
namespace videocapturemodule
{
namespace webrtc {
namespace videocapturemodule {
typedef struct tagTHREADNAME_INFO
{
DWORD dwType; // must be 0x1000
LPCSTR szName; // pointer to name (in user addr space)
DWORD dwThreadID; // thread ID (-1=caller thread)
DWORD dwFlags; // reserved for future use, must be zero
typedef struct tagTHREADNAME_INFO {
DWORD dwType; // must be 0x1000
LPCSTR szName; // pointer to name (in user addr space)
DWORD dwThreadID; // thread ID (-1=caller thread)
DWORD dwFlags; // reserved for future use, must be zero
} THREADNAME_INFO;
CaptureInputPin::CaptureInputPin (IN TCHAR * szName,
IN CaptureSinkFilter* pFilter,
IN CCritSec * pLock,
OUT HRESULT * pHr,
IN LPCWSTR pszName)
: CBaseInputPin (szName, pFilter, pLock, pHr, pszName),
CaptureInputPin::CaptureInputPin(IN TCHAR* szName,
IN CaptureSinkFilter* pFilter,
IN CCritSec* pLock,
OUT HRESULT* pHr,
IN LPCWSTR pszName)
: CBaseInputPin(szName, pFilter, pLock, pHr, pszName),
_requestedCapability(),
_resultingCapability()
{
_threadHandle = NULL;
_resultingCapability() {
_threadHandle = NULL;
}
CaptureInputPin::~CaptureInputPin()
{
CaptureInputPin::~CaptureInputPin() {}
HRESULT
CaptureInputPin::GetMediaType(IN int iPosition, OUT CMediaType* pmt) {
// reset the thread handle
_threadHandle = NULL;
if (iPosition < 0)
return E_INVALIDARG;
VIDEOINFOHEADER* pvi =
(VIDEOINFOHEADER*)pmt->AllocFormatBuffer(sizeof(VIDEOINFOHEADER));
if (NULL == pvi) {
LOG(LS_INFO) << "CheckMediaType VIDEOINFOHEADER is NULL. Returning.";
return (E_OUTOFMEMORY);
}
ZeroMemory(pvi, sizeof(VIDEOINFOHEADER));
pvi->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
pvi->bmiHeader.biPlanes = 1;
pvi->bmiHeader.biClrImportant = 0;
pvi->bmiHeader.biClrUsed = 0;
if (_requestedCapability.maxFPS != 0) {
pvi->AvgTimePerFrame = 10000000 / _requestedCapability.maxFPS;
}
SetRectEmpty(&(pvi->rcSource)); // we want the whole image area rendered.
SetRectEmpty(&(pvi->rcTarget)); // no particular destination rectangle
pmt->SetType(&MEDIATYPE_Video);
pmt->SetFormatType(&FORMAT_VideoInfo);
pmt->SetTemporalCompression(FALSE);
int32_t positionOffset = 1;
switch (iPosition + positionOffset) {
case 0: {
pvi->bmiHeader.biCompression = MAKEFOURCC('I', '4', '2', '0');
pvi->bmiHeader.biBitCount = 12; // bit per pixel
pvi->bmiHeader.biWidth = _requestedCapability.width;
pvi->bmiHeader.biHeight = _requestedCapability.height;
pvi->bmiHeader.biSizeImage =
3 * _requestedCapability.height * _requestedCapability.width / 2;
pmt->SetSubtype(&MEDIASUBTYPE_I420);
} break;
case 1: {
pvi->bmiHeader.biCompression = MAKEFOURCC('Y', 'U', 'Y', '2');
;
pvi->bmiHeader.biBitCount = 16; // bit per pixel
pvi->bmiHeader.biWidth = _requestedCapability.width;
pvi->bmiHeader.biHeight = _requestedCapability.height;
pvi->bmiHeader.biSizeImage =
2 * _requestedCapability.width * _requestedCapability.height;
pmt->SetSubtype(&MEDIASUBTYPE_YUY2);
} break;
case 2: {
pvi->bmiHeader.biCompression = BI_RGB;
pvi->bmiHeader.biBitCount = 24; // bit per pixel
pvi->bmiHeader.biWidth = _requestedCapability.width;
pvi->bmiHeader.biHeight = _requestedCapability.height;
pvi->bmiHeader.biSizeImage =
3 * _requestedCapability.height * _requestedCapability.width;
pmt->SetSubtype(&MEDIASUBTYPE_RGB24);
} break;
case 3: {
pvi->bmiHeader.biCompression = MAKEFOURCC('U', 'Y', 'V', 'Y');
pvi->bmiHeader.biBitCount = 16; // bit per pixel
pvi->bmiHeader.biWidth = _requestedCapability.width;
pvi->bmiHeader.biHeight = _requestedCapability.height;
pvi->bmiHeader.biSizeImage =
2 * _requestedCapability.height * _requestedCapability.width;
pmt->SetSubtype(&MEDIASUBTYPE_UYVY);
} break;
case 4: {
pvi->bmiHeader.biCompression = MAKEFOURCC('M', 'J', 'P', 'G');
pvi->bmiHeader.biBitCount = 12; // bit per pixel
pvi->bmiHeader.biWidth = _requestedCapability.width;
pvi->bmiHeader.biHeight = _requestedCapability.height;
pvi->bmiHeader.biSizeImage =
3 * _requestedCapability.height * _requestedCapability.width / 2;
pmt->SetSubtype(&MEDIASUBTYPE_MJPG);
} break;
default:
return VFW_S_NO_MORE_ITEMS;
}
pmt->SetSampleSize(pvi->bmiHeader.biSizeImage);
LOG(LS_INFO) << "GetMediaType position " << iPosition << ", width "
<< _requestedCapability.width << ", height "
<< _requestedCapability.height << ", biCompression 0x"
<< std::hex << pvi->bmiHeader.biCompression;
return NOERROR;
}
HRESULT
CaptureInputPin::GetMediaType (IN int iPosition, OUT CMediaType * pmt)
{
// reset the thread handle
_threadHandle = NULL;
CaptureInputPin::CheckMediaType(IN const CMediaType* pMediaType) {
// reset the thread handle
_threadHandle = NULL;
if(iPosition < 0)
const GUID* type = pMediaType->Type();
if (*type != MEDIATYPE_Video)
return E_INVALIDARG;
VIDEOINFOHEADER* pvi = (VIDEOINFOHEADER*) pmt->AllocFormatBuffer(
sizeof(VIDEOINFOHEADER));
if(NULL == pvi)
{
LOG(LS_INFO) << "CheckMediaType VIDEOINFOHEADER is NULL. Returning.";
return(E_OUTOFMEMORY);
const GUID* formatType = pMediaType->FormatType();
// Check for the subtypes we support
const GUID* SubType = pMediaType->Subtype();
if (SubType == NULL) {
return E_INVALIDARG;
}
if (*formatType == FORMAT_VideoInfo) {
VIDEOINFOHEADER* pvi = (VIDEOINFOHEADER*)pMediaType->Format();
if (pvi == NULL) {
return E_INVALIDARG;
}
ZeroMemory(pvi, sizeof(VIDEOINFOHEADER));
pvi->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
pvi->bmiHeader.biPlanes = 1;
pvi->bmiHeader.biClrImportant = 0;
pvi->bmiHeader.biClrUsed = 0;
if (_requestedCapability.maxFPS != 0) {
pvi->AvgTimePerFrame = 10000000/_requestedCapability.maxFPS;
// Store the incoming width and height
_resultingCapability.width = pvi->bmiHeader.biWidth;
// Store the incoming height,
// for RGB24 we assume the frame to be upside down
if (*SubType == MEDIASUBTYPE_RGB24 && pvi->bmiHeader.biHeight > 0) {
_resultingCapability.height = -(pvi->bmiHeader.biHeight);
} else {
_resultingCapability.height = abs(pvi->bmiHeader.biHeight);
}
SetRectEmpty(&(pvi->rcSource)); // we want the whole image area rendered.
SetRectEmpty(&(pvi->rcTarget)); // no particular destination rectangle
pmt->SetType(&MEDIATYPE_Video);
pmt->SetFormatType(&FORMAT_VideoInfo);
pmt->SetTemporalCompression(FALSE);
int32_t positionOffset=1;
switch (iPosition+positionOffset)
{
case 0:
{
pvi->bmiHeader.biCompression = MAKEFOURCC('I','4','2','0');
pvi->bmiHeader.biBitCount = 12; //bit per pixel
pvi->bmiHeader.biWidth = _requestedCapability.width;
pvi->bmiHeader.biHeight = _requestedCapability.height;
pvi->bmiHeader.biSizeImage = 3*_requestedCapability.height
*_requestedCapability.width/2;
pmt->SetSubtype(&MEDIASUBTYPE_I420);
}
break;
case 1:
{
pvi->bmiHeader.biCompression = MAKEFOURCC('Y','U','Y','2');;
pvi->bmiHeader.biBitCount = 16; //bit per pixel
pvi->bmiHeader.biWidth = _requestedCapability.width;
pvi->bmiHeader.biHeight = _requestedCapability.height;
pvi->bmiHeader.biSizeImage = 2*_requestedCapability.width
*_requestedCapability.height;
pmt->SetSubtype(&MEDIASUBTYPE_YUY2);
}
break;
case 2:
{
pvi->bmiHeader.biCompression = BI_RGB;
pvi->bmiHeader.biBitCount = 24; //bit per pixel
pvi->bmiHeader.biWidth = _requestedCapability.width;
pvi->bmiHeader.biHeight = _requestedCapability.height;
pvi->bmiHeader.biSizeImage = 3*_requestedCapability.height
*_requestedCapability.width;
pmt->SetSubtype(&MEDIASUBTYPE_RGB24);
}
break;
case 3:
{
pvi->bmiHeader.biCompression = MAKEFOURCC('U','Y','V','Y');
pvi->bmiHeader.biBitCount = 16; //bit per pixel
pvi->bmiHeader.biWidth = _requestedCapability.width;
pvi->bmiHeader.biHeight = _requestedCapability.height;
pvi->bmiHeader.biSizeImage = 2*_requestedCapability.height
*_requestedCapability.width;
pmt->SetSubtype(&MEDIASUBTYPE_UYVY);
}
break;
case 4:
{
pvi->bmiHeader.biCompression = MAKEFOURCC('M','J','P','G');
pvi->bmiHeader.biBitCount = 12; //bit per pixel
pvi->bmiHeader.biWidth = _requestedCapability.width;
pvi->bmiHeader.biHeight = _requestedCapability.height;
pvi->bmiHeader.biSizeImage = 3*_requestedCapability.height
*_requestedCapability.width/2;
pmt->SetSubtype(&MEDIASUBTYPE_MJPG);
}
break;
default :
return VFW_S_NO_MORE_ITEMS;
}
pmt->SetSampleSize(pvi->bmiHeader.biSizeImage);
LOG(LS_INFO) << "GetMediaType position " << iPosition << ", width "
<< _requestedCapability.width << ", height "
<< _requestedCapability.height << ", biCompression 0x"
LOG(LS_INFO) << "CheckMediaType width:" << pvi->bmiHeader.biWidth
<< " height:" << pvi->bmiHeader.biHeight << " Compression:0x"
<< std::hex << pvi->bmiHeader.biCompression;
return NOERROR;
if (*SubType == MEDIASUBTYPE_MJPG &&
pvi->bmiHeader.biCompression == MAKEFOURCC('M', 'J', 'P', 'G')) {
_resultingCapability.videoType = VideoType::kMJPEG;
return S_OK; // This format is acceptable.
}
if (*SubType == MEDIASUBTYPE_I420 &&
pvi->bmiHeader.biCompression == MAKEFOURCC('I', '4', '2', '0')) {
_resultingCapability.videoType = VideoType::kI420;
return S_OK; // This format is acceptable.
}
if (*SubType == MEDIASUBTYPE_YUY2 &&
pvi->bmiHeader.biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
_resultingCapability.videoType = VideoType::kYUY2;
::Sleep(60); // workaround for bad driver
return S_OK; // This format is acceptable.
}
if (*SubType == MEDIASUBTYPE_UYVY &&
pvi->bmiHeader.biCompression == MAKEFOURCC('U', 'Y', 'V', 'Y')) {
_resultingCapability.videoType = VideoType::kUYVY;
return S_OK; // This format is acceptable.
}
if (*SubType == MEDIASUBTYPE_HDYC) {
_resultingCapability.videoType = VideoType::kUYVY;
return S_OK; // This format is acceptable.
}
if (*SubType == MEDIASUBTYPE_RGB24 &&
pvi->bmiHeader.biCompression == BI_RGB) {
_resultingCapability.videoType = VideoType::kRGB24;
return S_OK; // This format is acceptable.
}
}
if (*formatType == FORMAT_VideoInfo2) {
// VIDEOINFOHEADER2 that has dwInterlaceFlags
VIDEOINFOHEADER2* pvi = (VIDEOINFOHEADER2*)pMediaType->Format();
if (pvi == NULL) {
return E_INVALIDARG;
}
LOG(LS_INFO) << "CheckMediaType width:" << pvi->bmiHeader.biWidth
<< " height:" << pvi->bmiHeader.biHeight << " Compression:0x"
<< std::hex << pvi->bmiHeader.biCompression;
_resultingCapability.width = pvi->bmiHeader.biWidth;
// Store the incoming height,
// for RGB24 we assume the frame to be upside down
if (*SubType == MEDIASUBTYPE_RGB24 && pvi->bmiHeader.biHeight > 0) {
_resultingCapability.height = -(pvi->bmiHeader.biHeight);
} else {
_resultingCapability.height = abs(pvi->bmiHeader.biHeight);
}
if (*SubType == MEDIASUBTYPE_MJPG &&
pvi->bmiHeader.biCompression == MAKEFOURCC('M', 'J', 'P', 'G')) {
_resultingCapability.videoType = VideoType::kMJPEG;
return S_OK; // This format is acceptable.
}
if (*SubType == MEDIASUBTYPE_I420 &&
pvi->bmiHeader.biCompression == MAKEFOURCC('I', '4', '2', '0')) {
_resultingCapability.videoType = VideoType::kI420;
return S_OK; // This format is acceptable.
}
if (*SubType == MEDIASUBTYPE_YUY2 &&
pvi->bmiHeader.biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
_resultingCapability.videoType = VideoType::kYUY2;
return S_OK; // This format is acceptable.
}
if (*SubType == MEDIASUBTYPE_UYVY &&
pvi->bmiHeader.biCompression == MAKEFOURCC('U', 'Y', 'V', 'Y')) {
_resultingCapability.videoType = VideoType::kUYVY;
return S_OK; // This format is acceptable.
}
if (*SubType == MEDIASUBTYPE_HDYC) {
_resultingCapability.videoType = VideoType::kUYVY;
return S_OK; // This format is acceptable.
}
if (*SubType == MEDIASUBTYPE_RGB24 &&
pvi->bmiHeader.biCompression == BI_RGB) {
_resultingCapability.videoType = VideoType::kRGB24;
return S_OK; // This format is acceptable.
}
}
return E_INVALIDARG;
}
HRESULT
CaptureInputPin::CheckMediaType ( IN const CMediaType * pMediaType)
{
// reset the thread handle
_threadHandle = NULL;
CaptureInputPin::Receive(IN IMediaSample* pIMediaSample) {
HRESULT hr = S_OK;
const GUID *type = pMediaType->Type();
if (*type != MEDIATYPE_Video)
return E_INVALIDARG;
RTC_DCHECK(m_pFilter);
RTC_DCHECK(pIMediaSample);
const GUID *formatType = pMediaType->FormatType();
// get the thread handle of the delivering thread inc its priority
if (_threadHandle == NULL) {
HANDLE handle = GetCurrentThread();
SetThreadPriority(handle, THREAD_PRIORITY_HIGHEST);
_threadHandle = handle;
// Check for the subtypes we support
const GUID *SubType = pMediaType->Subtype();
if (SubType == NULL)
{
return E_INVALIDARG;
rtc::SetCurrentThreadName("webrtc_video_capture");
}
reinterpret_cast<CaptureSinkFilter*>(m_pFilter)->LockReceive();
hr = CBaseInputPin::Receive(pIMediaSample);
if (SUCCEEDED(hr)) {
const LONG length = pIMediaSample->GetActualDataLength();
RTC_DCHECK(length >= 0);
unsigned char* pBuffer = NULL;
if (S_OK != pIMediaSample->GetPointer(&pBuffer)) {
reinterpret_cast<CaptureSinkFilter*>(m_pFilter)->UnlockReceive();
return S_FALSE;
}
if(*formatType == FORMAT_VideoInfo)
{
VIDEOINFOHEADER *pvi = (VIDEOINFOHEADER *) pMediaType->Format();
if(pvi == NULL)
{
return E_INVALIDARG;
}
// NOTE: filter unlocked within Send call
reinterpret_cast<CaptureSinkFilter*>(m_pFilter)->ProcessCapturedFrame(
pBuffer, static_cast<size_t>(length), _resultingCapability);
} else {
reinterpret_cast<CaptureSinkFilter*>(m_pFilter)->UnlockReceive();
}
// Store the incoming width and height
_resultingCapability.width = pvi->bmiHeader.biWidth;
// Store the incoming height,
// for RGB24 we assume the frame to be upside down
if(*SubType == MEDIASUBTYPE_RGB24
&& pvi->bmiHeader.biHeight > 0)
{
_resultingCapability.height = -(pvi->bmiHeader.biHeight);
}
else
{
_resultingCapability.height = abs(pvi->bmiHeader.biHeight);
}
LOG(LS_INFO) << "CheckMediaType width:" << pvi->bmiHeader.biWidth
<< " height:" << pvi->bmiHeader.biHeight
<< " Compression:0x" << std::hex
<< pvi->bmiHeader.biCompression;
if(*SubType == MEDIASUBTYPE_MJPG
&& pvi->bmiHeader.biCompression == MAKEFOURCC('M','J','P','G'))
{
_resultingCapability.videoType = VideoType::kMJPEG;
return S_OK; // This format is acceptable.
}
if(*SubType == MEDIASUBTYPE_I420
&& pvi->bmiHeader.biCompression == MAKEFOURCC('I','4','2','0'))
{
_resultingCapability.videoType = VideoType::kI420;
return S_OK; // This format is acceptable.
}
if(*SubType == MEDIASUBTYPE_YUY2
&& pvi->bmiHeader.biCompression == MAKEFOURCC('Y','U','Y','2'))
{
_resultingCapability.videoType = VideoType::kYUY2;
::Sleep(60); // workaround for bad driver
return S_OK; // This format is acceptable.
}
if(*SubType == MEDIASUBTYPE_UYVY
&& pvi->bmiHeader.biCompression == MAKEFOURCC('U','Y','V','Y'))
{
_resultingCapability.videoType = VideoType::kUYVY;
return S_OK; // This format is acceptable.
}
if(*SubType == MEDIASUBTYPE_HDYC)
{
_resultingCapability.videoType = VideoType::kUYVY;
return S_OK; // This format is acceptable.
}
if(*SubType == MEDIASUBTYPE_RGB24
&& pvi->bmiHeader.biCompression == BI_RGB)
{
_resultingCapability.videoType = VideoType::kRGB24;
return S_OK; // This format is acceptable.
}
}
if(*formatType == FORMAT_VideoInfo2)
{
// VIDEOINFOHEADER2 that has dwInterlaceFlags
VIDEOINFOHEADER2 *pvi = (VIDEOINFOHEADER2 *) pMediaType->Format();
if(pvi == NULL)
{
return E_INVALIDARG;
}
LOG(LS_INFO) << "CheckMediaType width:" << pvi->bmiHeader.biWidth
<< " height:" << pvi->bmiHeader.biHeight
<< " Compression:0x" << std::hex
<< pvi->bmiHeader.biCompression;
_resultingCapability.width = pvi->bmiHeader.biWidth;
// Store the incoming height,
// for RGB24 we assume the frame to be upside down
if(*SubType == MEDIASUBTYPE_RGB24
&& pvi->bmiHeader.biHeight > 0)
{
_resultingCapability.height = -(pvi->bmiHeader.biHeight);
}
else
{
_resultingCapability.height = abs(pvi->bmiHeader.biHeight);
}
if(*SubType == MEDIASUBTYPE_MJPG
&& pvi->bmiHeader.biCompression == MAKEFOURCC('M','J','P','G'))
{
_resultingCapability.videoType = VideoType::kMJPEG;
return S_OK; // This format is acceptable.
}
if(*SubType == MEDIASUBTYPE_I420
&& pvi->bmiHeader.biCompression == MAKEFOURCC('I','4','2','0'))
{
_resultingCapability.videoType = VideoType::kI420;
return S_OK; // This format is acceptable.
}
if(*SubType == MEDIASUBTYPE_YUY2
&& pvi->bmiHeader.biCompression == MAKEFOURCC('Y','U','Y','2'))
{
_resultingCapability.videoType = VideoType::kYUY2;
return S_OK; // This format is acceptable.
}
if(*SubType == MEDIASUBTYPE_UYVY
&& pvi->bmiHeader.biCompression == MAKEFOURCC('U','Y','V','Y'))
{
_resultingCapability.videoType = VideoType::kUYVY;
return S_OK; // This format is acceptable.
}
if(*SubType == MEDIASUBTYPE_HDYC)
{
_resultingCapability.videoType = VideoType::kUYVY;
return S_OK; // This format is acceptable.
}
if(*SubType == MEDIASUBTYPE_RGB24
&& pvi->bmiHeader.biCompression == BI_RGB)
{
_resultingCapability.videoType = VideoType::kRGB24;
return S_OK; // This format is acceptable.
}
}
return E_INVALIDARG;
}
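CheckMediaType above identifies the pixel format partly by comparing biCompression against FOURCC codes. MAKEFOURCC packs four ASCII characters into a 32-bit value with the first character in the lowest byte, so MAKEFOURCC('M', 'J', 'P', 'G') is 0x47504A4D and reads "MJPG" when viewed as bytes in memory. A stand-alone sketch of that packing (MakeFourCc is a local re-implementation for illustration, not the Windows macro itself):

#include <cstdint>
#include <cstdio>

// Same layout as the Windows MAKEFOURCC macro: first character in the
// lowest byte, fourth character in the highest byte.
constexpr uint32_t MakeFourCc(char c0, char c1, char c2, char c3) {
  return static_cast<uint32_t>(static_cast<uint8_t>(c0)) |
         static_cast<uint32_t>(static_cast<uint8_t>(c1)) << 8 |
         static_cast<uint32_t>(static_cast<uint8_t>(c2)) << 16 |
         static_cast<uint32_t>(static_cast<uint8_t>(c3)) << 24;
}

int main() {
  std::printf("MJPG -> 0x%08X\n", MakeFourCc('M', 'J', 'P', 'G'));  // 0x47504A4D
  std::printf("I420 -> 0x%08X\n", MakeFourCc('I', '4', '2', '0'));  // 0x30323449
  return 0;
}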
HRESULT
CaptureInputPin::Receive ( IN IMediaSample * pIMediaSample )
{
HRESULT hr = S_OK;
RTC_DCHECK(m_pFilter);
RTC_DCHECK(pIMediaSample);
// get the thread handle of the delivering thread inc its priority
if( _threadHandle == NULL)
{
HANDLE handle= GetCurrentThread();
SetThreadPriority(handle, THREAD_PRIORITY_HIGHEST);
_threadHandle = handle;
rtc::SetCurrentThreadName("webrtc_video_capture");
}
reinterpret_cast <CaptureSinkFilter *>(m_pFilter)->LockReceive();
hr = CBaseInputPin::Receive (pIMediaSample);
if (SUCCEEDED (hr))
{
const LONG length = pIMediaSample->GetActualDataLength();
RTC_DCHECK(length >= 0);
unsigned char* pBuffer = NULL;
if(S_OK != pIMediaSample->GetPointer(&pBuffer))
{
reinterpret_cast <CaptureSinkFilter *>(m_pFilter)->UnlockReceive();
return S_FALSE;
}
// NOTE: filter unlocked within Send call
reinterpret_cast <CaptureSinkFilter *> (m_pFilter)->ProcessCapturedFrame(
pBuffer, static_cast<size_t>(length), _resultingCapability);
}
else
{
reinterpret_cast <CaptureSinkFilter *>(m_pFilter)->UnlockReceive();
}
return hr;
return hr;
}
// called under LockReceive
HRESULT CaptureInputPin::SetMatchingMediaType(
const VideoCaptureCapability& capability)
{
_requestedCapability = capability;
_resultingCapability = VideoCaptureCapability();
return S_OK;
const VideoCaptureCapability& capability) {
_requestedCapability = capability;
_resultingCapability = VideoCaptureCapability();
return S_OK;
}
// ----------------------------------------------------------------------------
CaptureSinkFilter::CaptureSinkFilter (IN TCHAR * tszName,
IN LPUNKNOWN punk,
OUT HRESULT * phr,
VideoCaptureExternal& captureObserver)
: CBaseFilter(tszName,punk,& m_crtFilter,CLSID_SINKFILTER),
CaptureSinkFilter::CaptureSinkFilter(IN TCHAR* tszName,
IN LPUNKNOWN punk,
OUT HRESULT* phr,
VideoCaptureExternal& captureObserver)
: CBaseFilter(tszName, punk, &m_crtFilter, CLSID_SINKFILTER),
m_pInput(NULL),
_captureObserver(captureObserver)
{
(* phr) = S_OK;
m_pInput = new CaptureInputPin(NAME ("VideoCaptureInputPin"),
this,
& m_crtFilter,
phr, L"VideoCapture");
if (m_pInput == NULL || FAILED (* phr))
{
(* phr) = FAILED (* phr) ? (* phr) : E_OUTOFMEMORY;
goto cleanup;
_captureObserver(captureObserver) {
(*phr) = S_OK;
m_pInput = new CaptureInputPin(NAME("VideoCaptureInputPin"), this,
&m_crtFilter, phr, L"VideoCapture");
if (m_pInput == NULL || FAILED(*phr)) {
(*phr) = FAILED(*phr) ? (*phr) : E_OUTOFMEMORY;
goto cleanup;
}
cleanup:
return;
}
CaptureSinkFilter::~CaptureSinkFilter() {
delete m_pInput;
}
int CaptureSinkFilter::GetPinCount() {
return 1;
}
CBasePin* CaptureSinkFilter::GetPin(IN int Index) {
CBasePin* pPin;
LockFilter();
if (Index == 0) {
pPin = m_pInput;
} else {
pPin = NULL;
}
UnlockFilter();
return pPin;
}
STDMETHODIMP CaptureSinkFilter::Pause() {
LockReceive();
LockFilter();
if (m_State == State_Stopped) {
// change the state, THEN activate the input pin
m_State = State_Paused;
if (m_pInput && m_pInput->IsConnected()) {
m_pInput->Active();
}
cleanup :
return;
}
CaptureSinkFilter::~CaptureSinkFilter()
{
delete m_pInput;
}
int CaptureSinkFilter::GetPinCount()
{
return 1;
}
CBasePin *
CaptureSinkFilter::GetPin(IN int Index)
{
CBasePin * pPin;
LockFilter ();
if (Index == 0)
{
pPin = m_pInput;
if (m_pInput && !m_pInput->IsConnected()) {
m_State = State_Running;
}
else
{
pPin = NULL;
}
UnlockFilter ();
return pPin;
} else if (m_State == State_Running) {
m_State = State_Paused;
}
UnlockFilter();
UnlockReceive();
return S_OK;
}
STDMETHODIMP CaptureSinkFilter::Pause()
{
LockReceive();
LockFilter();
if (m_State == State_Stopped)
{
// change the state, THEN activate the input pin
m_State = State_Paused;
if (m_pInput && m_pInput->IsConnected())
{
m_pInput->Active();
}
if (m_pInput && !m_pInput->IsConnected())
{
m_State = State_Running;
}
}
else if (m_State == State_Running)
{
m_State = State_Paused;
}
UnlockFilter();
UnlockReceive();
return S_OK;
STDMETHODIMP CaptureSinkFilter::Stop() {
LockReceive();
LockFilter();
// set the state
m_State = State_Stopped;
// inactivate the pins
if (m_pInput)
m_pInput->Inactive();
UnlockFilter();
UnlockReceive();
return S_OK;
}
STDMETHODIMP CaptureSinkFilter::Stop()
{
LockReceive();
LockFilter();
// set the state
m_State = State_Stopped;
// inactivate the pins
if (m_pInput)
m_pInput->Inactive();
UnlockFilter();
UnlockReceive();
return S_OK;
}
void CaptureSinkFilter::SetFilterGraph(IGraphBuilder* graph)
{
LockFilter();
m_pGraph = graph;
UnlockFilter();
void CaptureSinkFilter::SetFilterGraph(IGraphBuilder* graph) {
LockFilter();
m_pGraph = graph;
UnlockFilter();
}
void CaptureSinkFilter::ProcessCapturedFrame(
unsigned char* pBuffer,
size_t length,
const VideoCaptureCapability& frameInfo)
{
// we have the receiver lock
if (m_State == State_Running)
{
_captureObserver.IncomingFrame(pBuffer, length, frameInfo);
const VideoCaptureCapability& frameInfo) {
// we have the receiver lock
if (m_State == State_Running) {
_captureObserver.IncomingFrame(pBuffer, length, frameInfo);
// trying to hold it since it's only a memcpy
// IMPROVEMENT if this work move critsect
UnlockReceive();
return;
}
// trying to hold it since it's only a memcpy
// IMPROVEMENT if this work move critsect
UnlockReceive();
return;
}
UnlockReceive();
return;
}
STDMETHODIMP CaptureSinkFilter::SetMatchingMediaType(
const VideoCaptureCapability& capability)
{
LockReceive();
LockFilter();
HRESULT hr;
if (m_pInput)
{
hr = m_pInput->SetMatchingMediaType(capability);
}
else
{
hr = E_UNEXPECTED;
}
UnlockFilter();
UnlockReceive();
return hr;
const VideoCaptureCapability& capability) {
LockReceive();
LockFilter();
HRESULT hr;
if (m_pInput) {
hr = m_pInput->SetMatchingMediaType(capability);
} else {
hr = E_UNEXPECTED;
}
UnlockFilter();
UnlockReceive();
return hr;
}
STDMETHODIMP CaptureSinkFilter::GetClassID( OUT CLSID * pCLSID )
{
(* pCLSID) = CLSID_SINKFILTER;
return S_OK;
STDMETHODIMP CaptureSinkFilter::GetClassID(OUT CLSID* pCLSID) {
(*pCLSID) = CLSID_SINKFILTER;
return S_OK;
}
} // namespace videocapturemodule


@ -15,378 +15,304 @@
#include "modules/video_capture/windows/sink_filter_ds.h"
#include "rtc_base/logging.h"
#include <Dvdmedia.h> // VIDEOINFOHEADER2
#include <Dvdmedia.h> // VIDEOINFOHEADER2
namespace webrtc
{
namespace videocapturemodule
{
namespace webrtc {
namespace videocapturemodule {
VideoCaptureDS::VideoCaptureDS()
: _captureFilter(NULL),
_graphBuilder(NULL), _mediaControl(NULL), _sinkFilter(NULL),
_inputSendPin(NULL), _outputCapturePin(NULL), _dvFilter(NULL),
_inputDvPin(NULL), _outputDvPin(NULL)
{
_graphBuilder(NULL),
_mediaControl(NULL),
_sinkFilter(NULL),
_inputSendPin(NULL),
_outputCapturePin(NULL),
_dvFilter(NULL),
_inputDvPin(NULL),
_outputDvPin(NULL) {}
VideoCaptureDS::~VideoCaptureDS() {
if (_mediaControl) {
_mediaControl->Stop();
}
if (_graphBuilder) {
if (_sinkFilter)
_graphBuilder->RemoveFilter(_sinkFilter);
if (_captureFilter)
_graphBuilder->RemoveFilter(_captureFilter);
if (_dvFilter)
_graphBuilder->RemoveFilter(_dvFilter);
}
RELEASE_AND_CLEAR(_inputSendPin);
RELEASE_AND_CLEAR(_outputCapturePin);
RELEASE_AND_CLEAR(_captureFilter); // release the capture device
RELEASE_AND_CLEAR(_sinkFilter);
RELEASE_AND_CLEAR(_dvFilter);
RELEASE_AND_CLEAR(_mediaControl);
RELEASE_AND_CLEAR(_inputDvPin);
RELEASE_AND_CLEAR(_outputDvPin);
RELEASE_AND_CLEAR(_graphBuilder);
}
VideoCaptureDS::~VideoCaptureDS()
{
if (_mediaControl)
{
_mediaControl->Stop();
}
if (_graphBuilder)
{
if (_sinkFilter)
_graphBuilder->RemoveFilter(_sinkFilter);
if (_captureFilter)
_graphBuilder->RemoveFilter(_captureFilter);
if (_dvFilter)
_graphBuilder->RemoveFilter(_dvFilter);
}
RELEASE_AND_CLEAR(_inputSendPin);
RELEASE_AND_CLEAR(_outputCapturePin);
int32_t VideoCaptureDS::Init(const char* deviceUniqueIdUTF8) {
const int32_t nameLength = (int32_t)strlen((char*)deviceUniqueIdUTF8);
if (nameLength > kVideoCaptureUniqueNameLength)
return -1;
RELEASE_AND_CLEAR(_captureFilter); // release the capture device
RELEASE_AND_CLEAR(_sinkFilter);
RELEASE_AND_CLEAR(_dvFilter);
// Store the device name
_deviceUniqueId = new (std::nothrow) char[nameLength + 1];
memcpy(_deviceUniqueId, deviceUniqueIdUTF8, nameLength + 1);
RELEASE_AND_CLEAR(_mediaControl);
if (_dsInfo.Init() != 0)
return -1;
RELEASE_AND_CLEAR(_inputDvPin);
RELEASE_AND_CLEAR(_outputDvPin);
_captureFilter = _dsInfo.GetDeviceFilter(deviceUniqueIdUTF8);
if (!_captureFilter) {
LOG(LS_INFO) << "Failed to create capture filter.";
return -1;
}
RELEASE_AND_CLEAR(_graphBuilder);
// Get the interface for DirectShow's GraphBuilder
HRESULT hr = CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
IID_IGraphBuilder, (void**)&_graphBuilder);
if (FAILED(hr)) {
LOG(LS_INFO) << "Failed to create graph builder.";
return -1;
}
hr = _graphBuilder->QueryInterface(IID_IMediaControl, (void**)&_mediaControl);
if (FAILED(hr)) {
LOG(LS_INFO) << "Failed to create media control builder.";
return -1;
}
hr = _graphBuilder->AddFilter(_captureFilter, CAPTURE_FILTER_NAME);
if (FAILED(hr)) {
LOG(LS_INFO) << "Failed to add the capture device to the graph.";
return -1;
}
_outputCapturePin = GetOutputPin(_captureFilter, PIN_CATEGORY_CAPTURE);
// Create the sink filter used for receiving captured frames.
_sinkFilter = new CaptureSinkFilter(SINK_FILTER_NAME, NULL, &hr, *this);
if (hr != S_OK) {
LOG(LS_INFO) << "Failed to create send filter";
return -1;
}
_sinkFilter->AddRef();
hr = _graphBuilder->AddFilter(_sinkFilter, SINK_FILTER_NAME);
if (FAILED(hr)) {
LOG(LS_INFO) << "Failed to add the send filter to the graph.";
return -1;
}
_inputSendPin = GetInputPin(_sinkFilter);
// Temporarily connect here.
// This is done so that no one else can use the capture device.
if (SetCameraOutput(_requestedCapability) != 0) {
return -1;
}
hr = _mediaControl->Pause();
if (FAILED(hr)) {
LOG(LS_INFO)
<< "Failed to Pause the Capture device. Is it already occupied? " << hr;
return -1;
}
LOG(LS_INFO) << "Capture device '" << deviceUniqueIdUTF8 << "' initialized.";
return 0;
}
int32_t VideoCaptureDS::Init(const char* deviceUniqueIdUTF8)
{
const int32_t nameLength =
(int32_t) strlen((char*) deviceUniqueIdUTF8);
if (nameLength > kVideoCaptureUniqueNameLength)
return -1;
int32_t VideoCaptureDS::StartCapture(const VideoCaptureCapability& capability) {
rtc::CritScope cs(&_apiCs);
// Store the device name
_deviceUniqueId = new (std::nothrow) char[nameLength + 1];
memcpy(_deviceUniqueId, deviceUniqueIdUTF8, nameLength + 1);
if (capability != _requestedCapability) {
DisconnectGraph();
if (_dsInfo.Init() != 0)
return -1;
_captureFilter = _dsInfo.GetDeviceFilter(deviceUniqueIdUTF8);
if (!_captureFilter)
{
LOG(LS_INFO) << "Failed to create capture filter.";
return -1;
if (SetCameraOutput(capability) != 0) {
return -1;
}
// Get the interface for DirectShow's GraphBuilder
HRESULT hr = CoCreateInstance(CLSID_FilterGraph, NULL,
CLSCTX_INPROC_SERVER, IID_IGraphBuilder,
(void **) &_graphBuilder);
if (FAILED(hr))
{
LOG(LS_INFO) << "Failed to create graph builder.";
return -1;
}
hr = _graphBuilder->QueryInterface(IID_IMediaControl,
(void **) &_mediaControl);
if (FAILED(hr))
{
LOG(LS_INFO) << "Failed to create media control builder.";
return -1;
}
hr = _graphBuilder->AddFilter(_captureFilter, CAPTURE_FILTER_NAME);
if (FAILED(hr))
{
LOG(LS_INFO) << "Failed to add the capture device to the graph.";
return -1;
}
_outputCapturePin = GetOutputPin(_captureFilter, PIN_CATEGORY_CAPTURE);
// Create the sink filter used for receiving captured frames.
_sinkFilter = new CaptureSinkFilter(SINK_FILTER_NAME, NULL, &hr,
*this);
if (hr != S_OK)
{
LOG(LS_INFO) << "Failed to create send filter";
return -1;
}
_sinkFilter->AddRef();
hr = _graphBuilder->AddFilter(_sinkFilter, SINK_FILTER_NAME);
if (FAILED(hr))
{
LOG(LS_INFO) << "Failed to add the send filter to the graph.";
return -1;
}
_inputSendPin = GetInputPin(_sinkFilter);
// Temporarily connect here.
// This is done so that no one else can use the capture device.
if (SetCameraOutput(_requestedCapability) != 0)
{
return -1;
}
hr = _mediaControl->Pause();
if (FAILED(hr))
{
LOG(LS_INFO)
<< "Failed to Pause the Capture device. Is it already occupied? "
<< hr;
return -1;
}
LOG(LS_INFO) << "Capture device '" << deviceUniqueIdUTF8
<< "' initialized.";
return 0;
}
HRESULT hr = _mediaControl->Run();
if (FAILED(hr)) {
LOG(LS_INFO) << "Failed to start the Capture device.";
return -1;
}
return 0;
}
int32_t VideoCaptureDS::StartCapture(
const VideoCaptureCapability& capability)
{
rtc::CritScope cs(&_apiCs);
int32_t VideoCaptureDS::StopCapture() {
rtc::CritScope cs(&_apiCs);
if (capability != _requestedCapability)
{
DisconnectGraph();
if (SetCameraOutput(capability) != 0)
{
return -1;
}
}
HRESULT hr = _mediaControl->Run();
if (FAILED(hr))
{
LOG(LS_INFO) << "Failed to start the Capture device.";
return -1;
}
return 0;
HRESULT hr = _mediaControl->Pause();
if (FAILED(hr)) {
LOG(LS_INFO) << "Failed to stop the capture graph. " << hr;
return -1;
}
return 0;
}
int32_t VideoCaptureDS::StopCapture()
{
rtc::CritScope cs(&_apiCs);
HRESULT hr = _mediaControl->Pause();
if (FAILED(hr))
{
LOG(LS_INFO) << "Failed to stop the capture graph. " << hr;
return -1;
}
return 0;
bool VideoCaptureDS::CaptureStarted() {
OAFilterState state = 0;
HRESULT hr = _mediaControl->GetState(1000, &state);
if (hr != S_OK && hr != VFW_S_CANT_CUE) {
LOG(LS_INFO) << "Failed to get the CaptureStarted status";
}
LOG(LS_INFO) << "CaptureStarted " << state;
return state == State_Running;
}
bool VideoCaptureDS::CaptureStarted()
{
OAFilterState state = 0;
HRESULT hr = _mediaControl->GetState(1000, &state);
if (hr != S_OK && hr != VFW_S_CANT_CUE)
{
LOG(LS_INFO) << "Failed to get the CaptureStarted status";
}
LOG(LS_INFO) << "CaptureStarted " << state;
return state == State_Running;
}
int32_t VideoCaptureDS::CaptureSettings(
VideoCaptureCapability& settings)
{
settings = _requestedCapability;
return 0;
int32_t VideoCaptureDS::CaptureSettings(VideoCaptureCapability& settings) {
settings = _requestedCapability;
return 0;
}
int32_t VideoCaptureDS::SetCameraOutput(
const VideoCaptureCapability& requestedCapability)
{
const VideoCaptureCapability& requestedCapability) {
// Get the best matching capability
VideoCaptureCapability capability;
int32_t capabilityIndex;
// Get the best matching capability
VideoCaptureCapability capability;
int32_t capabilityIndex;
// Store the new requested size
_requestedCapability = requestedCapability;
// Match the requested capability with the supported.
if ((capabilityIndex = _dsInfo.GetBestMatchedCapability(
_deviceUniqueId, _requestedCapability, capability)) < 0) {
return -1;
}
// Reduce the frame rate if possible.
if (capability.maxFPS > requestedCapability.maxFPS) {
capability.maxFPS = requestedCapability.maxFPS;
} else if (capability.maxFPS <= 0) {
capability.maxFPS = 30;
}
// Store the new requested size
_requestedCapability = requestedCapability;
// Match the requested capability with the supported.
if ((capabilityIndex = _dsInfo.GetBestMatchedCapability(_deviceUniqueId,
_requestedCapability,
capability)) < 0)
{
return -1;
}
//Reduce the frame rate if possible.
if (capability.maxFPS > requestedCapability.maxFPS)
{
capability.maxFPS = requestedCapability.maxFPS;
} else if (capability.maxFPS <= 0)
{
capability.maxFPS = 30;
// Convert it to the windows capability index since they are not necessarily
// the same
VideoCaptureCapabilityWindows windowsCapability;
if (_dsInfo.GetWindowsCapability(capabilityIndex, windowsCapability) != 0) {
return -1;
}
IAMStreamConfig* streamConfig = NULL;
AM_MEDIA_TYPE* pmt = NULL;
VIDEO_STREAM_CONFIG_CAPS caps;
HRESULT hr = _outputCapturePin->QueryInterface(IID_IAMStreamConfig,
(void**)&streamConfig);
if (hr) {
LOG(LS_INFO) << "Can't get the Capture format settings.";
return -1;
}
// Get the windows capability from the capture device
bool isDVCamera = false;
hr = streamConfig->GetStreamCaps(windowsCapability.directShowCapabilityIndex,
&pmt, reinterpret_cast<BYTE*>(&caps));
if (!FAILED(hr)) {
if (pmt->formattype == FORMAT_VideoInfo2) {
VIDEOINFOHEADER2* h = reinterpret_cast<VIDEOINFOHEADER2*>(pmt->pbFormat);
if (capability.maxFPS > 0 && windowsCapability.supportFrameRateControl) {
h->AvgTimePerFrame = REFERENCE_TIME(10000000.0 / capability.maxFPS);
}
} else {
VIDEOINFOHEADER* h = reinterpret_cast<VIDEOINFOHEADER*>(pmt->pbFormat);
if (capability.maxFPS > 0 && windowsCapability.supportFrameRateControl) {
h->AvgTimePerFrame = REFERENCE_TIME(10000000.0 / capability.maxFPS);
}
}
// Convert it to the windows capability index since they are not necessarily
// the same
VideoCaptureCapabilityWindows windowsCapability;
if (_dsInfo.GetWindowsCapability(capabilityIndex, windowsCapability) != 0)
{
return -1;
}
// Set the sink filter to request this capability
_sinkFilter->SetMatchingMediaType(capability);
// Order the capture device to use this capability
hr += streamConfig->SetFormat(pmt);
IAMStreamConfig* streamConfig = NULL;
AM_MEDIA_TYPE *pmt = NULL;
VIDEO_STREAM_CONFIG_CAPS caps;
// Check if this is a DV camera and we need to add MS DV Filter
if (pmt->subtype == MEDIASUBTYPE_dvsl ||
pmt->subtype == MEDIASUBTYPE_dvsd || pmt->subtype == MEDIASUBTYPE_dvhd)
isDVCamera = true; // This is a DV camera. Use MS DV filter
}
RELEASE_AND_CLEAR(streamConfig);
HRESULT hr = _outputCapturePin->QueryInterface(IID_IAMStreamConfig,
(void**) &streamConfig);
if (hr)
{
LOG(LS_INFO) << "Can't get the Capture format settings.";
return -1;
}
if (FAILED(hr)) {
LOG(LS_INFO) << "Failed to set capture device output format";
return -1;
}
//Get the windows capability from the capture device
bool isDVCamera = false;
hr = streamConfig->GetStreamCaps(
windowsCapability.directShowCapabilityIndex,
&pmt, reinterpret_cast<BYTE*> (&caps));
if (!FAILED(hr))
{
if (pmt->formattype == FORMAT_VideoInfo2)
{
VIDEOINFOHEADER2* h =
reinterpret_cast<VIDEOINFOHEADER2*> (pmt->pbFormat);
if (capability.maxFPS > 0
&& windowsCapability.supportFrameRateControl)
{
h->AvgTimePerFrame = REFERENCE_TIME(10000000.0
/ capability.maxFPS);
}
}
else
{
VIDEOINFOHEADER* h = reinterpret_cast<VIDEOINFOHEADER*>
(pmt->pbFormat);
if (capability.maxFPS > 0
&& windowsCapability.supportFrameRateControl)
{
h->AvgTimePerFrame = REFERENCE_TIME(10000000.0
/ capability.maxFPS);
}
}
// Set the sink filter to request this capability
_sinkFilter->SetMatchingMediaType(capability);
//Order the capture device to use this capability
hr += streamConfig->SetFormat(pmt);
//Check if this is a DV camera and we need to add MS DV Filter
if (pmt->subtype == MEDIASUBTYPE_dvsl
|| pmt->subtype == MEDIASUBTYPE_dvsd
|| pmt->subtype == MEDIASUBTYPE_dvhd)
isDVCamera = true; // This is a DV camera. Use MS DV filter
}
RELEASE_AND_CLEAR(streamConfig);
if (FAILED(hr))
{
LOG(LS_INFO) << "Failed to set capture device output format";
return -1;
}
if (isDVCamera)
{
hr = ConnectDVCamera();
}
else
{
hr = _graphBuilder->ConnectDirect(_outputCapturePin, _inputSendPin,
NULL);
}
if (hr != S_OK)
{
LOG(LS_INFO) << "Failed to connect the Capture graph " << hr;
return -1;
}
return 0;
if (isDVCamera) {
hr = ConnectDVCamera();
} else {
hr = _graphBuilder->ConnectDirect(_outputCapturePin, _inputSendPin, NULL);
}
if (hr != S_OK) {
LOG(LS_INFO) << "Failed to connect the Capture graph " << hr;
return -1;
}
return 0;
}
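One detail of SetCameraOutput above worth spelling out: AvgTimePerFrame is a DirectShow REFERENCE_TIME, i.e. a count of 100-nanosecond units, which is why the frame-rate cap is written as 10000000 / maxFPS. A quick stand-alone check of that arithmetic (the frame rates below are just examples, not values from this CL):

#include <cstdint>
#include <cstdio>

// REFERENCE_TIME counts 100-nanosecond ticks: 10^7 ticks per second.
int64_t AvgTimePerFrameFromFps(double max_fps) {
  return static_cast<int64_t>(10000000.0 / max_fps);
}

int main() {
  std::printf("30 fps -> %lld ticks per frame\n",
              static_cast<long long>(AvgTimePerFrameFromFps(30)));  // 333333
  std::printf("15 fps -> %lld ticks per frame\n",
              static_cast<long long>(AvgTimePerFrameFromFps(15)));  // 666666
  return 0;
}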
int32_t VideoCaptureDS::DisconnectGraph()
{
HRESULT hr = _mediaControl->Stop();
hr += _graphBuilder->Disconnect(_outputCapturePin);
hr += _graphBuilder->Disconnect(_inputSendPin);
int32_t VideoCaptureDS::DisconnectGraph() {
HRESULT hr = _mediaControl->Stop();
hr += _graphBuilder->Disconnect(_outputCapturePin);
hr += _graphBuilder->Disconnect(_inputSendPin);
//if the DV camera filter exists
if (_dvFilter)
{
_graphBuilder->Disconnect(_inputDvPin);
_graphBuilder->Disconnect(_outputDvPin);
}
if (hr != S_OK)
{
LOG(LS_ERROR)
<< "Failed to Stop the Capture device for reconfiguration "
<< hr;
return -1;
}
return 0;
// if the DV camera filter exists
if (_dvFilter) {
_graphBuilder->Disconnect(_inputDvPin);
_graphBuilder->Disconnect(_outputDvPin);
}
if (hr != S_OK) {
LOG(LS_ERROR) << "Failed to Stop the Capture device for reconfiguration "
<< hr;
return -1;
}
return 0;
}
HRESULT VideoCaptureDS::ConnectDVCamera()
{
HRESULT hr = S_OK;
HRESULT VideoCaptureDS::ConnectDVCamera() {
HRESULT hr = S_OK;
if (!_dvFilter)
{
hr = CoCreateInstance(CLSID_DVVideoCodec, NULL, CLSCTX_INPROC,
IID_IBaseFilter, (void **) &_dvFilter);
if (hr != S_OK)
{
LOG(LS_INFO) << "Failed to create the dv decoder: " << hr;
return hr;
}
hr = _graphBuilder->AddFilter(_dvFilter, L"VideoDecoderDV");
if (hr != S_OK)
{
LOG(LS_INFO) << "Failed to add the dv decoder to the graph: " << hr;
return hr;
}
_inputDvPin = GetInputPin(_dvFilter);
if (_inputDvPin == NULL)
{
LOG(LS_INFO) << "Failed to get input pin from DV decoder";
return -1;
}
_outputDvPin = GetOutputPin(_dvFilter, GUID_NULL);
if (_outputDvPin == NULL)
{
LOG(LS_INFO) << "Failed to get output pin from DV decoder";
return -1;
}
if (!_dvFilter) {
hr = CoCreateInstance(CLSID_DVVideoCodec, NULL, CLSCTX_INPROC,
IID_IBaseFilter, (void**)&_dvFilter);
if (hr != S_OK) {
LOG(LS_INFO) << "Failed to create the dv decoder: " << hr;
return hr;
}
hr = _graphBuilder->ConnectDirect(_outputCapturePin, _inputDvPin, NULL);
if (hr != S_OK)
{
LOG(LS_INFO) << "Failed to connect capture device to the dv devoder: "
<< hr;
return hr;
hr = _graphBuilder->AddFilter(_dvFilter, L"VideoDecoderDV");
if (hr != S_OK) {
LOG(LS_INFO) << "Failed to add the dv decoder to the graph: " << hr;
return hr;
}
_inputDvPin = GetInputPin(_dvFilter);
if (_inputDvPin == NULL) {
LOG(LS_INFO) << "Failed to get input pin from DV decoder";
return -1;
}
_outputDvPin = GetOutputPin(_dvFilter, GUID_NULL);
if (_outputDvPin == NULL) {
LOG(LS_INFO) << "Failed to get output pin from DV decoder";
return -1;
}
}
hr = _graphBuilder->ConnectDirect(_outputCapturePin, _inputDvPin, NULL);
if (hr != S_OK) {
LOG(LS_INFO) << "Failed to connect capture device to the dv devoder: "
<< hr;
return hr;
}
hr = _graphBuilder->ConnectDirect(_outputDvPin, _inputSendPin, NULL);
if (hr != S_OK)
{
if (hr == HRESULT_FROM_WIN32(ERROR_TOO_MANY_OPEN_FILES))
{
LOG(LS_INFO) << "Failed to connect the capture device, busy";
}
else
{
LOG(LS_INFO)
<< "Failed to connect capture device to the send graph: "
<< hr;
}
return hr;
hr = _graphBuilder->ConnectDirect(_outputDvPin, _inputSendPin, NULL);
if (hr != S_OK) {
if (hr == HRESULT_FROM_WIN32(ERROR_TOO_MANY_OPEN_FILES)) {
LOG(LS_INFO) << "Failed to connect the capture device, busy";
} else {
LOG(LS_INFO) << "Failed to connect capture device to the send graph: "
<< hr;
}
return hr;
}
return hr;
}
} // namespace videocapturemodule
} // namespace webrtc