Understanding the number of input channels in Core Audio

Hello everyone,

I'm new to Core Audio and still haven't found my footing. I'm learning how to capture audio from the default device using Audio Units. On my MacBook, the default audio input is mono, yet when I write code to capture audio with AUHAL, I find that I have to provide an AudioBufferList with two channels, not one. Likewise, when I try to capture audio from an audio interface with 20 inputs, I still have to provide an AudioBufferList with two channels, not 20.

To investigate, I wrote a small diagnostic program that opens the default audio device and probes it for the number of channels. Depending on how I probe, I get different answers: the stream format tells me there is 1 channel, but the input audio unit tells me there are 2 input channels.

Here's my program to demonstrate the issue:

// InputDeviceChannels.m
// Compile with:
// clang -framework CoreAudio -framework AudioToolbox -framework CoreFoundation -framework AudioUnit -o InputDeviceChannels InputDeviceChannels.m
//
// On my system, this prints:
// 	Device Name: MacBook Pro Microphone
// 	Number of Channels (Stream Format): 1
// 	Number of Elements (Element Count): 2

#import <AudioToolbox/AudioToolbox.h>
#import <AudioUnit/AudioUnit.h>
#import <CoreAudio/CoreAudio.h>
#import <Foundation/Foundation.h>

void printDeviceInfo(AudioUnit audioUnit) {
  UInt32 size;
  OSStatus err;

  AudioStreamBasicDescription streamFormat;
  size = sizeof(streamFormat);
  err = AudioUnitGetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1,
                             &streamFormat, &size);
  if (err != noErr) {
    printf("Error getting stream format\n");
    exit(1);
  }
  int numChannels = streamFormat.mChannelsPerFrame;
  UInt32 elementCount;
  size = sizeof(elementCount);
  err = AudioUnitGetProperty(audioUnit, kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0,
                             &elementCount, &size);
  if (err != noErr) {
    printf("Error getting element count\n");
    exit(1);
  }
  printf("Number of Channels (Stream Format): %d\n", numChannels);
  printf("Number of Elements (Element Count): %d\n", elementCount);
}

void printDeviceName(AudioDeviceID deviceID) {
  UInt32 size;
  OSStatus err;
  CFStringRef deviceName = NULL;
  size = sizeof(deviceName);
  err = AudioObjectGetPropertyData(
      deviceID,
      &(AudioObjectPropertyAddress){kAudioDevicePropertyDeviceNameCFString,
                                    kAudioObjectPropertyScopeGlobal,
                                    kAudioObjectPropertyElementMain},
      0, NULL, &size, &deviceName);

  if (err != noErr) {
    printf("Error getting device name\n");
    exit(1);
  }

  char deviceNameStr[256];
  if (!CFStringGetCString(deviceName, deviceNameStr, sizeof(deviceNameStr),
                          kCFStringEncodingUTF8)) {
    printf("Error converting device name to C string\n");
    exit(1);
  }
  CFRelease(deviceName);
  printf("Device Name: %s\n", deviceNameStr);
}

int main(int argc, const char *argv[]) {
  @autoreleasepool {
    OSStatus err;

    // Get the default input device ID
    AudioDeviceID input_device_id = kAudioObjectUnknown;
    {
      UInt32 property_size = sizeof(input_device_id);
      AudioObjectPropertyAddress input_device_property = {
          kAudioHardwarePropertyDefaultInputDevice,
          kAudioObjectPropertyScopeGlobal,
          kAudioObjectPropertyElementMain,
      };

      err = AudioObjectGetPropertyData(kAudioObjectSystemObject, &input_device_property, 0, NULL,
                                       &property_size, &input_device_id);
      if (err != noErr || input_device_id == kAudioObjectUnknown) {
        printf("Error getting default input device ID\n");
        exit(1);
      }
    }

    // Print the device name using the input device ID
    printDeviceName(input_device_id);

    // Open audio unit for the input device
    AudioComponentDescription desc = {kAudioUnitType_Output, kAudioUnitSubType_HALOutput,
                                      kAudioUnitManufacturer_Apple, 0, 0};
    AudioComponent component = AudioComponentFindNext(NULL, &desc);
    AudioUnit audioUnit;
    err = AudioComponentInstanceNew(component, &audioUnit);
    if (err != noErr) {
      printf("Error creating AudioUnit\n");
      exit(1);
    }

    // Enable IO for input on the AudioUnit and disable output
    UInt32 enableInput = 1;
    UInt32 disableOutput = 0;
    err = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input,
                               1, &enableInput, sizeof(enableInput));
    if (err != noErr) {
      printf("Error enabling input on AudioUnit\n");
      exit(1);
    }

    err = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output,
                               0, &disableOutput, sizeof(disableOutput));
    if (err != noErr) {
      printf("Error disabling output on AudioUnit\n");
      exit(1);
    }

    // Set the current device to the input device
    err =
        AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_CurrentDevice,
                             kAudioUnitScope_Global, 0, &input_device_id, sizeof(input_device_id));
    if (err != noErr) {
      printf("Error setting device for AudioUnit\n");
      exit(1);
    }

    // Initialize AudioUnit
    err = AudioUnitInitialize(audioUnit);
    if (err != noErr) {
      printf("Error initializing AudioUnit\n");
      exit(1);
    }

    // Print device info
    printDeviceInfo(audioUnit);

    // Clean up
    AudioUnitUninitialize(audioUnit);
    AudioComponentInstanceDispose(audioUnit);
  }
  return 0;
}

It prints:

Device Name: MacBook Pro Microphone
Number of Channels (Stream Format): 1
Number of Elements (Element Count): 2

I tried to set the number of channels to 1 on the input unit, but it didn’t change anything. After calling setNumberOfChannels(1, audioUnit), I’m still getting the same output.
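
By that I mean something along these lines (a simplified sketch of the helper, error checking omitted):

static void setNumberOfChannels(UInt32 channels, AudioUnit audioUnit) {
  AudioStreamBasicDescription format;
  UInt32 size = sizeof(format);
  AudioUnitGetProperty(audioUnit, kAudioUnitProperty_StreamFormat,
                       kAudioUnitScope_Input, 1, &format, &size);
  format.mChannelsPerFrame = channels;  // try to force a single channel
  AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat,
                       kAudioUnitScope_Input, 1, &format, sizeof(format));
}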

Note 1: I know that I can just ignore the extra channel, and so on. My purpose here is not to "somehow get it to work"; I have already done that. My purpose is to understand the API, so that I'll be able to write code that handles any number of audio inputs.

Note 2: I have already read a good deal of documentation, especially this technote: https://developer.apple.com/library/archive/technotes/tn2091/ . Perhaps the channel map could help here, but I can't make sense of it; I tried to use it based on my understanding, but I only got OSStatus -50 (paramErr).
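
For concreteness, my attempt looked roughly like this (my reading of the technote is that the map is an array of SInt32 with one entry per destination/client channel, each naming the source/device channel to read from, or -1 for silence):

// Attempted channel map: client channel 0 <- device channel 0.
SInt32 channelMap[1] = {0};
OSStatus status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_ChannelMap,
                                       kAudioUnitScope_Output, 1,
                                       channelMap, sizeof(channelMap));
// For me, status comes back as -50 (paramErr).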

How should I understand this? Is the audio unit an abstraction layer that automatically converts the mono input into stereo? Can I ask AUHAL to provide me with the same number of input channels as the audio device has?

One thing to understand is that the element count is 2 because audio devices usually have a global-scope element (index 0) plus one element for each channel. If you take a look at your input device in Audio MIDI Setup, you will see that you have a primary volume control (the global volume of the device) and a volume control for channel 1, which cannot be set.
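
You can see this distinction from the HAL itself. For example (a quick, untested sketch that reuses the input_device_id from your program):

// Element 0 is the main (device-wide) element; elements 1..N are per-channel.
Float32 volume = 0;
UInt32 volSize = sizeof(volume);
AudioObjectPropertyAddress volAddr = {
    kAudioDevicePropertyVolumeScalar,
    kAudioObjectPropertyScopeInput,
    kAudioObjectPropertyElementMain};  // element 0: the device-wide volume control
if (AudioObjectHasProperty(input_device_id, &volAddr) &&
    AudioObjectGetPropertyData(input_device_id, &volAddr, 0, NULL, &volSize, &volume) == noErr) {
  printf("Main input volume: %f\n", volume);
}
volAddr.mElement = 1;  // element 1: the control for channel 1
if (AudioObjectHasProperty(input_device_id, &volAddr) &&
    AudioObjectGetPropertyData(input_device_id, &volAddr, 0, NULL, &volSize, &volume) == noErr) {
  printf("Channel 1 input volume: %f\n", volume);
}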

So the number of input channels is not the same as the number of input elements, and for your purposes you should only be concerned with the channel count reported by the stream format.
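
If you want the channel count straight from the device (independent of the audio unit), you can sum the channels of the device's input streams, along these lines (sketch):

// Count a device's input channels by summing the channels of its input streams.
UInt32 countInputChannels(AudioDeviceID deviceID) {
  AudioObjectPropertyAddress addr = {
      kAudioDevicePropertyStreamConfiguration,
      kAudioObjectPropertyScopeInput,
      kAudioObjectPropertyElementMain};
  UInt32 size = 0;
  if (AudioObjectGetPropertyDataSize(deviceID, &addr, 0, NULL, &size) != noErr) return 0;
  AudioBufferList *bufferList = (AudioBufferList *)malloc(size);
  UInt32 channels = 0;
  if (AudioObjectGetPropertyData(deviceID, &addr, 0, NULL, &size, bufferList) == noErr) {
    for (UInt32 i = 0; i < bufferList->mNumberBuffers; i++) {
      channels += bufferList->mBuffers[i].mNumberChannels;
    }
  }
  free(bufferList);
  return channels;
}

This should report 1 for your built-in microphone and 20 for the audio interface.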

The audio unit you are interacting with provides input or output depending on the device you attach to it (in this case the microphone); there is no conversion going on here.

You are using very low-level APIs, which is fine, but you may want to consider a higher-level framework such as AVAudioEngine, whose API is more user-friendly.
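
With AVAudioEngine, for instance, the input node already reports the hardware format; a minimal sketch (error handling omitted) looks like this:

#import <AVFoundation/AVFoundation.h>

AVAudioEngine *engine = [[AVAudioEngine alloc] init];
AVAudioInputNode *inputNode = engine.inputNode;
AVAudioFormat *hwFormat = [inputNode inputFormatForBus:0];
NSLog(@"Input channels: %u", (unsigned)hwFormat.channelCount);

// Tap the input; the buffers delivered here use the requested format.
[inputNode installTapOnBus:0
                bufferSize:4096
                    format:hwFormat
                     block:^(AVAudioPCMBuffer *buffer, AVAudioTime *when) {
                       // buffer.format.channelCount matches hwFormat.channelCount
                     }];
NSError *error = nil;
[engine startAndReturnError:&error];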

Regarding your question about the stream format: the audio buffer that you provide for capture must match one of the stream formats supported by that device. You can use Audio MIDI Setup to see which formats are supported.
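
And if you want your capture buffers to carry the same number of channels as the device, the usual AUHAL pattern is to copy the device-side channel count into the client-side format (output scope of element 1) before calling AudioUnitInitialize. A rough sketch:

// Make the client-side format mirror the device's channel count and sample rate,
// but ask for non-interleaved 32-bit float PCM on our side.
AudioStreamBasicDescription deviceFormat;
UInt32 formatSize = sizeof(deviceFormat);
AudioUnitGetProperty(audioUnit, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Input, 1, &deviceFormat, &formatSize);

AudioStreamBasicDescription clientFormat = deviceFormat;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
clientFormat.mBitsPerChannel = 32;
clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame = sizeof(Float32);
clientFormat.mFramesPerPacket = 1;

AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Output, 1, &clientFormat, sizeof(clientFormat));

The AudioBufferList you then pass to AudioUnitRender would have one buffer per channel (since the format is non-interleaved), with as many channels as the device actually has.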

Hope this helps! Michael
