How to get the Mac audio level? (Objective-C)

Currently, I have code that successfully returns the user's system volume, i.e. the level they set with the volume keys.
However, what I want is the level of the audio the speakers are actually playing. So if the user is watching Netflix and a character starts screaming, the value returned would be higher than if the character were whispering.
Code I have now:
+ (AudioDeviceID)defaultOutputDeviceID {
OSStatus status = noErr;
AudioDeviceID outputDeviceID = kAudioObjectUnknown;
AudioObjectPropertyAddress propertyAOPA;
propertyAOPA.mElement = kAudioObjectPropertyElementMaster;
propertyAOPA.mScope = kAudioObjectPropertyScopeGlobal;
propertyAOPA.mSelector = kAudioHardwarePropertyDefaultSystemOutputDevice;
UInt32 propertySize = sizeof(outputDeviceID);
if (!AudioHardwareServiceHasProperty(kAudioObjectSystemObject, &propertyAOPA)) {
NSLog(#"Cannot find default output device!");
return outputDeviceID;
}
status = AudioHardwareServiceGetPropertyData(kAudioObjectSystemObject, &propertyAOPA, 0, NULL, &propertySize, &outputDeviceID);
if(status) {
NSLog(#"Cannot find default output device!");
}
return outputDeviceID;
}
+ (float)volume {
OSStatus status = noErr;
AudioDeviceID outputDeviceID = [[self class] defaultOutputDeviceID];
if (outputDeviceID == kAudioObjectUnknown) {
NSLog(#"Unknown device");
return 0.0;
}
AudioObjectPropertyAddress propertyAOPA;
propertyAOPA.mElement = kAudioObjectPropertyElementMaster;
propertyAOPA.mScope = kAudioDevicePropertyScopeOutput;
propertyAOPA.mSelector = kAudioHardwareServiceDeviceProperty_VirtualMasterVolume;
Float32 outputVolume;
UInt32 propertySize = sizeof(outputVolume);
if (!AudioHardwareServiceHasProperty(outputDeviceID, &propertyAOPA)) {
NSLog(#"No volume returned for device 0x%0x", outputDeviceID);
return 0.0;
}
status = AudioHardwareServiceGetPropertyData(outputDeviceID, &propertyAOPA, 0, NULL, &propertySize, &outputVolume);
if (status) {
NSLog(#"No volume returned for device 0x%0x", outputDeviceID);
return 0.0;
}
if (outputVolume < 0.0 || outputVolume > 1.0)
return 0.0;
return outputVolume;
}

Get the current volume level, set it to whatever you want (e.g. the maximum), and then revert to the user's original volume level afterwards. For more details, see:
https://stackoverflow.com/a/27743599/1351327
Hope it helps. (Be careful: Apple may reject your app for doing this.)
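For illustration, here is a minimal sketch of that save/raise/restore pattern. It assumes a hypothetical +setVolume: counterpart to the +volume method above (the linked answer shows how to implement such a setter with AudioHardwareServiceSetPropertyData); VolumeHelper is a stand-in name for whatever class hosts these methods:
// Hypothetical sketch: +setVolume: is assumed; +volume is the method above.
float savedVolume = [VolumeHelper volume];   // remember the user's level
[VolumeHelper setVolume:1.0f];               // raise to maximum
// ... do whatever needs the boosted volume ...
[VolumeHelper setVolume:savedVolume];        // restore the original level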

Related

Resolving SRV records with iOS SDK

I want to resolve DNS SRV records using the iOS SDK.
I've already tried the high-level Bonjour APIs Apple provides, but they're not what I need. Now I'm using DNS SD.
void *processQueryForSRVRecord(void *record) {
DNSServiceRef sdRef;
int context;
printf("Setting up query for record: %s\n", record);
DNSServiceQueryRecord(&sdRef, 0, 0, record, kDNSServiceType_SRV, kDNSServiceClass_IN, callback, &context);
printf("Processing query for record: %s\n", record);
DNSServiceProcessResult(sdRef);
printf("Deallocating query for record: %s\n", record);
DNSServiceRefDeallocate(sdRef);
return NULL;
}
This works as long as it gets only correct SRV records (for example: _xmpp-server._tcp.gmail.com), but when the record name is mistyped, DNSServiceProcessResult(sdRef) goes into an infinite loop.
Is there a way to stop DNSServiceProcessResult, or must I cancel the thread calling it?
Use good old select(). This is what I have at the moment:
- (void)updateDnsRecords
{
if (self.dnsUpdatePending == YES)
{
return;
}
else
{
self.dnsUpdatePending = YES;
}
NSLog(#"DNS update");
DNSServiceRef sdRef;
DNSServiceErrorType err;
const char* host = [self.dnsHost UTF8String];
if (host != NULL)
{
NSTimeInterval remainingTime = self.dnsUpdateTimeout;
NSDate* startTime = [NSDate date];
err = DNSServiceQueryRecord(&sdRef, 0, 0,
host,
kDNSServiceType_SRV,
kDNSServiceClass_IN,
processDnsReply,
&remainingTime);
// This is necessary so we don't hang forever if there are no results
int dns_sd_fd = DNSServiceRefSockFD(sdRef);
int nfds = dns_sd_fd + 1;
fd_set readfds;
int result;
while (remainingTime > 0)
{
FD_ZERO(&readfds);
FD_SET(dns_sd_fd, &readfds);
struct timeval tv;
tv.tv_sec = (time_t)remainingTime;
tv.tv_usec = (remainingTime - tv.tv_sec) * 1000000;
result = select(nfds, &readfds, (fd_set*)NULL, (fd_set*)NULL, &tv);
if (result == 1)
{
if (FD_ISSET(dns_sd_fd, &readfds))
{
err = DNSServiceProcessResult(sdRef);
if (err != kDNSServiceErr_NoError)
{
NSLog(#"There was an error reading the DNS SRV records.");
break;
}
}
}
else if (result == 0)
{
NBLog(#"DNS SRV select() timed out");
break;
}
else
{
if (errno == EINTR)
{
NBLog(#"DNS SRV select() interrupted, retry.");
}
else
{
NBLog(#"DNS SRV select() returned %d errno %d %s.", result, errno, strerror(errno));
break;
}
}
NSTimeInterval elapsed = [[NSDate date] timeIntervalSinceDate:startTime];
remainingTime -= elapsed;
}
DNSServiceRefDeallocate(sdRef);
}
}
static void processDnsReply(DNSServiceRef sdRef,
DNSServiceFlags flags,
uint32_t interfaceIndex,
DNSServiceErrorType errorCode,
const char* fullname,
uint16_t rrtype,
uint16_t rrclass,
uint16_t rdlen,
const void* rdata,
uint32_t ttl,
void* context)
{
NSTimeInterval* remainingTime = (NSTimeInterval*)context;
// If a timeout occurs the value of the errorCode argument will be
// kDNSServiceErr_Timeout.
if (errorCode != kDNSServiceErr_NoError)
{
return;
}
// The flags argument will have the kDNSServiceFlagsAdd bit set if the
// callback is being invoked when a record is received in response to
// the query.
//
// If kDNSServiceFlagsAdd bit is clear then callback is being invoked
// because the record has expired, in which case the ttl argument will
// be 0.
if ((flags & kDNSServiceFlagsMoreComing) == 0)
{
*remainingTime = 0;
}
// Record parsing code below was copied from Apple SRVResolver sample.
NSMutableData * rrData = [NSMutableData data];
dns_resource_record_t * rr;
uint8_t u8;
uint16_t u16;
uint32_t u32;
u8 = 0;
[rrData appendBytes:&u8 length:sizeof(u8)];
u16 = htons(kDNSServiceType_SRV);
[rrData appendBytes:&u16 length:sizeof(u16)];
u16 = htons(kDNSServiceClass_IN);
[rrData appendBytes:&u16 length:sizeof(u16)];
u32 = htonl(666);
[rrData appendBytes:&u32 length:sizeof(u32)];
u16 = htons(rdlen);
[rrData appendBytes:&u16 length:sizeof(u16)];
[rrData appendBytes:rdata length:rdlen];
rr = dns_parse_resource_record([rrData bytes], (uint32_t) [rrData length]);
// If the parse is successful, add the results.
if (rr != NULL)
{
NSString *target;
target = [NSString stringWithCString:rr->data.SRV->target encoding:NSASCIIStringEncoding];
if (target != nil)
{
uint16_t priority = rr->data.SRV->priority;
uint16_t weight = rr->data.SRV->weight;
uint16_t port = rr->data.SRV->port;
[[FailoverWebInterface sharedInterface] addDnsServer:target priority:priority weight:weight port:port ttl:ttl]; // You'll have to replace this with your own method.
}
dns_free_resource_record(rr); // only free when the parse succeeded
}
}
Here's the Apple SRVResolver sample from which I got the RR parsing.
This Apple sample mentions that it may block forever, yet strangely enough suggests using NSTimer when adding a timeout yourself. I think using select() is a much better way.
I have one to-do left: implement cache flushing with DNSServiceReconfirmRecord. But I won't do that now.
Be aware that this code is working, but I'm still testing it.
You need to add libresolv.dylib to your Xcode project's 'Linked Frameworks and Libraries'.
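For completeness, these are the headers the code above relies on (a sketch; the exact set may vary with your project setup):
#include <dns_sd.h>      // DNSServiceQueryRecord, DNSServiceProcessResult, ...
#include <dns_util.h>    // dns_parse_resource_record, dns_free_resource_record (libresolv)
#include <sys/select.h>  // select(), fd_set
#include <sys/time.h>    // struct timeval
#include <errno.h>       // errno, EINTR
#include <string.h>      // strerror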

How do I connect a Mixer with an AUGraph

I want to create an AUGraph that has the following AudioUnits:
1 × 440 Hz sine wave generator
1 × 880 Hz sine wave generator
1 × Mixer
1 × Output
If I connect my sine wave generators to my mixer, and then the mixer to the output, I get no sound.
If I connect a sine wave generator directly to the output, I do get sound, probably because the output unit connects to the sine wave generator callback.
Is there something I'm missing about how this should be connected?
Full code sample below. 'Connect 1' plus 'Joins 1' does not work; comment these out and run 'Connect 2' and 'Joins 2' to hear the sine wave working:
//
// main.c
// TestAudioUnit
//
// Created by Chris Davis on 25/08/2013.
// Copyright (c) 2013 Chris Davis. All rights reserved.
//
#include <CoreFoundation/CoreFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
typedef struct MyAUGraphPlayer
{
AudioStreamBasicDescription streamFormat;
AUGraph graph;
AUNode output;
AUNode mixer;
AUNode sine;
AudioUnit audioUnits[3];
AudioBufferList *inputBuffer;
Float64 firstInputSampleTime;
Float64 firstOutputSampleTime;
Float64 inToOutSampleTimeOffset;
} MyAUGraphPlayer;
OSStatus SineWaveRenderCallback(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData)
{
// inRefCon is the context pointer we passed in earlier when setting the render callback
double currentPhase = *((double *)inRefCon);
// ioData is where we're supposed to put the audio samples we've created
Float32 * outputBuffer = (Float32 *)ioData->mBuffers[0].mData;
const double frequency = 880.0;
const double phaseStep = (frequency / 44100.) * (M_PI * 2.);
for(int i = 0; i < inNumberFrames; i++) {
outputBuffer[i] = sin(currentPhase);
currentPhase += phaseStep;
}
// If we were doing stereo (or more), this would copy our sine wave samples
// to all of the remaining channels
for(int i = 1; i < ioData->mNumberBuffers; i++) {
memcpy(ioData->mBuffers[i].mData, outputBuffer, ioData->mBuffers[i].mDataByteSize);
}
// writing the current phase back to inRefCon so we can use it on the next call
*((double *)inRefCon) = currentPhase;
return noErr;
}
int main(int argc, const char * argv[])
{
MyAUGraphPlayer p = {0};
MyAUGraphPlayer *player = &p;
NewAUGraph(&player->graph);
//Output
{
AudioComponentDescription description = {
.componentType = kAudioUnitType_Output,
.componentSubType = kAudioUnitSubType_DefaultOutput,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
AUGraphAddNode(player->graph, &description, &player->output);
AudioComponent comp = AudioComponentFindNext(NULL, &description);
AudioComponentInstanceNew(comp, &player->audioUnits[0]);
AudioUnitInitialize(player->audioUnits[0]);
AudioStreamBasicDescription ASBD = {
.mSampleRate = 44100,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kAudioFormatFlagsNativeFloatPacked,
.mChannelsPerFrame = 1,
.mFramesPerPacket = 1,
.mBitsPerChannel = sizeof(Float32) * 8,
.mBytesPerPacket = sizeof(Float32),
.mBytesPerFrame = sizeof(Float32)
};
AudioUnitSetProperty(player->audioUnits[0],
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Global,
0,
&ASBD,
sizeof(ASBD));
}
//Mixer
{
AudioComponentDescription description = {
.componentType = kAudioUnitType_Mixer,
.componentSubType = kAudioUnitSubType_StereoMixer,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
AUGraphAddNode(player->graph, &description, &player->mixer);
AudioComponent comp = AudioComponentFindNext(NULL, &description);
AudioComponentInstanceNew(comp, &player->audioUnits[1]);
AudioUnitInitialize(player->audioUnits[1]);
}
//Sine
{
AudioComponentDescription description = {
.componentType = kAudioUnitType_Generator,
.componentSubType = kAudioUnitSubType_ScheduledSoundPlayer,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
AUGraphAddNode(player->graph, &description, &player->sine);
AudioComponent comp = AudioComponentFindNext(NULL, &description);
AudioComponentInstanceNew(comp, &player->audioUnits[2]);
AudioUnitInitialize(player->audioUnits[2]);
}
//Connect 1
{
AURenderCallbackStruct callbackInfo = {
.inputProc = SineWaveRenderCallback,
.inputProcRefCon = player
};
AudioUnitSetProperty(player->audioUnits[1],
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
0,
&callbackInfo,
sizeof(callbackInfo));
}
//Joins 1 - sine to mixer to output
{
AUGraphConnectNodeInput(player->graph,
player->sine,
0,
player->mixer,
0);
AUGraphConnectNodeInput(player->graph,
player->mixer,
0,
player->output,
0);
}
//connect 2
/*{
AURenderCallbackStruct callbackInfo = {
.inputProc = SineWaveRenderCallback,
.inputProcRefCon = player
};
AudioUnitSetProperty(player->audioUnits[0],
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
0,
&callbackInfo,
sizeof(callbackInfo));
}
//Joins 2 - sine direct to output
{
AUGraphConnectNodeInput(player->graph,
player->sine,
0,
player->output,
0);
}*/
AUGraphInitialize(player->graph);
player->firstOutputSampleTime = -1;
AudioOutputUnitStart(player->audioUnits[0]);
AUGraphStart(player->graph);
printf("enter key to stop\n");
getchar();
return 0;
}
This code correctly plays the sine wave; however, I get lots of errors from Core Audio during setup:
//
// main.c
// TestAudioUnit
//
// Created by Chris Davis on 25/08/2013.
// Copyright (c) 2013 Chris Davis. All rights reserved.
//
#include <CoreFoundation/CoreFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
typedef struct MyAUGraphPlayer
{
AudioStreamBasicDescription streamFormat;
AUGraph graph;
AUNode output;
AUNode mixer;
AUNode sine;
AudioUnit audioUnits[3];
AudioBufferList *inputBuffer;
Float64 firstInputSampleTime;
Float64 firstOutputSampleTime;
Float64 inToOutSampleTimeOffset;
} MyAUGraphPlayer;
OSStatus SineWaveRenderCallback(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData)
{
// inRefCon is the context pointer we passed in earlier when setting the render callback
double currentPhase = *((double *)inRefCon);
// ioData is where we're supposed to put the audio samples we've created
Float32 * outputBuffer = (Float32 *)ioData->mBuffers[0].mData;
const double frequency = 880.0;
const double phaseStep = (frequency / 44100.) * (M_PI * 2.);
for(int i = 0; i < inNumberFrames; i++) {
outputBuffer[i] = sin(currentPhase);
currentPhase += phaseStep;
}
// If we were doing stereo (or more), this would copy our sine wave samples
// to all of the remaining channels
for(int i = 1; i < ioData->mNumberBuffers; i++) {
memcpy(ioData->mBuffers[i].mData, outputBuffer, ioData->mBuffers[i].mDataByteSize);
}
// writing the current phase back to inRefCon so we can use it on the next call
*((double *)inRefCon) = currentPhase;
return noErr;
}
int main(int argc, const char * argv[])
{
MyAUGraphPlayer p = {0};
MyAUGraphPlayer *player = &p;
NewAUGraph(&player->graph);
OSStatus result = 0;
AudioStreamBasicDescription ASBD = {
.mSampleRate = 44100,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kAudioFormatFlagsNativeFloatPacked,
.mChannelsPerFrame = 2,
.mFramesPerPacket = 1,
.mBitsPerChannel = sizeof(Float32) * 8,
.mBytesPerPacket = sizeof(Float32),
.mBytesPerFrame = sizeof(Float32)
};
//Output
{
AudioComponentDescription description = {
.componentType = kAudioUnitType_Output,
.componentSubType = kAudioUnitSubType_DefaultOutput,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
result = AUGraphAddNode(player->graph, &description, &player->output);
printf("err: %d\n", result);
AudioComponent comp = AudioComponentFindNext(NULL, &description);
result = AudioComponentInstanceNew(comp, &player->audioUnits[0]);
printf("err: %d\n", result);
result = AudioUnitInitialize(player->audioUnits[0]);
printf("err: %d\n", result);
}
//Mixer
{
AudioComponentDescription description = {
.componentType = kAudioUnitType_Mixer,
.componentSubType = kAudioUnitSubType_StereoMixer,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
result = AUGraphAddNode(player->graph, &description, &player->mixer);
printf("err: %d\n", result);
AudioComponent comp = AudioComponentFindNext(NULL, &description);
result = AudioComponentInstanceNew(comp, &player->audioUnits[1]);
printf("err: %d\n", result);
}
//Sine
{
AudioComponentDescription description = {
.componentType = kAudioUnitType_Generator,
.componentSubType = kAudioUnitSubType_ScheduledSoundPlayer,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
result = AUGraphAddNode(player->graph, &description, &player->sine);
printf("err: %d\n", result);
AudioComponent comp = AudioComponentFindNext(NULL, &description);
result = AudioComponentInstanceNew(comp, &player->audioUnits[2]);
printf("err: %d\n", result);
result = AudioUnitInitialize(player->audioUnits[2]);
printf("err: %d\n", result);
}
result = AUGraphConnectNodeInput(player->graph,
player->sine,
0,
player->mixer,
0);
printf("err: %d\n", result);
result = AUGraphConnectNodeInput(player->graph,
player->mixer,
0,
player->output,
0);
printf("err: %d\n", result);
result = AUGraphOpen(player->graph);
printf("err: %d\n", result);
UInt32 numbuses = 1;
result = AudioUnitSetProperty(player->audioUnits[1], kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &numbuses, sizeof(numbuses));
printf("err: %d\n", result);
for (UInt32 i = 0; i <= numbuses; ++i) {
// setup render callback struct
AURenderCallbackStruct rcbs;
rcbs.inputProc = &SineWaveRenderCallback;
rcbs.inputProcRefCon = &player;
printf("set AUGraphSetNodeInputCallback\n");
// set a callback for the specified node's specified input
result = AUGraphSetNodeInputCallback(player->graph, player->mixer, i, &rcbs);
printf("AUGraphSetNodeInputCallback err: %d\n", result);
printf("set input bus %d, client kAudioUnitProperty_StreamFormat\n", (unsigned int)i);
// set the input stream format, this is the format of the audio for mixer input
result = AudioUnitSetProperty(player->audioUnits[1], kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, i, &ASBD, sizeof(ASBD));
printf("err: %d\n", result);
}
result = AudioUnitSetProperty(player->audioUnits[1], kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &ASBD, sizeof(ASBD));
printf("err: %d\n", result);
OSStatus status = AUGraphInitialize(player->graph);
printf("err: %d\n", status);
player->firstOutputSampleTime = -1;
AudioOutputUnitStart(player->audioUnits[0]);
AUGraphStart(player->graph);
printf("enter key to stop\n");
getchar();
return 0;
}
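One detail worth noting in both listings, as a hedged aside: the units stored in audioUnits[] are created with AudioComponentInstanceNew, which produces instances separate from the ones the graph itself instantiates when it is opened, so properties set on them do not necessarily reach the units the graph actually renders with (the loop condition i <= numbuses, which touches one bus past the count just set, is another likely error source). After AUGraphOpen you can ask the graph for its own instances instead; a minimal sketch of that pattern:
// Sketch: fetch the graph-owned AudioUnit instances after AUGraphOpen,
// instead of creating separate ones with AudioComponentInstanceNew.
result = AUGraphOpen(player->graph);
result = AUGraphNodeInfo(player->graph, player->output, NULL, &player->audioUnits[0]);
result = AUGraphNodeInfo(player->graph, player->mixer, NULL, &player->audioUnits[1]);
result = AUGraphNodeInfo(player->graph, player->sine, NULL, &player->audioUnits[2]);
// Properties set on these instances now affect the units the graph renders with.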

How to read an Audio Queue Services buffer byte by byte?

I am recording sound from the mic input using Audio Queue Services.
-(void)startRecording{
[self setupAudioFormat:&recordState.dataFormat];
recordState.currentPacket = 0;
OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat,
AudioInputCallback,
&recordState,
CFRunLoopGetCurrent(),
kCFRunLoopCommonModes,
0,
&recordState.queue);
if (status == 0)
{
// Prime recording buffers with empty data
for (int i = 0; i < NUM_BUFFERS; i++)
{
NSLog(#"buf in");
AudioQueueAllocateBuffer(recordState.queue, 16000, &recordState.buffers[i]);
AudioQueueEnqueueBuffer (recordState.queue, recordState.buffers[i], 0, NULL);
}
status = AudioFileCreateWithURL(fileURL,
kAudioFileAIFFType,
&recordState.dataFormat,
kAudioFileFlags_EraseFile,
&recordState.audioFile);
if (status == 0)
{
recordState.recording = true;
status = AudioQueueStart(recordState.queue, NULL);
if (status == 0)
{
NSLog(#"Recording");
}
}
}
if (status != 0)
{
//[self stopRecording];
NSLog(#"recording failed");
}
}
on callback:
void AudioInputCallback(void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumberPacketDescriptions,
const AudioStreamPacketDescription * inPacketDescs)
{
RecordState * recordState = (RecordState*)inUserData;
if (!recordState->recording)
{
printf("Not recording, returning\n");
}
// if (inNumberPacketDescriptions == 0 && recordState->dataFormat.mBytesPerPacket != 0)
// {
// inNumberPacketDescriptions = inBuffer->mAudioDataByteSize / recordState->dataFormat.mBytesPerPacket;
// }
/*
int sampleCount = recordState->buffers[0]->mAudioDataBytesCapacity / sizeof (AUDIO_DATA_TYPE_FORMAT);
NSLog(#"sample count = %i",sampleCount);
AUDIO_DATA_TYPE_FORMAT *p = (AUDIO_DATA_TYPE_FORMAT*)recordState->buffers[0]->mAudioData;
for (int i = 0; i < sampleCount; i++) {
if (p[i] > 1000) {
NSLog(#"%hd",p[i]);
}
}*/
printf("Writing buffer %lld\n", recordState->currentPacket);
OSStatus status = AudioFileWritePackets(recordState->audioFile,
false,
inBuffer->mAudioDataByteSize,
inPacketDescs,
recordState->currentPacket,
&inNumberPacketDescriptions,
inBuffer->mAudioData);
if (status == 0)
{
recordState->buffers[0] = nil;
recordState->currentPacket += inNumberPacketDescriptions;
}
AudioQueueEnqueueBuffer(recordState->queue, inBuffer, 0, NULL);
}
Here I want to read the recorded buffer. Is it possible to get something like this:
short[] buffer = ?; // the audio buffer converted to some structure (short[] just as an example)
Then I would like to read every element of this structure:
for (int i = 0; i < sizeOfBuffer; i++) {
bufferVal = buffer[i];
}
In short, how do I handle the buffer contents while recording?
Thanks.
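For reference, a minimal sketch of reading the samples inside the callback, assuming the queue was set up for 16-bit linear PCM (the same idea as the commented-out block in the callback above):
// Inside AudioInputCallback, assuming 16-bit LPCM (SInt16) samples:
SInt16 *samples = (SInt16 *)inBuffer->mAudioData;
UInt32 sampleCount = inBuffer->mAudioDataByteSize / sizeof(SInt16);
for (UInt32 i = 0; i < sampleCount; i++) {
    SInt16 value = samples[i]; // one audio sample; process as needed
}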

How to get the computer's current volume level?

How do I access the current volume level of a Mac from the Cocoa API?
For example: when I'm using Spotify.app on OS X 10.7 and a sound advertisement comes up, if I turn down my Mac's volume, the app will pause the ad until I turn it back up to an average level. I find this incredibly obnoxious and a violation of user privacy, but somehow Spotify has found a way to do it.
Is there any way I can do this with Cocoa? I'm making an app where it might come in useful to warn the user if the volume is low.
There are two options available. The first step is to determine what device you'd like and get its ID. Assuming the default output device, the code will look something like:
AudioObjectPropertyAddress propertyAddress = {
kAudioHardwarePropertyDefaultOutputDevice,
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster
};
AudioDeviceID deviceID;
UInt32 dataSize = sizeof(deviceID);
OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize, &deviceID);
if(kAudioHardwareNoError != result)
// Handle the error
Next, you can use the kAudioHardwareServiceDeviceProperty_VirtualMasterVolume property to get the device's virtual master volume:
AudioObjectPropertyAddress propertyAddress = {
kAudioHardwareServiceDeviceProperty_VirtualMasterVolume,
kAudioDevicePropertyScopeOutput,
kAudioObjectPropertyElementMaster
};
if(!AudioHardwareServiceHasProperty(deviceID, &propertyAddress))
// An error occurred
Float32 volume;
UInt32 dataSize = sizeof(volume);
OSStatus result = AudioHardwareServiceGetPropertyData(deviceID, &propertyAddress, 0, NULL, &dataSize, &volume);
if(kAudioHardwareNoError != result)
// An error occurred
Alternatively, you can use kAudioDevicePropertyVolumeScalar to get the volume for a specific channel:
UInt32 channel = 1; // Channel 0 is master, if available
AudioObjectPropertyAddress propertyAddress = {
kAudioDevicePropertyVolumeScalar,
kAudioDevicePropertyScopeOutput,
channel
};
if(!AudioObjectHasProperty(deviceID, &propertyAddress))
// An error occurred
Float32 volume;
UInt32 dataSize = sizeof(volume);
OSStatus result = AudioObjectGetPropertyData(deviceID, &propertyAddress, 0, NULL, &dataSize, &volume);
if(kAudioHardwareNoError != result)
// An error occurred
The difference between the two is explained in Apple's docs:
kAudioHardwareServiceDeviceProperty_VirtualMasterVolume
A Float32 value that represents the value of the volume control. The
range for this property’s value is 0.0 (silence) through 1.0 (full
level). The effect of this property depends on the hardware device
associated with the HAL audio object. If the device has a master
volume control, this property controls it. If the device has
individual channel volume controls, this property applies to those
identified by the device's preferred multichannel layout, or the
preferred stereo pair if the device is stereo only. This control
maintains relative balance between the channels it affects.
So it can be tricky to define exactly what a device's volume is, especially for multichannel devices with non-standard channel maps.
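As one way around that ambiguity, you could average the per-channel scalar volumes yourself. A minimal sketch, assuming a stereo device (channels 1 and 2) and the deviceID obtained earlier:
// Sketch: average kAudioDevicePropertyVolumeScalar over channels 1 and 2.
Float32 total = 0;
UInt32 channelsRead = 0;
for (UInt32 channel = 1; channel <= 2; ++channel) {
    AudioObjectPropertyAddress addr = {
        kAudioDevicePropertyVolumeScalar,
        kAudioDevicePropertyScopeOutput,
        channel
    };
    Float32 channelVolume = 0;
    UInt32 size = sizeof(channelVolume);
    if (AudioObjectHasProperty(deviceID, &addr) &&
        kAudioHardwareNoError == AudioObjectGetPropertyData(deviceID, &addr, 0, NULL, &size, &channelVolume)) {
        total += channelVolume;
        channelsRead++;
    }
}
Float32 averageVolume = (channelsRead > 0) ? total / channelsRead : 0.0f;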
From CocoaDev, these class methods look like they should work, though they're not particularly Cocoa-like:
#import <AudioToolbox/AudioServices.h>
+(AudioDeviceID)defaultOutputDeviceID
{
AudioDeviceID outputDeviceID = kAudioObjectUnknown;
// get the default output device
UInt32 propertySize = 0;
OSStatus status = noErr;
AudioObjectPropertyAddress propertyAOPA;
propertyAOPA.mScope = kAudioObjectPropertyScopeGlobal;
propertyAOPA.mElement = kAudioObjectPropertyElementMaster;
propertyAOPA.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
if (!AudioHardwareServiceHasProperty(kAudioObjectSystemObject, &propertyAOPA))
{
NSLog(#"Cannot find default output device!");
return outputDeviceID;
}
propertySize = sizeof(AudioDeviceID);
status = AudioHardwareServiceGetPropertyData(kAudioObjectSystemObject, &propertyAOPA, 0, NULL, &propertySize, &outputDeviceID);
if(status)
{
NSLog(#"Cannot find default output device!");
}
return outputDeviceID;
}
// getting system volume
+(float)volume
{
Float32 outputVolume;
UInt32 propertySize = 0;
OSStatus status = noErr;
AudioObjectPropertyAddress propertyAOPA;
propertyAOPA.mElement = kAudioObjectPropertyElementMaster;
propertyAOPA.mSelector = kAudioHardwareServiceDeviceProperty_VirtualMasterVolume;
propertyAOPA.mScope = kAudioDevicePropertyScopeOutput;
AudioDeviceID outputDeviceID = [[self class] defaultOutputDeviceID];
if (outputDeviceID == kAudioObjectUnknown)
{
NSLog(#"Unknown device");
return 0.0;
}
if (!AudioHardwareServiceHasProperty(outputDeviceID, &propertyAOPA))
{
NSLog(#"No volume returned for device 0x%0x", outputDeviceID);
return 0.0;
}
propertySize = sizeof(Float32);
status = AudioHardwareServiceGetPropertyData(outputDeviceID, &propertyAOPA, 0, NULL, &propertySize, &outputVolume);
if (status)
{
NSLog(#"No volume returned for device 0x%0x", outputDeviceID);
return 0.0;
}
if (outputVolume < 0.0 || outputVolume > 1.0) return 0.0;
return outputVolume;
}

Objective-C audio meter

Is it possible to have an audio level indicator in Xcode?
I want to do something like this:
if (audioLevel == 100) {
}
or something similar...
Any ideas? Example code, please?
I'm VERY new to Objective-C, so the more explanation the better! :D
Unfortunately, there isn't a very straightforward API to do this. You need to use the low-level AudioToolbox.framework.
Luckily, others have already solved this problem for you. Here's some code I simplified slightly to be straight C functions, from CocoaDev. You need to link to the AudioToolbox to compile this code (see here for documentation on how to do so).
#import <AudioToolbox/AudioServices.h>
AudioDeviceID getDefaultOutputDeviceID()
{
AudioDeviceID outputDeviceID = kAudioObjectUnknown;
// get the default output device
OSStatus status = noErr;
AudioObjectPropertyAddress propertyAOPA;
propertyAOPA.mScope = kAudioObjectPropertyScopeGlobal;
propertyAOPA.mElement = kAudioObjectPropertyElementMaster;
propertyAOPA.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
if (!AudioHardwareServiceHasProperty(kAudioObjectSystemObject, &propertyAOPA))
{
printf("Cannot find default output device!");
return outputDeviceID;
}
status = AudioHardwareServiceGetPropertyData(kAudioObjectSystemObject, &propertyAOPA, 0, NULL, (UInt32[]){sizeof(AudioDeviceID)}, &outputDeviceID);
if (status != 0)
{
printf("Cannot find default output device!");
}
return outputDeviceID;
}
float getVolume ()
{
Float32 outputVolume;
OSStatus status = noErr;
AudioObjectPropertyAddress propertyAOPA;
propertyAOPA.mElement = kAudioObjectPropertyElementMaster;
propertyAOPA.mSelector = kAudioHardwareServiceDeviceProperty_VirtualMasterVolume;
propertyAOPA.mScope = kAudioDevicePropertyScopeOutput;
AudioDeviceID outputDeviceID = getDefaultOutputDeviceID();
if (outputDeviceID == kAudioObjectUnknown)
{
printf("Unknown device");
return 0.0;
}
if (!AudioHardwareServiceHasProperty(outputDeviceID, &propertyAOPA))
{
printf("No volume returned for device 0x%0x", outputDeviceID);
return 0.0;
}
status = AudioHardwareServiceGetPropertyData(outputDeviceID, &propertyAOPA, 0, NULL, (UInt32[]){sizeof(Float32)}, &outputVolume);
if (status)
{
printf("No volume returned for device 0x%0x", outputDeviceID);
return 0.0;
}
if (outputVolume < 0.0 || outputVolume > 1.0) return 0.0;
return outputVolume;
}
int main (int argc, char const *argv[])
{
printf("%f", getVolume());
return 0;
}
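If you save this as, say, volume.c (the file name is just an example), a command along these lines should build it:
clang volume.c -framework AudioToolbox -o volume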
Note that there's a setVolume function there, too.
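A sketch of what such a setter could look like, mirroring getVolume above and using AudioHardwareServiceSetPropertyData (the input is clamped to the valid 0.0-1.0 range):
void setVolume(Float32 newVolume)
{
    // Clamp to the valid range for the virtual master volume.
    if (newVolume < 0.0) newVolume = 0.0;
    if (newVolume > 1.0) newVolume = 1.0;
    AudioObjectPropertyAddress propertyAOPA;
    propertyAOPA.mElement = kAudioObjectPropertyElementMaster;
    propertyAOPA.mSelector = kAudioHardwareServiceDeviceProperty_VirtualMasterVolume;
    propertyAOPA.mScope = kAudioDevicePropertyScopeOutput;
    AudioDeviceID outputDeviceID = getDefaultOutputDeviceID();
    if (outputDeviceID == kAudioObjectUnknown) return;
    if (!AudioHardwareServiceHasProperty(outputDeviceID, &propertyAOPA)) return;
    OSStatus status = AudioHardwareServiceSetPropertyData(outputDeviceID, &propertyAOPA, 0, NULL, sizeof(Float32), &newVolume);
    if (status) printf("Could not set volume for device 0x%0x", outputDeviceID);
}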