How to turn off bluetooth device and sound device in Cocoa? - objective-c

I know that AirPort can be turned off with the CoreWLAN framework.
So I think there are probably similar functions or frameworks for the Bluetooth device and the sound device.
How can I turn those devices off?

I assume that by "cannot have power so that it cannot speak" you mean you simply want to mute the speaker. I found some neat sample code here, using CoreAudio to mute the system's default output device: http://cocoadev.com/index.pl?SoundVolume
I took the liberty of converting it to pure C and trying it out.
#import <CoreAudio/CoreAudio.h>
#import <stdio.h>
// getting system volume
float getVolume() {
float b_vol;
OSStatus err;
AudioDeviceID device;
UInt32 size;
UInt32 channels[2];
float volume[2];
// get device
size = sizeof device;
err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice, &size, &device);
if(err!=noErr) {
printf("audio-volume error get device\n");
return 0.0;
}
// try set master volume (channel 0)
size = sizeof b_vol;
err = AudioDeviceGetProperty(device, 0, 0, kAudioDevicePropertyVolumeScalar, &size, &b_vol); //kAudioDevicePropertyVolumeScalarToDecibels
if(noErr==err) return b_vol;
// otherwise, try separate channels
// get channel numbers
size = sizeof(channels);
err = AudioDeviceGetProperty(device, 0, 0,kAudioDevicePropertyPreferredChannelsForStereo, &size,&channels);
if(err!=noErr) printf("error getting channel-numbers\n");
size = sizeof(float);
err = AudioDeviceGetProperty(device, channels[0], 0, kAudioDevicePropertyVolumeScalar, &size, &volume[0]);
if(noErr!=err) printf("error getting volume of channel %d\n",channels[0]);
err = AudioDeviceGetProperty(device, channels[1], 0, kAudioDevicePropertyVolumeScalar, &size, &volume[1]);
if(noErr!=err) printf("error getting volume of channel %d\n",channels[1]);
b_vol = (volume[0]+volume[1])/2.00;
return b_vol;
}
// setting system volume
void setVolume(float involume) {
OSStatus err;
AudioDeviceID device;
UInt32 size;
Boolean canset = false;
UInt32 channels[2];
//float volume[2];
// get default device
size = sizeof device;
err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice, &size, &device);
if(err!=noErr) {
printf("audio-volume error get device\n");
return;
}
// try set master-channel (0) volume
size = sizeof canset;
err = AudioDeviceGetPropertyInfo(device, 0, false, kAudioDevicePropertyVolumeScalar, &size, &canset);
if(err==noErr && canset==true) {
size = sizeof involume;
err = AudioDeviceSetProperty(device, NULL, 0, false, kAudioDevicePropertyVolumeScalar, size, &involume);
return;
}
// else, try separate channels
// get channels
size = sizeof(channels);
err = AudioDeviceGetProperty(device, 0, false, kAudioDevicePropertyPreferredChannelsForStereo, &size,&channels);
if(err!=noErr) {
printf("error getting channel-numbers\n");
return;
}
// set volume
size = sizeof(float);
err = AudioDeviceSetProperty(device, 0, channels[0], false, kAudioDevicePropertyVolumeScalar, size, &involume);
if(noErr!=err) printf("error setting volume of channel %d\n",channels[0]);
err = AudioDeviceSetProperty(device, 0, channels[1], false, kAudioDevicePropertyVolumeScalar, size, &involume);
if(noErr!=err) printf("error setting volume of channel %d\n",channels[1]);
}
int main() {
printf("The system's volume is currently %f\n", getVolume());
printf("Setting volume to 0.\n");
setVolume(0.0f);
return 0;
}
I ran it and got this:
[04:29:03] [william@enterprise ~/Documents/Programming/c]$ gcc -framework CoreAudio -o mute.o coreaudio.c
.. snipped compiler output..
[04:29:26] [william@enterprise ~/Documents/Programming/c]$ ./mute.o
The system's volume is currently 0.436749
Setting volume to 0.
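Note that AudioHardwareGetProperty and AudioDeviceGetProperty have since been deprecated in favour of the AudioObject API, and if an outright mute (rather than a zero volume) is closer to what you want, kAudioDevicePropertyMute is worth a look. A rough sketch, assuming the default output device actually exposes a master mute control (not all devices do):
#include <CoreAudio/CoreAudio.h>
#include <stdio.h>
// Sketch: mute (or unmute) the default output device via the AudioObject API.
static void setDefaultOutputMuted(Boolean mute) {
    AudioObjectPropertyAddress addr = {
        kAudioHardwarePropertyDefaultOutputDevice,
        kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster
    };
    AudioDeviceID device = kAudioObjectUnknown;
    UInt32 size = sizeof(device);
    OSStatus err = AudioObjectGetPropertyData(kAudioObjectSystemObject, &addr,
                                              0, NULL, &size, &device);
    if (err != noErr) { printf("could not get default output device\n"); return; }
    // Re-point the address at the device's master mute control.
    addr.mSelector = kAudioDevicePropertyMute;
    addr.mScope = kAudioDevicePropertyScopeOutput;
    if (!AudioObjectHasProperty(device, &addr)) {
        printf("device has no master mute control\n");
        return;
    }
    UInt32 muted = mute ? 1 : 0;
    err = AudioObjectSetPropertyData(device, &addr, 0, NULL, sizeof(muted), &muted);
    if (err != noErr) printf("could not set mute state\n");
}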
Hopefully this sends you in the right direction.

Related

Error in uploading sensor data to Azure IOT central

I watched this video and tried to implement the same project using all the required components, but I keep getting errors when connecting to the portal and I am not getting any output from the sensors either.
This is the code I have been using:
#include "DHT.h" // including the library of DHT11 temperature and humidity sensor
#include <ESP8266WiFi.h>
#define DHTTYPE DHT11
#include "D:/ARDUINO/ESP8266/ESP8266/src/iotc/common/string_buffer.h"
#include "D:/ARDUINO/ESP8266/ESP8266/src/iotc/iot"
#include "D:/ARDUINO/ESP8266/ESP8266/src/connection.h"
#define dht_dpin 12 // creating the object sensor on pin 'D12'
DHT dht(dht_dpin, DHTTYPE);
#define WIFI_SSID "<ENTER WIFI SSID>"
#define WIFI_PASSWORD "<ENTER WIFI PASSWORD>"
const char* SCOPE_ID = "<ENTER SCOPE ID>";
const char* DEVICE_ID = "<ENTER DEVICE ID>";
const char* DEVICE_KEY = "<ENTER DEVICE KEY>";
int echoPin = D6;
int trigPin = D8;
int pingTravelTime;
float pingTravelDistance;
float distanceToTarget;
float speedOfsound;
void on_event(IOTContext ctx, IOTCallbackInfo* callbackInfo);
void on_event(IOTContext ctx, IOTCallbackInfo* callbackInfo) {
// ConnectionStatus
if (strcmp(callbackInfo->eventName, "ConnectionStatus") == 0) {
LOG_VERBOSE("Is connected ? %s (%d)",
callbackInfo->statusCode == IOTC_CONNECTION_OK ? "YES" : "NO",
callbackInfo->statusCode);
isConnected = callbackInfo->statusCode == IOTC_CONNECTION_OK;
return;
}
// payload buffer doesn't have a null ending.
// add null ending in another buffer before print
AzureIOT::StringBuffer buffer;
if (callbackInfo->payloadLength > 0) {
buffer.initialize(callbackInfo->payload, callbackInfo->payloadLength);
}
LOG_VERBOSE("- [%s] event was received. Payload => %s\n",
callbackInfo->eventName, buffer.getLength() ? *buffer : "EMPTY");
if (strcmp(callbackInfo->eventName, "Command") == 0) {
LOG_VERBOSE("- Command name was => %s\r\n", callbackInfo->tag);
}
dht.begin();
}
void setup() {
Serial.begin(9600);
connect_wifi(WIFI_SSID, WIFI_PASSWORD);
connect_client(SCOPE_ID, DEVICE_ID, DEVICE_KEY);
if (context != NULL) {
lastTick = 0; // set timer in the past to enable first telemetry a.s.a.p
}
pinMode(trigPin,OUTPUT);
pinMode(echoPin,INPUT);
}
void loop() {
digitalWrite(trigPin,LOW);
delayMicroseconds(10);
digitalWrite(trigPin,HIGH);
delayMicroseconds(10);
digitalWrite(trigPin,LOW);
float h = dht.readHumidity();
float t = dht.readTemperature();
// Reading the temperature in Celsius degrees and store in the t variable
// Reading the humidity index and store in the h variable
pingTravelTime = pulseIn(echoPin,HIGH);
delay(25);
pingTravelDistance = (pingTravelTime*330*100)/(1000000);
speedOfsound = (pingTravelDistance*1000000)/pingTravelTime;
distanceToTarget = pingTravelDistance/2;
if (isConnected) {
unsigned long ms = millis();
if (ms - lastTick > 10000) { // send telemetry every 10 seconds
char msg[64] = {0};
int pos = 0, errorCode = 0;
lastTick = ms;
if (loopId++ % 4 == 0) { // send telemetry
pos = snprintf(msg, sizeof(msg) - 1, "{\"Temperature\": %f}",
t);
errorCode = iotc_send_telemetry(context, msg, pos);
pos = snprintf(msg, sizeof(msg) - 1, "{\"Humidity\":%f}",
h);
errorCode = iotc_send_telemetry(context, msg, pos);
pos = snprintf(msg, sizeof(msg) - 1, "{\"Distance\":%f}",
distanceToTarget);
errorCode = iotc_send_telemetry(context, msg, pos);
pos = snprintf(msg, sizeof(msg) - 1, "{\"Speed\":%f}",
speedOfsound);
errorCode = iotc_send_telemetry(context, msg, pos);
} else { // send property
}
msg[pos] = 0;
if (errorCode != 0) {
LOG_ERROR("Sending message has failed with error code %d", errorCode);
}
}
iotc_do_work(context); // do background work for iotc
} else {
iotc_free_context(context);
context = NULL;
connect_client(SCOPE_ID, DEVICE_ID, DEVICE_KEY);
}
delay(50);
}
I tried to implement this project for my thesis but am not getting results, since the connection is never established.
These are the errors I was getting:
ERROR: couldn't fetch the time from NTP. - -
X - Error at connection.h:32
Error # tcp_connect. Code 1 -
ERROR: Client was not connected. - -
iot.dps : getting auth... -
iotc.dps : getting operation id... -
ERROR: DPS endpoint PUT call has failed.
This is the GitHub link for downloading the required header files for connection establishment.
I could reproduce the same error on my NodeMCU. Please find the image below for reference.
If you let the code run and observe the NodeMCU serial monitor, it will occasionally spit out additional information, as you can see in the image below.
The error message in my case indicates an authorization issue: I had provided the wrong primary key in the code. To be able to connect to the device on Azure IoT Central, also make sure the device is not set to be simulated when you create it.
Once you have created the device from the template, navigate to the device and click Connect to get the details that need to be entered in the code. The image below is attached for reference.
Here is the code snippet I have used to generate the data:
#include <ESP8266WiFi.h>
#include "src/iotc/common/string_buffer.h"
#include "src/iotc/iotc.h"
#include "DHT.h"
#define DHTPIN 2
#define DHTTYPE DHT11 // DHT 11
#define WIFI_SSID "<SSID>"
#define WIFI_PASSWORD "<WIFIPASSWORD>"
const char *SCOPE_ID = "<value 2 from above image>";
const char *DEVICE_ID = "<value 3 from above image>";
const char *DEVICE_KEY = "<value 4 from above image>";
DHT dht(DHTPIN, DHTTYPE);
void on_event(IOTContext ctx, IOTCallbackInfo *callbackInfo);
#include "src/connection.h"
void on_event(IOTContext ctx, IOTCallbackInfo *callbackInfo)
{
// ConnectionStatus
if (strcmp(callbackInfo->eventName, "ConnectionStatus") == 0)
{
LOG_VERBOSE("Is connected ? %s (%d)",
callbackInfo->statusCode == IOTC_CONNECTION_OK ? "YES" : "NO",
callbackInfo->statusCode);
isConnected = callbackInfo->statusCode == IOTC_CONNECTION_OK;
return;
}
// payload buffer doesn't have a null ending.
// add null ending in another buffer before print
AzureIOT::StringBuffer buffer;
if (callbackInfo->payloadLength > 0)
{
buffer.initialize(callbackInfo->payload, callbackInfo->payloadLength);
}
LOG_VERBOSE("- [%s] event was received. Payload => %s\n",
callbackInfo->eventName, buffer.getLength() ? *buffer : "EMPTY");
if (strcmp(callbackInfo->eventName, "Command") == 0)
{
LOG_VERBOSE("- Command name was => %s\r\n", callbackInfo->tag);
}
}
void setup()
{
Serial.begin(9600);
connect_wifi(WIFI_SSID, WIFI_PASSWORD);
connect_client(SCOPE_ID, DEVICE_ID, DEVICE_KEY);
if (context != NULL)
{
lastTick = 0; // set timer in the past to enable first telemetry a.s.a.p
}
dht.begin();
}
void loop()
{
float h = dht.readHumidity();
float t = dht.readTemperature();
if (isConnected)
{
unsigned long ms = millis();
if (ms - lastTick > 10000)
{ // send telemetry every 10 seconds
char msg[64] = {0};
int pos = 0, errorCode = 0;
lastTick = ms;
if (loopId++ % 2 == 0)
{ // send telemetry
pos = snprintf(msg, sizeof(msg) - 1, "{\"Temperature\": %f}",
t);
errorCode = iotc_send_telemetry(context, msg, pos);
pos = snprintf(msg, sizeof(msg) - 1, "{\"Humidity\":%f}",
h);
errorCode = iotc_send_telemetry(context, msg, pos);
}
else
{ // send property
}
msg[pos] = 0;
if (errorCode != 0)
{
LOG_ERROR("Sending message has failed with error code %d", errorCode);
}
}
iotc_do_work(context); // do background work for iotc
}
else
{
iotc_free_context(context);
context = NULL;
connect_client(SCOPE_ID, DEVICE_ID, DEVICE_KEY);
}
}
Here are the temperature and humidity values generated by the code.
The data generated the following graph in Azure IoT Central.
Please note that I have used a DHT11 sensor and read it from the GPIO 2 (D4) pin on my NodeMCU board. I used Arduino IDE version 1.8.19 and ESP8266 board version 2.7.4.
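Since the very first error in your log is the NTP time fetch, it is also worth confirming that the board can reach an NTP server at all before the DPS handshake is attempted. A rough standalone test sketch (independent of the IoT Central code; the NTP hosts are just example public pools and the Wi-Fi placeholders need to be filled in):
#include <ESP8266WiFi.h>
#include <time.h>
#define WIFI_SSID "<ENTER WIFI SSID>"
#define WIFI_PASSWORD "<ENTER WIFI PASSWORD>"
void setup() {
  Serial.begin(9600);
  WiFi.mode(WIFI_STA);
  WiFi.begin(WIFI_SSID, WIFI_PASSWORD);
  while (WiFi.status() != WL_CONNECTED) { // wait for the Wi-Fi connection
    delay(500);
    Serial.print(".");
  }
  Serial.println("\nWiFi connected");
  configTime(0, 0, "pool.ntp.org", "time.nist.gov"); // request UTC from public NTP pools
  time_t now = time(nullptr);
  while (now < 8 * 3600 * 2) { // wait until the clock has been set to something sane
    delay(500);
    Serial.print("*");
    now = time(nullptr);
  }
  Serial.print("\nCurrent UTC time: ");
  Serial.print(ctime(&now));
}
void loop() {}
If this sketch never prints a valid time, the problem is network/NTP reachability rather than the IoT Central credentials.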

Merge multi channel audio buffers into one CMSampleBuffer

I am using FFmpeg to access an RTSP stream in my macOS app.
REACHED GOALS: I have created a tone generator which creates single channel audio and returns a CMSampleBuffer. The tone generator is used to test my audio pipeline when the video's fps and audio sample rates are changed.
GOAL: The goal is to merge multi-channel audio buffers into a single CMSampleBuffer.
Audio data lifecycle:
AVCodecContext* audioContext = self.rtspStreamProvider.audioCodecContext;
if (!audioContext) { return; }
// Getting audio settings from FFmpeg's audio context (AVCodecContext).
int samplesPerChannel = audioContext->frame_size;
int frameNumber = audioContext->frame_number;
int sampleRate = audioContext->sample_rate;
int fps = [self.rtspStreamProvider fps];
int calculatedSampleRate = sampleRate / fps;
// NSLog(#"\nSamples per channel = %i, frames = %i.\nSample rate = %i, fps = %i.\ncalculatedSampleRate = %i.", samplesPerChannel, frameNumber, sampleRate, fps, calculatedSampleRate);
// Decoding the audio data from an encoded AVPacket into an AVFrame.
AVFrame* audioFrame = [self.rtspStreamProvider readDecodedAudioFrame];
if (!audioFrame) { return; }
// Extracting my audio buffers from FFmpeg's AVFrame.
uint8_t* leftChannelAudioBufRef = audioFrame->data[0];
uint8_t* rightChannelAudioBufRef = audioFrame->data[1];
// Creating the CMSampleBuffer with audio data.
CMSampleBufferRef leftSampleBuffer = [CMSampleBufferFactory createAudioSampleBufferUsingData:leftChannelAudioBufRef channelCount:1 framesCount:samplesPerChannel sampleRate:sampleRate];
// CMSampleBufferRef rightSampleBuffer = [CMSampleBufferFactory createAudioSampleBufferUsingData:packet->data[1] channelCount:1 framesCount:samplesPerChannel sampleRate:sampleRate];
if (!leftSampleBuffer) { return; }
if (!self.audioQueue) { return; }
if (!self.audioDelegates) { return; }
// All audio consumers will receive audio samples via delegation.
dispatch_sync(self.audioQueue, ^{
NSHashTable *audioDelegates = self.audioDelegates;
for (id<AudioDataProviderDelegate> audioDelegate in audioDelegates)
{
[audioDelegate provider:self didOutputAudioSampleBuffer:leftSampleBuffer];
// [audioDelegate provider:self didOutputAudioSampleBuffer:rightSampleBuffer];
}
});
CMSampleBuffer containing audio data creation:
import Foundation
import CoreMedia
@objc class CMSampleBufferFactory: NSObject
{
@objc static func createAudioSampleBufferUsing(data: UnsafeMutablePointer<UInt8>,
channelCount: UInt32,
framesCount: CMItemCount,
sampleRate: Double) -> CMSampleBuffer? {
/* Prepare for sample Buffer creation */
var sampleBuffer: CMSampleBuffer! = nil
var osStatus: OSStatus = -1
var audioFormatDescription: CMFormatDescription! = nil
var absd: AudioStreamBasicDescription! = nil
let sampleDuration = CMTimeMake(value: 1, timescale: Int32(sampleRate))
let presentationTimeStamp = CMTimeMake(value: 0, timescale: Int32(sampleRate))
// NOTE: Change bytesPerFrame if you change the block buffer value types. Currently we are using Float32.
let bytesPerFrame: UInt32 = UInt32(MemoryLayout<Float32>.size) * channelCount
let memoryBlockByteLength = framesCount * Int(bytesPerFrame)
// var acl = AudioChannelLayout()
// acl.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo
/* Sample Buffer Block buffer creation */
var blockBuffer: CMBlockBuffer?
osStatus = CMBlockBufferCreateWithMemoryBlock(
allocator: kCFAllocatorDefault,
memoryBlock: nil,
blockLength: memoryBlockByteLength,
blockAllocator: nil,
customBlockSource: nil,
offsetToData: 0,
dataLength: memoryBlockByteLength,
flags: 0,
blockBufferOut: &blockBuffer
)
assert(osStatus == kCMBlockBufferNoErr)
guard let eBlock = blockBuffer else { return nil }
osStatus = CMBlockBufferFillDataBytes(with: 0, blockBuffer: eBlock, offsetIntoDestination: 0, dataLength: memoryBlockByteLength)
assert(osStatus == kCMBlockBufferNoErr)
TVBlockBufferHelper.fillAudioBlockBuffer(blockBuffer,
audioData: data,
frames: Int32(framesCount))
/* Audio description creations */
absd = AudioStreamBasicDescription(
mSampleRate: sampleRate,
mFormatID: kAudioFormatLinearPCM,
mFormatFlags: kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsFloat,
mBytesPerPacket: bytesPerFrame,
mFramesPerPacket: 1,
mBytesPerFrame: bytesPerFrame,
mChannelsPerFrame: channelCount,
mBitsPerChannel: 32,
mReserved: 0
)
guard absd != nil else {
print("\nCreating AudioStreamBasicDescription Failed.")
return nil
}
osStatus = CMAudioFormatDescriptionCreate(allocator: kCFAllocatorDefault,
asbd: &absd,
layoutSize: 0,
layout: nil,
// layoutSize: MemoryLayout<AudioChannelLayout>.size,
// layout: &acl,
magicCookieSize: 0,
magicCookie: nil,
extensions: nil,
formatDescriptionOut: &audioFormatDescription)
guard osStatus == noErr else {
print("\nCreating CMFormatDescription Failed.")
return nil
}
/* Create sample Buffer */
var timmingInfo = CMSampleTimingInfo(duration: sampleDuration, presentationTimeStamp: presentationTimeStamp, decodeTimeStamp: .invalid)
osStatus = CMSampleBufferCreate(allocator: kCFAllocatorDefault,
dataBuffer: eBlock,
dataReady: true,
makeDataReadyCallback: nil,
refcon: nil,
formatDescription: audioFormatDescription,
sampleCount: framesCount,
sampleTimingEntryCount: 1,
sampleTimingArray: &timmingInfo,
sampleSizeEntryCount: 0, // Must be 0, 1, or numSamples.
sampleSizeArray: nil, // Pointer to Int. Don't know the size. Don't know if it's bytes or bits?
sampleBufferOut: &sampleBuffer)
return sampleBuffer
}
}
CMSampleBuffer gets filled with raw audio data from FFmpeg's data:
@import Foundation;
@import CoreMedia;
@interface BlockBufferHelper : NSObject
+(void)fillAudioBlockBuffer:(CMBlockBufferRef)blockBuffer
audioData:(uint8_t *)data
frames:(int)framesCount;
@end
#import "TVBlockBufferHelper.h"
@implementation BlockBufferHelper
+(void)fillAudioBlockBuffer:(CMBlockBufferRef)blockBuffer
audioData:(uint8_t *)data
frames:(int)framesCount
{
// Possibly dev error.
if (framesCount == 0) {
NSAssert(false, @"\nfillAudioBlockBuffer/audioData/frames will not be able to fill a blockBuffer which has no frames.");
return;
}
char *rawBuffer = NULL;
size_t size = 0;
OSStatus status = CMBlockBufferGetDataPointer(blockBuffer, 0, &size, NULL, &rawBuffer);
if(status != noErr)
{
return;
}
memcpy(rawBuffer, data, framesCount);
}
@end
The Learning Core Audio book by Chris Adamson and Kevin Avila points me toward a multi-channel mixer.
The multi-channel mixer should have 2-n inputs and 1 output. I assume the output could be a buffer, or something that could be put into a CMSampleBuffer for further consumption.
This direction should lead me to AudioUnits, AUGraph and AudioToolbox. I don't understand all of these classes or how they work together. I have found some code snippets on SO that could help me, but most of them use AudioToolbox classes and don't use CMSampleBuffers as much as I need.
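One simpler direction I am considering, assuming the decoded frames really are planar Float32 (which the separate data[0]/data[1] planes suggest), is to interleave the two planes myself and build a single two-channel CMSampleBuffer from the result. A rough sketch of the interleaving step:
// Interleave two planar Float32 channels (L, R) into one packed stereo buffer.
static void interleaveStereo(const float *left, const float *right,
                             float *interleaved, int framesCount)
{
    for (int i = 0; i < framesCount; i++) {
        interleaved[2 * i] = left[i];
        interleaved[2 * i + 1] = right[i];
    }
}
// Usage with the factory above (channelCount becomes 2):
// float interleaved[samplesPerChannel * 2];
// interleaveStereo((const float *)audioFrame->data[0],
//                  (const float *)audioFrame->data[1],
//                  interleaved, samplesPerChannel);
// CMSampleBufferRef stereoBuffer =
//     [CMSampleBufferFactory createAudioSampleBufferUsingData:(uint8_t *)interleaved
//                                                channelCount:2
//                                                 framesCount:samplesPerChannel
//                                                  sampleRate:sampleRate];
But I am not sure whether this is the intended approach, which leads to my questions: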
Is there another way to merge audio buffers into a new one?
Is creating a multi-channel mixer using AudioToolbox the right direction?

How to write NALs produced by x264_encoder_encode() using ffmpeg av_interleaved_write_frame()

I have been trying to produce a "flv" video file in the following sequence:
av_register_all();
// Open video file
if (avformat_open_input(&pFormatCtx, "6.mp4", NULL, NULL) != 0)
return -1; // Couldn't open file
// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
return -1; // Couldn't find stream information
// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, "input_file.mp4", 0);
// Find the first video stream
videoStream = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStream = i;
break;
}
if (videoStream == -1)
return -1; // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx = pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}
// Open codec
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
return -1; // Could not open codec
// Allocate video frame
pFrame = avcodec_alloc_frame();
// Allocate an AVFrame structure
pFrameYUV420 = avcodec_alloc_frame();
if (pFrameYUV420 == NULL)
return -1;
// Determine required buffer size and allocate buffer
numBytes = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameYUV420
// Note that pFrameYUV420 is an AVFrame, but AVFrame is a superset of AVPicture
avpicture_fill((AVPicture *) pFrameYUV420, buffer, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
// Setup scaler
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, SWS_BILINEAR, 0, 0, 0);
if (img_convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context!\n");
exit(1);
}
// Setup encoder/muxing now
filename = "output_file.flv";
fmt = av_guess_format("flv", filename, NULL);
if (fmt == NULL) {
printf("Could not guess format.\n");
return -1;
}
/* allocate the output media context */
oc = avformat_alloc_context();
if (oc == NULL) {
printf("could not allocate context.\n");
return -1;
}
oc->oformat = fmt;
snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
video_st = NULL;
if (fmt->video_codec != AV_CODEC_ID_NONE) {
video_st = add_stream(oc, &video_codec, fmt->video_codec);
}
// Let's see some information about our format
av_dump_format(oc, 0, filename, 1);
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
return 1;
}
}
/* Write the stream header, if any. */
ret = avformat_write_header(oc, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
return 1;
}
// Setup x264 params
x264_param_t param;
x264_param_default_preset(&param, "veryfast", "zerolatency");
param.i_threads = 1;
param.i_width = video_st->codec->width;
param.i_height = video_st->codec->height;
param.i_fps_num = STREAM_FRAME_RATE; // 30 fps, same as video
param.i_fps_den = 1;
// Intra refres:
param.i_keyint_max = STREAM_FRAME_RATE;
param.b_intra_refresh = 1;
// Rate control:
param.rc.i_rc_method = X264_RC_CRF;
param.rc.f_rf_constant = 25;
param.rc.f_rf_constant_max = 35;
// For streaming:
param.b_repeat_headers = 1;
param.b_annexb = 1;
x264_param_apply_profile(&param, "baseline");
x264_t* encoder = x264_encoder_open(&param);
x264_picture_t pic_in, pic_out;
x264_picture_alloc(&pic_in, X264_CSP_I420, video_st->codec->width, video_st->codec->height);
x264_nal_t* nals;
int i_nals;
// The loop:
// 1. Read frames
// 2. Decode the frame
// 3. Attempt to re-encode using x264
// 4. Write the x264 encoded frame using av_interleaved_write_frame
while (av_read_frame(pFormatCtx, &packet) >= 0) {
// Is this a packet from the video stream?
if (packet.stream_index == videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
// Did we get a video frame?
if (frameFinished) {
sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pic_in.img.plane, pic_in.img.i_stride);
int frame_size = x264_encoder_encode(encoder, &nals, &i_nals, &pic_in, &pic_out);
if (frame_size >= 0) {
if (i_nals < 0)
printf("invalid frame size: %d\n", i_nals);
// write out NALs
for (i = 0; i < i_nals; i++) {
// initialize a packet
AVPacket p;
av_init_packet(&p);
p.data = nals[i].p_payload;
p.size = nals[i].i_payload;
p.stream_index = video_st->index;
p.flags = AV_PKT_FLAG_KEY;
p.pts = AV_NOPTS_VALUE;
p.dts = AV_NOPTS_VALUE;
ret = av_interleaved_write_frame(oc, &p);
}
}
printf("encoded frame #%d\n", frame_count);
frame_count++;
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
// Now we free up resources used/close codecs, and finally close our program.
Here is the implementation for the add_stream() function:
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id) {
AVCodecContext *c;
AVStream *st;
int r;
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(codec_id));
exit(1);
}
st = avformat_new_stream(oc, *codec);
if (!st) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
st->id = oc->nb_streams - 1;
c = st->codec;
switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
st->id = 1;
c->sample_fmt = AV_SAMPLE_FMT_FLTP;
c->bit_rate = 64000;
c->sample_rate = 44100;
c->channels = 2;
break;
case AVMEDIA_TYPE_VIDEO:
avcodec_get_context_defaults3(c, *codec);
c->codec_id = codec_id;
c->bit_rate = 500*1000;
//c->rc_min_rate = 500*1000;
//c->rc_max_rate = 500*1000;
//c->rc_buffer_size = 500*1000;
/* Resolution must be a multiple of two. */
c->width = 1280;
c->height = 720;
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
c->mb_decision = 2;
}
break;
default:
break;
}
/* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
return st;
}
After the encoding is complete, I check the output file output_file.flv. I notice its size is very large (101 MB) and it does not play. If I use ffmpeg to decode/encode the input file, then I get an output file of about 83 MB (roughly the same size as the original .mp4 file used as input). Also, the 83 MB output from just using the ffmpeg C API, as opposed to using x264 for the encoding step, plays just fine. Does anyone know where I am going wrong? I have been researching this for a few days now with no luck :(. I feel that I am close to making it work, but I just cannot figure out what I am doing wrong. Thank you!
To produce the correct AVPacket, you should write all the NALs into the same packet, as is done in http://ffmpeg.org/doxygen/trunk/libx264_8c_source.html (see the encode_nals and X264_frame functions).
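In other words, build one AVPacket per encoded frame rather than one per NAL. A rough sketch of what the write step could look like (x264 documents that the NAL payloads it returns are sequential in memory, so frame_size bytes starting at nals[0].p_payload cover the whole encoded frame; the timestamps come from pic_out instead of AV_NOPTS_VALUE, which assumes pic_in.i_pts is set before each encode call):
int frame_size = x264_encoder_encode(encoder, &nals, &i_nals, &pic_in, &pic_out);
if (frame_size > 0) {
    AVPacket p;
    av_init_packet(&p);
    // All NAL payloads are contiguous, so one packet holds the whole frame.
    p.data = nals[0].p_payload;
    p.size = frame_size;
    p.stream_index = video_st->index;
    if (pic_out.b_keyframe)
        p.flags |= AV_PKT_FLAG_KEY;
    // Rescale x264's timestamps into the stream's time base.
    p.pts = av_rescale_q(pic_out.i_pts, video_st->codec->time_base, video_st->time_base);
    p.dts = av_rescale_q(pic_out.i_dts, video_st->codec->time_base, video_st->time_base);
    ret = av_interleaved_write_frame(oc, &p);
}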

How to get the computer's current volume level?

How do I access the current volume level of a Mac from the Cocoa API?
For example: when I'm using Spotify.app on OS X 10.7 and a sound advertisement comes up, and I turn down my Mac's volume, the app will pause the ad until I turn it back up to an average level. I find this incredibly obnoxious and a violation of user privacy, but somehow Spotify has found a way to do this.
Is there any way I can do this with Cocoa? I'm making an app where it might come in useful to warn the user if the volume is low.
There are two options available. The first step is to determine what device you'd like and get its ID. Assuming the default output device, the code will look something like:
AudioObjectPropertyAddress propertyAddress = {
kAudioHardwarePropertyDefaultOutputDevice,
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster
};
AudioDeviceID deviceID;
UInt32 dataSize = sizeof(deviceID);
OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize, &deviceID);
if(kAudioHardwareNoError != result)
// Handle the error
Next, you can use the kAudioHardwareServiceDeviceProperty_VirtualMasterVolume property to get the device's virtual master volume:
AudioObjectPropertyAddress propertyAddress = {
kAudioHardwareServiceDeviceProperty_VirtualMasterVolume,
kAudioDevicePropertyScopeOutput,
kAudioObjectPropertyElementMaster
};
if(!AudioHardwareServiceHasProperty(deviceID, &propertyAddress))
// An error occurred
Float32 volume;
UInt32 dataSize = sizeof(volume);
OSStatus result = AudioHardwareServiceGetPropertyData(deviceID, &propertyAddress, 0, NULL, &dataSize, &volume);
if(kAudioHardwareNoError != result)
// An error occurred
Alternatively, you can use kAudioDevicePropertyVolumeScalar to get the volume for a specific channel:
UInt32 channel = 1; // Channel 0 is master, if available
AudioObjectPropertyAddress propertyAddress = {
kAudioDevicePropertyVolumeScalar,
kAudioDevicePropertyScopeOutput,
channel
};
if(!AudioObjectHasProperty(deviceID, &propertyAddress))
// An error occurred
Float32 volume;
UInt32 dataSize = sizeof(volume);
OSStatus result = AudioObjectGetPropertyData(deviceID, &propertyAddress, 0, NULL, &dataSize, &volume);
if(kAudioHardwareNoError != result)
// An error occurred
The difference between the two is explained in Apple's docs:
kAudioHardwareServiceDeviceProperty_VirtualMasterVolume
A Float32 value that represents the value of the volume control. The
range for this property’s value is 0.0 (silence) through 1.0 (full
level). The effect of this property depends on the hardware device
associated with the HAL audio object. If the device has a master
volume control, this property controls it. If the device has
individual channel volume controls, this property applies to those
identified by the device's preferred multichannel layout, or the
preferred stereo pair if the device is stereo only. This control
maintains relative balance between the channels it affects.
So it can be tricky to define exactly what a device's volume is, especially for multichannel devices with non-standard channel maps.
From CocoaDev, these class methods look like they should work, though they're not particularly Cocoa-like:
#import <AudioToolbox/AudioServices.h>
+(AudioDeviceID)defaultOutputDeviceID
{
AudioDeviceID outputDeviceID = kAudioObjectUnknown;
// get output device device
UInt32 propertySize = 0;
OSStatus status = noErr;
AudioObjectPropertyAddress propertyAOPA;
propertyAOPA.mScope = kAudioObjectPropertyScopeGlobal;
propertyAOPA.mElement = kAudioObjectPropertyElementMaster;
propertyAOPA.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
if (!AudioHardwareServiceHasProperty(kAudioObjectSystemObject, &propertyAOPA))
{
NSLog(#"Cannot find default output device!");
return outputDeviceID;
}
propertySize = sizeof(AudioDeviceID);
status = AudioHardwareServiceGetPropertyData(kAudioObjectSystemObject, &propertyAOPA, 0, NULL, &propertySize, &outputDeviceID);
if(status)
{
NSLog(#"Cannot find default output device!");
}
return outputDeviceID;
}
// getting system volume
+(float)volume
{
Float32 outputVolume;
UInt32 propertySize = 0;
OSStatus status = noErr;
AudioObjectPropertyAddress propertyAOPA;
propertyAOPA.mElement = kAudioObjectPropertyElementMaster;
propertyAOPA.mSelector = kAudioHardwareServiceDeviceProperty_VirtualMasterVolume;
propertyAOPA.mScope = kAudioDevicePropertyScopeOutput;
AudioDeviceID outputDeviceID = [[self class] defaultOutputDeviceID];
if (outputDeviceID == kAudioObjectUnknown)
{
NSLog(#"Unknown device");
return 0.0;
}
if (!AudioHardwareServiceHasProperty(outputDeviceID, &propertyAOPA))
{
NSLog(#"No volume returned for device 0x%0x", outputDeviceID);
return 0.0;
}
propertySize = sizeof(Float32);
status = AudioHardwareServiceGetPropertyData(outputDeviceID, &propertyAOPA, 0, NULL, &propertySize, &outputVolume);
if (status)
{
NSLog(#"No volume returned for device 0x%0x", outputDeviceID);
return 0.0;
}
if (outputVolume < 0.0 || outputVolume > 1.0) return 0.0;
return outputVolume;
}

How do I register for a notification for when the sound volume changes?

I need my app to be notified when the OS X sound volume has changed. This is for a Desktop app, not for iOS. How can I register for this notification?
This can be a tiny bit tricky because some audio devices support a master channel, but most don't, so the volume will be a per-channel property. Depending on what you need to do, you could observe only one channel and assume that all other channels the device supports have the same volume. Regardless of how many channels you want to watch, you observe the volume by registering a property listener for the AudioObject in question:
// Some devices (but not many) support a master channel
AudioObjectPropertyAddress propertyAddress = {
kAudioDevicePropertyVolumeScalar,
kAudioDevicePropertyScopeOutput,
kAudioObjectPropertyElementMaster
};
if(AudioObjectHasProperty(deviceID, &propertyAddress)) {
OSStatus result = AudioObjectAddPropertyListener(deviceID, &propertyAddress, myAudioObjectPropertyListenerProc, self);
// Error handling omitted
}
else {
// Typically the L and R channels are 1 and 2 respectively, but could be different
propertyAddress.mElement = 1;
OSStatus result = AudioObjectAddPropertyListener(deviceID, &propertyAddress, myAudioObjectPropertyListenerProc, self);
// Error handling omitted
propertyAddress.mElement = 2;
result = AudioObjectAddPropertyListener(deviceID, &propertyAddress, myAudioObjectPropertyListenerProc, self);
// Error handling omitted
}
Your listener proc should be something like:
static OSStatus
myAudioObjectPropertyListenerProc(AudioObjectID inObjectID,
UInt32 inNumberAddresses,
const AudioObjectPropertyAddress inAddresses[],
void *inClientData)
{
for(UInt32 addressIndex = 0; addressIndex < inNumberAddresses; ++addressIndex) {
AudioObjectPropertyAddress currentAddress = inAddresses[addressIndex];
switch(currentAddress.mSelector) {
case kAudioDevicePropertyVolumeScalar:
{
Float32 volume = 0;
UInt32 dataSize = sizeof(volume);
OSStatus result = AudioObjectGetPropertyData(inObjectID, &currentAddress, 0, NULL, &dataSize, &volume);
if(kAudioHardwareNoError != result) {
// Handle the error
continue;
}
// Process the volume change
break;
}
}
}
}
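When you no longer need the notifications (or the default output device changes), balance each registration with AudioObjectRemovePropertyListener using the same address, listener proc, and client data. A minimal sketch for the master-channel case:
// Unregister the listener added earlier for the master channel.
AudioObjectPropertyAddress propertyAddress = {
    kAudioDevicePropertyVolumeScalar,
    kAudioDevicePropertyScopeOutput,
    kAudioObjectPropertyElementMaster
};
OSStatus result = AudioObjectRemovePropertyListener(deviceID, &propertyAddress, myAudioObjectPropertyListenerProc, self);
// Error handling omitted; repeat with mElement = 1 and 2 if the per-channel listeners were registered instead.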