Arduino State Machine - Objective-C

This is a cross-post from someone who answered my original question here.
I'm not sure how to go about executing the 3 functions I'm after (or how to introduce even more than 3 in the future).
I am simply trying to fade/blink the selected colour of an RGB LED (and perhaps introduce more functions in the future), where the RGB data comes from iOS and is sent to an RFduino BLE module.
This sends a "fade" string to the module, which is picked up by RFduinoBLE_onReceive(char *data, int len) on the Arduino end:
- (IBAction)fadeButtonPressed:(id)sender {
[rfduino send:[#"fade" dataUsingEncoding:NSUTF8StringEncoding]];
}
- (IBAction)blinkButtonPressed:(id)sender {
[rfduino send:[#"blink" dataUsingEncoding:NSUTF8StringEncoding]];
}
Selected Color:
- (void)setColor
{
NSLog(#"colors: RGB %f %f %f", red, green, blue);
UIColor *color = [UIColor colorWithRed:red green:green blue:blue alpha:1.0];
[colorSwatch setHighlighted:YES];
[colorSwatch setTintColor:color];
uint8_t tx[3] = { red * 255, green * 255, blue * 255 };
NSData *data = [NSData dataWithBytes:(void*)&tx length:3];
[rfduino send:data];
}
This is how I originally set the RGB colour:
void RFduinoBLE_onReceive (char *data, int len) {
if (len >= 3) {
// Get the RGB values.
uint8_t red = data[0];
uint8_t green = data[1];
uint8_t blue = data[2];
// Set PWM for each LED.
analogWrite(rgb2_pin, red);
analogWrite(rgb3_pin, green);
analogWrite(rgb4_pin, blue);
}
}
This is the provided answer, which now compiles on Arduino, but I have no idea how or where to actually execute my functions:
#include <RFduinoBLE.h>
int state;
char command;
String hexstring;
// RGB pins.
int redPin = 2;
int grnPin = 3;
int bluPin = 4;
void setup () {
state = 1;
pinMode(redPin, OUTPUT);
pinMode(grnPin, OUTPUT);
pinMode(bluPin, OUTPUT);
// This is the data we want to appear in the advertisement
// (the deviceName length plus the advertisement length must be <= 18 bytes).
RFduinoBLE.deviceName = "iOS";
RFduinoBLE.advertisementInterval = MILLISECONDS(300);
RFduinoBLE.txPowerLevel = -20;
RFduinoBLE.advertisementData = "rgb";
// Start the BLE stack.
RFduinoBLE.begin();
}
void loop () {
//RFduino_ULPDelay(INFINITE);
}
void processCommand (int command, String hex) {
// hex ?
// command ?
}
void RFduinoBLE_onReceive (char *data, int len) {
for (int i = 0; i < len; i++) {
stateMachine(data[i]);
}
}
void stateMachine (char data) {
switch (state) {
case 1:
if (data == 1) {
state = 2;
}
break;
case 2:
if (data == 'b' || data == 'f' || data == 'c') {
command = data;
hexstring = "";
state = 3;
} else if (data != 1) { // Stay in state 2 if we received another 0x01.
state = 1;
}
break;
case 3:
if ((data >= 'a' && data <= 'z') || (data >= '0' && data <= '9')) {
hexstring = hexstring + data;
if (hexstring.length() == 6) {
state = 4;
}
} else if (data == 1) {
state = 2;
} else {
state = 1;
}
break;
case 4:
if (data == 3) {
processCommand(command, hexstring);
state = 1;
} else if (data == 1) {
state = 2;
} else {
state = 1;
}
break;
}
}
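To make the wire protocol concrete (my illustration, not part of the answer): the parser above waits for a 0x01 start byte, then a one-letter command, then six lowercase hex digits, then a 0x03 end byte. A minimal test you could call from setup() to watch the transitions:
// Hypothetical test: frame a "set colour to ff8800" message.
// 0x01 moves state 1 -> 2, 'c' selects the colour command (2 -> 3),
// the six hex chars accumulate in hexstring (3 -> 4), and 0x03
// fires processCommand('c', "ff8800") and resets to state 1.
char message[] = { 0x01, 'c', 'f', 'f', '8', '8', '0', '0', 0x03 };
for (int i = 0; i < 9; i++) {
  stateMachine(message[i]);
}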
EDIT: Final code
#include <RFduinoBLE.h>
// State properties.
int state = 1;
char command;
String hexstring;
// RGB pins.
int redPin = 2;
int grnPin = 3;
int bluPin = 4;
// Setup function to set RGB pins to OUTPUT pins.
void setup () {
pinMode(redPin, OUTPUT);
pinMode(grnPin, OUTPUT);
pinMode(bluPin, OUTPUT);
// This is the data we want to appear in the advertisement
// (the deviceName length plus the advertisement length must be <= 18 bytes).
RFduinoBLE.deviceName = "iOS";
RFduinoBLE.advertisementInterval = MILLISECONDS(300);
RFduinoBLE.txPowerLevel = -20;
RFduinoBLE.advertisementData = "rgb";
// Start the BLE stack.
RFduinoBLE.begin();
}
void loop () {
switch (command) {
case 1:
// Blink.
break;
case 2:
// Fade.
break;
}
//RFduino_ULPDelay(INFINITE);
}
// Converts HEX as a String to actual HEX values.
// This is needed to properly convert the ASCII value to the hex
// value of each character.
byte getVal (char c) {
if (c >= '0' && c <= '9') return (byte)(c - '0');
else return (byte)(c - 'a' + 10);
}
// Process each function/command. Note: the parameter is named cmd so that
// the assignments below reach the global command variable that loop()
// switches on (a parameter named command would shadow it).
void processCommand (int cmd, String hex) {
switch (cmd) {
case 'b':
command = 1; // Set blink mode.
break;
case 'f':
command = 2; // Set fade mode.
break;
case 'c':
// We put together 2 characters as is
// done with HEX notation and set the color.
byte red = getVal(hex.charAt(1)) + (getVal(hex.charAt(0)) << 4);
byte green = getVal(hex.charAt(3)) + (getVal(hex.charAt(2)) << 4);
byte blue = getVal(hex.charAt(5)) + (getVal(hex.charAt(4)) << 4);
// Set the color.
setColor (red, green, blue);
break;
}
}
// Sets the color of each RGB pin.
void setColor (byte red, byte green, byte blue) {
analogWrite(redPin, red);
analogWrite(grnPin, green);
analogWrite(bluPin, blue);
}
// This function returns data from the radio.
void RFduinoBLE_onReceive (char *data, int len) {
for (int i = 0; i < len; i++) {
stateMachine(data[i]);
}
}
// Main state machine function, which processes
// data depending on the bytes received.
void stateMachine (char data) {
switch (state) {
case 1:
if (data == 1) {
state = 2;
}
break;
case 2:
if (data == 'b' || data == 'f' || data == 'c') {
command = data;
hexstring = "";
state = 3;
} else if (data != 1) { // Stay in state 2 if we received another 0x01.
state = 1;
}
break;
case 3:
if ((data >= 'a' && data <= 'z') || (data >= '0' && data <= '9')) {
hexstring = hexstring + data;
if (hexstring.length() == 6) {
state = 4;
}
} else if (data == 1) {
state = 2;
} else {
state = 1;
}
break;
case 4:
if (data == 3) {
processCommand(command, hexstring);
state = 1;
} else if (data == 1) {
state = 2;
} else {
state = 1;
}
break;
}
}
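The Blink and Fade cases in loop() above are still empty. One way to fill them in, sketched here as my own suggestion rather than part of the original answer, is to drive the LED from millis() so that loop() returns quickly and the BLE stack stays responsive (the two globals below are additions you would make yourself):
// Hypothetical non-blocking blink: toggle the red pin every 500 ms.
unsigned long lastToggle = 0;
bool ledOn = false;
void blinkStep () {
  if (millis() - lastToggle >= 500) {
    lastToggle = millis();
    ledOn = !ledOn;
    analogWrite(redPin, ledOn ? 255 : 0);
  }
}
Calling blinkStep() from case 1 of the switch in loop() runs the mode without blocking; a fade can be built the same way by nudging a brightness value up or down on each pass.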

There is some code here that you can use to convert hex characters to a byte.
So, you can add this to your existing code -
byte getVal(char c)
{
if (c >= '0' && c <= '9')
return (byte)(c - '0');
else
return (byte)(c - 'a' + 10);
}
void processCommand (int cmd, String hex)
{
switch (cmd) {
case 'b':
command = 1; // set blink mode
break;
case 'f':
command=2; // set fade mode
break;
case 'c':
byte red=getVal(hex.charAt(1)) + (getVal(hex.charAt(0)) << 4);
byte green=getVal(hex.charAt(3)) + (getVal(hex.charAt(2)) << 4);
byte blue=getVal(hex.charAt(5)) + (getVal(hex.charAt(4)) << 4);
setColor(red,green,blue);
}
}
void setColor(byte red,byte green,byte blue)
{
// Set PWM for each LED.
analogWrite(rgb2_pin, red);
analogWrite(rgb3_pin, green);
analogWrite(rgb4_pin, blue);
}
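As a worked example (mine, not part of the answer): for the colour command with hex string "ff8800", getVal('f') returns 15, so red = 15 + (15 << 4) = 255; green = getVal('8') + (getVal('8') << 4) = 8 + 128 = 136; and blue = 0. Note that both getVal and the state machine's range check assume lowercase hex digits.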
On the iOS side you can use something like this -
-(void) sendCommand:(char)command arg1:(Byte)arg1 arg2:(Byte)arg2 arg3:(Byte) arg3 {
NSString *commandString = [NSString stringWithFormat:@"\001%c%02x%02x%02x\003", command, arg1, arg2, arg3];
NSData *commandData=[commandString dataUsingEncoding:NSASCIIStringEncoding];
[rfduino send:commandData];
}
- (IBAction)fadeButtonPressed:(id)sender {
[self sendCommand:'f' arg1:0 arg2:0 arg3:0];
}
- (IBAction)blinkButtonPressed:(id)sender {
[self sendCommand:'b' arg1:0 arg2:0 arg3:0];
}
- (void)setColor
{
NSLog(#"colors: RGB %f %f %f", red, green, blue);
UIColor *color = [UIColor colorWithRed:red green:green blue:blue alpha:1.0];
[colorSwatch setHighlighted:YES];
[colorSwatch setTintColor:color];
[self sendCommand:'c' arg1:red*255 arg2:green*255 arg3:blue*255];
}
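A note on the framing (my addition): the \001 and \003 in the format string are the 0x01 start and 0x03 end sentinels that the Arduino state machine checks for, and the three %02x fields always contribute exactly six lowercase hex digits. A fade press therefore transmits these nine bytes:
0x01 'f' '0' '0' '0' '0' '0' '0' 0x03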

Related

TinkerCAD Arduino code error: too few arguments

I'm building a robot for my class, and we have to have 2 servos and 1 DC motor working in a specific way. Everything is hooked up to an Arduino Uno and my code works, but I'm using TinkerCAD to test a few things and I'm getting an error that stops my code from functioning there, and I'm at a total loss.
ERROR
In function 'void loop()':
44:9: error: too few arguments to function 'void motor(char, char)'
17:6: note: declared here
exit status 1
CODE
#include <Servo.h> // set servo header to let Arduino know you intend to use a servo
Servo mycontinuousservo; // declare servos to be used
Servo mydegreeservo;
int In1 = 7; // declare your global variables to indicate pin numbers
int In2 = 8;
int pin = 6;
int servocontinuouspin = 10;
int servodegreepin = 9;
int angle = 90;
void servopos();
void servocontinous();
void motor(char Speed,char Direction);
void setup() {
// put your setup code here, to run once:
pinMode(In1, OUTPUT);
pinMode(In2, OUTPUT);
pinMode(pin, OUTPUT);
digitalWrite(In1, HIGH); //pin 7 moves forward
digitalWrite(In2, LOW); //pin 8 moves forward
analogWrite(pin, 0); // start at 0
pinMode(servocontinuouspin, OUTPUT);
pinMode(servodegreepin, OUTPUT);
mycontinuousservo.attach(servocontinuouspin);
mydegreeservo.attach(servodegreepin);
mycontinuousservo.write(90);
Serial.begin(9600); // for serial communication
}
void loop() {
servocontinous(); //call by ref aforedeclared functions
servopos();
motor();
}
// EXIT THE LOOP
void servopos() { //position function
int degree = 0;
int i = 0;
for (i = 0; i < 18; i++) {
mydegreeservo.write(degree);
delay(500); //delay 0.5 seconds
degree = degree + 10;
}
}
void servocontinous() // continuous servo settings
{
for (int angle = 90; angle >= 0; angle--) {
mycontinuousservo.write(angle);
delay(50);
}
if (angle == 0) {
Serial.print("speed\n");
}
for (angle = 0; angle < 90; angle++)
{
mycontinuousservo.write(angle);
delay(50);
}
}
void motor() //motor function
{
char Speed = 0;
char Direction = 0;
if (Serial.available() > 0) //initialising
{
if (Direction == 'f') //70 representing F on the ASCII table
{
delay(500);
Serial.println("F");
}
if (Direction == 'r')
{
delay(500);
Serial.println("R");
}
}
if (Serial.available() > 0)
{
Speed = Serial.read();
if (Speed == '0')
{
Speed = 0;
Serial.println("Speed 0");
}
if (Speed == '1')
{
Speed = 14;
Serial.println("Speed 1");
}
if (Speed == '2')
{
Speed = 29;
Serial.println("Speed 2");
}
if (Speed == '3')
{
Speed = 42;
Serial.println("Speed 3");
}
if (Speed == '4')
{
Speed = 56;
Serial.println("Speed 4");
}
if (Speed == '5')
{
Speed = 70;
Serial.println("Speed 5");
}
if (Speed == '6')
{
Speed = 84;
Serial.println("Speed 6");
}
if (Speed == '7')
{
Speed = 98;
Serial.println("Speed 7");
}
if (Speed == '8')
{
Speed = 112;
Serial.println("Speed 8");
}
if (Speed == '9')
{
Speed = 128;
Serial.println("Speed 9");
}
} delay(5000);
analogWrite(pin, Speed);
if (Direction == 'f')
{ digitalWrite(In1, HIGH);
digitalWrite(In2, LOW);
} if (Direction == 'r')
{
digitalWrite(In1, LOW);
digitalWrite(In2, HIGH);
}
}
Here you declare the function as taking two arguments:
void motor(char Speed,char Direction);
Later you call it with no arguments, which is invalid when compared to that declaration:
motor();
This is an immediate compiler error: the function is declared as taking two arguments, but you call it with zero, and the compile fails because of that contradiction.
Yet when you define it, the arguments are gone; they have become local variables:
void motor() //motor function
{
char Speed = 0;
char Direction = 0;
// ...
}
This too contradicts the earlier declaration, so if you comment out the place where it's called you'll likely get a different error.
Local variables are the private business of a function; they do not need to appear in the function signature, so don't think they need to be listed as arguments.
What you need to do is either snip the arguments from the declaration so that it matches the definition's signature exactly, or drop the declaration and move the motor() function definition to before where it is first called.
I prefer to organize things so that pre-declaration is not necessary, or at least minimized. There's no reason not to put the motor() definition before loop().
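A minimal illustration of a consistent version (hypothetical values, not the poster's full sketch): declaration, call, and definition all agree on the parameter list.
void motor (char Speed, char Direction); // declaration
void loop () {
  motor(100, 'f'); // the call supplies both arguments
}
void motor (char Speed, char Direction) { // definition matches the declaration
  // drive the motor using Speed and Direction here
}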

PubSubClient & ArduinoJSON - Arduino passing char to digitalWrite

I am not sure how to pass the values into the function so that they can be used with the digitalWrite calls.
I'm getting the following error:
error: cannot convert 'String' to 'uint8_t {aka unsigned char}' for argument '1' to 'void digitalWrite(uint8_t, uint8_t)'
pubsub callback
void callback(char* topic, byte* payload, unsigned int length) {
Serial.print("New message on [");
Serial.print(topic);
Serial.print("] ");
Serial.println("");
char s[length + 1];
for (int i = 0; i < length; i++) {
Serial.print((char)payload[i]);
s[i] = payload[i];
}
s[length] = '\0'; // Null-terminate so parseObject() receives a proper C string.
StaticJsonBuffer<500> jsonBuffer;
JsonObject& root = jsonBuffer.parseObject(s);
if (!root.success()) {
Serial.println("parseObject() failed");
}
String relay = root["relay"]; // "relayOne"
int time = root["timestamp"]; // 1351824120
String trigger = root["trigger"]; // "ON"
// Feel free to add more if statements to control more GPIOs with MQTT
commander(relay, trigger);
}
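For reference, reconstructing from the inline comments above (the original question does not show it), the callback expects an MQTT payload shaped like this:
{"relay": "relayOne", "timestamp": 1351824120, "trigger": "ON"}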
commander function
void commander(String relay, String trigger) {
if(trigger == "ON"){
Serial.print("Turning ");
Serial.println(relay);
Serial.println(" on");
digitalWrite(relay, HIGH);
} else if(trigger == "OFF"){
Serial.println(relay);
digitalWrite(relayOne, LOW);
Serial.print("TRIGGERED!");
} else {
// turn all the LEDs off:
for (int pin = 0; pin < relayPinCount; pin++) {
digitalWrite(relayPins[pin], LOW);
}
}
Serial.println();
}
void commander(String relay, String trigger) {
uint8_t pinNo;
if ( relay == "relayOne" ) {
pinNo = RELAY_1_PIN;
} else
if ( relay == "anotherRelay" ) {
pinNo = OTHER_RELAY_PIN;
} else
if ( ... ) {
...
} else {
return;
}
if(trigger == "ON"){
Serial.print("Turning ");
Serial.println(relay);
Serial.println(" on");
digitalWrite(pinNo, HIGH);
} else if(trigger == "OFF"){
Serial.println(relay);
digitalWrite(pinNo, LOW);
Serial.print("TRIGGERED!");
} else {
// turn all the LEDs off:
for (int pin = 0; pin < relayPinCount; pin++) {
digitalWrite(relayPins[pin], LOW);
}
}
Serial.println();
}
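If the if/else chain grows with every relay, a small lookup table keeps the name-to-pin mapping in one place. Here is a sketch under the assumption that you define the arrays yourself (RELAY_2_PIN is a hypothetical name, like RELAY_1_PIN above):
// Hypothetical table mapping relay names to output pins.
const char *relayNames[] = { "relayOne", "relayTwo" };
const uint8_t relayPinsByName[] = { RELAY_1_PIN, RELAY_2_PIN };
const int relayCount = 2;
int pinForRelay (const String &relay) {
  for (int i = 0; i < relayCount; i++) {
    if (relay == relayNames[i]) return relayPinsByName[i];
  }
  return -1; // unknown relay name
}
commander() can then call pinForRelay(relay) once and return early if it gets -1.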

Record rtsp stream with ffmpeg in iOS

I've followed iFrameExtractor to successfully stream RTSP in my Swift project. This project also has a recording function, which basically uses avformat_write_header, av_interleaved_write_frame and av_write_trailer to save the RTSP source into an mp4 file.
When I use this project on my device, the RTSP streaming works fine, but the recording function always generates a blank mp4 file with no image or sound.
Could anyone tell me what step I'm missing?
I'm using an iPhone 5 with iOS 9.1 and Xcode 7.1.1.
FFmpeg is version 2.8.3, compiled following CompilationGuide – FFmpeg.
The following is the sample code in this project.
The function that generate every frame:
-(BOOL)stepFrame {
// AVPacket packet;
int frameFinished=0;
static bool bFirstIFrame=false;
static int64_t vPTS=0, vDTS=0, vAudioPTS=0, vAudioDTS=0;
while(!frameFinished && av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// 20130525 albert.liao modified start
// Initialize a new format context for writing file
if(veVideoRecordState!=eH264RecIdle)
{
switch(veVideoRecordState)
{
case eH264RecInit:
{
if ( !pFormatCtx_Record )
{
int bFlag = 0;
//NSString *videoPath = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
NSString *videoPath = @"/Users/liaokuohsun/iFrameTest.mp4";
const char *file = [videoPath UTF8String];
pFormatCtx_Record = avformat_alloc_context();
bFlag = h264_file_create(file, pFormatCtx_Record, pCodecCtx, pAudioCodecCtx,/*fps*/0.0, packet.data, packet.size );
if(bFlag==true)
{
veVideoRecordState = eH264RecActive;
fprintf(stderr, "h264_file_create success\n");
}
else
{
veVideoRecordState = eH264RecIdle;
fprintf(stderr, "h264_file_create error\n");
}
}
}
//break;
case eH264RecActive:
{
if((bFirstIFrame==false) &&(packet.flags&AV_PKT_FLAG_KEY)==AV_PKT_FLAG_KEY)
{
bFirstIFrame=true;
vPTS = packet.pts ;
vDTS = packet.dts ;
#if 0
NSRunLoop *pRunLoop = [NSRunLoop currentRunLoop];
[pRunLoop addTimer:RecordingTimer forMode:NSDefaultRunLoopMode];
#else
[NSTimer scheduledTimerWithTimeInterval:5.0//2.0
target:self
selector:@selector(StopRecording:)
userInfo:nil
repeats:NO];
#endif
}
// Record audio when 1st i-Frame is obtained
if(bFirstIFrame==true)
{
if ( pFormatCtx_Record )
{
#if PTS_DTS_IS_CORRECT==1
packet.pts = packet.pts - vPTS;
packet.dts = packet.dts - vDTS;
#endif
h264_file_write_frame( pFormatCtx_Record, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);
}
else
{
NSLog(#"pFormatCtx_Record no exist");
}
}
}
break;
case eH264RecClose:
{
if ( pFormatCtx_Record )
{
h264_file_close(pFormatCtx_Record);
#if 0
// 20130607 Test
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void)
{
ALAssetsLibrary *library = [[ALAssetsLibrary alloc]init];
NSString *filePathString = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
NSURL *filePathURL = [NSURL fileURLWithPath:filePathString isDirectory:NO];
if(1)// ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:filePathURL])
{
[library writeVideoAtPathToSavedPhotosAlbum:filePathURL completionBlock:^(NSURL *assetURL, NSError *error){
if (error) {
// TODO: error handling
NSLog(#"writeVideoAtPathToSavedPhotosAlbum error");
} else {
// TODO: success handling
NSLog(#"writeVideoAtPathToSavedPhotosAlbum success");
}
}];
}
[library release];
});
#endif
vPTS = 0;
vDTS = 0;
vAudioPTS = 0;
vAudioDTS = 0;
pFormatCtx_Record = NULL;
NSLog(#"h264_file_close() is finished");
}
else
{
NSLog(#"fc no exist");
}
bFirstIFrame = false;
veVideoRecordState = eH264RecIdle;
}
break;
default:
if ( pFormatCtx_Record )
{
h264_file_close(pFormatCtx_Record);
pFormatCtx_Record = NULL;
}
NSLog(#"[ERROR] unexpected veVideoRecordState!!");
veVideoRecordState = eH264RecIdle;
break;
}
}
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
}
else if(packet.stream_index==audioStream)
{
// 20131024 albert.liao modfied start
static int vPktCount=0;
BOOL bIsAACADTS = FALSE;
int ret = 0;
if(aPlayer.vAACType == eAAC_UNDEFINED)
{
tAACADTSHeaderInfo vxAACADTSHeaderInfo = {0};
bIsAACADTS = [AudioUtilities parseAACADTSHeader:(uint8_t *)packet.data ToHeader:&vxAACADTSHeaderInfo];
}
@synchronized(aPlayer)
{
if(aPlayer==nil)
{
aPlayer = [[AudioPlayer alloc]initAudio:nil withCodecCtx:(AVCodecContext *) pAudioCodecCtx];
NSLog(@"aPlayer initAudio");
if(bIsAACADTS)
{
aPlayer.vAACType = eAAC_ADTS;
//NSLog(@"is ADTS AAC");
}
}
else
{
if(vPktCount<5) // The voice is listened once image is rendered
{
vPktCount++;
}
else
{
if([aPlayer getStatus]!=eAudioRunning)
{
dispatch_async(dispatch_get_main_queue(), ^(void) {
@synchronized(aPlayer)
{
NSLog(@"aPlayer start play");
[aPlayer Play];
}
});
}
}
}
};
@synchronized(aPlayer)
{
int ret = 0;
ret = [aPlayer putAVPacket:&packet];
if(ret <= 0)
NSLog(@"Put Audio Packet Error!!");
}
// 20131024 albert.liao modfied end
if(bFirstIFrame==true)
{
switch(veVideoRecordState)
{
case eH264RecActive:
{
if ( pFormatCtx_Record )
{
h264_file_write_audio_frame(pFormatCtx_Record, pAudioCodecCtx, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);
}
else
{
NSLog(#"pFormatCtx_Record no exist");
}
}
}
}
}
else
{
//fprintf(stderr, "packet len=%d, Byte=%02X%02X%02X%02X%02X\n",\
packet.size, packet.data[0],packet.data[1],packet.data[2],packet.data[3], packet.data[4]);
}
// 20130525 albert.liao modified end
}
return frameFinished!=0;
}
avformat_write_header:
int h264_file_create(const char *pFilePath, AVFormatContext *fc, AVCodecContext *pCodecCtx,AVCodecContext *pAudioCodecCtx, double fps, void *p, int len )
{
int vRet=0;
AVOutputFormat *of=NULL;
AVStream *pst=NULL;
AVCodecContext *pcc=NULL, *pAudioOutputCodecContext=NULL;
avcodec_register_all();
av_register_all();
av_log_set_level(AV_LOG_VERBOSE);
if(!pFilePath)
{
fprintf(stderr, "FilePath no exist");
return -1;
}
if(!fc)
{
fprintf(stderr, "AVFormatContext no exist");
return -1;
}
fprintf(stderr, "file=%s\n",pFilePath);
// Create container
of = av_guess_format( 0, pFilePath, 0 );
fc->oformat = of;
strcpy( fc->filename, pFilePath );
// Add video stream
pst = avformat_new_stream( fc, 0 );
vVideoStreamIdx = pst->index;
fprintf(stderr,"Video Stream:%d",vVideoStreamIdx);
pcc = pst->codec;
avcodec_get_context_defaults3( pcc, AVMEDIA_TYPE_VIDEO );
// Save the stream as origin setting without convert
pcc->codec_type = pCodecCtx->codec_type;
pcc->codec_id = pCodecCtx->codec_id;
pcc->bit_rate = pCodecCtx->bit_rate;
pcc->width = pCodecCtx->width;
pcc->height = pCodecCtx->height;
if(fps==0)
{
double fps=0.0;
AVRational pTimeBase;
pTimeBase.num = pCodecCtx->time_base.num;
pTimeBase.den = pCodecCtx->time_base.den;
fps = 1.0/ av_q2d(pCodecCtx->time_base)/ FFMAX(pCodecCtx->ticks_per_frame, 1);
fprintf(stderr,"fps_method(tbc): 1/av_q2d()=%g",fps);
pcc->time_base.num = 1;
pcc->time_base.den = fps;
}
else
{
pcc->time_base.num = 1;
pcc->time_base.den = fps;
}
// reference ffmpeg\libavformat\utils.c
// For SPS and PPS in avcC container
pcc->extradata = malloc(sizeof(uint8_t)*pCodecCtx->extradata_size);
memcpy(pcc->extradata, pCodecCtx->extradata, pCodecCtx->extradata_size);
pcc->extradata_size = pCodecCtx->extradata_size;
// For Audio stream
if(pAudioCodecCtx)
{
AVCodec *pAudioCodec=NULL;
AVStream *pst2=NULL;
pAudioCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
// Add audio stream
pst2 = avformat_new_stream( fc, pAudioCodec );
vAudioStreamIdx = pst2->index;
pAudioOutputCodecContext = pst2->codec;
avcodec_get_context_defaults3( pAudioOutputCodecContext, pAudioCodec );
fprintf(stderr,"Audio Stream:%d",vAudioStreamIdx);
fprintf(stderr,"pAudioCodecCtx->bits_per_coded_sample=%d",pAudioCodecCtx->bits_per_coded_sample);
pAudioOutputCodecContext->codec_type = AVMEDIA_TYPE_AUDIO;
pAudioOutputCodecContext->codec_id = AV_CODEC_ID_AAC;
// Copy the codec attributes
pAudioOutputCodecContext->channels = pAudioCodecCtx->channels;
pAudioOutputCodecContext->channel_layout = pAudioCodecCtx->channel_layout;
pAudioOutputCodecContext->sample_rate = pAudioCodecCtx->sample_rate;
pAudioOutputCodecContext->bit_rate = 12000;//pAudioCodecCtx->sample_rate * pAudioCodecCtx->bits_per_coded_sample;
pAudioOutputCodecContext->bits_per_coded_sample = pAudioCodecCtx->bits_per_coded_sample;
pAudioOutputCodecContext->profile = pAudioCodecCtx->profile;
//FF_PROFILE_AAC_LOW;
// pAudioCodecCtx->bit_rate;
// AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P
//pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;//pAudioCodecCtx->sample_fmt;
pAudioOutputCodecContext->sample_fmt = pAudioCodecCtx->sample_fmt;
//pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_U8;
pAudioOutputCodecContext->sample_aspect_ratio = pAudioCodecCtx->sample_aspect_ratio;
pAudioOutputCodecContext->time_base.num = pAudioCodecCtx->time_base.num;
pAudioOutputCodecContext->time_base.den = pAudioCodecCtx->time_base.den;
pAudioOutputCodecContext->ticks_per_frame = pAudioCodecCtx->ticks_per_frame;
pAudioOutputCodecContext->frame_size = 1024;
fprintf(stderr,"profile:%d, sample_rate:%d, channles:%d", pAudioOutputCodecContext->profile, pAudioOutputCodecContext->sample_rate, pAudioOutputCodecContext->channels);
AVDictionary *opts = NULL;
av_dict_set(&opts, "strict", "experimental", 0);
if (avcodec_open2(pAudioOutputCodecContext, pAudioCodec, &opts) < 0) {
fprintf(stderr, "\ncould not open codec\n");
}
av_dict_free(&opts);
#if 0
// For Audio, this part is no need
if(pAudioCodecCtx->extradata_size!=0)
{
NSLog(#"extradata_size !=0");
pAudioOutputCodecContext->extradata = malloc(sizeof(uint8_t)*pAudioCodecCtx->extradata_size);
memcpy(pAudioOutputCodecContext->extradata, pAudioCodecCtx->extradata, pAudioCodecCtx->extradata_size);
pAudioOutputCodecContext->extradata_size = pAudioCodecCtx->extradata_size;
}
else
{
// For WMA test only
pAudioOutputCodecContext->extradata_size = 0;
NSLog(#"extradata_size ==0");
}
#endif
}
if(fc->oformat->flags & AVFMT_GLOBALHEADER)
{
pcc->flags |= CODEC_FLAG_GLOBAL_HEADER;
pAudioOutputCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
if ( !( fc->oformat->flags & AVFMT_NOFILE ) )
{
vRet = avio_open( &fc->pb, fc->filename, AVIO_FLAG_WRITE );
if(vRet!=0)
{
fprintf(stderr,"avio_open(%s) error", fc->filename);
}
}
// dump format in console
av_dump_format(fc, 0, pFilePath, 1);
vRet = avformat_write_header( fc, NULL );
if(vRet==0)
return 1;
else
return 0;
}
av_interleaved_write_frame:
void h264_file_write_frame(AVFormatContext *fc, int vStreamIdx, const void* p, int len, int64_t dts, int64_t pts )
{
AVStream *pst = NULL;
AVPacket pkt;
if ( 0 > vVideoStreamIdx )
return;
// may be audio or video
pst = fc->streams[ vStreamIdx ];
// Init packet
av_init_packet( &pkt );
if(vStreamIdx ==vVideoStreamIdx)
{
pkt.flags |= ( 0 >= getVopType( p, len ) ) ? AV_PKT_FLAG_KEY : 0;
//pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = pst->index;
pkt.data = (uint8_t*)p;
pkt.size = len;
pkt.dts = AV_NOPTS_VALUE;
pkt.pts = AV_NOPTS_VALUE;
// TODO: mark or unmark the log
//fprintf(stderr, "dts=%lld, pts=%lld\n",dts,pts);
// av_write_frame( fc, &pkt );
}
av_interleaved_write_frame( fc, &pkt );
}
av_write_trailer:
void h264_file_close(AVFormatContext *fc)
{
if ( !fc )
return;
av_write_trailer( fc );
if ( fc->oformat && !( fc->oformat->flags & AVFMT_NOFILE ) && fc->pb )
avio_close( fc->pb );
av_free( fc );
}
Thanks.
It looks like you're using the same AVFormatContext for both the input and output?
In the line
pst = fc->streams[ vStreamIdx ];
You're assigning the AVStream* from the AVFormatContext connected to your input (the RTSP stream), but then later on you're trying to write the packet back to that same context with av_interleaved_write_frame( fc, &pkt );. I kind of think of a context as a file, which has helped me navigate this type of thing better. I do something identical to what you're doing (not on iOS though), where I use a separate AVFormatContext for each of the input (RTSP stream) and output (mp4 file). If I'm correct, I think what you need to do is initialize a second AVFormatContext for the output and write your packets to that one.
The following code (without error checking everything) is what I do to take an AVFormatContext * output_format_context = NULL and the AVFormatContext * input_format_context that I had associated with the RTSP stream, and write from one to the other. This is after I have fetched a packet, etc., which in your case you look to be populating already (I just take the packet from av_read_frame and re-package it).
This is code that could be in your write frame function (but it also does include the writing of the header).
AVFormatContext * output_format_context;
AVStream * in_stream_2;
AVStream * out_stream_2;
// Allocate the context with the output file
avformat_alloc_output_context2(&output_format_context, NULL, NULL, out_filename.c_str());
// Point to AVOutputFormat * output_format for manipulation
output_format = output_format_context->oformat;
// Loop through all streams
for (i = 0; i < input_format_context->nb_streams; i++) {
// Create a pointer to the input stream that was allocated earlier in the code
AVStream *in_stream = input_format_context->streams[i];
// Create a pointer to a new stream that will be part of the output
AVStream *out_stream = avformat_new_stream(output_format_context, in_stream->codec->codec);
// Set time_base of the new output stream to equal the input stream one since I'm not changing anything (can avoid but get a deprecation warning)
out_stream->time_base = in_stream->time_base;
// This is the non-deprecated way of copying all the parameters from the input stream into the output stream since everything stays the same
avcodec_parameters_from_context(out_stream->codecpar, in_stream->codec);
// I don't remember what this is for :)
out_stream->codec->codec_tag = 0;
// This just sets a flag from the format context to the stream relating to the header
if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
// Check NOFILE flag and open the output file context (previously the output file was associated with the format context, now it is actually opened).
if (!(output_format->flags & AVFMT_NOFILE))
avio_open(&output_format_context->pb, out_filename.c_str(), AVIO_FLAG_WRITE);
// Write the header (not sure if this is always needed, but for h264 I believe it is).
avformat_write_header(output_format_context,NULL);
// Re-getting the appropriate stream that was populated above (this should allow for both audio/video)
in_stream_2 = input_format_context->streams[packet.stream_index];
out_stream_2 = output_format_context->streams[packet.stream_index];
// Rescaling pts and dts, duration and pos - you would do as you need in your code.
packet.pts = av_rescale_q_rnd(packet.pts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.dts = av_rescale_q_rnd(packet.dts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.duration = av_rescale_q(packet.duration, in_stream_2->time_base, out_stream_2->time_base);
packet.pos = -1;
// The first packet of my stream always gives me negative dts/pts so this just protects that first one for my purposes. You probably don't need.
if (packet.dts < 0) packet.dts = 0;
if (packet.pts < 0) packet.pts = 0;
// Finally write the frame
av_interleaved_write_frame(output_format_context, &packet);
// ....
// Write header, close/cleanup... etc
// ....
This code is fairly bare bones and doesn't include the setup (which it sounds like you're doing correctly anyway). I would also imagine this code could be cleaned up and tweaked for your purposes, but this works for me to re-write the RTSP stream into a file (in my case many files but code not shown).
The code is C code, so you might need to make minor tweaks for Swift compatibility (for some of the library function calls, maybe), but I think overall it should be compatible.
Hopefully this helps point you in the right direction. This was cobbled together thanks to several sample code sources (I don't remember where), along with warning prompts from the libraries themselves.

mupdf renders jpeg2000 but loses color?

I am working on an Android project that uses VuDroid, which in turn uses MuPDF version 0.5.
VuDroid removed MuPDF's original OpenJPEG support, so I have ported the OpenJPEG support from MuPDF version 1.5.
But I've run into a new problem: the color information in the JPX image is gone. (The original post compared two screenshots, the desired effect and my effect, which are not reproduced here.)
The ported load-jpx code:
#include "fitz.h"
#include "mupdf.h"
/* Without the definition of OPJ_STATIC, compilation fails on windows
* due to the use of __stdcall. We believe it is required on some
* linux toolchains too. */
#define OPJ_STATIC
#ifndef _MSC_VER
#define OPJ_HAVE_STDINT_H
#endif
#include <openjpeg.h>
static void fz_opj_error_callback(const char *msg, void *client_data)
{
//fz_context *ctx = (fz_context *)client_data;
//fz_warn(ctx, "openjpeg error: %s", msg);
}
static void fz_opj_warning_callback(const char *msg, void *client_data)
{
//fz_context *ctx = (fz_context *)client_data;
//fz_warn(ctx, "openjpeg warning: %s", msg);
}
static void fz_opj_info_callback(const char *msg, void *client_data)
{
/* fz_warn("openjpeg info: %s", msg); */
}
typedef struct stream_block_s
{
unsigned char *data;
int size;
int pos;
} stream_block;
static OPJ_SIZE_T fz_opj_stream_read(void * p_buffer, OPJ_SIZE_T p_nb_bytes, void * p_user_data)
{
stream_block *sb = (stream_block *)p_user_data;
int len;
len = sb->size - sb->pos;
if (len < 0)
len = 0;
if (len == 0)
return (OPJ_SIZE_T)-1; /* End of file! */
if ((OPJ_SIZE_T)len > p_nb_bytes)
len = p_nb_bytes;
memcpy(p_buffer, sb->data + sb->pos, len);
sb->pos += len;
return len;
}
static OPJ_OFF_T fz_opj_stream_skip(OPJ_OFF_T skip, void * p_user_data)
{
stream_block *sb = (stream_block *)p_user_data;
if (skip > sb->size - sb->pos)
skip = sb->size - sb->pos;
sb->pos += skip;
return sb->pos;
}
static OPJ_BOOL fz_opj_stream_seek(OPJ_OFF_T seek_pos, void * p_user_data)
{
stream_block *sb = (stream_block *)p_user_data;
if (seek_pos > sb->size)
return OPJ_FALSE;
sb->pos = seek_pos;
return OPJ_TRUE;
}
fz_error
fz_load_jpx(pdf_image* img, unsigned char *data, int size, fz_colorspace *defcs, int indexed)
{
//fz_pixmap *img;
opj_dparameters_t params;
opj_codec_t *codec;
opj_image_t *jpx;
opj_stream_t *stream;
fz_colorspace *colorspace;
unsigned char *p;
OPJ_CODEC_FORMAT format;
int a, n, w, h, depth, sgnd;
int x, y, k, v;
stream_block sb;
if (size < 2)
fz_throw("not enough data to determine image format");
/* Check for SOC marker -- if found we have a bare J2K stream */
if (data[0] == 0xFF && data[1] == 0x4F)
format = OPJ_CODEC_J2K;
else
format = OPJ_CODEC_JP2;
opj_set_default_decoder_parameters(&params);
if (indexed)
params.flags |= OPJ_DPARAMETERS_IGNORE_PCLR_CMAP_CDEF_FLAG;
codec = opj_create_decompress(format);
opj_set_info_handler(codec, fz_opj_info_callback, 0);
opj_set_warning_handler(codec, fz_opj_warning_callback, 0);
opj_set_error_handler(codec, fz_opj_error_callback, 0);
if (!opj_setup_decoder(codec, &params))
{
fz_throw("j2k decode failed");
}
stream = opj_stream_default_create(OPJ_TRUE);
sb.data = data;
sb.pos = 0;
sb.size = size;
opj_stream_set_read_function(stream, fz_opj_stream_read);
opj_stream_set_skip_function(stream, fz_opj_stream_skip);
opj_stream_set_seek_function(stream, fz_opj_stream_seek);
opj_stream_set_user_data(stream, &sb);
/* Set the length to avoid an assert */
opj_stream_set_user_data_length(stream, size);
if (!opj_read_header(stream, codec, &jpx))
{
opj_stream_destroy(stream);
opj_destroy_codec(codec);
fz_throw("Failed to read JPX header");
}
if (!opj_decode(codec, stream, jpx))
{
opj_stream_destroy(stream);
opj_destroy_codec(codec);
opj_image_destroy(jpx);
fz_throw("Failed to decode JPX image");
}
opj_stream_destroy(stream);
opj_destroy_codec(codec);
/* jpx should never be NULL here, but check anyway */
if (!jpx)
fz_throw("opj_decode failed");
pdf_logimage("opj_decode succeeded");
for (k = 1; k < (int)jpx->numcomps; k++)
{
if (!jpx->comps[k].data)
{
opj_image_destroy(jpx);
fz_throw("image components are missing data");
}
if (jpx->comps[k].w != jpx->comps[0].w)
{
opj_image_destroy(jpx);
fz_throw("image components have different width");
}
if (jpx->comps[k].h != jpx->comps[0].h)
{
opj_image_destroy(jpx);
fz_throw("image components have different height");
}
if (jpx->comps[k].prec != jpx->comps[0].prec)
{
opj_image_destroy(jpx);
fz_throw("image components have different precision");
}
}
n = jpx->numcomps;
w = jpx->comps[0].w;
h = jpx->comps[0].h;
depth = jpx->comps[0].prec;
sgnd = jpx->comps[0].sgnd;
if (jpx->color_space == OPJ_CLRSPC_SRGB && n == 4) { n = 3; a = 1; }
else if (jpx->color_space == OPJ_CLRSPC_SYCC && n == 4) { n = 3; a = 1; }
else if (n == 2) { n = 1; a = 1; }
else if (n > 4) { n = 4; a = 1; }
else { a = 0; }
if (defcs)
{
if (defcs->n == n)
{
colorspace = defcs;
}
else
{
fz_warn("jpx file and dict colorspaces do not match");
defcs = NULL;
}
}
if (!defcs)
{
switch (n)
{
case 1: colorspace = pdf_devicegray; break;
case 3: colorspace = pdf_devicergb; break;
case 4: colorspace = pdf_devicecmyk; break;
}
}
//error = fz_new_pixmap(&img, colorspace, w, h);
//if (error)
// return error;
pdf_logimage("colorspace handled\n");
int bpc = 1;
if (colorspace) {
bpc = 1 + colorspace->n;
};
pdf_logimage("w = %d, bpc = %d, h = %d\n", w, bpc, h);
img->samples = fz_newbuffer(w * bpc * h);
//opj_image_destroy(jpx);
//fz_throw("out of memory loading jpx");
p = (char*)img->samples->bp;
pdf_logimage("start to deal with samples");
for (y = 0; y < h; y++)
{
for (x = 0; x < w; x++)
{
for (k = 0; k < n + a; k++)
{
v = jpx->comps[k].data[y * w + x];
if (sgnd)
v = v + (1 << (depth - 1));
if (depth > 8)
v = v >> (depth - 8);
*p++ = v;
}
if (!a)
*p++ = 255;
}
}
img->samples->wp = p;
pdf_logimage("start to deal with samples succeeded");
opj_image_destroy(jpx);
// if (a)
// {
// if (n == 4)
// {
// fz_pixmap *tmp = fz_new_pixmap(ctx, fz_device_rgb(ctx), w, h);
// fz_convert_pixmap(ctx, tmp, img);
// fz_drop_pixmap(ctx, img);
// img = tmp;
// }
// fz_premultiply_pixmap(ctx, img);
// }
return fz_okay;
}
The render code:
JNIEXPORT jbyteArray JNICALL Java_org_vudroid_pdfdroid_codec_PdfPage_drawPage
(JNIEnv *env, jclass clazz, jlong dochandle, jlong pagehandle)
{
renderdocument_t *doc = (renderdocument_t*) dochandle;
renderpage_t *page = (renderpage_t*) pagehandle;
//DEBUG("PdfView(%p).drawpage(%p, %p)", this, doc, page);
fz_error error;
fz_matrix ctm;
fz_irect viewbox;
fz_pixmap *pixmap;
jfloat *matrix;
jint *viewboxarr;
jint *dimen;
jint *buffer;
int length, val;
pixmap = nil;
/* initialize parameter arrays for MuPDF */
ctm.a = 1;
ctm.b = 0;
ctm.c = 0;
ctm.d = 1;
ctm.e = 0;
ctm.f = 0;
// matrix = (*env)->GetPrimitiveArrayCritical(env, matrixarray, 0);
// ctm.a = matrix[0];
// ctm.b = matrix[1];
// ctm.c = matrix[2];
// ctm.d = matrix[3];
// ctm.e = matrix[4];
// ctm.f = matrix[5];
// (*env)->ReleasePrimitiveArrayCritical(env, matrixarray, matrix, 0);
// DEBUG("Matrix: %f %f %f %f %f %f",
// ctm.a, ctm.b, ctm.c, ctm.d, ctm.e, ctm.f);
// viewboxarr = (*env)->GetPrimitiveArrayCritical(env, viewboxarray, 0);
// viewbox.x0 = viewboxarr[0];
// viewbox.y0 = viewboxarr[1];
// viewbox.x1 = viewboxarr[2];
// viewbox.y1 = viewboxarr[3];
// (*env)->ReleasePrimitiveArrayCritical(env, viewboxarray, viewboxarr, 0);
// DEBUG("Viewbox: %d %d %d %d",
// viewbox.x0, viewbox.y0, viewbox.x1, viewbox.y1);
viewbox.x0 = 0;
viewbox.y0 = 0;
viewbox.x1 = 595;
viewbox.y1 = 841;
/* do the rendering */
DEBUG("doing the rendering...");
//buffer = (*env)->GetPrimitiveArrayCritical(env, bufferarray, 0);
// do the actual rendering:
error = fz_rendertree(&pixmap, doc->rast, page->page->tree,
ctm, viewbox, 1);
/* evil magic: we transform the rendered image's byte order
*/
int x, y;
if (bmpdata)
fz_free(bmpdata);
bmpstride = ((pixmap->w * 3 + 3) / 4) * 4;
bmpdata = fz_malloc(pixmap->h * bmpstride);
DEBUG("inside drawpage, bmpstride = %d, pixmap->w = %d, pixmap->h = %d\n", bmpstride, pixmap->w, pixmap->h);
if (!bmpdata)
return;
for (y = 0; y < pixmap->h; y++)
{
unsigned char *p = bmpdata + y * bmpstride;
unsigned char *s = pixmap->samples + y * pixmap->w * 4;
for (x = 0; x < pixmap->w; x++)
{
p[x * 3 + 0] = s[x * 4 + 3];
p[x * 3 + 1] = s[x * 4 + 2];
p[x * 3 + 2] = s[x * 4 + 1];
}
}
FILE* fp = fopen("/sdcard/drawpage", "wb");
fwrite(bmpdata, pixmap->h * bmpstride, 1, fp);
fclose(fp);
jbyteArray array = (*env)->NewByteArray(env, pixmap->h * bmpstride);
(*env)->SetByteArrayRegion(env, array, 0, pixmap->h * bmpstride, bmpdata);
// if(!error) {
// DEBUG("Converting image buffer pixel order");
// length = pixmap->w * pixmap->h;
// unsigned int *col = pixmap->samples;
// int c = 0;
// for(val = 0; val < length; val++) {
// col[val] = ((col[val] & 0xFF000000) >> 24) |
// ((col[val] & 0x00FF0000) >> 8) |
// ((col[val] & 0x0000FF00) << 8);
// }
// winconvert(pixmap);
// }
// (*env)->ReleasePrimitiveArrayCritical(env, bufferarray, buffer, 0);
fz_free(pixmap);
if (error) {
DEBUG("error!");
throw_exception(env, "error rendering page");
}
DEBUG("PdfView.drawPage() done");
return array;
}
I have compared the JPX output samples to those of mupdf-1.5 on Windows and they are the same, but the colorspace of the original JPX is gone.
Could someone help me get the colorspace back?
It seems you are trying to use an old version of MuPDF with some bits pulled in from a more recent version. To be honest, that's hardly likely to work. I would also guess that it's not the OpenJPEG library causing your problem, since the image appears, just converted to grayscale.
Have you tried opening the file in the current version of MuPDF? Does it work?
If so, then it seems to me your correct approach should be to use the current code, not try to bolt pieces onto an older version.

How to read an Audio Queue Services buffer byte by byte?

I am recording sound from the mic input using Audio Queue Services.
-(void)startRecording{
[self setupAudioFormat:&recordState.dataFormat];
recordState.currentPacket = 0;
OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat,
AudioInputCallback,
&recordState,
CFRunLoopGetCurrent(),
kCFRunLoopCommonModes,
0,
&recordState.queue);
if (status == 0)
{
// Prime recording buffers with empty data
for (int i = 0; i < NUM_BUFFERS; i++)
{
NSLog(#"buf in");
AudioQueueAllocateBuffer(recordState.queue, 16000, &recordState.buffers[i]);
AudioQueueEnqueueBuffer (recordState.queue, recordState.buffers[i], 0, NULL);
}
status = AudioFileCreateWithURL(fileURL,
kAudioFileAIFFType,
&recordState.dataFormat,
kAudioFileFlags_EraseFile,
&recordState.audioFile);
if (status == 0)
{
recordState.recording = true;
status = AudioQueueStart(recordState.queue, NULL);
if (status == 0)
{
NSLog(#"Recording");
}
}
}
if (status != 0)
{
//[self stopRecording];
NSLog(#"recording failed");
}
}
on callback:
void AudioInputCallback(void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumberPacketDescriptions,
const AudioStreamPacketDescription * inPacketDescs)
{
RecordState * recordState = (RecordState*)inUserData;
if (!recordState->recording)
{
printf("Not recording, returning\n");
}
// if (inNumberPacketDescriptions == 0 && recordState->dataFormat.mBytesPerPacket != 0)
// {
// inNumberPacketDescriptions = inBuffer->mAudioDataByteSize / recordState->dataFormat.mBytesPerPacket;
// }
/*
int sampleCount = recordState->buffers[0]->mAudioDataBytesCapacity / sizeof (AUDIO_DATA_TYPE_FORMAT);
NSLog(#"sample count = %i",sampleCount);
AUDIO_DATA_TYPE_FORMAT *p = (AUDIO_DATA_TYPE_FORMAT*)recordState->buffers[0]->mAudioData;
for (int i = 0; i < sampleCount; i++) {
if (p[i] > 1000) {
NSLog(#"%hd",p[i]);
}
}*/
printf("Writing buffer %lld\n", recordState->currentPacket);
OSStatus status = AudioFileWritePackets(recordState->audioFile,
false,
inBuffer->mAudioDataByteSize,
inPacketDescs,
recordState->currentPacket,
&inNumberPacketDescriptions,
inBuffer->mAudioData);
if (status == 0)
{
recordState->buffers[0] = nil;
recordState->currentPacket += inNumberPacketDescriptions;
}
AudioQueueEnqueueBuffer(recordState->queue, inBuffer, 0, NULL);
}
Here I want to read the recorded buffer. Is it possible to get something like this:
short[] buffer = ?;//here should an audio buffer converted to some structure (short[] just for example)
Then I would like to read every element of this structure:
for (int i = 0; i < sizeOfBuffer; i++) {
bufferVal = buffer[i];
}
In short, how do I handle the buffer while recording?
Thanks.
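A minimal sketch of what reading the buffer could look like (my addition, assuming setupAudioFormat: configured 16-bit linear PCM, which is typical when writing AIFF): inside AudioInputCallback the raw bytes are already available through inBuffer, so they can be walked as samples without any conversion step.
// Assumes 16-bit linear PCM; adjust the element type for other formats.
const int16_t *samples = (const int16_t *)inBuffer->mAudioData;
UInt32 sampleCount = inBuffer->mAudioDataByteSize / sizeof(int16_t);
for (UInt32 i = 0; i < sampleCount; i++) {
  int16_t value = samples[i]; // one PCM sample; inspect or copy it here
}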