I am seeking an example of using Audio Queue Services.
I would like to create a sound using a mathematical equation and then hear it.
Here's my code for generating sound from a function. I'm assuming you know how to use AudioQueue services, set up an AudioSession, and properly start and stop an audio output queue.
Here's a snippet for setting up and starting an output AudioQueue:
// Get the preferred sample rate (8,000 Hz on iPhone, 44,100 Hz on iPod touch)
size = sizeof(sampleRate);
err = AudioSessionGetProperty (kAudioSessionProperty_CurrentHardwareSampleRate, &size, &sampleRate);
if (err != noErr) NSLog(@"AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate) error: %d", err);
//NSLog (@"Current hardware sample rate: %1.0f", sampleRate);
BOOL isHighSampleRate = (sampleRate > 16000);
int bufferByteSize;
AudioQueueBufferRef buffer;
// Set up stream format fields
AudioStreamBasicDescription streamFormat;
streamFormat.mSampleRate = sampleRate;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
streamFormat.mBitsPerChannel = 16;
streamFormat.mChannelsPerFrame = 1;
streamFormat.mBytesPerPacket = 2 * streamFormat.mChannelsPerFrame;
streamFormat.mBytesPerFrame = 2 * streamFormat.mChannelsPerFrame;
streamFormat.mFramesPerPacket = 1;
streamFormat.mReserved = 0;
// New output queue ---- PLAYBACK ----
if (isPlaying == NO) {
err = AudioQueueNewOutput (&streamFormat, AudioEngineOutputBufferCallback, self, nil, nil, 0, &outputQueue);
if (err != noErr) NSLog(@"AudioQueueNewOutput() error: %d", err);
// Enqueue buffers
//outputFrequency = 0.0;
outputBuffersToRewrite = 3;
bufferByteSize = isHighSampleRate ? 2176 : 512; // 40.5 Hz : 31.25 Hz
for (int i = 0; i < 3; i++) {
err = AudioQueueAllocateBuffer (outputQueue, bufferByteSize, &buffer);
if (err == noErr) {
[self generateTone: buffer];
err = AudioQueueEnqueueBuffer (outputQueue, buffer, 0, nil);
if (err != noErr) NSLog(@"AudioQueueEnqueueBuffer() error: %d", err);
} else {
NSLog(@"AudioQueueAllocateBuffer() error: %d", err);
return;
}
}
// Start playback
isPlaying = YES;
err = AudioQueueStart(outputQueue, nil);
if (err != noErr) { NSLog(@"AudioQueueStart() error: %d", err); isPlaying = NO; return; }
} else {
NSLog (@"Error: audio is already playing back.");
}
Here's the part that generates the tone:
// AudioQueue output queue callback.
void AudioEngineOutputBufferCallback (void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer) {
AudioEngine *engine = (AudioEngine*) inUserData;
[engine processOutputBuffer:inBuffer queue:inAQ];
}
- (void) processOutputBuffer: (AudioQueueBufferRef) buffer queue:(AudioQueueRef) queue {
OSStatus err;
if (isPlaying == YES) {
[outputLock lock];
if (outputBuffersToRewrite > 0) {
outputBuffersToRewrite--;
[self generateTone:buffer];
}
err = AudioQueueEnqueueBuffer(queue, buffer, 0, NULL);
if (err == 560030580) { // Queue is not active due to Music being started or other reasons
isPlaying = NO;
} else if (err != noErr) {
NSLog(@"AudioQueueEnqueueBuffer() error %d", err);
}
[outputLock unlock];
} else {
err = AudioQueueStop (queue, NO);
if (err != noErr) NSLog(@"AudioQueueStop() error: %d", err);
}
}
-(void) generateTone: (AudioQueueBufferRef) buffer {
if (outputFrequency == 0.0) {
memset(buffer->mAudioData, 0, buffer->mAudioDataBytesCapacity);
buffer->mAudioDataByteSize = buffer->mAudioDataBytesCapacity;
} else {
// Make the buffer length a multiple of the wavelength for the output frequency.
int sampleCount = buffer->mAudioDataBytesCapacity / sizeof (SInt16);
double bufferLength = sampleCount;
double wavelength = sampleRate / outputFrequency;
double repetitions = floor (bufferLength / wavelength);
if (repetitions > 0.0) {
sampleCount = round (wavelength * repetitions);
}
double x, y;
double sd = 1.0 / sampleRate;
double amp = 0.9;
double max16bit = SHRT_MAX;
int i;
SInt16 *p = buffer->mAudioData;
for (i = 0; i < sampleCount; i++) {
x = i * sd * outputFrequency;
switch (outputWaveform) {
case kSine:
y = sin (x * 2.0 * M_PI);
break;
case kTriangle:
x = fmod (x, 1.0);
if (x < 0.25)
y = x * 4.0; // up 0.0 to 1.0
else if (x < 0.75)
y = (1.0 - x) * 4.0 - 2.0; // down 1.0 to -1.0
else
y = (x - 1.0) * 4.0; // up -1.0 to 0.0
break;
case kSawtooth:
y = 0.8 - fmod (x, 1.0) * 1.8;
break;
case kSquare:
y = (fmod(x, 1.0) < 0.5)? 0.7: -0.7;
break;
default: y = 0; break;
}
p[i] = y * max16bit * amp;
}
buffer->mAudioDataByteSize = sampleCount * sizeof (SInt16);
}
}
Something to watch out for is that your callback will be called on a non-main thread, so you have to practice thread safety with locks, mutexes, or other techniques.
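For example, here is a minimal sketch of that locking pattern, reusing the outputLock, outputFrequency, and outputBuffersToRewrite shown above (the setter name is just illustrative; the NSLock itself would be created in init):
- (void) setOutputFrequency: (double) newFrequency {
    [outputLock lock];
    outputFrequency = newFrequency;   // written on the main thread, read on the callback thread
    outputBuffersToRewrite = 3;       // ask processOutputBuffer: to regenerate the queued buffers
    [outputLock unlock];
}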
This is a C# version of the same sample from @lucius:
void SetupAudio ()
{
AudioSession.Initialize ();
AudioSession.Category = AudioSessionCategory.MediaPlayback;
sampleRate = AudioSession.CurrentHardwareSampleRate;
var format = new AudioStreamBasicDescription () {
SampleRate = sampleRate,
Format = AudioFormatType.LinearPCM,
FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked,
BitsPerChannel = 16,
ChannelsPerFrame = 1,
BytesPerFrame = 2,
BytesPerPacket = 2,
FramesPerPacket = 1,
};
var queue = new OutputAudioQueue (format);
var bufferByteSize = (sampleRate > 16000)? 2176 : 512; // 40.5 Hz : 31.25 Hz
var buffers = new AudioQueueBuffer* [numBuffers];
for (int i = 0; i < numBuffers; i++){
queue.AllocateBuffer (bufferByteSize, out buffers [i]);
GenerateTone (buffers [i]);
queue.EnqueueBuffer (buffers [i], null);
}
queue.OutputCompleted += (object sender, OutputCompletedEventArgs e) => {
queue.EnqueueBuffer (e.UnsafeBuffer, null);
};
queue.Start ();
}
This is the tone generator:
void GenerateTone (AudioQueueBuffer *buffer)
{
// Make the buffer length a multiple of the wavelength for the output frequency.
uint sampleCount = buffer->AudioDataBytesCapacity / 2;
double bufferLength = sampleCount;
double wavelength = sampleRate / outputFrequency;
double repetitions = Math.Floor (bufferLength / wavelength);
if (repetitions > 0)
sampleCount = (uint)Math.Round (wavelength * repetitions);
double x, y;
double sd = 1.0 / sampleRate;
double amp = 0.9;
double max16bit = Int16.MaxValue;
int i;
short *p = (short *) buffer->AudioData;
for (i = 0; i < sampleCount; i++) {
x = i * sd * outputFrequency;
switch (outputWaveForm) {
case WaveForm.Sine:
y = Math.Sin (x * 2.0 * Math.PI);
break;
case WaveForm.Triangle:
x = x % 1.0;
if (x < 0.25)
y = x * 4.0; // up 0.0 to 1.0
else if (x < 0.75)
y = (1.0 - x) * 4.0 - 2.0; // down 1.0 to -1.0
else
y = (x - 1.0) * 4.0; // up -1.0 to 0.0
break;
case WaveForm.Sawtooth:
y = 0.8 - (x % 1.0) * 1.8;
break;
case WaveForm.Square:
y = ((x % 1.0) < 0.5)? 0.7: -0.7;
break;
default: y = 0; break;
}
p[i] = (short)(y * max16bit * amp);
}
buffer->AudioDataByteSize = sampleCount * 2;
}
You also want these definitions:
enum WaveForm {
Sine, Triangle, Sawtooth, Square
}
WaveForm outputWaveForm;
const float outputFrequency = 220;
const int numBuffers = 3; // matches the three buffers used in the Objective-C sample
double sampleRate;
High level: use AVAudioPlayer: https://github.com/hollance/AVBufferPlayer
Mid level: Audio Queues. trailsinthesand.com/exploring-iphone-audio-part-1/ gets you going nicely. (Note: the http:// prefix is left off deliberately; the domain has since changed hands and now redirects to an unrelated site.)
Low level: alternatively, you can drop down a level and do it with Audio Units: http://cocoawithlove.com/2010/10/ios-tone-generator-introduction-to.html
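To give a feel for that last option: with Audio Units you register a render callback and fill the buffers the system pulls from you with computed samples, much like generateTone: above. A minimal sketch, assuming a RemoteIO unit configured elsewhere for mono 32-bit float output and a hypothetical ToneState struct passed as inRefCon:
#include <AudioUnit/AudioUnit.h>
#include <math.h>

typedef struct { double phase; double frequency; double sampleRate; } ToneState;

static OSStatus ToneRenderCallback(void *inRefCon,
                                   AudioUnitRenderActionFlags *ioActionFlags,
                                   const AudioTimeStamp *inTimeStamp,
                                   UInt32 inBusNumber,
                                   UInt32 inNumberFrames,
                                   AudioBufferList *ioData)
{
    ToneState *state = (ToneState *)inRefCon;
    double phaseIncrement = 2.0 * M_PI * state->frequency / state->sampleRate;
    Float32 *out = (Float32 *)ioData->mBuffers[0].mData;
    for (UInt32 frame = 0; frame < inNumberFrames; frame++) {
        out[frame] = (Float32)sin(state->phase);   // swap in any other waveform equation here
        state->phase += phaseIncrement;
        if (state->phase > 2.0 * M_PI) state->phase -= 2.0 * M_PI;
    }
    return noErr;
}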
I have written a demo based on Apple's official documentation, in Swift, and I found that CPU usage is lower in Objective-C than in Swift.
Does that mean Objective-C is much more efficient than Swift for handling Metal apps?
I'm confused, because many people say that Swift is faster than Objective-C in general. Or is this just an exception?
The demo involves pointer management. I know handling pointers in Swift is awkward; maybe that is why the app uses so much more CPU in Swift? I am still investigating.
The demo below is a triple-buffer model that renders hundreds of small quads, updates their positions at the start of each frame, and writes them into a vertex buffer. It uses semaphores to wait for full frame completion in case the CPU is running too far ahead of the GPU.
This is the relevant part of the code from Apple's official documentation:
- (void)updateState
{
AAPLVertex *currentSpriteVertices = _vertexBuffers[_currentBuffer].contents;
NSUInteger currentVertex = _totalSpriteVertexCount-1;
NSUInteger spriteIdx = (_rowsOfSprites * _spritesPerRow)-1;
for(NSInteger row = _rowsOfSprites - 1; row >= 0; row--)
{
float startY = _sprites[spriteIdx].position.y;
for(NSInteger spriteInRow = _spritesPerRow-1; spriteInRow >= 0; spriteInRow--)
{
vector_float2 updatedPosition = _sprites[spriteIdx].position;
if(spriteInRow == 0)
{
updatedPosition.y = startY;
}
else
{
updatedPosition.y = _sprites[spriteIdx-1].position.y;
}
_sprites[spriteIdx].position = updatedPosition;
for(NSInteger vertexOfSprite = AAPLSprite.vertexCount-1; vertexOfSprite >= 0 ; vertexOfSprite--)
{
currentSpriteVertices[currentVertex].position = AAPLSprite.vertices[vertexOfSprite].position + _sprites[spriteIdx].position;
currentSpriteVertices[currentVertex].color = _sprites[spriteIdx].color;
currentVertex--;
}
spriteIdx--;
}
}
}
- (void)drawInMTKView:(nonnull MTKView *)view
{
dispatch_semaphore_wait(_inFlightSemaphore, DISPATCH_TIME_FOREVER);
_currentBuffer = (_currentBuffer + 1) % MaxBuffersInFlight;
[self updateState];
id<MTLCommandBuffer> commandBuffer = [_commandQueue commandBuffer];
commandBuffer.label = @"MyCommand";
__block dispatch_semaphore_t block_sema = _inFlightSemaphore;
[commandBuffer addCompletedHandler:^(id<MTLCommandBuffer> buffer)
{
dispatch_semaphore_signal(block_sema);
}];
MTLRenderPassDescriptor *renderPassDescriptor = view.currentRenderPassDescriptor;
if(renderPassDescriptor != nil)
{
id<MTLRenderCommandEncoder> renderEncoder =
[commandBuffer renderCommandEncoderWithDescriptor:renderPassDescriptor];
renderEncoder.label = @"MyRenderEncoder";
[renderEncoder setCullMode:MTLCullModeBack];
[renderEncoder setRenderPipelineState:_pipelineState];
[renderEncoder setVertexBuffer:_vertexBuffers[_currentBuffer]
offset:0
atIndex:AAPLVertexInputIndexVertices];
[renderEncoder setVertexBytes:&_viewportSize
length:sizeof(_viewportSize)
atIndex:AAPLVertexInputIndexViewportSize];
[renderEncoder drawPrimitives:MTLPrimitiveTypeTriangle
vertexStart:0
vertexCount:_totalSpriteVertexCount];
[renderEncoder endEncoding];
[commandBuffer presentDrawable:view.currentDrawable];
}
[commandBuffer commit];
}
And this is the Swift version:
func updateSprite() {
let currentSpriteVertices = vertexBuffer[currentBuffer].contents().bindMemory(to: DFVertex.self, capacity: totalSpriteVertexCount * MemoryLayout<DFVertex>.size)
var currentVertex = totalSpriteVertexCount - 1
var spriteIdx = (rowsOfSprites * spritePerRow) - 1
for _ in stride(from: rowsOfSprites - 1, through: 0, by: -1) {
let startY = sprites[spriteIdx].position.y
for spriteInRow in stride(from: spritePerRow - 1, through: 0, by: -1) {
var updatePosition = sprites[spriteIdx].position
if spriteInRow == 0 {
updatePosition.y = startY
} else {
updatePosition.y = sprites[spriteIdx - 1].position.y
}
sprites[spriteIdx].position = updatePosition
for vertexOfSprite in stride(from: DFSprite.vertexCount - 1, through: 0, by: -1) {
currentSpriteVertices[currentVertex].position = DFSprite.vertices[vertexOfSprite].position + sprites[spriteIdx].position
currentSpriteVertices[currentVertex].color = sprites[spriteIdx].color
currentVertex -= 1
}
spriteIdx -= 1
}
}
}
func draw(in view: MTKView) {
inFlightSemaphore.wait()
currentBuffer = (currentBuffer + 1) % maxBufferInFlight
updateSprite()
let commandBuffer = commandQueue.makeCommandBuffer()
if commandBuffer == nil {
print("create command buffer failed.")
}
commandBuffer!.label = "command buffer"
commandBuffer!.addCompletedHandler { (buffer) in
self.inFlightSemaphore.signal()
}
if let renderPassDescriptor = view.currentRenderPassDescriptor,
let renderEncoder = commandBuffer!.makeRenderCommandEncoder(descriptor: renderPassDescriptor) {
renderEncoder.setCullMode(.back)
renderEncoder.setRenderPipelineState(pipelineState!)
renderEncoder.setVertexBuffer(vertexBuffer[currentBuffer],
offset: 0,
index: DFVertexInputIndex.vertex.rawValue)
renderEncoder.setVertexBytes(&viewportSize,
length: MemoryLayout.size(ofValue: viewportSize),
index: DFVertexInputIndex.viewportSize.rawValue)
renderEncoder.drawPrimitives(type: .triangle,
vertexStart: 0,
vertexCount: totalSpriteVertexCount)
renderEncoder.endEncoding()
commandBuffer!.present(view.currentDrawable!)
}
commandBuffer!.commit()
}
The result is that the app written in Objective-C uses about 40% CPU, while the Swift version uses about 100%. I thought Swift would be faster.
I have to encode a .wav file and write it into the same file, or another file, using the ffmpeg library. Here is my code for encoding:
-(void)audioencode:(const char *)fileName
{
AVFrame *frame;
AVPacket pkt;
int i, j, k, ret, got_output;
int buffer_size;
FILE *f;
uint16_t *samples;
const char *format_name = "wav";
const char *file_url = "/Users/xxxx/Downloads/simple-drum-beat.wav";
avcodec_register_all();
av_register_all();
AVOutputFormat *format = NULL;
for (AVOutputFormat *formatIter = av_oformat_next(NULL); formatIter != NULL; formatIter = av_oformat_next(formatIter))
{
int hasEncoder = NULL != avcodec_find_encoder(formatIter->audio_codec);
if (0 == strcmp(format_name, formatIter->name)) {
format = formatIter;
break;
}
}
AVCodec *codec = avcodec_find_encoder(format->audio_codec);
NSLog(@"tet test tststs");
AVCodecContext *c;
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate audio codec context\n");
exit(1);
}
c->sample_fmt = AV_SAMPLE_FMT_S16;
if (!check_sample_fmt(codec, c->sample_fmt)) {
fprintf(stderr, "Encoder does not support sample format %s",
av_get_sample_fmt_name(c->sample_fmt));
exit(1);
}
c->bit_rate = 64000;//705600;
c->sample_rate = select_sample_rate(codec);
c->channel_layout = select_channel_layout(codec);
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
c->frame_size = av_get_audio_frame_duration(c, 16);
int bits_per_sample = av_get_bits_per_sample(c->codec_id);
int frameSize = av_get_audio_frame_duration(c,16);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(fileName, "wb");
if (!f) {
fprintf(stderr, "Could not open %s\n", fileName);
exit(1);
}
/* frame containing input raw audio */
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
frame->nb_samples = frameSize/*c->frame_size*/;
frame->format = c->sample_fmt;
frame->channel_layout = c->channel_layout;
buffer_size = av_samples_get_buffer_size(NULL, c->channels,frameSize /*c->frame_size*/,
c->sample_fmt, 0);
samples = av_malloc(buffer_size);
if (!samples) {
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
buffer_size);
exit(1);
}
/* setup the data pointers in the AVFrame */
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
(const uint8_t*)samples, buffer_size, 0);
if (ret < 0) {
fprintf(stderr, "Could not setup audio frame\n");
exit(1);
}
float t, tincr;
/* encode a single tone sound */
t = 0;
tincr = 2 * M_PI * 440.0 / c->sample_rate;
for(i=0;i<800;i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
for (j = 0; j < frameSize/*c->frame_size*/; j++) {
samples[2*j] = (int)(sin(t) * 10000);
for (k = 1; k < c->channels; k++)
samples[2*j + k] = samples[2*j];
t += tincr;
}
/* encode the samples */
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
}
But after encoding, the file size is zero.
Please suggest what I'm doing wrong. Any help will be appreciated; thanks in advance.
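One likely contributor to the zero-byte output, judging from the code above, is that the encoder is never flushed and the FILE is never closed, so stdio-buffered data never reaches disk. (Also note that fwrite-ing raw packets produces a headerless stream, not a real .wav; writing a proper WAV would need the libavformat muxer.) A hedged sketch of the missing cleanup, reusing the same variables and the deprecated avcodec_encode_audio2() API from the question, to append at the end of audioencode::
/* Drain delayed frames from the encoder, then close and free everything. */
int flushed;
do {
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    ret = avcodec_encode_audio2(c, &pkt, NULL, &flushed);
    if (ret < 0)
        break;
    if (flushed) {
        fwrite(pkt.data, 1, pkt.size, f);
        av_free_packet(&pkt);
    }
} while (flushed);
fclose(f);            /* without this, buffered data may never hit disk */
av_freep(&samples);
av_frame_free(&frame);
avcodec_close(c);
av_free(c);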
I am working on an Android project that uses vudroid, which in turn uses MuPDF version 0.5.
Vudroid removed MuPDF's original OpenJPEG support, so I have ported the OpenJPEG support from MuPDF version 1.5.
But I have run into a new problem: the colour information in the JPX image is gone. (The original question included two screenshots here, the desired effect and my actual effect, which comes out in grayscale.)
The ported load-jpx code:
#include "fitz.h"
#include "mupdf.h"
/* Without the definition of OPJ_STATIC, compilation fails on windows
* due to the use of __stdcall. We believe it is required on some
* linux toolchains too. */
#define OPJ_STATIC
#ifndef _MSC_VER
#define OPJ_HAVE_STDINT_H
#endif
#include <openjpeg.h>
static void fz_opj_error_callback(const char *msg, void *client_data)
{
//fz_context *ctx = (fz_context *)client_data;
//fz_warn(ctx, "openjpeg error: %s", msg);
}
static void fz_opj_warning_callback(const char *msg, void *client_data)
{
//fz_context *ctx = (fz_context *)client_data;
//fz_warn(ctx, "openjpeg warning: %s", msg);
}
static void fz_opj_info_callback(const char *msg, void *client_data)
{
/* fz_warn("openjpeg info: %s", msg); */
}
typedef struct stream_block_s
{
unsigned char *data;
int size;
int pos;
} stream_block;
static OPJ_SIZE_T fz_opj_stream_read(void * p_buffer, OPJ_SIZE_T p_nb_bytes, void * p_user_data)
{
stream_block *sb = (stream_block *)p_user_data;
int len;
len = sb->size - sb->pos;
if (len < 0)
len = 0;
if (len == 0)
return (OPJ_SIZE_T)-1; /* End of file! */
if ((OPJ_SIZE_T)len > p_nb_bytes)
len = p_nb_bytes;
memcpy(p_buffer, sb->data + sb->pos, len);
sb->pos += len;
return len;
}
static OPJ_OFF_T fz_opj_stream_skip(OPJ_OFF_T skip, void * p_user_data)
{
stream_block *sb = (stream_block *)p_user_data;
if (skip > sb->size - sb->pos)
skip = sb->size - sb->pos;
sb->pos += skip;
return sb->pos;
}
static OPJ_BOOL fz_opj_stream_seek(OPJ_OFF_T seek_pos, void * p_user_data)
{
stream_block *sb = (stream_block *)p_user_data;
if (seek_pos > sb->size)
return OPJ_FALSE;
sb->pos = seek_pos;
return OPJ_TRUE;
}
fz_error
fz_load_jpx(pdf_image* img, unsigned char *data, int size, fz_colorspace *defcs, int indexed)
{
//fz_pixmap *img;
opj_dparameters_t params;
opj_codec_t *codec;
opj_image_t *jpx;
opj_stream_t *stream;
fz_colorspace *colorspace;
unsigned char *p;
OPJ_CODEC_FORMAT format;
int a, n, w, h, depth, sgnd;
int x, y, k, v;
stream_block sb;
if (size < 2)
fz_throw("not enough data to determine image format");
/* Check for SOC marker -- if found we have a bare J2K stream */
if (data[0] == 0xFF && data[1] == 0x4F)
format = OPJ_CODEC_J2K;
else
format = OPJ_CODEC_JP2;
opj_set_default_decoder_parameters(&params);
if (indexed)
params.flags |= OPJ_DPARAMETERS_IGNORE_PCLR_CMAP_CDEF_FLAG;
codec = opj_create_decompress(format);
opj_set_info_handler(codec, fz_opj_info_callback, 0);
opj_set_warning_handler(codec, fz_opj_warning_callback, 0);
opj_set_error_handler(codec, fz_opj_error_callback, 0);
if (!opj_setup_decoder(codec, &params))
{
fz_throw("j2k decode failed");
}
stream = opj_stream_default_create(OPJ_TRUE);
sb.data = data;
sb.pos = 0;
sb.size = size;
opj_stream_set_read_function(stream, fz_opj_stream_read);
opj_stream_set_skip_function(stream, fz_opj_stream_skip);
opj_stream_set_seek_function(stream, fz_opj_stream_seek);
opj_stream_set_user_data(stream, &sb);
/* Set the length to avoid an assert */
opj_stream_set_user_data_length(stream, size);
if (!opj_read_header(stream, codec, &jpx))
{
opj_stream_destroy(stream);
opj_destroy_codec(codec);
fz_throw("Failed to read JPX header");
}
if (!opj_decode(codec, stream, jpx))
{
opj_stream_destroy(stream);
opj_destroy_codec(codec);
opj_image_destroy(jpx);
fz_throw("Failed to decode JPX image");
}
opj_stream_destroy(stream);
opj_destroy_codec(codec);
/* jpx should never be NULL here, but check anyway */
if (!jpx)
fz_throw("opj_decode failed");
pdf_logimage("opj_decode succeeded");
for (k = 1; k < (int)jpx->numcomps; k++)
{
if (!jpx->comps[k].data)
{
opj_image_destroy(jpx);
fz_throw("image components are missing data");
}
if (jpx->comps[k].w != jpx->comps[0].w)
{
opj_image_destroy(jpx);
fz_throw("image components have different width");
}
if (jpx->comps[k].h != jpx->comps[0].h)
{
opj_image_destroy(jpx);
fz_throw("image components have different height");
}
if (jpx->comps[k].prec != jpx->comps[0].prec)
{
opj_image_destroy(jpx);
fz_throw("image components have different precision");
}
}
n = jpx->numcomps;
w = jpx->comps[0].w;
h = jpx->comps[0].h;
depth = jpx->comps[0].prec;
sgnd = jpx->comps[0].sgnd;
if (jpx->color_space == OPJ_CLRSPC_SRGB && n == 4) { n = 3; a = 1; }
else if (jpx->color_space == OPJ_CLRSPC_SYCC && n == 4) { n = 3; a = 1; }
else if (n == 2) { n = 1; a = 1; }
else if (n > 4) { n = 4; a = 1; }
else { a = 0; }
if (defcs)
{
if (defcs->n == n)
{
colorspace = defcs;
}
else
{
fz_warn("jpx file and dict colorspaces do not match");
defcs = NULL;
}
}
if (!defcs)
{
switch (n)
{
case 1: colorspace = pdf_devicegray; break;
case 3: colorspace = pdf_devicergb; break;
case 4: colorspace = pdf_devicecmyk; break;
}
}
//error = fz_new_pixmap(&img, colorspace, w, h);
//if (error)
// return error;
pdf_logimage("colorspace handled\n");
int bpc = 1;
if (colorspace) {
bpc = 1 + colorspace->n;
}
pdf_logimage("w = %d, bpc = %d, h = %d\n", w, bpc, h);
img->samples = fz_newbuffer(w * bpc * h);
//opj_image_destroy(jpx);
//fz_throw("out of memory loading jpx");
p = (unsigned char *)img->samples->bp;
pdf_logimage("start to deal with samples");
for (y = 0; y < h; y++)
{
for (x = 0; x < w; x++)
{
for (k = 0; k < n + a; k++)
{
v = jpx->comps[k].data[y * w + x];
if (sgnd)
v = v + (1 << (depth - 1));
if (depth > 8)
v = v >> (depth - 8);
*p++ = v;
}
if (!a)
*p++ = 255;
}
}
img->samples->wp = p;
pdf_logimage("start to deal with samples succeeded");
opj_image_destroy(jpx);
// if (a)
// {
// if (n == 4)
// {
// fz_pixmap *tmp = fz_new_pixmap(ctx, fz_device_rgb(ctx), w, h);
// fz_convert_pixmap(ctx, tmp, img);
// fz_drop_pixmap(ctx, img);
// img = tmp;
// }
// fz_premultiply_pixmap(ctx, img);
// }
return fz_okay;
}
The render code:
JNIEXPORT jbyteArray JNICALL Java_org_vudroid_pdfdroid_codec_PdfPage_drawPage
(JNIEnv *env, jclass clazz, jlong dochandle, jlong pagehandle)
{
renderdocument_t *doc = (renderdocument_t*) dochandle;
renderpage_t *page = (renderpage_t*) pagehandle;
//DEBUG("PdfView(%p).drawpage(%p, %p)", this, doc, page);
fz_error error;
fz_matrix ctm;
fz_irect viewbox;
fz_pixmap *pixmap;
jfloat *matrix;
jint *viewboxarr;
jint *dimen;
jint *buffer;
int length, val;
pixmap = nil;
/* initialize parameter arrays for MuPDF */
ctm.a = 1;
ctm.b = 0;
ctm.c = 0;
ctm.d = 1;
ctm.e = 0;
ctm.f = 0;
// matrix = (*env)->GetPrimitiveArrayCritical(env, matrixarray, 0);
// ctm.a = matrix[0];
// ctm.b = matrix[1];
// ctm.c = matrix[2];
// ctm.d = matrix[3];
// ctm.e = matrix[4];
// ctm.f = matrix[5];
// (*env)->ReleasePrimitiveArrayCritical(env, matrixarray, matrix, 0);
// DEBUG("Matrix: %f %f %f %f %f %f",
// ctm.a, ctm.b, ctm.c, ctm.d, ctm.e, ctm.f);
// viewboxarr = (*env)->GetPrimitiveArrayCritical(env, viewboxarray, 0);
// viewbox.x0 = viewboxarr[0];
// viewbox.y0 = viewboxarr[1];
// viewbox.x1 = viewboxarr[2];
// viewbox.y1 = viewboxarr[3];
// (*env)->ReleasePrimitiveArrayCritical(env, viewboxarray, viewboxarr, 0);
// DEBUG("Viewbox: %d %d %d %d",
// viewbox.x0, viewbox.y0, viewbox.x1, viewbox.y1);
viewbox.x0 = 0;
viewbox.y0 = 0;
viewbox.x1 = 595;
viewbox.y1 = 841;
/* do the rendering */
DEBUG("doing the rendering...");
//buffer = (*env)->GetPrimitiveArrayCritical(env, bufferarray, 0);
// do the actual rendering:
error = fz_rendertree(&pixmap, doc->rast, page->page->tree,
ctm, viewbox, 1);
/* evil magic: we transform the rendered image's byte order
*/
int x, y;
if (bmpdata)
fz_free(bmpdata);
bmpstride = ((pixmap->w * 3 + 3) / 4) * 4;
bmpdata = fz_malloc(pixmap->h * bmpstride);
DEBUG("inside drawpage, bmpstride = %d, pixmap->w = %d, pixmap->h = %d\n", bmpstride, pixmap->w, pixmap->h);
if (!bmpdata)
return NULL;
for (y = 0; y < pixmap->h; y++)
{
unsigned char *p = bmpdata + y * bmpstride;
unsigned char *s = pixmap->samples + y * pixmap->w * 4;
for (x = 0; x < pixmap->w; x++)
{
p[x * 3 + 0] = s[x * 4 + 3];
p[x * 3 + 1] = s[x * 4 + 2];
p[x * 3 + 2] = s[x * 4 + 1];
}
}
FILE* fp = fopen("/sdcard/drawpage", "wb");
fwrite(bmpdata, pixmap->h * bmpstride, 1, fp);
fclose(fp);
jbyteArray array = (*env)->NewByteArray(env, pixmap->h * bmpstride);
(*env)->SetByteArrayRegion(env, array, 0, pixmap->h * bmpstride, bmpdata);
// if(!error) {
// DEBUG("Converting image buffer pixel order");
// length = pixmap->w * pixmap->h;
// unsigned int *col = pixmap->samples;
// int c = 0;
// for(val = 0; val < length; val++) {
// col[val] = ((col[val] & 0xFF000000) >> 24) |
// ((col[val] & 0x00FF0000) >> 8) |
// ((col[val] & 0x0000FF00) << 8);
// }
// winconvert(pixmap);
// }
// (*env)->ReleasePrimitiveArrayCritical(env, bufferarray, buffer, 0);
fz_free(pixmap);
if (error) {
DEBUG("error!");
throw_exception(env, "error rendering page");
}
DEBUG("PdfView.drawPage() done");
return array;
}
I have compared the JPX output samples with those from MuPDF 1.5 on Windows and they are the same, but the colorspace of the original JPX is gone.
Could you help me get the colorspace back?
It seems you are trying to use an old version of MuPDF with some bits pulled in from a more recent version. To be honest, that's hardly likely to work. I would also guess that it's not the OpenJPEG library causing your problem, since the image appears, just converted to grayscale.
Have you tried opening the file in the current version of MuPDF? Does it work?
If so, then it seems to me the correct approach is to use the current code, not to try to bolt pieces onto an older version.
This is a cross-post from someone who answered my original question here.
I'm not sure how to go about executing the three functions I'm after (or how to introduce even more than three in the future).
I am simply trying to fade or blink the selected colour of an RGB LED (and perhaps introduce more functions later), where the RGB data comes from iOS and is sent to an RFduino BLE module.
This sends a "fade" string to the module, which is picked up by RFduinoBLE_onReceive(char *data, int len) on the Arduino end:
- (IBAction)fadeButtonPressed:(id)sender {
[rfduino send:[@"fade" dataUsingEncoding:NSUTF8StringEncoding]];
}
- (IBAction)blinkButtonPressed:(id)sender {
[rfduino send:[@"blink" dataUsingEncoding:NSUTF8StringEncoding]];
}
Selected Color:
- (void)setColor
{
NSLog(@"colors: RGB %f %f %f", red, green, blue);
UIColor *color = [UIColor colorWithRed:red green:green blue:blue alpha:1.0];
[colorSwatch setHighlighted:YES];
[colorSwatch setTintColor:color];
uint8_t tx[3] = { red * 255, green * 255, blue * 255 };
NSData *data = [NSData dataWithBytes:(void*)&tx length:3];
[rfduino send:data];
}
This is originally how I set the RGB colour:
void RFduinoBLE_onReceive (char *data, int len) {
if (len >= 3) {
// Get the RGB values.
uint8_t red = data[0];
uint8_t green = data[1];
uint8_t blue = data[2];
// Set PWM for each LED.
analogWrite(rgb2_pin, red);
analogWrite(rgb3_pin, green);
analogWrite(rgb4_pin, blue);
}
}
This was the provided answer, which now compiles on Arduino, but I have no idea how or where to actually execute my functions:
#include <RFduinoBLE.h>
int state;
char command;
String hexstring;
// RGB pins.
int redPin = 2;
int grnPin = 3;
int bluPin = 4;
void setup () {
state = 1;
pinMode(redPin, OUTPUT);
pinMode(grnPin, OUTPUT);
pinMode(bluPin, OUTPUT);
// This is the data we want to appear in the advertisement
// (the deviceName length plus the advertisement length must be <= 18 bytes).
RFduinoBLE.deviceName = "iOS";
RFduinoBLE.advertisementInterval = MILLISECONDS(300);
RFduinoBLE.txPowerLevel = -20;
RFduinoBLE.advertisementData = "rgb";
// Start the BLE stack.
RFduinoBLE.begin();
}
void loop () {
//RFduino_ULPDelay(INFINITE);
}
void processCommand (int command, String hex) {
// hex ?
// command ?
}
void RFduinoBLE_onReceive (char *data, int len) {
for (int i = 0; i < len; i++) {
stateMachine(data[i]);
}
}
void stateMachine (char data) {
switch (state) {
case 1:
if (data == 1) {
state = 2;
}
break;
case 2:
if (data == 'b' || data == 'f' || data == 'c') {
command = data;
hexstring = "";
state = 3;
} else if (data != 1) { // Stay in state 2 if we received another 0x01.
state = 1;
}
break;
case 3:
if ((data >= 'a' && data <= 'z') || (data >= '0' && data <= '9')) {
hexstring = hexstring + data;
if (hexstring.length() == 6) {
state = 4;
}
} else if (data == 1) {
state = 2;
} else {
state = 1;
}
break;
case 4:
if (data == 3) {
processCommand(command, hexstring);
state = 1;
} else if (data == 1) {
state = 2;
} else {
state = 1;
}
break;
}
}
EDIT: Final code
#include <RFduinoBLE.h>
// State properties.
int state = 1;
char command;
String hexstring;
// RGB pins.
int redPin = 2;
int grnPin = 3;
int bluPin = 4;
// Setup function to set RGB pins to OUTPUT pins.
void setup () {
pinMode(redPin, OUTPUT);
pinMode(grnPin, OUTPUT);
pinMode(bluPin, OUTPUT);
// This is the data we want to appear in the advertisement
// (the deviceName length plus the advertisement length must be <= 18 bytes).
RFduinoBLE.deviceName = "iOS";
RFduinoBLE.advertisementInterval = MILLISECONDS(300);
RFduinoBLE.txPowerLevel = -20;
RFduinoBLE.advertisementData = "rgb";
// Start the BLE stack.
RFduinoBLE.begin();
}
void loop () {
switch (command) {
case 1:
// Blink.
break;
case 2:
// Fade.
break;
}
//RFduino_ULPDelay(INFINITE);
}
// Converts HEX as a String to actual HEX values.
// This is needed to properly convert the ASCII value to the hex
// value of each character.
byte getVal (char c) {
if (c >= '0' && c <= '9') return (byte)(c - '0');
else return (byte)(c - 'a' + 10);
}
// Process each function/command.
void processCommand (char cmd, String hex) {
switch (cmd) {
case 'b':
command = 1; // Set blink mode (read by loop()).
break;
case 'f':
command = 2; // Set fade mode (read by loop()).
break;
case 'c':
// We put together 2 characters as is
// done with HEX notation and set the color.
byte red = getVal(hex.charAt(1)) + (getVal(hex.charAt(0)) << 4);
byte green = getVal(hex.charAt(3)) + (getVal(hex.charAt(2)) << 4);
byte blue = getVal(hex.charAt(5)) + (getVal(hex.charAt(4)) << 4);
// Set the color.
setColor (red, green, blue);
break;
}
}
// Sets the color of each RGB pin.
void setColor (byte red, byte green, byte blue) {
analogWrite(redPin, red);
analogWrite(grnPin, green);
analogWrite(bluPin, blue);
}
// This function returns data from the radio.
void RFduinoBLE_onReceive (char *data, int len) {
for (int i = 0; i < len; i++) {
stateMachine(data[i]);
}
}
// Main state machine function, which processes
// data depending on the bytes received.
void stateMachine (char data) {
switch (state) {
case 1:
if (data == 1) {
state = 2;
}
break;
case 2:
if (data == 'b' || data == 'f' || data == 'c') {
command = data;
hexstring = "";
state = 3;
} else if (data != 1) { // Stay in state 2 if we received another 0x01.
state = 1;
}
break;
case 3:
if ((data >= 'a' && data <= 'z') || (data >= '0' && data <= '9')) {
hexstring = hexstring + data;
if (hexstring.length() == 6) {
state = 4;
}
} else if (data == 1) {
state = 2;
} else {
state = 1;
}
break;
case 4:
if (data == 3) {
processCommand(command, hexstring);
state = 1;
} else if (data == 1) {
state = 2;
} else {
state = 1;
}
break;
}
}
There is some code here that you can use to convert hex characters to a byte.
So, you can add this to your existing code -
byte getVal(char c)
{
if (c >= '0' && c <= '9')
return (byte)(c - '0');
else
return (byte)(c-'a'+10)
}
void processCommand (char cmd, String hex)
{
switch (cmd) {
case 'b':
command = 1; // set blink mode
break;
case 'f':
command = 2; // set fade mode
break;
case 'c':
byte red=getVal(hex.charAt(1)) + (getVal(hex.charAt(0)) << 4);
byte green=getVal(hex.charAt(3)) + (getVal(hex.charAt(2)) << 4);
byte blue=getVal(hex.charAt(5)) + (getVal(hex.charAt(4)) << 4);
setColor(red,green,blue);
}
}
void setColor(byte red,byte green,byte blue)
{
// Set PWM for each LED.
analogWrite(rgb2_pin, red);
analogWrite(rgb3_pin, green);
analogWrite(rgb4_pin, blue);
}
On the iOS side you can use something like this -
-(void) sendCommand:(char)command arg1:(Byte)arg1 arg2:(Byte)arg2 arg3:(Byte) arg3 {
NSString *commandString = [NSString stringWithFormat:@"\001%c%02x%02x%02x\003", command, arg1, arg2, arg3];
NSData *commandData = [commandString dataUsingEncoding:NSASCIIStringEncoding];
[rfduino send:commandData];
}
- (IBAction)fadeButtonPressed:(id)sender {
[self sendCommand:'f' arg1:0 arg2:0 arg3:0];
}
- (IBAction)blinkButtonPressed:(id)sender {
[self sendCommand:'b' arg1:0 arg2:0 arg3:0];
}
- (void)setColor
{
NSLog(@"colors: RGB %f %f %f", red, green, blue);
UIColor *color = [UIColor colorWithRed:red green:green blue:blue alpha:1.0];
[colorSwatch setHighlighted:YES];
[colorSwatch setTintColor:color];
[self sendCommand:'c' arg1:red * 255 arg2:green * 255 arg3:blue * 255];
}
I am recording sound from the mic input using Audio Queue Services.
-(void)startRecording{
[self setupAudioFormat:&recordState.dataFormat];
recordState.currentPacket = 0;
OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat,
AudioInputCallback,
&recordState,
CFRunLoopGetCurrent(),
kCFRunLoopCommonModes,
0,
&recordState.queue);
if (status == 0)
{
// Prime recording buffers with empty data
for (int i = 0; i < NUM_BUFFERS; i++)
{
NSLog(@"buf in");
AudioQueueAllocateBuffer(recordState.queue, 16000, &recordState.buffers[i]);
AudioQueueEnqueueBuffer (recordState.queue, recordState.buffers[i], 0, NULL);
}
status = AudioFileCreateWithURL(fileURL,
kAudioFileAIFFType,
&recordState.dataFormat,
kAudioFileFlags_EraseFile,
&recordState.audioFile);
if (status == 0)
{
recordState.recording = true;
status = AudioQueueStart(recordState.queue, NULL);
if (status == 0)
{
NSLog(@"Recording");
}
}
}
if (status != 0)
{
//[self stopRecording];
NSLog(@"recording failed");
}
}
on callback:
void AudioInputCallback(void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumberPacketDescriptions,
const AudioStreamPacketDescription * inPacketDescs)
{
RecordState * recordState = (RecordState*)inUserData;
if (!recordState->recording)
{
printf("Not recording, returning\n");
}
// if (inNumberPacketDescriptions == 0 && recordState->dataFormat.mBytesPerPacket != 0)
// {
// inNumberPacketDescriptions = inBuffer->mAudioDataByteSize / recordState->dataFormat.mBytesPerPacket;
// }
/*
int sampleCount = recordState->buffers[0]->mAudioDataBytesCapacity / sizeof (AUDIO_DATA_TYPE_FORMAT);
NSLog(@"sample count = %i", sampleCount);
AUDIO_DATA_TYPE_FORMAT *p = (AUDIO_DATA_TYPE_FORMAT*)recordState->buffers[0]->mAudioData;
for (int i = 0; i < sampleCount; i++) {
if (p[i] > 1000) {
NSLog(@"%hd", p[i]);
}
}*/
printf("Writing buffer %lld\n", recordState->currentPacket);
OSStatus status = AudioFileWritePackets(recordState->audioFile,
false,
inBuffer->mAudioDataByteSize,
inPacketDescs,
recordState->currentPacket,
&inNumberPacketDescriptions,
inBuffer->mAudioData);
if (status == 0)
{
recordState->buffers[0] = nil;
recordState->currentPacket += inNumberPacketDescriptions;
}
AudioQueueEnqueueBuffer(recordState->queue, inBuffer, 0, NULL);
}
Here I want to read the recorded buffer. Is it possible to get something like this:
short[] buffer = ?; // here should be the audio buffer converted to some structure (short[] is just an example)
Then I would like to read every element of this structure:
for (int i = 0; i < sizeOfBuffer; i++) {
bufferVal = buffer[i];
}
In short, how do I handle the buffer while recording?
Thanks.
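For what it's worth, assuming the 16-bit linear PCM format that setupAudioFormat: presumably configures, the buffer handed to the callback can be read directly as an array of SInt16 values; no separate conversion structure is needed. A minimal sketch of what could go inside AudioInputCallback, before the buffer is re-enqueued:
// Interpret the queue buffer's raw bytes as 16-bit signed samples (assumes mono, 16-bit linear PCM).
SInt16 *samples = (SInt16 *)inBuffer->mAudioData;
UInt32 sampleCount = inBuffer->mAudioDataByteSize / sizeof(SInt16);
for (UInt32 i = 0; i < sampleCount; i++) {
    SInt16 bufferVal = samples[i];
    // inspect or process bufferVal here, e.g. track the peak level
}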