I'm trying to make an area calculation category for MKPolygon.
I found some JS code https://github.com/mapbox/geojson-area/blob/master/index.js#L1 with a link to the algorithm: http://trs-new.jpl.nasa.gov/dspace/handle/2014/40409.
It says:
Here is my code, which gave a wrong result (thousands of times larger than the actual value):
#define kEarthRadius 6378137

@implementation MKPolygon (AreaCalculation)

/// Approximate geodesic area of the polygon, in square meters.
/// Based on the ring-area algorithm from Chamberlain & Duquette (JPL),
/// as used by mapbox/geojson-area.
- (double)area {
    double area = 0;
    NSArray *coords = [self coordinates];
    if (coords.count > 2) {
        // Sum over every edge, INCLUDING the closing edge from the last
        // vertex back to the first — the original loop stopped at count-1
        // and omitted this wrap-around term, producing wrong results.
        for (NSUInteger i = 0; i < coords.count; i++) {
            CLLocationCoordinate2D p1 = [coords[i] MKCoordinateValue];
            CLLocationCoordinate2D p2 = [coords[(i + 1) % coords.count] MKCoordinateValue];
            // sin() (double) instead of sinf() (float): float precision is
            // insufficient at Earth-radius scale.
            area += degreesToRadians(p2.longitude - p1.longitude) *
                    (2 + sin(degreesToRadians(p1.latitude)) + sin(degreesToRadians(p2.latitude)));
        }
        // fabs() makes the result independent of vertex winding order.
        area = fabs(area * kEarthRadius * kEarthRadius / 2);
    }
    return area;
}

/// The polygon's vertices as NSValue-wrapped CLLocationCoordinate2D values.
- (NSArray *)coordinates {
    NSMutableArray *points = [NSMutableArray arrayWithCapacity:self.pointCount];
    for (NSUInteger i = 0; i < self.pointCount; i++) {
        [points addObject:[NSValue valueWithMKCoordinate:MKCoordinateForMapPoint(self.points[i])]];
    }
    return points.copy;
}

/// Degrees-to-radians conversion (parameter renamed from the misleading
/// "radius" to "degrees").
double degreesToRadians(double degrees) {
    return degrees * M_PI / 180;
}

@end
What did I miss?
The whole algorithm implemented in Swift 3.0 :
import MapKit
let kEarthRadius = 6378137.0

/// CLLocationCoordinate2D uses degrees, but the area formula needs radians.
func radians(degrees: Double) -> Double {
    return degrees * M_PI / 180
}

/// Approximate area (square meters) of the polygon described by `locations`,
/// using the spherical ring-area formula. Returns 0 for fewer than 3 vertices.
func regionArea(locations: [CLLocationCoordinate2D]) -> Double {
    guard locations.count > 2 else { return 0 }
    var accumulated = 0.0
    // Each vertex is paired with its predecessor; index 0 wraps around to the
    // last vertex, so the closing edge is included.
    for (index, current) in locations.enumerated() {
        let previous = index == 0 ? locations[locations.count - 1] : locations[index - 1]
        accumulated += radians(degrees: current.longitude - previous.longitude)
            * (2 + sin(radians(degrees: previous.latitude)) + sin(radians(degrees: current.latitude)))
    }
    let signedArea = -(accumulated * kEarthRadius * kEarthRadius / 2)
    // Absolute value: callers need not care whether the polygon is defined
    // clockwise or counterclockwise.
    return max(signedArea, -signedArea)
}
The final step for i = N-1 and i+1 = 0 (wrap around) is missing in your loop.
This may help someone...
You need to pass the shape edge points into below method and it returns the correct area of a polygon
/// Rasterises the closed polygon described by `shapeEdgePoints` (NSValue-wrapped
/// CGPoints) into an offscreen grayscale bitmap and returns the filled area,
/// measured in (fractional) pixels.
static double areaOfCurveWithPoints(const NSArray *shapeEdgePoints) {
    // Build a closed path through all the edge points.
    CGMutablePathRef path = CGPathCreateMutable();
    CGPoint firstPoint = [shapeEdgePoints.firstObject CGPointValue];
    CGPathMoveToPoint(path, &CGAffineTransformIdentity, firstPoint.x, firstPoint.y);
    for (NSUInteger index = 1; index < shapeEdgePoints.count; index++) {
        CGPoint vertex = [[shapeEdgePoints objectAtIndex:index] CGPointValue];
        CGPathAddLineToPoint(path, &CGAffineTransformIdentity, vertex.x, vertex.y);
    }
    CGPathCloseSubpath(path);

    // Fill the path into a grayscale context, then measure pixel coverage.
    CGRect bounds = integralFrameForPath(path);
    size_t rowBytes = bytesPerRowForWidth(bounds.size.width);
    CGContextRef context = createBitmapContextWithFrame(bounds, rowBytes);
    CGContextSetFillColorWithColor(context, [UIColor whiteColor].CGColor);
    CGContextAddPath(context, path);
    CGContextFillPath(context);
    double filledArea = areaFilledInBitmapContext(context);

    CGPathRelease(path);
    CGContextRelease(context);
    return filledArea;
}
/// Bounding box of `path`, expanded outward to integral coordinates so it can
/// size a bitmap exactly.
static CGRect integralFrameForPath(CGPathRef path) {
    return CGRectIntegral(CGPathGetBoundingBox(path));
}
/// Row stride (in bytes) for a bitmap of the given pixel width, rounded up to
/// a multiple of kFactor. kFactor must remain a power of 2.
static size_t bytesPerRowForWidth(CGFloat width) {
    static const size_t kFactor = 64;
    size_t widthInBytes = (size_t)width;
    // Round up to the next multiple of kFactor.
    return (widthInBytes + kFactor - 1) / kFactor * kFactor;
}
/// Creates an 8-bit grayscale bitmap context covering `frame`. The context's
/// CTM is shifted so that drawing in `frame` coordinates lands at the bitmap
/// origin. Caller releases the returned context.
static CGContextRef createBitmapContextWithFrame(CGRect frame, size_t bytesPerRow) {
    CGColorSpaceRef grayscale = CGColorSpaceCreateDeviceGray();
    CGContextRef gc = CGBitmapContextCreate(NULL, frame.size.width, frame.size.height, 8, bytesPerRow, grayscale, kCGImageAlphaNone);
    CGColorSpaceRelease(grayscale);
    // Bug fix: the vertical translation must use origin.y — the original
    // passed -frame.origin.x for both axes.
    CGContextTranslateCTM(gc, -frame.origin.x, -frame.origin.y);
    return gc;
}
/// Sums the gray value of every pixel in the context's backing store and
/// normalises by UINT8_MAX, yielding the filled area in fractional pixels
/// (anti-aliased edge pixels contribute partially).
static double areaFilledInBitmapContext(CGContextRef gc) {
    size_t width = CGBitmapContextGetWidth(gc);
    size_t height = CGBitmapContextGetHeight(gc);
    size_t stride = CGBitmapContextGetBytesPerRow(gc);
    // Direct pointer into the context's pixel data.
    unsigned char *bitmapData = (unsigned char *)CGBitmapContextGetData(gc);

    uint64_t coverage = 0;
    for (size_t row = 0; row < height; ++row) {
        const unsigned char *rowStart = bitmapData + row * stride;
        for (size_t col = 0; col < width; ++col) {
            coverage += rowStart[col];
        }
    }
    // NSLog(@"coverage =%llu UINT8_MAX =%d", coverage, UINT8_MAX);
    return (double)coverage / UINT8_MAX;
}
Related
So I've got a method that successfully gets the color of a pixel.
//arrColT is an NSMutableArray<NSColor *>
// Walks every pixel of bitIn and builds an NSColor for it.
// NOTE(review): the channel reads below assume a meshed RGBA layout with
// 8 bits per channel and exactly 4 channels — confirm against bitIn's
// actual bitmapFormat/samplesPerPixel.
NSInteger width = [bitIn pixelsWide];
NSInteger height = [bitIn pixelsHigh];
NSInteger rowBytes = [bitIn bytesPerRow];
unsigned char* pixels = [bitIn bitmapData];
int row, col;
//For every row,
for (row = 0; row < height; row++)
{
    // Rows may be padded, so advance by bytesPerRow rather than width*4.
    unsigned char* rowStart = (unsigned char*)(pixels + (row * rowBytes));
    unsigned char* nextChannel = rowStart;
    //For every pixel in a row,
    for (col = 0; col < width; col++)
    {
        //Get its color.
        unsigned char red, green, blue, alpha;
        red = *nextChannel;
        int intRed = (int)red;
        nextChannel++;
        green = *nextChannel;
        int intGreen = (int)green;
        nextChannel++;
        blue = *nextChannel;
        int intBlue = (int)blue;
        nextChannel++;
        alpha = *nextChannel;
        int intAlpha = (int)alpha;
        nextChannel++;
        NSColor *colHas = [NSColor colorWithCalibratedRed:(float)intRed/255 green: (float)intGreen/255 blue: (float)intBlue/255 alpha:(float)intAlpha/255];
        for (int i = 0; i<[arrColT count]; i++) {
            //If the target color is equal to the current color, replace it with the parallel replace color...somehow.
            if([colHas isEqualTo:arrColT[i]]){
                // NOTE(review): intentionally empty in the question — the
                // write-back of the replacement color is what is being asked.
            }
        }
    }
}
The question is, how do I get color data back into the bitmapData?
With hope,
radzo73
See Technical Q&A QA1509, which basically advises
create pixel buffer (by creating CGContextRef of predefined format and drawing your image to that);
manipulate the bytes within that pixel buffer as you want; and
create resulting image with CGBitmapContextCreateImage.
E.g.
/// Re-renders `image` into an RGBA8 bitmap whose bytes can be edited in
/// place, then builds a new UIImage from the (possibly modified) buffer,
/// preserving the original scale and orientation.
- (UIImage *)convertImage:(UIImage *)image
{
    CGImageRef sourceImage = image.CGImage;

    // RGBA, 8 bits per channel, 32-bit big-endian => bytes laid out R,G,B,A.
    CGBitmapInfo bitmapInfo = kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big;
    NSInteger imageWidth = CGImageGetWidth(sourceImage);
    NSInteger imageHeight = CGImageGetHeight(sourceImage);
    CGColorSpaceRef rgbSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef bitmapContext = CGBitmapContextCreate(NULL, imageWidth, imageHeight, 8, 4 * imageWidth, rgbSpace, bitmapInfo);

    // Render the source image into the editable pixel buffer.
    CGRect targetRect = CGRectMake(0, 0, imageWidth, imageHeight);
    CGContextDrawImage(bitmapContext, targetRect, sourceImage);
    uint8_t *buffer = CGBitmapContextGetData(bitmapContext);

    // ... manipulate pixel buffer bytes here ...

    // Snapshot the buffer back into an immutable image.
    CGImageRef renderedImage = CGBitmapContextCreateImage(bitmapContext);
    UIImage *result = [UIImage imageWithCGImage:renderedImage scale:image.scale orientation:image.imageOrientation];

    // Clean up CoreGraphics objects (Create/Retain must be balanced).
    CGImageRelease(renderedImage);
    CGContextRelease(bitmapContext);
    CGColorSpaceRelease(rgbSpace);
    return result;
}
For example, here is a B&W conversion routine:
/** Convert the image to B&W as a single (non-parallel) task.
 *
 * This assumes the pixel buffer is in RGBA, 8 bits per pixel format,
 * with no row padding (stride == width * 4).
 *
 * @param buffer The pixel buffer.
 * @param width The image width in pixels.
 * @param height The image height in pixels.
 */
- (void)convertToBWSimpleInBuffer:(uint8_t *)buffer width:(NSInteger)width height:(NSInteger)height
{
    for (NSInteger row = 0; row < height; row++) {
        for (NSInteger col = 0; col < width; col++) {
            // 4 bytes per pixel, rows packed tightly.
            NSUInteger offset = (col + row * width) * 4;
            uint8_t *pixel = buffer + offset;
            // get the channels
            uint8_t red = pixel[0];
            uint8_t green = pixel[1];
            uint8_t blue = pixel[2];
            uint8_t alpha = pixel[3];
            // update the channels with result (Rec. 709 luma weights)
            uint8_t gray = 0.2126 * red + 0.7152 * green + 0.0722 * blue;
            pixel[0] = gray;
            pixel[1] = gray;
            pixel[2] = gray;
            pixel[3] = alpha;
        }
    }
}
If you want to improve performance, you can use dispatch_apply and stride through your image buffer in parallel:
/** Convert the image to B&W, using GCD to split the conversion into several concurrent GCD tasks.
 *
 * This assumes the pixel buffer is in RGBA, 8 bits per pixel format,
 * with no row padding (stride == width * 4).
 *
 * @param buffer The pixel buffer.
 * @param width The image width in pixels.
 * @param height The image height in pixels.
 * @param count How many GCD tasks should the conversion be split into.
 */
- (void)convertToBWConcurrentInBuffer:(uint8_t *)buffer width:(NSInteger)width height:(NSInteger)height count:(NSInteger)count
{
    if (width == 0 || height == 0) return;
    dispatch_queue_t queue = dispatch_queue_create("com.domain.app", DISPATCH_QUEUE_CONCURRENT);
    // Bug fixes vs. the original:
    //  - `height / count` was 0 when count > height, making dispatch_apply's
    //    iteration count a division by zero;
    //  - truncating division dropped the trailing height % count rows.
    if (count < 1) count = 1;
    if (count > height) count = height;
    NSInteger stride = (height + count - 1) / count;       // ceil(height / count)
    NSInteger iterations = (height + stride - 1) / stride; // covers every row
    dispatch_apply(iterations, queue, ^(size_t idx) {
        size_t j = idx * stride;
        size_t j_stop = MIN(j + stride, height);
        for (NSInteger row = j; row < j_stop; row++) {
            for (NSInteger col = 0; col < width; col++) {
                NSUInteger offset = (col + row * width) * 4;
                uint8_t *pixel = buffer + offset;
                // get the channels
                uint8_t red = pixel[0];
                uint8_t green = pixel[1];
                uint8_t blue = pixel[2];
                uint8_t alpha = pixel[3];
                // update the channels with result (Rec. 709 luma weights)
                uint8_t gray = 0.2126 * red + 0.7152 * green + 0.0722 * blue;
                pixel[0] = gray;
                pixel[1] = gray;
                pixel[2] = gray;
                pixel[3] = alpha;
            }
        }
    });
}
Well it depends on what bitIn is, but something like this should do the trick.
First, create a context. You can do this once and keep the context if you are going to make a lot of changes to the data and need a lot of pictures.
// Wrap bitIn's existing pixel storage in a CGBitmapContext so the bytes can
// be edited in place.
// NOTE(review): CGBitmapContextCreate's 4th argument is bitsPerCOMPONENT
// (typically 8); [bitIn bitsPerPixel] may return 32 — verify this call
// actually succeeds for the bitmap rep in use.
self.ctx = CGBitmapContextCreate (
    [bitIn bitmapData],
    [bitIn pixelsWide],
    [bitIn pixelsHigh],
    [bitIn bitsPerPixel],
    [bitIn bytesPerRow],
    [bitIn colorSpace],
    [bitIn bitmapInfo] );
Here I freely used bitIn hoping it will provide all the missing pieces. Then you can assemble it all later, even after further changes to the data, using something like
// Snapshot the context's current pixels into an immutable CGImage, wrap it
// in a UIImage, and balance the Create with a Release.
CGImageRef cgImg = CGBitmapContextCreateImage ( self.ctx );
UIImage * img = [UIImage imageWithCGImage:cgImg];
CGImageRelease ( cgImg );
Now you can make more changes to the data and do the same to get a new image.
I am working on an ESC/POS printer. Using the code below I am able to print the image, but the image does not print properly, as you can see in the picture below. Please review my code and let me know where exactly the issue is.
/// Loads an image, converts it to 1-bit raster data, wraps each line in an
/// ESC/POS "GS v 0" command, and sends the stream to the BLE printer in
/// 100-byte chunks.
- (void)btnPrintPicture {
    // Bug fix: the string literal was garbled as #"download.png".
    UIImage *img = [UIImage imageNamed:@"download.png"];
    int width = img.size.width;
    int height = img.size.height;
    // NOTE(review): the raster math assumes width is divisible by 8;
    // otherwise eachLinePixToCmd drops width % 8 pixels per row and the
    // printout slants. Fixing that requires changing the allocation below
    // and eachLinePixToCmd's stride together.
    unsigned char *binaryImageData = malloc(width * height);
    unsigned char *data = malloc(height * (8 + width / 8));
    unsigned char *grayData = [self convertImageToGray:img];
    format_K_threshold(grayData, width, height, binaryImageData);
    eachLinePixToCmd(binaryImageData, width, height, 0, data);

    // Split the command stream into 100-byte chunks for the characteristic.
    NSMutableArray *dataArray = [NSMutableArray new];
    int splitBytes = 100;
    NSData *comData = [[NSData alloc] initWithBytes:(const void *)data length:(height * (8 + width / 8))];
    for (int i = 0; i < comData.length; i = i + splitBytes) {
        NSData *subData = nil;
        if ((i + splitBytes) > comData.length) {
            subData = [comData subdataWithRange:NSMakeRange(i, (comData.length - i))];
        } else {
            subData = [comData subdataWithRange:NSMakeRange(i, splitBytes)];
        }
        [dataArray addObject:subData];
    }
    [dataArray enumerateObjectsUsingBlock:^(NSData *obj, NSUInteger idx, BOOL *_Nonnull stop) {
        [self.discoveredPeripheral writeValue:obj forCharacteristic:self.discoveredCharacteristic type:CBCharacteristicWriteWithResponse];
    }];
    free(grayData);
    free(binaryImageData);
    free(data);
}
This method is used for converting image to grayscale.
/// Renders `i` into a 32-bit RGB bitmap, then averages the enabled channels
/// of each pixel into a newly malloc'd 8-bit grayscale buffer.
/// The caller owns the returned buffer and must free() it.
-(unsigned char *)convertImageToGray:(UIImage *)i
{
    // Channel-selection flags; all three are enabled below, so the result is
    // the plain (R+G+B)/3 average.
    int kRed = 1;
    int kGreen = 2;
    int kBlue = 4;
    int colors = kGreen | kBlue | kRed;
    int m_width = i.size.width;
    int m_height = i.size.height;
    uint32_t *rgbImage = (uint32_t *) malloc(m_width * m_height * sizeof(uint32_t));
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(rgbImage, m_width, m_height, 8, m_width * 4, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast);
    CGContextSetInterpolationQuality(context, kCGInterpolationHigh);
    CGContextSetShouldAntialias(context, NO);
    CGContextDrawImage(context, CGRectMake(0, 0, m_width, m_height), [i CGImage]);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    // now convert to grayscale
    uint8_t *m_imageData = (uint8_t *) malloc(m_width * m_height);
    for(int y = 0; y < m_height; y++) {
        for(int x = 0; x < m_width; x++) {
            uint32_t rgbPixel=rgbImage[y*m_width+x];
            uint32_t sum=0,count=0;
            // NOTE(review): these shift positions assume a particular channel
            // layout for kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast;
            // verify against the actual pixel format on the target device.
            if (colors & kRed) {sum += (rgbPixel>>24)&255; count++;}
            if (colors & kGreen) {sum += (rgbPixel>>16)&255; count++;}
            if (colors & kBlue) {sum += (rgbPixel>>8)&255; count++;}
            m_imageData[y*m_width+x]=sum/count;
        }
    }
    free(rgbImage);
    return m_imageData;}
/* Global-threshold binarisation: computes the mean gray level of the whole
 * image, then maps each pixel to 1 (print/black) if it is at or below the
 * mean, else 0 (white).
 *
 * orgpixels: xsize*ysize 8-bit gray pixels, row major.
 * despixels: output buffer of the same size; receives 0/1 per pixel.
 */
void format_K_threshold(unsigned char * orgpixels, int xsize, int ysize, unsigned char * despixels) {
    if (xsize <= 0 || ysize <= 0) {
        return; /* nothing to do; also avoids dividing by zero below */
    }
    /* long long: an int accumulator overflows for images over ~8.4M pixels
     * (255 * pixels can exceed INT_MAX). */
    long long graytotal = 0;
    int k = 0;
    for (int i = 0; i < ysize; ++i) {
        for (int j = 0; j < xsize; ++j) {
            graytotal += orgpixels[k] & 255;
            ++k;
        }
    }
    int grayave = (int)(graytotal / ysize / xsize);
    k = 0;
    for (int i = 0; i < ysize; ++i) {
        for (int j = 0; j < xsize; ++j) {
            int gray = orgpixels[k] & 255;
            /* Darker-than-average pixels become printed dots (1). */
            despixels[k] = (gray > grayave) ? 0 : 1;
            ++k;
        }
    }
}
This method is using ESC commands to print the image.
/* Encodes a 0/1 bitmap into one ESC/POS "GS v 0" raster command per line.
 *
 * src:     nWidth*nHeight bytes, each 0 or 1 (1 = printed dot).
 * nMode:   raster mode; only the low bit is used.
 * data:    output buffer of nHeight * (8 + nWidth/8) bytes.
 *
 * Note: like the original, this emits nWidth/8 bytes per line, so any
 * trailing nWidth % 8 pixels of a row are carried over into the next row
 * (the caller currently guarantees nWidth is a multiple of 8).
 */
void eachLinePixToCmd(unsigned char * src, int nWidth, int nHeight, int nMode, unsigned char * data) {
    const int bytesPerLine = nWidth / 8;
    int srcIndex = 0;
    for (int line = 0; line < nHeight; line++) {
        unsigned char *cmd = data + line * (8 + bytesPerLine);
        /* 8-byte header: GS 'v' '0' mode widthL widthH heightL heightH */
        cmd[0] = 0x1d;
        cmd[1] = 0x76;
        cmd[2] = 0x30;
        cmd[3] = (unsigned char) (nMode & 0x01);
        cmd[4] = (unsigned char) (bytesPerLine % 0xff);
        cmd[5] = (unsigned char) (bytesPerLine / 0xff);
        cmd[6] = 0x01;
        cmd[7] = 0x00;
        /* Pack 8 pixels into each payload byte, MSB = leftmost pixel. */
        for (int byteIndex = 0; byteIndex < bytesPerLine; byteIndex++) {
            unsigned char packed = 0;
            for (int bit = 0; bit < 8; bit++) {
                packed = (unsigned char) ((packed << 1) | (src[srcIndex++] & 1));
            }
            cmd[8 + byteIndex] = packed;
        }
    }
}
Thanks in advance.
Based on having fixed a very similar bug in a different project with this change, I am guessing that your image width is not divisible by 8.
This line will drop nWidth % 8 pixels on each row, causing a rightward slant if the image width is not divisible by 8.
int nBytesPerLine = nWidth / 8;
Instead, you should be padding with zeroes:
int nBytesPerLine = (nWidth + 7) / 8;
Your data variable needs to grow to match as well, it has the same issue.
Lastly, you are issuing the GS v 0 command for each row, which is not very efficient. You can issue this once for the entire image, and specify the height. From the same project, a C example is here.
I'm trying to get the least used color and the most used color from an MP3 file's album artwork for a music-playing application. I need the colors to create an effect like the new iTunes 11, where the background color of the menu is the most used color, and the least used color is used for the song labels and artist name.
I am using
/// Returns the colour of the pixel at `point` in self.image, or nil on error.
/// (Bug fixes: removed a stray markdown backtick before the method and
/// repaired the garbled NSLog(#"...) literal.)
- (UIColor *)getPixelColorAtLocation:(CGPoint)point {
    UIColor *color = nil;
    CGImageRef inImage = self.image.CGImage;
    // Create off screen bitmap context to draw the image into. Format ARGB
    // is 4 bytes for each pixel: Alpha, Red, Green, Blue.
    CGContextRef cgctx = [self createARGBBitmapContextFromImage:inImage];
    if (cgctx == NULL) { return nil; /* error */ }
    size_t w = CGImageGetWidth(inImage);
    size_t h = CGImageGetHeight(inImage);
    CGRect rect = {{0,0},{w,h}};
    // Draw the image to the bitmap context. Once we draw, the memory
    // allocated for the context for rendering will then contain the
    // raw image data in the specified color space.
    CGContextDrawImage(cgctx, rect, inImage);
    // Now we can get a pointer to the image data associated with the context.
    unsigned char *data = CGBitmapContextGetData(cgctx);
    if (data != NULL) {
        // offset locates the pixel in the data from x,y:
        // 4 bytes of data per pixel, w pixels per row.
        int offset = 4 * ((w * round(point.y)) + round(point.x));
        int alpha = data[offset];
        int red = data[offset + 1];
        int green = data[offset + 2];
        int blue = data[offset + 3];
        NSLog(@"offset: %i colors: RGB A %i %i %i %i", offset, red, green, blue, alpha);
        color = [UIColor colorWithRed:(red / 255.0f) green:(green / 255.0f) blue:(blue / 255.0f) alpha:(alpha / 255.0f)];
    }
    // When finished, release the context.
    CGContextRelease(cgctx);
    // Free image data memory for the context (buffer was malloc'd by
    // createARGBBitmapContextFromImage).
    if (data) { free(data); }
    return color;
}
/// Creates an ARGB (8 bits per component, premultiplied-first) bitmap context
/// sized to `inImage`, or NULL on failure. The backing buffer is malloc'd
/// here; the caller must free it (via CGBitmapContextGetData()) after
/// releasing the context.
/// (Bug fix: removed a stray markdown backtick after the closing brace.)
- (CGContextRef)createARGBBitmapContextFromImage:(CGImageRef)inImage {
    CGContextRef context = NULL;
    CGColorSpaceRef colorSpace;
    void *bitmapData;
    int bitmapByteCount;
    int bitmapBytesPerRow;

    // Get image width, height. We'll use the entire image.
    size_t pixelsWide = CGImageGetWidth(inImage);
    size_t pixelsHigh = CGImageGetHeight(inImage);

    // Each pixel is 4 bytes: 8 bits each of alpha, red, green, blue.
    bitmapBytesPerRow = (pixelsWide * 4);
    bitmapByteCount = (bitmapBytesPerRow * pixelsHigh);

    // Use the generic RGB color space.
    colorSpace = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB);
    if (colorSpace == NULL)
    {
        fprintf(stderr, "Error allocating color space\n");
        return NULL;
    }

    // Allocate memory for image data. This is the destination in memory
    // where any drawing to the bitmap context will be rendered.
    bitmapData = malloc( bitmapByteCount );
    if (bitmapData == NULL)
    {
        fprintf (stderr, "Memory not allocated!");
        CGColorSpaceRelease( colorSpace );
        return NULL;
    }

    // Create the bitmap context. Regardless of the source image format
    // (CMYK, grayscale, etc.), drawing converts to premultiplied ARGB,
    // 8 bits per component.
    context = CGBitmapContextCreate (bitmapData,
                                     pixelsWide,
                                     pixelsHigh,
                                     8,      // bits per component
                                     bitmapBytesPerRow,
                                     colorSpace,
                                     kCGImageAlphaPremultipliedFirst);
    if (context == NULL)
    {
        free (bitmapData);
        fprintf (stderr, "Context not created!");
    }

    // Make sure and release colorspace before returning.
    CGColorSpaceRelease( colorSpace );
    return context;
}
to get the color at the bottom of the image to make it blend in my view controller which uses the color for its background, and has a shadow to make it blended.
Question: So, as it says: How do I get the least and most used color from an image?
The method below takes an image and analyses it for its main colours, in the following steps:
1.) scale down the image and determine the main pixel colours.
2.) add some colour flexibility to allow for the loss during scaling
3.) distinguish colours, removing similar ones
4.) return the colours as an ordered array or with their percentages
You could adapt it to return a specific number of colours, e.g. top 10 colours in image if you needed a guaranteed number of colours returned, or just use the "detail" variable if you don't.
Larger images will take a long time to analyse at high detail.
No doubt the method could be cleaned up a bit but could be a good starting point.
Use like this:
NSDictionary * mainColours = [s mainColoursInImage:image detail:1];
/// Analyses `image` for its main colours: scales it down to a small grid,
/// samples every pixel, widens each sampled colour by ±flexibility per
/// channel, removes colours within `range` of an already-kept colour, and
/// returns a dictionary mapping UIColor -> percentage.
/// (Bug fix: all string literals were garbled as #"..." and have been
/// restored to @"..." so the method compiles.)
-(NSDictionary*)mainColoursInImage:(UIImage *)image detail:(int)detail {
    //1. determine detail vars (0==low,1==default,2==high)
    //default detail
    float dimension = 10;
    float flexibility = 2;
    float range = 60;
    //low detail
    if (detail==0){
        dimension = 4;
        flexibility = 1;
        range = 100;
    //high detail (patience!)
    } else if (detail==2){
        dimension = 100;
        flexibility = 10;
        range = 20;
    }

    //2. determine the colours in the image
    NSMutableArray * colours = [NSMutableArray new];
    CGImageRef imageRef = [image CGImage];
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char *rawData = (unsigned char*) calloc(dimension * dimension * 4, sizeof(unsigned char));
    NSUInteger bytesPerPixel = 4;
    NSUInteger bytesPerRow = bytesPerPixel * dimension;
    NSUInteger bitsPerComponent = 8;
    CGContextRef context = CGBitmapContextCreate(rawData, dimension, dimension, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);
    CGContextDrawImage(context, CGRectMake(0, 0, dimension, dimension), imageRef);
    CGContextRelease(context);

    float x = 0;
    float y = 0;
    for (int n = 0; n<(dimension*dimension); n++){
        int index = (bytesPerRow * y) + x * bytesPerPixel;
        int red = rawData[index];
        int green = rawData[index + 1];
        int blue = rawData[index + 2];
        int alpha = rawData[index + 3];
        NSArray * a = [NSArray arrayWithObjects:[NSString stringWithFormat:@"%i",red],[NSString stringWithFormat:@"%i",green],[NSString stringWithFormat:@"%i",blue],[NSString stringWithFormat:@"%i",alpha], nil];
        [colours addObject:a];
        y++;
        if (y==dimension){
            y=0;
            x++;
        }
    }
    free(rawData);

    //3. add some colour flexibility (adds more colours either side of the colours in the image)
    NSArray * copyColours = [NSArray arrayWithArray:colours];
    NSMutableArray * flexibleColours = [NSMutableArray new];
    float flexFactor = flexibility * 2 + 1;
    float factor = flexFactor * flexFactor * 3; //(r,g,b) == *3
    for (int n = 0; n<(dimension * dimension); n++){
        NSArray * pixelColours = copyColours[n];
        NSMutableArray * reds = [NSMutableArray new];
        NSMutableArray * greens = [NSMutableArray new];
        NSMutableArray * blues = [NSMutableArray new];
        for (int p = 0; p<3; p++){
            NSString * rgbStr = pixelColours[p];
            int rgb = [rgbStr intValue];
            for (int f = -flexibility; f<flexibility+1; f++){
                int newRGB = rgb+f;
                // NOTE(review): only the lower bound is clamped; values above
                // 255 are kept as-is — confirm that is intended.
                if (newRGB<0){
                    newRGB = 0;
                }
                if (p==0){
                    [reds addObject:[NSString stringWithFormat:@"%i",newRGB]];
                } else if (p==1){
                    [greens addObject:[NSString stringWithFormat:@"%i",newRGB]];
                } else if (p==2){
                    [blues addObject:[NSString stringWithFormat:@"%i",newRGB]];
                }
            }
        }
        // Cartesian product of the widened r/g/b candidate lists.
        int r = 0;
        int g = 0;
        int b = 0;
        for (int k = 0; k<factor; k++){
            int red = [reds[r] intValue];
            int green = [greens[g] intValue];
            int blue = [blues[b] intValue];
            NSString * rgbString = [NSString stringWithFormat:@"%i,%i,%i",red,green,blue];
            [flexibleColours addObject:rgbString];
            b++;
            if (b==flexFactor){ b=0; g++; }
            if (g==flexFactor){ g=0; r++; }
        }
    }

    //4. distinguish the colours
    //orders the flexible colours by their occurrence
    //then keeps them if they are sufficiently dissimilar
    NSMutableDictionary * colourCounter = [NSMutableDictionary new];
    //count the occurrences in the array
    NSCountedSet *countedSet = [[NSCountedSet alloc] initWithArray:flexibleColours];
    for (NSString *item in countedSet) {
        NSUInteger count = [countedSet countForObject:item];
        [colourCounter setValue:[NSNumber numberWithInteger:count] forKey:item];
    }
    //sort keys highest occurrence to lowest
    NSArray *orderedKeys = [colourCounter keysSortedByValueUsingComparator:^NSComparisonResult(id obj1, id obj2){
        return [obj2 compare:obj1];
    }];
    //checks if the colour is similar to another one already included
    NSMutableArray * ranges = [NSMutableArray new];
    for (NSString * key in orderedKeys){
        NSArray * rgb = [key componentsSeparatedByString:@","];
        int r = [rgb[0] intValue];
        int g = [rgb[1] intValue];
        int b = [rgb[2] intValue];
        bool exclude = false;
        for (NSString * ranged_key in ranges){
            NSArray * ranged_rgb = [ranged_key componentsSeparatedByString:@","];
            int ranged_r = [ranged_rgb[0] intValue];
            int ranged_g = [ranged_rgb[1] intValue];
            int ranged_b = [ranged_rgb[2] intValue];
            if (r>= ranged_r-range && r<= ranged_r+range){
                if (g>= ranged_g-range && g<= ranged_g+range){
                    if (b>= ranged_b-range && b<= ranged_b+range){
                        exclude = true;
                    }
                }
            }
        }
        if (!exclude){ [ranges addObject:key]; }
    }
    //return ranges array here if you just want the ordered colours high to low
    NSMutableArray * colourArray = [NSMutableArray new];
    for (NSString * key in ranges){
        NSArray * rgb = [key componentsSeparatedByString:@","];
        float r = [rgb[0] floatValue];
        float g = [rgb[1] floatValue];
        float b = [rgb[2] floatValue];
        UIColor * colour = [UIColor colorWithRed:(r/255.0f) green:(g/255.0f) blue:(b/255.0f) alpha:1.0f];
        [colourArray addObject:colour];
    }
    //if you just want an array of images of most common to least, return here
    //return [NSDictionary dictionaryWithObject:colourArray forKey:@"colours"];
    //if you want percentages to colours continue below
    NSMutableDictionary * temp = [NSMutableDictionary new];
    float totalCount = 0.0f;
    for (NSString * rangeKey in ranges){
        NSNumber * count = colourCounter[rangeKey];
        totalCount += [count intValue];
        temp[rangeKey]=count;
    }
    //set percentages
    NSMutableDictionary * colourDictionary = [NSMutableDictionary new];
    for (NSString * key in temp){
        float count = [temp[key] floatValue];
        float percentage = count/totalCount;
        NSArray * rgb = [key componentsSeparatedByString:@","];
        float r = [rgb[0] floatValue];
        float g = [rgb[1] floatValue];
        float b = [rgb[2] floatValue];
        UIColor * colour = [UIColor colorWithRed:(r/255.0f) green:(g/255.0f) blue:(b/255.0f) alpha:1.0f];
        colourDictionary[colour]=[NSNumber numberWithFloat:percentage];
    }
    return colourDictionary;
}
Not sure about finding most color or least color, but here is a method to find out the average color.
/// Computes the average colour of the receiver's image by drawing the whole
/// image into a single 1x1 RGBA pixel and reading that pixel back.
- (UIColor *)averageColor {
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char rgba[4];
    // A 1x1 context: Core Graphics downsamples the entire image to one pixel.
    CGContextRef context = CGBitmapContextCreate(rgba, 1, 1, 8, 4, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGContextDrawImage(context, CGRectMake(0, 0, 1, 1), self.CGImage);
    CGColorSpaceRelease(colorSpace);
    CGContextRelease(context);
    if(rgba[3] > 0) {
        // NOTE(review): the channels are stored premultiplied by alpha, yet
        // this multiplies by alpha/255 again rather than dividing it out —
        // verify whether darkening translucent images is intended.
        CGFloat alpha = ((CGFloat)rgba[3])/255.0;
        CGFloat multiplier = alpha/255.0;
        return [UIColor colorWithRed:((CGFloat)rgba[0])*multiplier
                               green:((CGFloat)rgba[1])*multiplier
                                blue:((CGFloat)rgba[2])*multiplier
                               alpha:alpha];
    }
    else {
        // Fully transparent result: return the raw (all-zero) channels.
        return [UIColor colorWithRed:((CGFloat)rgba[0])/255.0
                               green:((CGFloat)rgba[1])/255.0
                                blue:((CGFloat)rgba[2])/255.0
                               alpha:((CGFloat)rgba[3])/255.0];
    }
}
You can probably follow a similar approach to find out the most used color.
Also check this answer about counting red color pixels in an image.
Thanks a lot for your code, @JohnnyRockex. It was really helpful in getting me started towards my goal (finding accent colors depending on the most predominant color in an image).
After going through it, I found the code could be simplified and made easier to read, so I'd like to give back to the community my own version; the -colors selector is in a UIImage extension.
// Downsamples the receiver to pixelDimension x pixelDimension, counts the
// sampled colours, and returns them most-common-first with near-duplicates
// (within filterRange per channel) removed.
// NOTE(review): kBytesPerPixel and kBitsInAByte are assumed to be constants
// defined elsewhere in this file; the method's closing brace appears to have
// been lost in the paste.
- (NSArray *)colors {
    // Original code by Johnny Rockex http://stackoverflow.com/a/29266983/825644
    // Higher the dimension, the more pixels are checked against.
    const float pixelDimension = 10;
    // Higher the range, more similar colors are removed.
    const float filterRange = 60;
    unsigned char *rawData = (unsigned char*) calloc(pixelDimension * pixelDimension * kBytesPerPixel, sizeof(unsigned char));
    NSUInteger bytesPerRow = kBytesPerPixel * pixelDimension;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(rawData, pixelDimension, pixelDimension, kBitsInAByte, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);
    CGContextDrawImage(context, CGRectMake(0, 0, pixelDimension, pixelDimension), [self CGImage]);
    CGContextRelease(context);
    NSMutableArray * colors = [[NSMutableArray alloc] init];
    float x = 0;
    float y = 0;
    const int pixelMatrixSize = pixelDimension * pixelDimension;
    for (int i = 0; i < pixelMatrixSize; i++){
        int index = (bytesPerRow * y) + x * kBytesPerPixel;
        int red = rawData[index];
        int green = rawData[index + 1];
        int blue = rawData[index + 2];
        int alpha = rawData[index + 3];
        // NOTE(review): alpha is 0-255 here, but colorWithRed:...alpha:
        // expects 0.0-1.0 — this probably should be alpha / 255.0f.
        UIColor * color = [UIColor colorWithRed:(red / 255.0f) green:(green / 255.0f) blue:(blue / 255.0f) alpha:alpha];
        [colors addObject:color];
        y++;
        if (y == pixelDimension){
            y = 0;
            x++;
        }
    }
    free(rawData);
    // Count occurrences of each sampled colour.
    NSMutableDictionary * colorCounter = [[NSMutableDictionary alloc] init];
    NSCountedSet *countedSet = [[NSCountedSet alloc] initWithArray:colors];
    for (NSString *item in countedSet) {
        NSUInteger count = [countedSet countForObject:item];
        [colorCounter setValue:[NSNumber numberWithInteger:count] forKey:item];
    }
    // Sort keys from highest occurrence to lowest.
    NSArray *orderedColors = [colorCounter keysSortedByValueUsingComparator:^NSComparisonResult(id obj1, id obj2){
        return [obj2 compare:obj1];
    }];
    // Keep a colour only if it is not within filterRange (per channel) of one
    // already kept.
    // NOTE(review): the *RGBComponent accessors return UInt8, so the
    // subtractions below can wrap around before abs() applies — verify.
    NSMutableArray *filteredColors = [NSMutableArray new];
    for (UIColor *color in orderedColors){
        bool filtered = false;
        for (UIColor *rangedColor in filteredColors){
            if (abs(color.redRGBComponent - rangedColor.redRGBComponent) <= filterRange &&
                abs(color.greenRGBComponent - rangedColor.greenRGBComponent) <= filterRange &&
                abs(color.blueRGBComponent - rangedColor.blueRGBComponent) <= filterRange) {
                filtered = true;
                break;
            }
        }
        if (!filtered) {
            [filteredColors addObject:color];
        }
    }
    return [filteredColors copy];
The code for UIColor's extension adding the -rgbComponent function can be found underneath, but I wrote it in Swift (trying to write all new classes in Swift, but this wasn't the case for the -colors selector):
extension UIColor {
    /// Red channel of the colour, scaled to 0...255.
    open func redRGBComponent() -> UInt8 {
        return UInt8(cgColor.components![0] * 255)
    }

    /// Green channel of the colour, scaled to 0...255.
    open func greenRGBComponent() -> UInt8 {
        return UInt8(cgColor.components![1] * 255)
    }

    /// Blue channel of the colour, scaled to 0...255.
    open func blueRGBComponent() -> UInt8 {
        return UInt8(cgColor.components![2] * 255)
    }
}
Enjoy!
I wrote this tool to do that.
https://github.com/623637646/UIImageColorRatio
// replace the UIImage() to yourself's UIImage.
let theMostUsedColor = UIImage().calculateColorRatio(deviation: 0)?.colorRatioArray.first?.color
let theLeastUsedColor = UIImage().calculateColorRatio(deviation: 0)?.colorRatioArray.last?.color
I am making a curve fitting program using matrices in the form of a two dimensional arrays but the compiler throws out BAD_ACCESS errors at random such as Thread 1:EXC_BAD_ACCESS (code=1, address=0x13f800000). The program works sometimes and other times crashes. Any ideas would be appreciated. Thanks.
/// Reads the matrix size from stdin, allocates a (matrixSize+1)^2 float grid
/// (an array of row pointers over one contiguous buffer), runs the fit, and
/// frees the storage.
- (void)main {
    NSLog(@"Enter size"); // bug fix: literal was garbled as #"Enter size"
    scanf("%i", &matrixSize);
    if (matrixSize < 1) {
        return; // nothing to fit; also guards the allocations below
    }
    // Bug fix: the original allocated only matrixSize row pointers and
    // matrixSize+1 floats in total, then indexed rows 0..matrixSize with a
    // stride of matrixSize+1 — overrunning both buffers (the reported
    // EXC_BAD_ACCESS). enterPoints/elimination/drawRect use rows up to index
    // matrixSize, so allocate matrixSize+1 rows of matrixSize+1 floats.
    float *temp = (float *)calloc((matrixSize + 1) * (matrixSize + 1), sizeof(float));
    matrix = (float **)calloc(matrixSize + 1, sizeof(float *));
    for (int i = 0; i < matrixSize + 1; i++) {
        matrix[i] = temp + (i * (matrixSize + 1));
    }
    [self enterPoints];
    [self elimination];
    [self setNeedsDisplay:YES];
    free(matrix);
    free(temp);
}
//points entered here
//points entered here
// Reads matrixSize (x, y) points from stdin and fills the system: one column
// per point, with powers of x in rows 0..matrixSize-1 and the y value in the
// extra row at index matrixSize.
- (void)enterPoints {
    CGPoint *points = (CGPoint *)malloc(matrixSize * sizeof(CGPoint));
    for (int i = 0; i < matrixSize; i++) {
        // NOTE(review): %lf assumes CGPoint fields are double (true on 64-bit,
        // where CGFloat is double; on 32-bit it is float) — confirm target.
        scanf("%lf", &points[i].x);
        scanf("%lf", &points[i].y);
    }
    for (int j = 0; j < matrixSize; j++) {
        for (int i = 0; i < matrixSize+1; i++) {
            // NOTE(review): i indexes rows 0..matrixSize inclusive, so the
            // allocation must provide matrixSize+1 rows — verify against main.
            if (i == (matrixSize)) {
                matrix[i][j] = points[j].y;
            }
            else {
                matrix[i][j] = pow(points[j].x, (matrixSize-1)-i);
            }
        }
    }
    free(points);
}
//matrix reduction occurs here
//matrix reduction occurs here
// Gauss-Jordan-style elimination over the (matrixSize+1)-row system so the
// polynomial coefficients end up in the augmented row.
// NOTE(review): no pivoting — divides by matrix[j][j] unconditionally, so a
// zero pivot yields inf/NaN results.
- (void)elimination {
    for (int j = 0; j < matrixSize; j++) {
        double divideValue = matrix[j][j];
        // Normalise column j by its pivot value.
        for (int i = 0; i < matrixSize+1; i++) {
            matrix[i][j] /= divideValue;
        }
        // Eliminate the pivot term from every other column; the inner `if`
        // skips over the pivot column itself by advancing j1 (or breaking
        // when it is the last column).
        for (int j1 = 0; j1 < matrixSize; j1++) {
            if (j1 == j) {
                if (j1 == matrixSize-1) {
                    break;
                }
                else {
                    j1++;
                }
            }
            double subValue = matrix[j][j1];
            for (int i = 0; i < matrixSize+1; i++) {
                matrix[i][j1] -= matrix[i][j]*subValue;
            }
        }
    }
}
//drawing the polynomial
//drawing the polynomial
/// Samples the fitted polynomial at 0.01 steps across [-matrixSize, matrixSize)
/// and strokes the resulting polyline (scaled by 100 and offset by 100).
- (void)drawRect:(NSRect)dirtyRect {
    NSGraphicsContext *GraphicsContext = [NSGraphicsContext currentContext];
    CGContextRef context = (CGContextRef)[GraphicsContext graphicsPort];
    CGContextSetRGBStrokeColor(context, 0.0, 0.0, 0.0, 1.0);
    CGContextSetLineWidth(context, 3.0);
    CGContextMoveToPoint(context, 0, matrix[matrixSize][0] * 100 + 100);
    [GraphicsContext saveGraphicsState];
    // Bug fix: the original created a CGMutablePathRef here that was never
    // used and never released (a leak); it has been removed — the lines are
    // added directly to the context.
    for (float i = -matrixSize; i < matrixSize; i += .01) {
        float y = 0;
        // Evaluate the polynomial: coefficients live in the augmented row.
        for (int j = 0; j < matrixSize; j++) {
            y += matrix[matrixSize][j] * pow(i, j);
        }
        CGContextAddLineToPoint(context, i * 100 + 100, y * 100 + 100);
    }
    CGContextStrokePath(context);
    [GraphicsContext restoreGraphicsState];
}
You did not allocate enough memory for your matrix. This line sets up the entire data area, but you have only allocated matrixSize+1 elements, instead of matrixSize*(matrixSize+1):
temp = (float*)calloc(matrixSize+1, sizeof(float));
So, maintaining the matrixSize+1 columns and matrixSize rows:
matrix = (float**)calloc(matrixSize, sizeof(float*));
temp = (float*)calloc(matrixSize * (matrixSize+1), sizeof(float));
for (int i = 0; i < matrixSize; i++) {
matrix[i] = temp + (i*(matrixSize+1));
}
When you use this later, be careful. You are addressing it wrong:
for (int j = 0; j < matrixSize; j++) {
for (int i = 0; i < matrixSize+1; i++) {
if (i == (matrixSize)) {
matrix[i][j] = points[j].y;
}
else {
matrix[i][j] = pow(points[j].x, (matrixSize-1)-i);
}
}
}
Notice that i goes to matrixSize+1 but you are using that as the row index (there are only matrixSize rows). I think you meant to use matrix[j][i] instead of matrix[i][j]. You also do this when you construct the initial matrix, but I've actually changed that to be in line with your allocation.
So there are two points of buffer overrun in your program that I see.
EXC_BAD_ACCESS indicates that one of your objects is being over-released — and therefore deallocated — before you invoke a method on it. Once you reach the point in your code where you invoke a method on the deallocated object, the pointer is referencing an invalid memory location.
To find Zombie objects have a look at this: How to Enable NSZombie in XCode
I've used this and it works very well.
I'm looking for how to draw the sound amplitude.
I found http://supermegaultragroovy.com/2009/10/06/drawing-waveforms/ but I have some problems. How do I get a list of floating-point values representing the audio?
Thank all.
I found this example here: Drawing waveform with AVAssetReader , changed it, and developed a new class based on it.
This class returns UIImageView.
//.h file
#import <UIKit/UIKit.h>
// BUG FIX: AVURLAsset is declared in AVFoundation; without this import the
// header does not compile on its own.
#import <AVFoundation/AVFoundation.h>

/// An image view that renders the waveform of an audio asset as its image.
@interface WaveformImageVew : UIImageView {
}
/// Initializes the view and synchronously renders the waveform at `url`.
- (id)initWithUrl:(NSURL *)url;
/// Returns PNG data for the rendered waveform of `songAsset` (nil on failure).
- (NSData *)renderPNGAudioPictogramLogForAssett:(AVURLAsset *)songAsset;
@end
//.m file
#import "WaveformImageVew.h"
// Helper macros for sample processing and image output.
#define absX(x) (x<0?0-x:x)                              // absolute value
#define minMaxX(x,mn,mx) (x<=mn?mn:(x>=mx?mx:x))         // clamp x to [mn, mx]
#define noiseFloor (-50.0)                               // dB display floor
#define decibel(amplitude) (20.0 * log10(absX(amplitude)/32767.0)) // 16-bit PCM -> dBFS
#define imgExt @"png"                                    // BUG FIX: was garbled #"png"
#define imageToData(x) UIImagePNGRepresentation(x)       // UIImage -> PNG NSData
@implementation WaveformImageVew
// BUG FIX throughout this implementation: the pasted code had `#` where
// Objective-C requires `@` (#implementation, #end, #"..." literals), so it
// did not compile; all such tokens are restored to `@`.

/// Convenience initializer: synchronously renders the waveform of the audio
/// file at `url` and installs the result as this view's image.
-(id)initWithUrl:(NSURL*)url{
    if(self = [super init]){
        AVURLAsset * urlA = [AVURLAsset URLAssetWithURL:url options:nil];
        [self setImage:[UIImage imageWithData:[self renderPNGAudioPictogramLogForAssett:urlA]]];
    }
    return self;
}

/// Renders `sampleCount` averaged dB values (interleaved left[, right]) as
/// one vertical line per sample, mirrored around each channel's center line.
/// `normalizeMax` is the loudest average seen (scales the display height).
/// Returns an image sampleCount px wide by imageHeight px tall.
-(UIImage *) audioImageLogGraph:(Float32 *) samples
                   normalizeMax:(Float32) normalizeMax
                    sampleCount:(NSInteger) sampleCount
                   channelCount:(NSInteger) channelCount
                    imageHeight:(float) imageHeight {
    CGSize imageSize = CGSizeMake(sampleCount, imageHeight);
    UIGraphicsBeginImageContext(imageSize);
    CGContextRef context = UIGraphicsGetCurrentContext();
    // Black background.
    CGContextSetFillColorWithColor(context, [UIColor blackColor].CGColor);
    CGContextSetAlpha(context,1.0);
    CGRect rect;
    rect.size = imageSize;
    rect.origin.x = 0;
    rect.origin.y = 0;
    CGColorRef leftcolor = [[UIColor whiteColor] CGColor];
    CGColorRef rightcolor = [[UIColor redColor] CGColor];
    CGContextFillRect(context, rect);
    CGContextSetLineWidth(context, 1.0);
    // Each channel occupies half the image height; lines are mirrored
    // around that channel's own vertical center.
    float halfGraphHeight = (imageHeight / 2) / (float) channelCount ;
    float centerLeft = halfGraphHeight;
    float centerRight = (halfGraphHeight*3) ;
    // Maps the dB range [noiseFloor, normalizeMax] onto the channel height.
    float sampleAdjustmentFactor = (imageHeight/ (float) channelCount) / (normalizeMax - noiseFloor) / 2;
    for (NSInteger intSample = 0 ; intSample < sampleCount ; intSample ++ ) {
        Float32 left = *samples++;
        float pixels = (left - noiseFloor) * sampleAdjustmentFactor;
        CGContextMoveToPoint(context, intSample, centerLeft-pixels);
        CGContextAddLineToPoint(context, intSample, centerLeft+pixels);
        CGContextSetStrokeColorWithColor(context, leftcolor);
        CGContextStrokePath(context);
        if (channelCount==2) {
            Float32 right = *samples++;
            float pixels = (right - noiseFloor) * sampleAdjustmentFactor;
            CGContextMoveToPoint(context, intSample, centerRight - pixels);
            CGContextAddLineToPoint(context, intSample, centerRight + pixels);
            CGContextSetStrokeColorWithColor(context, rightcolor);
            CGContextStrokePath(context);
        }
    }
    // Create new image
    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
    // Tidy up
    UIGraphicsEndImageContext();
    return newImage;
}

/// Decodes the asset's first track to 16-bit interleaved PCM, averages
/// ~1/50 s chunks into clamped dB values, then renders them to PNG data.
/// Returns nil on failure.
- (NSData *) renderPNGAudioPictogramLogForAssett:(AVURLAsset *)songAsset {
    NSError * error = nil;
    AVAssetReader * reader = [[AVAssetReader alloc] initWithAsset:songAsset error:&error];
    AVAssetTrack * songTrack = [songAsset.tracks objectAtIndex:0];
    NSDictionary* outputSettingsDict = [[NSDictionary alloc] initWithObjectsAndKeys:
        [NSNumber numberWithInt:kAudioFormatLinearPCM],AVFormatIDKey,
        // [NSNumber numberWithInt:44100.0],AVSampleRateKey, /*Not Supported*/
        // [NSNumber numberWithInt: 2],AVNumberOfChannelsKey, /*Not Supported*/
        [NSNumber numberWithInt:16],AVLinearPCMBitDepthKey,
        [NSNumber numberWithBool:NO],AVLinearPCMIsBigEndianKey,
        [NSNumber numberWithBool:NO],AVLinearPCMIsFloatKey,
        [NSNumber numberWithBool:NO],AVLinearPCMIsNonInterleaved,
        nil];
    AVAssetReaderTrackOutput* output = [[AVAssetReaderTrackOutput alloc] initWithTrack:songTrack outputSettings:outputSettingsDict];
    [outputSettingsDict release]; // BUG FIX: was leaked (MRC code, never released)
    [reader addOutput:output];
    [output release];
    // BUG FIX: these were uninitialized; if the track carried no format
    // description, garbage values were used below.
    UInt32 sampleRate = 0, channelCount = 0;
    NSArray* formatDesc = songTrack.formatDescriptions;
    for(unsigned int i = 0; i < [formatDesc count]; ++i) {
        CMAudioFormatDescriptionRef item = (CMAudioFormatDescriptionRef)[formatDesc objectAtIndex:i];
        const AudioStreamBasicDescription* fmtDesc = CMAudioFormatDescriptionGetStreamBasicDescription (item);
        if(fmtDesc ) {
            sampleRate = fmtDesc->mSampleRate;
            channelCount = fmtDesc->mChannelsPerFrame;
            // NSLog(@"channels:%u, bytes/packet: %u, sampleRate %f",fmtDesc->mChannelsPerFrame, fmtDesc->mBytesPerPacket,fmtDesc->mSampleRate);
        }
    }
    if (channelCount == 0) {
        // No usable format description; bail out rather than divide by zero.
        [reader release];
        return nil;
    }
    UInt32 bytesPerSample = 2 * channelCount; // 16-bit interleaved PCM
    Float32 normalizeMax = noiseFloor;        // tracks the loudest average seen
    NSLog(@"normalizeMax = %f",normalizeMax);
    NSMutableData * fullSongData = [[NSMutableData alloc] init];
    [reader startReading];
    UInt64 totalBytes = 0;
    Float64 totalLeft = 0;
    Float64 totalRight = 0;
    Float32 sampleTally = 0;
    // One averaged output value per ~1/50 s of audio.
    NSInteger samplesPerPixel = sampleRate / 50;
    while (reader.status == AVAssetReaderStatusReading){
        AVAssetReaderTrackOutput * trackOutput = (AVAssetReaderTrackOutput *)[reader.outputs objectAtIndex:0];
        CMSampleBufferRef sampleBufferRef = [trackOutput copyNextSampleBuffer];
        if (sampleBufferRef){
            CMBlockBufferRef blockBufferRef = CMSampleBufferGetDataBuffer(sampleBufferRef);
            size_t length = CMBlockBufferGetDataLength(blockBufferRef);
            totalBytes += length;
            // Local pool keeps the per-buffer NSMutableData from piling up.
            NSAutoreleasePool *wader = [[NSAutoreleasePool alloc] init];
            NSMutableData * data = [NSMutableData dataWithLength:length];
            CMBlockBufferCopyDataBytes(blockBufferRef, 0, length, data.mutableBytes);
            SInt16 * samples = (SInt16 *) data.mutableBytes;
            int sampleCount = length / bytesPerSample;
            for (int i = 0; i < sampleCount ; i ++) {
                // Convert each sample to dB and clamp to the display floor.
                Float32 left = (Float32) *samples++;
                left = decibel(left);
                left = minMaxX(left,noiseFloor,0);
                totalLeft += left;
                Float32 right;
                if (channelCount==2) {
                    right = (Float32) *samples++;
                    right = decibel(right);
                    right = minMaxX(right,noiseFloor,0);
                    totalRight += right;
                }
                sampleTally++;
                if (sampleTally > samplesPerPixel) {
                    // Emit one averaged value per channel for this pixel.
                    left = totalLeft / sampleTally;
                    if (left > normalizeMax) {
                        normalizeMax = left;
                    }
                    // NSLog(@"left average = %f, normalizeMax = %f",left,normalizeMax);
                    [fullSongData appendBytes:&left length:sizeof(left)];
                    if (channelCount==2) {
                        right = totalRight / sampleTally;
                        if (right > normalizeMax) {
                            normalizeMax = right;
                        }
                        [fullSongData appendBytes:&right length:sizeof(right)];
                    }
                    totalLeft = 0;
                    totalRight = 0;
                    sampleTally = 0;
                }
            }
            [wader drain];
            CMSampleBufferInvalidate(sampleBufferRef);
            CFRelease(sampleBufferRef);
        }
    }
    NSData * finalData = nil;
    if (reader.status == AVAssetReaderStatusFailed || reader.status == AVAssetReaderStatusUnknown){
        // Something went wrong. Handle it.
    }
    if (reader.status == AVAssetReaderStatusCompleted){
        // You're done. It worked.
        NSLog(@"rendering output graphics using normalizeMax %f",normalizeMax);
        // NOTE(review): sampleCount and channelCount assume stereo here even
        // when the source is mono — confirm against the track's real
        // channel count.
        UIImage *test = [self audioImageLogGraph:(Float32 *) fullSongData.bytes
                                    normalizeMax:normalizeMax
                                     sampleCount:fullSongData.length / (sizeof(Float32) * 2)
                                    channelCount:2
                                     imageHeight:100];
        finalData = imageToData(test);
    }
    [fullSongData release];
    [reader release];
    return finalData;
}
@end
Been reading your question and created a control for this. Looks like this:
Code here:
https://github.com/fulldecent/FDWaveformView
Discussion here:
https://www.cocoacontrols.com/controls/fdwaveformview
UPDATE 2015-01-29: This project is going strong and making consistent releases. Thanks for SO for all the exposure!
I can give you a reference to the one that I have implemented in my application; it was Apple's example. Here is the example of aurioTouch, which analyzes audio in 3 ways. Apple has still not provided a way to analyze audio file data directly... so this example also uses the mic to analyze the sound...
Amongst 3 I have used only Oscilloscope for analysing the amplitude effect. I have to change that code drastically to match my requirement, so best of luck if you are going to use...
You can also see one more example using such amplitude : SpeakHere of Apple
This is the code I have used to convert my audio data (audio file ) into floating point representation and saved into an array.
-(void) PrintFloatDataFromAudioFile {
    // Reads an audio file from the app bundle, converts it to mono 32-bit
    // float linear PCM via Extended Audio File Services, stores the samples
    // in an array, and prints each sample (range -1..+1).
    NSString * name = @"Filename"; //YOUR FILE NAME
    NSString * source = [[NSBundle mainBundle] pathForResource:name ofType:@"m4a"]; // SPECIFY YOUR FILE FORMAT
    const char *cString = [source cStringUsingEncoding:NSASCIIStringEncoding];
    CFStringRef str = CFStringCreateWithCString(
                                                NULL,
                                                cString,
                                                kCFStringEncodingMacRoman
                                                );
    CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(
                                                          kCFAllocatorDefault,
                                                          str,
                                                          kCFURLPOSIXPathStyle,
                                                          false
                                                          );
    CFRelease(str); // BUG FIX: was leaked in the original
    ExtAudioFileRef fileRef;
    ExtAudioFileOpenURL(inputFileURL, &fileRef);
    CFRelease(inputFileURL); // BUG FIX: was leaked in the original
    // Client format: the format ExtAudioFileRead converts into.
    AudioStreamBasicDescription audioFormat;
    audioFormat.mSampleRate = 44100; // GIVE YOUR SAMPLING RATE
    audioFormat.mFormatID = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat;
    audioFormat.mBitsPerChannel = sizeof(Float32) * 8;
    audioFormat.mChannelsPerFrame = 1; // Mono
    audioFormat.mBytesPerFrame = audioFormat.mChannelsPerFrame * sizeof(Float32); // == sizeof(Float32)
    audioFormat.mFramesPerPacket = 1;
    audioFormat.mBytesPerPacket = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame; // = sizeof(Float32)
    // 3) Apply audio format to the Extended Audio File
    ExtAudioFileSetProperty(
                            fileRef,
                            kExtAudioFileProperty_ClientDataFormat,
                            sizeof (AudioStreamBasicDescription), //= audioFormat
                            &audioFormat);
    int numSamples = 1024; //How many samples to read in at a time
    UInt32 sizePerPacket = audioFormat.mBytesPerPacket; // = sizeof(Float32)
    UInt32 packetsPerBuffer = numSamples;
    UInt32 outputBufferSize = packetsPerBuffer * sizePerPacket;
    // BUG FIX: the original used sizeof(UInt8 *), over-allocating 8x.
    UInt8 *outputBuffer = (UInt8 *)malloc(sizeof(UInt8) * outputBufferSize);
    AudioBufferList convertedData;
    convertedData.mNumberBuffers = 1; // Set this to 1 for mono
    convertedData.mBuffers[0].mNumberChannels = audioFormat.mChannelsPerFrame; //also = 1
    convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
    convertedData.mBuffers[0].mData = outputBuffer;
    UInt32 frameCount = numSamples;
    float *samplesAsCArray;
    int j = 0;
    // BUG FIX: the original declared double[882000] (~7 MB) on the stack,
    // which risks a stack overflow; allocate on the heap instead.
    static const int kMaxSamples = 882000; // SHOULD BE >= YOUR DATA SIZE
    double *floatDataArray = (double *)calloc(kMaxSamples, sizeof(double));
    while (frameCount > 0) {
        frameCount = (UInt32)numSamples; // request a full buffer each pass
        convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
        ExtAudioFileRead(
                         fileRef,
                         &frameCount, // in: capacity, out: frames actually read
                         &convertedData
                         );
        if (frameCount > 0) {
            AudioBuffer audioBuffer = convertedData.mBuffers[0];
            samplesAsCArray = (float *)audioBuffer.mData; // CAST YOUR mData INTO FLOAT
            // BUG FIX: iterate only the frames actually read (frameCount),
            // not a fixed 1024 — the last buffer is usually shorter.
            for (UInt32 i = 0; i < frameCount && j < kMaxSamples; i++) {
                floatDataArray[j] = (double)samplesAsCArray[i];
                printf("\n%f", floatDataArray[j]); // values range -1 to +1
                j++;
            }
        }
    }
    // BUG FIX: the original never released any of these.
    free(floatDataArray);
    free(outputBuffer);
    ExtAudioFileDispose(fileRef);
}
This is my answer — thanks, everyone.
obj-c
here is code:
-(void) listenerData:(NSNotification *) notify
{
    // Store the incoming waveform buffers and request a redraw.
    // BUG FIX: removed dead code that computed a byte-sum checksum of a
    // hard-coded constant and immediately discarded it, and removed the
    // stray backticks after the assignment (a syntax error in the paste).
    self.drawData = notify.object;
    [self setNeedsDisplay];
}
-(void)drawRect:(CGRect)rect
{
    // Fills two mirrored waveform envelopes around the horizontal midline:
    // the source signal (red) above it and the processed signal (magenta)
    // below it. self.drawData is expected to hold two NSData buffers of
    // doubles: index 0 = source, index 1 = processed.
    NSArray *data = self.drawData;
    // BUG FIX: guard against missing buffers and against datacount < 2,
    // which would divide by zero (or -1) when computing `step` below.
    if ([data count] < 2) {
        return;
    }
    NSData *tdata = [data objectAtIndex:0];
    const double *srcdata = (const double *)tdata.bytes;
    int datacount = (int)(tdata.length / sizeof(double));
    tdata = [data objectAtIndex:1];
    const double *coveddata = (const double *)tdata.bytes;
    if (datacount < 2) {
        return;
    }
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGRect bounds = self.bounds;
    CGContextClearRect(context, bounds);
    CGFloat midpos = bounds.size.height / 2;
    const double scale = 1.0 / 100;                     // amplitude -> points
    CGFloat step = bounds.size.width / (datacount - 1); // horizontal spacing
    // Source envelope, drawn upward from the midline, filled red.
    CGContextBeginPath(context);
    CGContextMoveToPoint(context, 0, midpos);
    CGFloat xpos = 0;
    for (int i = 0; i < datacount; i++) {
        CGContextAddLineToPoint(context, xpos, midpos - srcdata[i] * scale);
        xpos += step;
    }
    CGContextAddLineToPoint(context, bounds.size.width, midpos);
    CGContextClosePath(context);
    CGContextSetRGBFillColor(context, 1.0, 0.0, 0.0, 1.0);
    CGContextFillPath(context);
    // Processed envelope, drawn downward from the midline, filled magenta.
    CGContextBeginPath(context);
    CGContextMoveToPoint(context, 0, midpos);
    xpos = 0;
    for (int i = 0; i < datacount; i++) {
        CGContextAddLineToPoint(context, xpos, midpos + coveddata[i] * scale);
        xpos += step;
    }
    CGContextAddLineToPoint(context, bounds.size.width, midpos);
    CGContextClosePath(context);
    CGContextSetRGBFillColor(context, 1.0, 0.0, 1.0, 1.0);
    CGContextFillPath(context);
}