CGDataProviderRefReleaseCallback not being called - objective-c

I'm creating a CGBitmap using the code below, but the done_callback function is never called. What am I doing wrong? The bitmap is created correctly but I have no way to free the pixel buffer afterwards. Is it because the CGBitmap is never released?
// Release callback for the CGDataProvider: frees the pixel buffer that was
// handed to CGDataProviderCreateWithData once CoreGraphics is done with it.
void done_callback(void *info, const void *data, size_t size)
{
    free((void *)data);
    // BUG FIX: the literal was garbled as #"Done!"; Objective-C string
    // literals take the @ prefix.
    NSLog(@"Done!");
}
// Render an SVG document (lunasvg) into a CGImage.
//
// The pixel buffer handed to CGDataProviderCreateWithData is owned by the
// provider; done_callback frees it. The release callback only fires once
// every retain on the provider is dropped, so we must release our own
// reference here (CGImageCreate retains what it needs).
static CGImageRef SVG_to_CGImage(char const *svg, int width, int height)
{
    using namespace lunasvg;
    auto doc = Document::loadFromData(svg);
    if (doc.get() == nullptr) {
        return nullptr;
    }
    auto bmp = doc->renderToBitmap(width, height);
    if (!bmp.valid()) {
        return nullptr;
    }
    UInt32 *pixels = (UInt32 *)malloc(width * height * 4);
    if (pixels == nullptr) {
        // Robustness: don't dereference a failed allocation.
        return nullptr;
    }
    // Rearrange the BGRA source pixels into the RGBA order declared below.
    for (int y = 0; y < height; ++y) {
        UInt32 *s = (UInt32 *)(bmp.data() + y * width * 4);
        UInt32 *d = pixels + y * width;
        for (int x = 0; x < width; ++x) {
            UInt32 argb = *s++;
            UInt32 a = argb & 0xff000000;
            UInt32 r = (argb & 0xff0000) >> 16;
            UInt32 g = argb & 0xff00;
            UInt32 b = (argb & 0xff) << 16;
            *d++ = a | r | g | b;
        }
    }
    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, pixels, width * height * 4, done_callback);
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
    CGBitmapInfo bitmapInfo = kCGImageAlphaPremultipliedLast;
    CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
    CGImageRef image = CGImageCreate(width, height, 8, 32, 4 * width, colorSpaceRef,
                                     bitmapInfo, provider, NULL, NO, renderingIntent);
    // BUG FIX: without these releases the provider and color space leak and
    // done_callback is NEVER invoked, because the provider's retain count
    // never reaches zero. After these releases, the callback fires as soon
    // as the returned image is released by the caller.
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpaceRef);
    return image;
}

Related

Using bitmapData to modify an image

So I've got a method that successfully gets the color of a pixel.
//arrColT is an NSMutableArray<NSColor *> of target colors to match against.
NSInteger width = [bitIn pixelsWide];
NSInteger height = [bitIn pixelsHigh];
// NOTE(review): bytesPerRow may exceed width*4 due to row padding, which is
// why each row is addressed via rowBytes rather than width below.
NSInteger rowBytes = [bitIn bytesPerRow];
unsigned char* pixels = [bitIn bitmapData];
int row, col;
//For every row,
for (row = 0; row < height; row++)
{
unsigned char* rowStart = (unsigned char*)(pixels + (row * rowBytes));
unsigned char* nextChannel = rowStart;
//For every pixel in a row,
for (col = 0; col < width; col++)
{
//Get its color.
// assumes 4 channels per pixel, 8 bits each, in RGBA order — TODO confirm
// against bitIn's actual bitmapFormat.
unsigned char red, green, blue, alpha;
red = *nextChannel;
int intRed = (int)red;
nextChannel++;
green = *nextChannel;
int intGreen = (int)green;
nextChannel++;
blue = *nextChannel;
int intBlue = (int)blue;
nextChannel++;
alpha = *nextChannel;
int intAlpha = (int)alpha;
nextChannel++;
// Normalize the 0-255 channel values into a calibrated NSColor.
NSColor *colHas = [NSColor colorWithCalibratedRed:(float)intRed/255 green: (float)intGreen/255 blue: (float)intBlue/255 alpha:(float)intAlpha/255];
for (int i = 0; i<[arrColT count]; i++) {
//If the target color is equal to the current color, replace it with the parallel replace color...somehow.
if([colHas isEqualTo:arrColT[i]]){
}
}
}
}
The question is, how do I get color data back into the bitmapData?
With hope,
radzo73
See Technical Q&A QA1509, which basically advises
create pixel buffer (by creating CGContextRef of predefined format and drawing your image to that);
manipulate the bytes within that pixel buffer as you want; and
create resulting image with CGBitmapContextCreateImage.
E.g.
/// Redraw `image` into an RGBA bitmap context, allow in-place manipulation
/// of the pixel bytes, and return the resulting image.
///
/// @param image The source image.
/// @return A new UIImage built from the (possibly modified) pixel buffer,
///         or nil if the bitmap context could not be created.
- (UIImage *)convertImage:(UIImage *)image
{
    // get image
    CGImageRef imageRef = image.CGImage;
    // prepare context: RGBA, 8 bits per component, premultiplied alpha last
    CGBitmapInfo bitmapInfo = kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big; // bytes in RGBA order
    NSInteger width = CGImageGetWidth(imageRef);
    NSInteger height = CGImageGetHeight(imageRef);
    CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(NULL, width, height, 8, 4 * width, colorspace, bitmapInfo);
    // BUG FIX: guard against context-creation failure (e.g. a zero-sized
    // image); the original passed NULL on to CG calls and leaked colorspace.
    if (!context) {
        CGColorSpaceRelease(colorspace);
        return nil;
    }
    // draw image to that context
    CGRect rect = CGRectMake(0, 0, width, height);
    CGContextDrawImage(context, rect, imageRef);
    uint8_t *buffer = CGBitmapContextGetData(context);
    // ... manipulate pixel buffer bytes here ...
    // get image from buffer
    CGImageRef outputImage = CGBitmapContextCreateImage(context);
    UIImage *result = [UIImage imageWithCGImage:outputImage scale:image.scale orientation:image.imageOrientation];
    // clean up
    CGImageRelease(outputImage);
    CGContextRelease(context);
    CGColorSpaceRelease(colorspace);
    return result;
}
For example, here is a B&W conversion routine:
/** Convert the image to B&W as a single (non-parallel) task.
 *
 * This assumes the pixel buffer is in RGBA, 8 bits per pixel format.
 *
 * @param buffer The pixel buffer.
 * @param width The image width in pixels.
 * @param height The image height in pixels.
 */
- (void)convertToBWSimpleInBuffer:(uint8_t *)buffer width:(NSInteger)width height:(NSInteger)height
{
for (NSInteger row = 0; row < height; row++) {
for (NSInteger col = 0; col < width; col++) {
NSUInteger offset = (col + row * width) * 4;
uint8_t *pixel = buffer + offset;
// get the channels
uint8_t red = pixel[0];
uint8_t green = pixel[1];
uint8_t blue = pixel[2];
uint8_t alpha = pixel[3];
// update the channels with result (Rec. 709 luma weights)
uint8_t gray = 0.2126 * red + 0.7152 * green + 0.0722 * blue;
pixel[0] = gray;
pixel[1] = gray;
pixel[2] = gray;
pixel[3] = alpha;
}
}
}
If you want to improve performance, you can use dispatch_apply and stride through your image buffer in parallel:
/** Convert the image to B&W, using GCD to split the conversion into several concurrent GCD tasks.
 *
 * This assumes the pixel buffer is in RGBA, 8 bits per pixel format.
 *
 * @param buffer The pixel buffer.
 * @param width The image width in pixels.
 * @param height The image height in pixels.
 * @param count How many GCD tasks should the conversion be split into.
 */
- (void)convertToBWConcurrentInBuffer:(uint8_t *)buffer width:(NSInteger)width height:(NSInteger)height count:(NSInteger)count
{
    // Robustness: nothing to do for empty images / non-positive task counts.
    if (width <= 0 || height <= 0 || count <= 0) {
        return;
    }
    dispatch_queue_t queue = dispatch_queue_create("com.domain.app", DISPATCH_QUEUE_CONCURRENT);
    // BUG FIX: the original used stride = height / count and iterated
    // height / stride times, which (a) divides by zero when count > height
    // and (b) silently drops the trailing rows whenever height is not a
    // multiple of stride (e.g. height 10, count 3 -> row 9 never converted).
    // Rounding the stride up guarantees every row is covered exactly once.
    NSInteger stride = (height + count - 1) / count;
    NSInteger iterations = (height + stride - 1) / stride;
    dispatch_apply(iterations, queue, ^(size_t idx) {
        size_t j = idx * stride;
        size_t j_stop = MIN(j + stride, (size_t)height);
        for (NSInteger row = j; row < j_stop; row++) {
            for (NSInteger col = 0; col < width; col++) {
                NSUInteger offset = (col + row * width) * 4;
                uint8_t *pixel = buffer + offset;
                // get the channels
                uint8_t red = pixel[0];
                uint8_t green = pixel[1];
                uint8_t blue = pixel[2];
                uint8_t alpha = pixel[3];
                // update the channels with result (Rec. 709 luma weights)
                uint8_t gray = 0.2126 * red + 0.7152 * green + 0.0722 * blue;
                pixel[0] = gray;
                pixel[1] = gray;
                pixel[2] = gray;
                pixel[3] = alpha;
            }
        }
    });
}
Well it depends on what bitIn is, but something like this should do the trick.
First, create a context. You can do this once and keep the context if you are going to make a lot of changes to the data and need a lot of pictures.
self.ctx = CGBitmapContextCreate (
[bitIn bitmapData],
[bitIn pixelsWide],
[bitIn pixelsHigh],
[bitIn bitsPerPixel],
[bitIn bytesPerRow],
[bitIn colorSpace],
[bitIn bitmapInfo] );
Here I freely used bitIn hoping it will provide all the missing pieces. Then you can assemble it all later, even after further changes to the data, using something like
CGImageRef cgImg = CGBitmapContextCreateImage ( self.ctx );
UIImage * img = [UIImage imageWithCGImage:cgImg];
CGImageRelease ( cgImg );
Now you can make more changes to the data and do the same to get a new image.

ESC POS print image issue

I am working on an ESC POS printer. Using the code below I am able to print the image, but the image does not print properly, as you can see in the picture below. Please review my code and let me know where exactly the issue is.
// Convert a bundled image to ESC/POS raster commands and stream it to the
// printer over BLE in 100-byte chunks.
- (void)btnPrintPicture {
    // BUG FIX: the literal was garbled as #"download.png"; Objective-C
    // string literals take the @ prefix.
    UIImage *img = [UIImage imageNamed:@"download.png"];
    // NOTE(review): img.size is in points, not pixels, for @2x/@3x assets —
    // confirm the asset is 1x or scale accordingly. Also, if width is not
    // divisible by 8, eachLinePixToCmd below drops width % 8 pixels per row
    // (see the answer following this question).
    int width = img.size.width;
    int height = img.size.height;
    unsigned char *binaryImageData = malloc(width * height);
    // Per row: 8 command-header bytes + width/8 packed pixel bytes.
    unsigned char *data = malloc(height * (8 + width / 8));
    unsigned char *grayData = [self convertImageToGray:img];
    format_K_threshold(grayData, width, height, binaryImageData);
    eachLinePixToCmd(binaryImageData, width, height, 0, data);
    // Split the command stream into BLE-sized chunks.
    NSMutableArray *dataArray = [NSMutableArray new];
    int splitBytes = 100;
    NSData *comData = [[NSData alloc] initWithBytes:(const void *)data length:(height * (8 + width / 8))];
    for (int i = 0; i < comData.length; i = i + splitBytes) {
        NSData *subData = nil;
        if ((i + splitBytes) > comData.length) {
            subData = [comData subdataWithRange:NSMakeRange(i, (comData.length - i))];
        } else {
            subData = [comData subdataWithRange:NSMakeRange(i, splitBytes)];
        }
        [dataArray addObject:subData];
    }
    [dataArray enumerateObjectsUsingBlock:^(NSData *obj, NSUInteger idx, BOOL * _Nonnull stop) {
        [self.discoveredPeripheral writeValue:obj forCharacteristic:self.discoveredCharacteristic type:CBCharacteristicWriteWithResponse];
    }];
    free(grayData);
    free(binaryImageData);
    free(data);
}
This method is used for converting image to grayscale.
// Convert `i` to an averaged grayscale buffer (one byte per pixel).
// The caller owns the returned malloc'd buffer and must free() it.
-(unsigned char *)convertImageToGray:(UIImage *)i
{
// Channel-selection bitmask: only the channels enabled in `colors`
// contribute to the average.
int kRed = 1;
int kGreen = 2;
int kBlue = 4;
int colors = kGreen | kBlue | kRed;
int m_width = i.size.width;
int m_height = i.size.height;
// First draw the image into a 32-bit-per-pixel RGB buffer.
uint32_t *rgbImage = (uint32_t *) malloc(m_width * m_height * sizeof(uint32_t));
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(rgbImage, m_width, m_height, 8, m_width * 4, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast);
CGContextSetInterpolationQuality(context, kCGInterpolationHigh);
CGContextSetShouldAntialias(context, NO);
CGContextDrawImage(context, CGRectMake(0, 0, m_width, m_height), [i CGImage]);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
// now convert to grayscale
uint8_t *m_imageData = (uint8_t *) malloc(m_width * m_height);
for(int y = 0; y < m_height; y++) {
for(int x = 0; x < m_width; x++) {
uint32_t rgbPixel=rgbImage[y*m_width+x];
uint32_t sum=0,count=0;
// NOTE(review): these shift positions assume a specific channel layout
// within the little-endian 32-bit word — verify against the
// kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast info above.
if (colors & kRed) {sum += (rgbPixel>>24)&255; count++;}
if (colors & kGreen) {sum += (rgbPixel>>16)&255; count++;}
if (colors & kBlue) {sum += (rgbPixel>>8)&255; count++;}
m_imageData[y*m_width+x]=sum/count;
}
}
free(rgbImage);
return m_imageData;}
/* Binarize a grayscale image against its mean intensity.
 *
 * orgpixels: xsize*ysize grayscale bytes, row-major.
 * despixels: output buffer of the same size; each byte becomes
 *            1 (print a dot: pixel at or below the mean) or
 *            0 (blank: pixel above the mean).
 */
void format_K_threshold(unsigned char * orgpixels, int xsize, int ysize, unsigned char * despixels) {
    /* Robustness: an empty image would divide by zero below. */
    if (xsize <= 0 || ysize <= 0) {
        return;
    }
    /* BUG FIX: use a 64-bit accumulator; a plain int overflows once the
       image exceeds ~8.4 million pixels at full intensity. */
    long long graytotal = 0;
    int k = 0;
    int i;
    int j;
    int gray;
    /* First pass: compute the mean gray level. */
    for(i = 0; i < ysize; ++i) {
        for(j = 0; j < xsize; ++j) {
            gray = orgpixels[k] & 255;
            graytotal += gray;
            ++k;
        }
    }
    int grayave = (int)(graytotal / ysize / xsize);
    /* Second pass: threshold every pixel against the mean. */
    k = 0;
    for(i = 0; i < ysize; ++i) {
        for(j = 0; j < xsize; ++j) {
            gray = orgpixels[k] & 255;
            despixels[k] = (gray > grayave) ? 0 : 1;
            ++k;
        }
    }
}
This method is using ESC commands to print the image.
/* Pack each row of the 0/1 pixel buffer `src` into a one-row
 * GS v 0 (0x1D 0x76 0x30) raster command written into `data`.
 * Each row produces 8 header bytes followed by nWidth/8 packed bytes.
 *
 * FIXME(review): nBytesPerLine = nWidth / 8 silently drops nWidth % 8
 * pixels per row when the width is not divisible by 8 (causing the
 * rightward slant described in the answer below). The fix is
 * (nWidth + 7) / 8 — but the caller's `data` allocation must grow to
 * match, so both must change together.
 * FIXME(review): the xL/xH width bytes should be value % 0x100 and
 * value / 0x100 (256); using 0xff (255) is wrong for rows >= 255 bytes.
 */
void eachLinePixToCmd(unsigned char * src, int nWidth, int nHeight, int nMode, unsigned char * data) {
/* Bit weights for the 8 pixels packed MSB-first into one output byte. */
int p0[] = { 0, 0x80 };
int p1[] = { 0, 0x40 };
int p2[] = { 0, 0x20 };
int p3[] = { 0, 0x10 };
int p4[] = { 0, 0x08 };
int p5[] = { 0, 0x04 };
int p6[] = { 0, 0x02 };
int nBytesPerLine = nWidth / 8;
int offset = 0;
int k = 0;
for (int i = 0; i < nHeight; i++) {
offset = i * (8 + nBytesPerLine);
/* GS v 0 header: mode, width in bytes (xL, xH), height = 1 (yL, yH). */
data[offset + 0] = 0x1d;
data[offset + 1] = 0x76;
data[offset + 2] = 0x30;
data[offset + 3] = (unsigned char) (nMode & 0x01);
data[offset + 4] = (unsigned char) (nBytesPerLine % 0xff);
data[offset + 5] = (unsigned char) (nBytesPerLine / 0xff);
data[offset + 6] = 0x01;
data[offset + 7] = 0x00;
/* Pack 8 consecutive 0/1 pixels into one byte. */
for (int j = 0; j < nBytesPerLine; j++) {
data[offset + 8 + j] = (unsigned char) (p0[src[k]] + p1[src[k + 1]] + p2[src[k + 2]] + p3[src[k + 3]] + p4[src[k + 4]] + p5[src[k + 5]] + p6[src[k + 6]] + src[k + 7]);
k = k + 8;
}
}
}
Thanks in advance.
Based on having fixed a very similar bug in a different project with this change, I am guessing that your image width is not divisible by 8.
This line will drop nWidth % 8 pixels on each row, causing a rightward slant if the image width is not divisible by 8.
int nBytesPerLine = nWidth / 8;
Instead, you should be padding with zeroes:
int nBytesPerLine = (nWidth + 7) / 8;
Your data variable needs to grow to match as well, it has the same issue.
Lastly, you are issuing the GS v 0 command for each row, which is not very efficient. You can issue this once for the entire image, and specify the height. From the same project, a C example is here.

Objective C - save UIImage as a BMP file

Similar questions have been asked but they always contain the answer "use UIImagePNGRepresentation or UIImageJPEGRepresentation" which will not work for me. I need to save the file as a bitmap.
From my understanding, the UIImage data is already a bitmap. In which case I shouldn't have to create a bitmap. Or do I? I found someone who posted this question but I'm unsure what exactly the answer is, as it is not clear to me. I've even gone so far as to create a char pointer array but I still don't know how to save it as a bitmap. Is there anybody out there who can help me understand how to save the UIImage as a bitmap? Or tell me the steps and I'll research the steps.
Thank you.
EDIT:
I found this tutorial (with code) for converting the image to a bitmap but the bitmap returned is an unsigned char *. Either I'm on the right path or I'm going down in the wrong direction. So now I'm going to see if I can write out the unsigned char * to a .bmp file.
Below is the category that I have pieced together for UIImage that lets me access the raw BGRA data as well as data ready to be written to disk with an appropriate bitmap header.
// Category exposing a UIImage's raw BGRA pixel data, with or without a
// prepended BMP file header ready for writing to disk.
// BUG FIX: the @interface / @end directives were garbled as #interface /
// #end, which does not compile.
@interface UIImage (BitmapData)
- (NSData *)bitmapData;
- (NSData *)bitmapFileHeaderData;
- (NSData *)bitmapDataWithFileHeader;
@end
// Pack to 1-byte alignment so this struct matches the 54-byte on-disk BMP
// header layout exactly (no compiler padding between fields).
# pragma pack(push, 1)
typedef struct s_bitmap_header
{
// Bitmap file header (BITMAPFILEHEADER, 14 bytes)
UInt16 fileType;
UInt32 fileSize;
UInt16 reserved1;
UInt16 reserved2;
UInt32 bitmapOffset;
// DIB Header (BITMAPINFOHEADER, 40 bytes)
UInt32 headerSize;
UInt32 width;
UInt32 height;
UInt16 colorPlanes;
UInt16 bitsPerPixel;
UInt32 compression;
UInt32 bitmapSize;
UInt32 horizontalResolution;
UInt32 verticalResolution;
UInt32 colorsUsed;
UInt32 colorsImportant;
} t_bitmap_header;
#pragma pack(pop)
// BUG FIX: the @implementation / @end directives were garbled as
// #implementation / #end, which does not compile.
@implementation UIImage (BitmapData)

/// Draw the image into a 32-bit little-endian ARGB (BGRA in memory) bitmap
/// context and return the raw pixel bytes, or nil on failure.
- (NSData *)bitmapData
{
    NSData *bitmapData = nil;
    CGImageRef image = self.CGImage;
    CGContextRef context = NULL;
    CGColorSpaceRef colorSpace;
    UInt8 *rawData;
    size_t bitsPerPixel = 32;
    size_t bitsPerComponent = 8;
    size_t bytesPerPixel = bitsPerPixel / bitsPerComponent;
    size_t width = CGImageGetWidth(image);
    size_t height = CGImageGetHeight(image);
    size_t bytesPerRow = width * bytesPerPixel;
    size_t bufferLength = bytesPerRow * height;
    colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace)
    {
        // Allocate zeroed memory for raw image data
        rawData = (UInt8 *)calloc(bufferLength, sizeof(UInt8));
        if (rawData)
        {
            CGBitmapInfo bitmapInfo = kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst;
            context = CGBitmapContextCreate(rawData,
                                            width,
                                            height,
                                            bitsPerComponent,
                                            bytesPerRow,
                                            colorSpace,
                                            bitmapInfo);
            if (context)
            {
                CGRect rect = CGRectMake(0, 0, width, height);
                // Flip vertically: BMP files with a positive height store
                // their rows bottom-up.
                CGContextTranslateCTM(context, 0, height);
                CGContextScaleCTM(context, 1.0, -1.0);
                CGContextDrawImage(context, rect, image);
                bitmapData = [NSData dataWithBytes:rawData length:bufferLength];
                CGContextRelease(context);
            }
            free(rawData);
        }
        CGColorSpaceRelease(colorSpace);
    }
    return bitmapData;
}

/// Build the 54-byte BMP file + DIB header for this image
/// (32 bpp, uncompressed).
- (NSData *)bitmapFileHeaderData
{
    CGImageRef image = self.CGImage;
    UInt32 width = (UInt32)CGImageGetWidth(image);
    UInt32 height = (UInt32)CGImageGetHeight(image);
    t_bitmap_header header;
    header.fileType = 0x4D42;                    // 'BM' magic
    header.fileSize = (height * width * 4) + 54; // pixel data + 54-byte header
    header.reserved1 = 0x0000;
    header.reserved2 = 0x0000;
    header.bitmapOffset = 0x00000036;            // pixel data starts at byte 54
    header.headerSize = 0x00000028;              // 40-byte BITMAPINFOHEADER
    header.width = width;
    header.height = height;
    header.colorPlanes = 0x0001;
    header.bitsPerPixel = 0x0020;                // 32 bpp
    header.compression = 0x00000000;             // BI_RGB (uncompressed)
    header.bitmapSize = height * width * 4;
    header.horizontalResolution = 0x00000B13;    // 2835 px/m (~72 DPI)
    header.verticalResolution = 0x00000B13;
    header.colorsUsed = 0x00000000;
    header.colorsImportant = 0x00000000;
    return [NSData dataWithBytes:&header length:sizeof(t_bitmap_header)];
}

/// Header + pixel data, ready to be written out as a .bmp file.
- (NSData *)bitmapDataWithFileHeader
{
    NSMutableData *data = [NSMutableData dataWithData:[self bitmapFileHeaderData]];
    [data appendData:[self bitmapData]];
    return [NSData dataWithData:data];
}

@end

MKPolygon area calculation

I'm trying to make an area calculation category for MKPolygon.
I found some JS code https://github.com/mapbox/geojson-area/blob/master/index.js#L1 with a link to the algorithm: http://trs-new.jpl.nasa.gov/dspace/handle/2014/40409.
It says:
Here is my code, which gave a wrong result (thousands of times larger than the actual value):
#define kEarthRadius 6378137

// BUG FIX: the @implementation / @end directives were garbled as
// #implementation / #end, which does not compile.
@implementation MKPolygon (AreaCalculation)

/// Spherical-excess area of the polygon, in square meters.
- (double) area {
    double area = 0;
    NSArray *coords = [self coordinates];
    if (coords.count > 2) {
        CLLocationCoordinate2D p1, p2;
        NSUInteger n = coords.count;
        // BUG FIX: the original loop stopped at the last vertex and never
        // accumulated the closing edge (last -> first), producing wildly
        // wrong results; wrap around with modular indexing. Also use sin()
        // instead of sinf() — sinf truncates the double latitudes to float.
        for (NSUInteger i = 0; i < n; i++) {
            p1 = [coords[i] MKCoordinateValue];
            p2 = [coords[(i + 1) % n] MKCoordinateValue];
            area += degreesToRadians(p2.longitude - p1.longitude) *
                    (2 + sin(degreesToRadians(p1.latitude)) + sin(degreesToRadians(p2.latitude)));
        }
        // Absolute value: the sign depends only on the winding direction.
        area = fabs(area * kEarthRadius * kEarthRadius / 2);
    }
    return area;
}

/// The polygon's vertices as NSValue-wrapped CLLocationCoordinate2D.
- (NSArray *)coordinates {
    NSMutableArray *points = [NSMutableArray arrayWithCapacity:self.pointCount];
    for (int i = 0; i < self.pointCount; i++) {
        MKMapPoint *point = &self.points[i];
        [points addObject:[NSValue valueWithMKCoordinate:MKCoordinateForMapPoint(* point)]];
    }
    return points.copy;
}

/// Convert an angle from degrees to radians.
/// (Parameter renamed from the misleading `radius`.)
double degreesToRadians(double degrees) {
    return degrees * M_PI / 180;
}

@end
What did I miss?
The whole algorithm implemented in Swift 3.0 :
import MapKit
let kEarthRadius = 6378137.0
// CLLocationCoordinate2D uses degrees but we need radians
func radians(degrees: Double) -> Double {
return degrees * M_PI / 180;
}
// Spherical-excess area (m²) of the polygon defined by `locations`.
// Note the wrap-around indexing below: i == 0 pairs with the last vertex,
// supplying the closing edge that the Objective-C loop above misses.
func regionArea(locations: [CLLocationCoordinate2D]) -> Double {
guard locations.count > 2 else { return 0 }
var area = 0.0
for i in 0..<locations.count {
let p1 = locations[i > 0 ? i - 1 : locations.count - 1]
let p2 = locations[i]
area += radians(degrees: p2.longitude - p1.longitude) * (2 + sin(radians(degrees: p1.latitude)) + sin(radians(degrees: p2.latitude)) )
}
area = -(area * kEarthRadius * kEarthRadius / 2);
return max(area, -area) // Absolute value: correct whether the polygon is wound clockwise or counterclockwise.
}
The final step for i = N-1 and i+1 = 0 (wrap around) is missing in your loop.
This may help to someone...
You need to pass the shape edge points into below method and it returns the correct area of a polygon
/* Approximate the area (in square pixels) enclosed by a polygon, by
 * rasterizing it filled into a grayscale bitmap and summing pixel
 * coverage. `shapeEdgePoints` is an NSArray of NSValue-wrapped CGPoints
 * listing the polygon's vertices in order; the path is closed
 * automatically. Antialiased edge pixels contribute fractionally. */
static double areaOfCurveWithPoints(const NSArray *shapeEdgePoints) {
CGPoint initialPoint = [shapeEdgePoints.firstObject CGPointValue];
CGMutablePathRef cgPath = CGPathCreateMutable();
CGPathMoveToPoint(cgPath, &CGAffineTransformIdentity, initialPoint.x, initialPoint.y);
for (int i = 1;i<shapeEdgePoints.count ;i++) {
CGPoint point = [[shapeEdgePoints objectAtIndex:i] CGPointValue];
CGPathAddLineToPoint(cgPath, &CGAffineTransformIdentity, point.x, point.y);
}
CGPathCloseSubpath(cgPath);
// Rasterize the filled path into a grayscale context sized to its bounds.
CGRect frame = integralFrameForPath(cgPath);
size_t bytesPerRow = bytesPerRowForWidth(frame.size.width);
CGContextRef gc = createBitmapContextWithFrame(frame, bytesPerRow);
CGContextSetFillColorWithColor(gc, [UIColor whiteColor].CGColor);
CGContextAddPath(gc, cgPath);
CGContextFillPath(gc);
double area = areaFilledInBitmapContext(gc);
CGPathRelease(cgPath);
CGContextRelease(gc);
return area;
}
/* Snap the path's bounding box outward to integer pixel coordinates. */
static CGRect integralFrameForPath(CGPathRef path) {
    return CGRectIntegral(CGPathGetBoundingBox(path));
}
/* Round the row width up to the next multiple of 64 bytes.
 * 64 must remain a power of two for this rounding to be exact. */
static size_t bytesPerRowForWidth(CGFloat width) {
    static const size_t kAlignment = 64;
    size_t biased = (size_t)width + kAlignment - 1;
    return biased - (biased % kAlignment);
}
/* Create an 8-bit grayscale bitmap context covering `frame`, translated so
 * that drawing in the path's own coordinates lands inside the bitmap.
 * The caller must CGContextRelease the returned context. */
static CGContextRef createBitmapContextWithFrame(CGRect frame, size_t bytesPerRow) {
    CGColorSpaceRef grayscale = CGColorSpaceCreateDeviceGray();
    CGContextRef gc = CGBitmapContextCreate(NULL, frame.size.width, frame.size.height, 8, bytesPerRow, grayscale, kCGImageAlphaNone);
    CGColorSpaceRelease(grayscale);
    // BUG FIX: the original translated by -frame.origin.x on BOTH axes; the
    // vertical translation must use -frame.origin.y, otherwise any path
    // whose bounding box is offset from the origin is rasterized in the
    // wrong place and the computed area is wrong.
    CGContextTranslateCTM(gc, -frame.origin.x, -frame.origin.y);
    return gc;
}
/* Sum the grayscale coverage of every pixel in the context and divide by
 * 255, yielding the filled area in pixels. Antialiased edge pixels
 * contribute fractionally, making the estimate sub-pixel accurate. */
static double areaFilledInBitmapContext(CGContextRef gc) {
size_t width = CGBitmapContextGetWidth(gc);
size_t height = CGBitmapContextGetHeight(gc);
// Rows may be padded: step by the context's bytes-per-row, not by width.
size_t stride = CGBitmapContextGetBytesPerRow(gc);
// Get a pointer to the data
unsigned char *bitmapData = (unsigned char *)CGBitmapContextGetData(gc);
uint64_t coverage = 0;
for (size_t y = 0; y < height; ++y) {
for (size_t x = 0; x < width; ++x) {
coverage += bitmapData[y * stride + x];
}
}
// NSLog(@"coverage =%llu UINT8_MAX =%d",coverage,UINT8_MAX);
return (double)coverage / UINT8_MAX;
}

Is it possible to make a bit map image as array?

I would like to make a bitmap image from the following array.
I think that will need to use quartz...
How can I make a bitmap image?
int bit_map[10][10] = {{0,0,1,0,0,0,0,1,0,0},
{0,1,1,1,1,0,1,1,1,0},
{1,1,1,1,1,1,1,1,1,1},
{1,1,1,1,1,1,1,1,1,1},
{0,1,1,1,1,1,1,1,1,0},
{0,1,1,1,1,1,1,1,1,0},
{0,1,1,1,1,1,1,1,1,0},
{0,0,1,1,1,1,1,1,0,0},
{0,0,0,1,1,1,1,0,0,0},
{0,0,0,0,1,1,0,0,0,0}};
Here is code to create a CGImageRef (cir) from the C array. Note that things can be simpler if the C array is one dimensional with values of 0 and 255.
// Build a 10x10 grayscale CGImage from the 0/1 C array.
size_t width = 10;
size_t height = 10;
int bit_map[10][10] = {
    {0,0,1,0,0,0,0,1,0,0},
    {0,1,1,1,1,0,1,1,1,0},
    {1,1,1,1,1,1,1,1,1,1},
    {1,1,1,1,1,1,1,1,1,1},
    {0,1,1,1,1,1,1,1,1,0},
    {0,1,1,1,1,1,1,1,1,0},
    {0,1,1,1,1,1,1,1,1,0},
    {0,0,1,1,1,1,1,1,0,0},
    {0,0,0,1,1,1,1,0,0,0},
    {0,0,0,0,1,1,0,0,0,0}
};
UIImage *barCodeImage = nil;
size_t bitsPerComponent = 8;
size_t bitsPerPixel = 8;
// One byte per pixel in an 8-bit grayscale bitmap.
size_t bytesPerRow = (width * bitsPerPixel + 7) / 8;
void *imageBytes;
size_t imageBytesSize = height * bytesPerRow;
imageBytes = calloc(1, imageBytesSize);
for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
        // bit_map[j][i]: j is the row, i is the column.
        int pixel = bit_map[j][i];
        if (pixel == 1)
            pixel = 255; // expand the 1-bit value to full white
        // BUG FIX: the original offset ((i+1)*(j+1))-1 collides for many
        // (i, j) pairs (e.g. (1,0) and (0,1) both map to 1); row-major
        // addressing is row * bytesPerRow + column.
        ((unsigned char *)imageBytes)[j * bytesPerRow + i] = (unsigned char)pixel;
    }
}
// NOTE(review): releasePixels must be defined elsewhere as a
// CGDataProviderReleaseDataCallback that frees imageBytes.
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, imageBytes, imageBytesSize, releasePixels);
CGColorSpaceRef colorSpaceGrey = CGColorSpaceCreateDeviceGray();
// BUG FIX: the fifth argument to CGImageCreate is bytesPerRow, not the
// total buffer size; passing imageBytesSize (100) told CG each row was
// 100 bytes wide and produced a garbage image.
CGImageRef cir = CGImageCreate (width,
                                height,
                                bitsPerComponent,
                                bitsPerPixel,
                                bytesPerRow,
                                colorSpaceGrey,
                                kCGImageAlphaNone,
                                provider,
                                NULL,
                                NO,
                                kCGRenderingIntentDefault);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpaceGrey);