I am working with an ESC/POS printer. Using the code below I am able to print an image, but the image does not print properly, as you can see in the image below. Please review my code and let me know where exactly the issue is.
- (void) btnPrintPicture{
UIImage * img = [UIImage imageNamed:@"download.png"];
int width = img.size.width;
int height = img.size.height;
unsigned char * binaryImageData = malloc(width * height);
unsigned char * data = malloc(height * (8 + width / 8));
unsigned char * grayData = [self convertImageToGray:img];
format_K_threshold(grayData, width, height, binaryImageData);
eachLinePixToCmd(binaryImageData, width, height, 0, data);
NSMutableArray *dataArray = [NSMutableArray new];
int splitBytes = 100;
NSData *comData = [[NSData alloc] initWithBytes:(const void *)data length:(height * (8+width/8))];
for(int i = 0; i < comData.length ; i=i+splitBytes){
NSData *subData = nil;
if((i+splitBytes)>comData.length){
subData = [comData subdataWithRange:NSMakeRange(i, (comData.length-i))];
}else{
subData = [comData subdataWithRange:NSMakeRange(i, splitBytes)];
}
[dataArray addObject:subData];
}
[dataArray enumerateObjectsUsingBlock:^(NSData *obj, NSUInteger idx, BOOL * _Nonnull stop) {
[self.discoveredPeripheral writeValue:obj forCharacteristic:self.discoveredCharacteristic type:CBCharacteristicWriteWithResponse];
}];
free(grayData);
free(binaryImageData);
free(data);
}
This method is used to convert the image to grayscale.
-(unsigned char *)convertImageToGray:(UIImage *)i
{
int kRed = 1;
int kGreen = 2;
int kBlue = 4;
int colors = kGreen | kBlue | kRed;
int m_width = i.size.width;
int m_height = i.size.height;
uint32_t *rgbImage = (uint32_t *) malloc(m_width * m_height * sizeof(uint32_t));
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(rgbImage, m_width, m_height, 8, m_width * 4, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast);
CGContextSetInterpolationQuality(context, kCGInterpolationHigh);
CGContextSetShouldAntialias(context, NO);
CGContextDrawImage(context, CGRectMake(0, 0, m_width, m_height), [i CGImage]);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
// now convert to grayscale
uint8_t *m_imageData = (uint8_t *) malloc(m_width * m_height);
for(int y = 0; y < m_height; y++) {
for(int x = 0; x < m_width; x++) {
uint32_t rgbPixel=rgbImage[y*m_width+x];
uint32_t sum=0,count=0;
if (colors & kRed) {sum += (rgbPixel>>24)&255; count++;}
if (colors & kGreen) {sum += (rgbPixel>>16)&255; count++;}
if (colors & kBlue) {sum += (rgbPixel>>8)&255; count++;}
m_imageData[y*m_width+x]=sum/count;
}
}
free(rgbImage);
return m_imageData;
}
void format_K_threshold(unsigned char * orgpixels, int xsize, int ysize, unsigned char * despixels) {
int graytotal = 0;
int k = 0;
int i;
int j;
int gray;
for(i = 0; i < ysize; ++i) {
for(j = 0; j < xsize; ++j) {
gray = orgpixels[k] & 255;
graytotal += gray;
++k;
}
}
int grayave = graytotal / ysize / xsize;
k = 0;
for(i = 0; i < ysize; ++i) {
for(j = 0; j < xsize; ++j) {
gray = orgpixels[k] & 255;
if(gray > grayave) {
despixels[k] = 0;
} else {
despixels[k] = 1;
}
++k;
}
}
}
This method uses ESC/POS commands to print the image.
void eachLinePixToCmd(unsigned char * src, int nWidth, int nHeight, int nMode, unsigned char * data) {
int p0[] = { 0, 0x80 };
int p1[] = { 0, 0x40 };
int p2[] = { 0, 0x20 };
int p3[] = { 0, 0x10 };
int p4[] = { 0, 0x08 };
int p5[] = { 0, 0x04 };
int p6[] = { 0, 0x02 };
int nBytesPerLine = nWidth / 8;
int offset = 0;
int k = 0;
for (int i = 0; i < nHeight; i++) {
offset = i * (8 + nBytesPerLine);
data[offset + 0] = 0x1d;
data[offset + 1] = 0x76;
data[offset + 2] = 0x30;
data[offset + 3] = (unsigned char) (nMode & 0x01);
data[offset + 4] = (unsigned char) (nBytesPerLine % 0x100); // xL: low byte of bytes per line
data[offset + 5] = (unsigned char) (nBytesPerLine / 0x100); // xH: high byte of bytes per line
data[offset + 6] = 0x01;
data[offset + 7] = 0x00;
for (int j = 0; j < nBytesPerLine; j++) {
data[offset + 8 + j] = (unsigned char) (p0[src[k]] + p1[src[k + 1]] + p2[src[k + 2]] + p3[src[k + 3]] + p4[src[k + 4]] + p5[src[k + 5]] + p6[src[k + 6]] + src[k + 7]);
k = k + 8;
}
}
}
Thanks in advance.
Based on having fixed a very similar bug in a different project with this change, I am guessing that your image width is not divisible by 8.
This line will drop nWidth % 8 pixels on each row, causing a rightward slant if the image width is not divisible by 8.
int nBytesPerLine = nWidth / 8;
Instead, you should be padding with zeroes:
int nBytesPerLine = (nWidth + 7) / 8;
Your data buffer needs to grow to match as well; it has the same issue.
Lastly, you are issuing the GS v 0 command for each row, which is not very efficient. You can issue it once for the entire image and specify the full height. From the same project, a C example is here.
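To illustrate, here is a minimal sketch of both fixes combined (my own code, not the linked example; the function name imageToCmd is made up, and the caller must now allocate data with at least 8 + nBytesPerLine * nHeight bytes). It pads each row to a whole number of bytes and issues GS v 0 once for the whole image:
// Sketch: emit a single GS v 0 header for the whole image, padding each row
// with zero bits when nWidth is not divisible by 8.
void imageToCmd(unsigned char * src, int nWidth, int nHeight, int nMode, unsigned char * data) {
    int nBytesPerLine = (nWidth + 7) / 8; // round up instead of dropping pixels
    data[0] = 0x1d; // GS
    data[1] = 0x76; // 'v'
    data[2] = 0x30; // '0'
    data[3] = (unsigned char) (nMode & 0x01);
    data[4] = (unsigned char) (nBytesPerLine % 0x100); // xL: low byte of bytes per line
    data[5] = (unsigned char) (nBytesPerLine / 0x100); // xH: high byte of bytes per line
    data[6] = (unsigned char) (nHeight % 0x100); // yL: low byte of the full image height
    data[7] = (unsigned char) (nHeight / 0x100); // yH: high byte of the full image height
    for (int i = 0; i < nHeight; i++) {
        for (int j = 0; j < nBytesPerLine; j++) {
            unsigned char b = 0;
            for (int bit = 0; bit < 8; bit++) {
                int x = j * 8 + bit;
                if (x < nWidth && src[i * nWidth + x]) // pad with zeroes past the right edge
                    b |= 0x80 >> bit;
            }
            data[8 + i * nBytesPerLine + j] = b;
        }
    }
}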
Related
I'm creating a CGImage using the code below, but the done_callback function is never called. What am I doing wrong? The bitmap is created correctly, but I have no way to free the pixel buffer afterwards. Is it because the CGImage is never released?
void done_callback(void *info, const void *data, size_t size)
{
free((void *)data);
NSLog(@"Done!");
}
static CGImageRef SVG_to_CGImage(char const *svg, int width, int height)
{
using namespace lunasvg;
auto doc = Document::loadFromData(svg);
if(doc.get() == nullptr) {
return nullptr;
}
auto bmp = doc->renderToBitmap(width, height);
if(!bmp.valid()) {
return nullptr;
}
UInt32 *pixels = (UInt32 *)malloc(width * height * 4);
// rearrange the BGRA to RGBA
for(int y=0; y<height; ++y) {
UInt32 *s = (UInt32 *)(bmp.data() + y * width * 4);
UInt32 *d = pixels + y * width;
for(int x=0; x<width; ++x) {
UInt32 argb = *s++;
UInt32 a = argb & 0xff000000;
UInt32 r = (argb & 0xff0000) >> 16;
UInt32 g = argb & 0xff00;
UInt32 b = (argb & 0xff) << 16;
*d++ = a | r | g | b;
}
}
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, pixels, width * height * 4, done_callback);
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGImageAlphaPremultipliedLast;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
return CGImageCreate(width, height, 8, 32, 4 * width, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
}
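As an aside on the callback question: CGImageCreate retains the data provider, and the release callback passed to CGDataProviderCreateWithData runs only when the provider is deallocated. The function above never releases its own reference to provider (nor to colorSpaceRef), so the reference count can never reach zero. A minimal sketch of the tail of the function, assuming the caller eventually calls CGImageRelease on the returned image:
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, pixels, width * height * 4, done_callback);
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGImageRef image = CGImageCreate(width, height, 8, 32, 4 * width, colorSpaceRef,
                                 (CGBitmapInfo)kCGImageAlphaPremultipliedLast, provider, NULL, NO,
                                 kCGRenderingIntentDefault);
CGDataProviderRelease(provider);    // the image holds its own retain on the provider
CGColorSpaceRelease(colorSpaceRef); // balance CGColorSpaceCreateDeviceRGB
return image; // once the caller releases this image, done_callback fires and frees pixels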
I have an existing app, written in Objective-C, that works and is on the iTunes App Store. I am updating it to Swift. I am having trouble with the pixel classification and counting portion of the app. The Objective-C code is below; I would appreciate any suggestions as to how to proceed with the Swift 3 version.
void classifyPixel(unsigned char r, unsigned char g, unsigned char b,
NSUInteger *coverRed, NSUInteger *coverGreen, NSUInteger *coverYellow, NSUInteger *coverBlue, NSUInteger *coverWhite)
{
if ((r == 255) && (g == 0) && (b == 0))
*coverRed = *coverRed + 1;
else if ((r == 0) && (g == 255) && (b == 0))
*coverGreen = *coverGreen + 1;
else if ((r == 255) && (g == 255) && (b == 0))
*coverYellow = *coverYellow +1;
else if ((r == 0) && (g == 0) && (b== 255))
*coverBlue = *coverBlue + 1;
else if ((r == 255) && (g == 255) && (b == 255))
*coverWhite = *coverWhite +1;
}
- (IBAction)computeArea:(id)sender {
// Taken from Hugh's original code. With some slight clean up
UIImage *image = self.imageView.image;
NSUInteger coverRed = 0;
NSUInteger coverGreen = 0;
NSUInteger coverYellow = 0;
NSUInteger coverBlue = 0;
NSUInteger coverWhite = 0;
NSUInteger coverTotal = 0;
size_t image_width = image.size.width;
size_t image_height = image.size.height;
size_t bitsPerComponent = 8;
size_t bytesPerPixel = 4;
size_t bytesPerRow = bytesPerPixel*image_width;
unsigned char* raster_buffer = (unsigned char*) calloc(bitsPerComponent, bytesPerRow *image.size.height);
if(raster_buffer !=NULL)
{
CGContextRef context = CGBitmapContextCreate(
(void*) raster_buffer,
image.size.width,
image.size.height,
bitsPerComponent,
bytesPerRow,
CGImageGetColorSpace(image.CGImage),
(CGBitmapInfo)kCGImageAlphaPremultipliedLast);
if (context != NULL)
{
CGContextDrawImage(context, CGRectMake(0.0f, 0.0f, image.size.width, image.size.height), image.CGImage);
unsigned char* row_start_p = raster_buffer;
for (size_t y=0; y<image_height; y++)
{
unsigned char* nth_pixel_p = row_start_p;
for (size_t x=0; x<image_width; x++)
{
classifyPixel(nth_pixel_p[bytesPerPixel*x],
nth_pixel_p[bytesPerPixel*x+1],
nth_pixel_p[bytesPerPixel*x+2],
&coverRed,&coverGreen,&coverYellow,&coverBlue,&coverWhite);
coverTotal++;
}
row_start_p+=bytesPerRow;
}
}
}
free(raster_buffer);
float uu = coverGreen;
float vv = coverYellow;
float ww = coverRed;
float xx = coverBlue;
float yy = coverWhite;
float zz = uu + vv + ww + xx + yy;
float aa = (uu/zz)*100;
float bb = (vv/zz)*100;
float cc = (ww/zz)*100;
float dd = (xx/zz)*100;
float ee = (yy/zz)*100;
totalcount = [NSNumber numberWithFloat:zz];
greencountresult = [NSString stringWithFormat:@"%.1f%%",aa];
yellowcountresult = [NSString stringWithFormat:@"%.1f%%",bb];
redcountresult = [NSString stringWithFormat:@"%.1f%%",cc];
bluecountresult = [NSString stringWithFormat:@"%.1f%%",dd];
whitecountresult = [NSString stringWithFormat:@"%.1f%%",ee];
NSLog(@"Area compute! Red %lu Green %lu Yellow %lu Blue %lu White %lu\n",(unsigned long)coverRed,(unsigned long)coverGreen,(unsigned long)coverYellow,(unsigned long)coverBlue,(unsigned long)coverWhite);
}
My server devs asked me to send them some data encoded with base64, following these rules:
big-endian byte order
no extra zero bytes
base64 string
for example:
10005000 → «mKol»
1234567890 → «SZYC0g»
I did some spaghetti code, and it works. But maybe somebody has a more elegant solution?
+ (NSString*)encodeBigEndianBase64:(uint32_t)value {
char *bytes = (char*) &value;
int len = sizeof(uint32_t);
char *reverseBytes = malloc(sizeof(char) * len);
unsigned long index = len - 1;
for (int i = 0; i < len; i++)
reverseBytes[index--] = bytes[i];
int offset = 0;
while (reverseBytes[offset] == 0) {
offset++;
}
NSData *resultData;
if (offset > 0) {
int truncatedLen = (len - offset);
char *truncateBytes = malloc(sizeof(char) * truncatedLen);
for (int i = 0; i < truncatedLen ; i++)
truncateBytes[i] = reverseBytes[i + offset];
resultData = [NSData dataWithBytes:truncateBytes length:truncatedLen];
free(truncateBytes);
} else {
resultData = [NSData dataWithBytes:reverseBytes length:len];
}
free(reverseBytes);
return [[resultData base64EncodedStringWithOptions:NSDataBase64Encoding64CharacterLineLength] stringByReplacingOccurrencesOfString:@"=" withString:@""];
}
Little bit improved solution (thanks to zaph):
+ (NSString*)encodeBigEndianBase64:(uint32_t)value {
uint32_t swappedValue = CFSwapInt32HostToBig(value);
char *swappedBytes = (char*) &swappedValue;
int len = sizeof(uint32_t);
int offset = 0;
while (swappedBytes[offset] == 0) {
offset++;
}
NSData *resultData;
if (offset > 0) {
int truncatedLen = (len - offset);
char *truncateBytes = malloc(sizeof(char) * truncatedLen);
for (int i = 0; i < truncatedLen ; i++)
truncateBytes[i] = swappedBytes[i + offset];
resultData = [NSData dataWithBytes:truncateBytes length:truncatedLen];
free(truncateBytes);
} else {
resultData = [NSData dataWithBytes:swappedBytes length:len];
}
return [[resultData base64EncodedStringWithOptions:NSDataBase64Encoding64CharacterLineLength] stringByReplacingOccurrencesOfString:@"=" withString:@""];
}
For endian conversions use htons(), htonl(), ntohs(), ntohl()
network byte order is bigendian
`htons()` // host to network short
`htonl()` // host to network long
`ntohs()` // network to host short
`ntohl()` // network to host long
These are declared in <arpa/inet.h>.
Also see Byte-Order Utilities Reference
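Building on that, a minimal sketch of the encoder using htonl() (keeping the zero-byte trimming and '='-stripping from the answer above; just a suggestion, not the only way):
// Sketch: convert to big-endian with htonl() (requires <arpa/inet.h>), strip
// leading zero bytes (keeping at least one), then Base64-encode and drop the
// '=' padding. e.g. 1234567890 -> @"SZYC0g"
+ (NSString *)encodeBigEndianBase64:(uint32_t)value {
    uint32_t big = htonl(value); // host to network byte order (big-endian)
    const uint8_t *bytes = (const uint8_t *)&big;
    int offset = 0;
    while (offset < 3 && bytes[offset] == 0) // never trim the final byte
        offset++;
    NSData *data = [NSData dataWithBytes:bytes + offset length:4 - offset];
    return [[data base64EncodedStringWithOptions:0]
            stringByReplacingOccurrencesOfString:@"=" withString:@""];
}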
I'm trying to make an area calculation category for MKPolygon.
I found some JS code https://github.com/mapbox/geojson-area/blob/master/index.js#L1 with a link to the algorithm: http://trs-new.jpl.nasa.gov/dspace/handle/2014/40409.
The paper describes the algorithm; here is my code, which gives a wrong result (thousands of times larger than the actual area):
#define kEarthRadius 6378137
@implementation MKPolygon (AreaCalculation)
- (double) area {
double area = 0;
NSArray *coords = [self coordinates];
if (coords.count > 2) {
CLLocationCoordinate2D p1, p2;
for (int i = 0; i < coords.count - 1; i++) {
p1 = [coords[i] MKCoordinateValue];
p2 = [coords[i + 1] MKCoordinateValue];
area += degreesToRadians(p2.longitude - p1.longitude) * (2 + sinf(degreesToRadians(p1.latitude)) + sinf(degreesToRadians(p2.latitude)));
}
area = area * kEarthRadius * kEarthRadius / 2;
}
return area;
}
- (NSArray *)coordinates {
NSMutableArray *points = [NSMutableArray arrayWithCapacity:self.pointCount];
for (int i = 0; i < self.pointCount; i++) {
MKMapPoint *point = &self.points[i];
[points addObject:[NSValue valueWithMKCoordinate:MKCoordinateForMapPoint(* point)]];
}
return points.copy;
}
double degreesToRadians(double degrees) {
return degrees * M_PI / 180;
}
@end
What did I miss?
The whole algorithm, implemented in Swift 3.0:
import MapKit
let kEarthRadius = 6378137.0
// CLLocationCoordinate2D uses degrees but we need radians
func radians(degrees: Double) -> Double {
return degrees * M_PI / 180;
}
func regionArea(locations: [CLLocationCoordinate2D]) -> Double {
guard locations.count > 2 else { return 0 }
var area = 0.0
for i in 0..<locations.count {
let p1 = locations[i > 0 ? i - 1 : locations.count - 1]
let p2 = locations[i]
area += radians(degrees: p2.longitude - p1.longitude) * (2 + sin(radians(degrees: p1.latitude)) + sin(radians(degrees: p2.latitude)) )
}
area = -(area * kEarthRadius * kEarthRadius / 2);
return max(area, -area) // so we don't need to worry about whether the polygon is defined clockwise or counterclockwise
}
The final step, for i = N-1 and i+1 = 0 (the wrap-around), is missing from your loop.
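Concretely, a minimal sketch of the corrected loop for the question's Objective-C version (same coords array and degreesToRadians helper; the fabs mirrors the Swift answer's max(area, -area)):
// Sketch: include the closing segment from the last vertex back to the first
// by wrapping the successor index with % coords.count.
for (NSUInteger i = 0; i < coords.count; i++) {
    CLLocationCoordinate2D p1 = [coords[i] MKCoordinateValue];
    CLLocationCoordinate2D p2 = [coords[(i + 1) % coords.count] MKCoordinateValue];
    area += degreesToRadians(p2.longitude - p1.longitude) * (2 + sin(degreesToRadians(p1.latitude)) + sin(degreesToRadians(p2.latitude)));
}
area = fabs(area * kEarthRadius * kEarthRadius / 2); // sign depends on winding direction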
This may help someone...
You need to pass the shape's edge points into the method below, and it returns the correct area of the polygon.
static double areaOfCurveWithPoints(const NSArray *shapeEdgePoints) {
CGPoint initialPoint = [shapeEdgePoints.firstObject CGPointValue];
CGMutablePathRef cgPath = CGPathCreateMutable();
CGPathMoveToPoint(cgPath, &CGAffineTransformIdentity, initialPoint.x, initialPoint.y);
for (int i = 1;i<shapeEdgePoints.count ;i++) {
CGPoint point = [[shapeEdgePoints objectAtIndex:i] CGPointValue];
CGPathAddLineToPoint(cgPath, &CGAffineTransformIdentity, point.x, point.y);
}
CGPathCloseSubpath(cgPath);
CGRect frame = integralFrameForPath(cgPath);
size_t bytesPerRow = bytesPerRowForWidth(frame.size.width);
CGContextRef gc = createBitmapContextWithFrame(frame, bytesPerRow);
CGContextSetFillColorWithColor(gc, [UIColor whiteColor].CGColor);
CGContextAddPath(gc, cgPath);
CGContextFillPath(gc);
double area = areaFilledInBitmapContext(gc);
CGPathRelease(cgPath);
CGContextRelease(gc);
return area;
}
static CGRect integralFrameForPath(CGPathRef path) {
CGRect frame = CGPathGetBoundingBox(path);
return CGRectIntegral(frame);
}
static size_t bytesPerRowForWidth(CGFloat width) {
static const size_t kFactor = 64;
// Round up to a multiple of kFactor, which must be a power of 2.
return ((size_t)width + (kFactor - 1)) & ~(kFactor - 1);
}
static CGContextRef createBitmapContextWithFrame(CGRect frame, size_t bytesPerRow) {
CGColorSpaceRef grayscale = CGColorSpaceCreateDeviceGray();
CGContextRef gc = CGBitmapContextCreate(NULL, frame.size.width, frame.size.height, 8, bytesPerRow, grayscale, kCGImageAlphaNone);
CGColorSpaceRelease(grayscale);
CGContextTranslateCTM(gc, -frame.origin.x, -frame.origin.y);
return gc;
}
static double areaFilledInBitmapContext(CGContextRef gc) {
size_t width = CGBitmapContextGetWidth(gc);
size_t height = CGBitmapContextGetHeight(gc);
size_t stride = CGBitmapContextGetBytesPerRow(gc);
// Get a pointer to the data
unsigned char *bitmapData = (unsigned char *)CGBitmapContextGetData(gc);
uint64_t coverage = 0;
for (size_t y = 0; y < height; ++y) {
for (size_t x = 0; x < width; ++x) {
coverage += bitmapData[y * stride + x];
}
}
// NSLog(@"coverage =%llu UINT8_MAX =%d",coverage,UINT8_MAX);
return (double)coverage / UINT8_MAX;
}
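A hypothetical usage sketch (the square and its coordinates are my own test data, not part of the answer): a 100x100 axis-aligned square should come back as roughly 10000, with antialiasing along the edges making the result slightly approximate.
// Hypothetical check: a 100x100 square should report an area near 10000.
NSArray *square = @[[NSValue valueWithCGPoint:CGPointMake(10, 10)],
                    [NSValue valueWithCGPoint:CGPointMake(110, 10)],
                    [NSValue valueWithCGPoint:CGPointMake(110, 110)],
                    [NSValue valueWithCGPoint:CGPointMake(10, 110)]];
double area = areaOfCurveWithPoints(square); // expected: ~10000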
Since I can't successfully get anything other than YCbCr 420 from the camera (https://stackoverflow.com/questions/19673770/objective-c-avcapturevideodataoutput-videosettings-how-to-identify-which-pix), my goal is to display this CMSampleBufferRef (YCbCr 420), after processing with OpenCV, as a colored frame (CGImageRef, using the RGB color model) in a CALayer.
In the camera capture file I put this:
#define clamp(a) ((a) > 255 ? 255 : ((a) < 0 ? 0 : (a)))
cv::Mat* YUV2RGB(cv::Mat *src){
cv::Mat *output = new cv::Mat(src->rows, src->cols, CV_8UC4);
for(int i=0;i<output->rows;i++)
for(int j=0;j<output->cols;j++){
// from Wikipedia
int c = src->data[i*src->cols*src->channels() + j*src->channels() + 0] - 16;
int d = src->data[i*src->cols*src->channels() + j*src->channels() + 1] - 128;
int e = src->data[i*src->cols*src->channels() + j*src->channels() + 2] - 128;
output->data[i*src->cols*src->channels() + j*src->channels() + 0] = clamp((298*c+409*e+128)>>8);
output->data[i*src->cols*src->channels() + j*src->channels() + 1] = clamp((298*c-100*d-208*e+128)>>8);
output->data[i*src->cols*src->channels() + j*src->channels() + 2] = clamp((298*c+516*d+128)>>8);
}
return output;
}
-(void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection{
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer, 0);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
uint8_t *baseAddress = (uint8_t*)CVPixelBufferGetBaseAddress(imageBuffer);
CVPlanarPixelBufferInfo_YCbCrBiPlanar *bufferInfo = (CVPlanarPixelBufferInfo_YCbCrBiPlanar *)baseAddress;
NSUInteger yOffset = EndianU32_BtoN(bufferInfo->componentInfoY.offset);
NSUInteger yPitch = EndianU32_BtoN(bufferInfo->componentInfoY.rowBytes);
NSUInteger cbCrOffset = EndianU32_BtoN(bufferInfo->componentInfoCbCr.offset);
NSUInteger cbCrPitch = EndianU32_BtoN(bufferInfo->componentInfoCbCr.rowBytes);
uint8_t *yBuffer = baseAddress + yOffset;
uint8_t *cbCrBuffer = baseAddress + cbCrOffset;
cv::Mat *src = new cv::Mat((int)(height), (int)(width), CV_8UC4);
//YUV -> cv::Mat
for(int i = 0; i< height; i++)
{
uint8_t *yBufferLine = &yBuffer[i * yPitch];
uint8_t *cbCrBufferLine = &cbCrBuffer[(i >> 1) * cbCrPitch];
for(int j = 0; j < width; j++)
{
uint8_t y = yBufferLine[j];
uint8_t cb = cbCrBufferLine[j & ~1];
uint8_t cr = cbCrBufferLine[j | 1];
src->data[i*width*src->channels() + j*src->channels() + 0] = y;
src->data[i*width*src->channels() + j*src->channels() + 1] = cb;
src->data[i*width*src->channels() + j*src->channels() + 2] = cr;
}
}
cv::Mat *output = YUV2RGB(src);
CGColorSpaceRef grayColorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(output->data, output->cols, output->rows, 8, output->step, grayColorSpace, kCGImageAlphaNoneSkipLast);
CGImageRef dstImage = CGBitmapContextCreateImage(context);
dispatch_sync(dispatch_get_main_queue(), ^{
customPreviewLayer.contents = (__bridge id)dstImage;
});
CGImageRelease(dstImage);
CGContextRelease(context);
CGColorSpaceRelease(grayColorSpace);
output->release();
src->release();
}
I ran into some issues (I was really stuck) getting to this point, which I will describe here; they might be someone else's problem as well:
That clamp function is essential to ensure a correct YUV -> RGB conversion.
For some reason I couldn't keep just 3 channels in the image data; it somehow caused a problem when the image was about to be displayed. So I changed kCGImageAlphaNone to kCGImageAlphaNoneSkipLast in the CGBitmapContextCreate call, and used 4 channels in the cv::Mat constructor.
Part of this code was adapted from kCVPixelFormatType_420YpCbCr8BiPlanarFullRange frame to UIImage conversion.