Remove curled corner from qlgenerator thumbnail - objective-c

How do I remove the curled-corner decoration from a thumbnail created in a Quick Look plugin?
Screenshot of current icon:
Screenshot of what I want:
GeneratePreviewForURL.m:
#include <CoreFoundation/CoreFoundation.h>
#include <CoreServices/CoreServices.h>
#include <QuickLook/QuickLook.h>
#import "GenerateIcon.h"
OSStatus GeneratePreviewForURL(void *thisInterface, QLPreviewRequestRef preview, CFURLRef url, CFStringRef contentTypeUTI, CFDictionaryRef options);
void CancelPreviewGeneration(void *thisInterface, QLPreviewRequestRef preview);
/* -----------------------------------------------------------------------------
Generate a preview for file
This function's job is to create preview for designated file
----------------------------------------------------------------------------- */
OSStatus GeneratePreviewForURL(void *thisInterface, QLPreviewRequestRef preview, CFURLRef url, CFStringRef contentTypeUTI, CFDictionaryRef options)
{
// To complete your generator please implement the function GeneratePreviewForURL in GeneratePreviewForURL.c
[GenerateIcon generatePreviewWithRef:preview URL:url];
return noErr;
}
void CancelPreviewGeneration(void *thisInterface, QLPreviewRequestRef preview)
{
// Implement only if supported
}
GenerateIcon.m:
//
// GenerateIcon.m
// Windows Binary Icon
//
// Created by Asger Hautop Drewsen on 2/5/12.
// Copyright (c) 2012 Asger Drewsen. All rights reserved.
//
#import "GenerateIcon.h"
@implementation GenerateIcon
+(void) generateThumbnailWithRef:(QLThumbnailRequestRef)requestRef URL:(CFURLRef)url
{
[GenerateIcon generateMultiWithThumbnailRef:requestRef PreviewRef:nil URL:url];
}
+(void) generatePreviewWithRef:(QLPreviewRequestRef)requestRef URL:(CFURLRef)url
{
[GenerateIcon generateMultiWithThumbnailRef:nil PreviewRef:requestRef URL:url];
}
+(void) generateMultiWithThumbnailRef:(QLThumbnailRequestRef)thumbnail PreviewRef:(QLPreviewRequestRef)preview URL:(CFURLRef)url
{
@autoreleasepool {
NSString * tempDir = NSTemporaryDirectory();
if (tempDir == nil)
tempDir = @"/tmp";
NSFileManager *fileManager = [[NSFileManager alloc] init];
NSString *directory = [tempDir stringByAppendingFormat: [NSString stringWithFormat:@"%@-%.0f", @"exe-icons", [NSDate timeIntervalSinceReferenceDate] * 1000.0]];
//NSString *directory = [tempDir stringByAppendingPathComponent:@"com.tyilo.exe-icons"];
/*for (NSString *file in [fileManager contentsOfDirectoryAtPath:directory error:nil])
{
[fileManager removeItemAtPath:file error:nil];
}*/
[fileManager createDirectoryAtPath:directory withIntermediateDirectories:YES attributes:nil error:nil];
[[NSTask launchedTaskWithLaunchPath:@"/usr/local/bin/wrestool" arguments:[NSArray arrayWithObjects:
@"-t",
@"group_icon",
@"-o",
directory,
@"-x",
[(__bridge NSURL *)url path],
nil]] waitUntilExit];
NSArray *icons = [fileManager contentsOfDirectoryAtPath:directory error:nil];
if (icons.count > 0)
{
NSImage *image = [[NSImage alloc] initWithContentsOfFile:[directory stringByAppendingPathComponent: [icons objectAtIndex:0]]];
NSData *thumbnailData = [image TIFFRepresentation];
CGSize size = image.size;
NSDictionary *properties = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt:size.width],kQLPreviewPropertyWidthKey,
[NSNumber numberWithInt:size.height],kQLPreviewPropertyHeightKey,
nil];
CGContextRef CGContext;
if (thumbnail)
{
CGContext = QLThumbnailRequestCreateContext(thumbnail, size, TRUE, (__bridge CFDictionaryRef)properties);
}
else
{
CGContext = QLPreviewRequestCreateContext(preview, size, TRUE, (__bridge CFDictionaryRef)properties);
}
if(CGContext) {
NSGraphicsContext* context = [NSGraphicsContext graphicsContextWithGraphicsPort:(void *)CGContext flipped:size.width > size.height];
if(context) {
//These two lines of code are just good safe programming…
[NSGraphicsContext saveGraphicsState];
[NSGraphicsContext setCurrentContext:context];
NSBitmapImageRep *thumbnailBitmap = [NSBitmapImageRep imageRepWithData:thumbnailData];
[thumbnailBitmap draw];
//This line sets the context back to what it was when we're done
[NSGraphicsContext restoreGraphicsState];
}
// When we are done with our drawing code QLThumbnailRequestFlushContext() is called to flush the context
if (thumbnail)
{
QLThumbnailRequestFlushContext(thumbnail, CGContext);
}
else
{
QLPreviewRequestFlushContext(preview, CGContext);
}
// Release the CGContext
CFRelease(CGContext);
}
/*NSLog(@"%@", [directory stringByAppendingPathComponent: [icons objectAtIndex:0]]);
CGImageRef image = (__bridge CGImageRef) [[NSImage alloc] initByReferencingFile:[directory stringByAppendingPathComponent: [icons objectAtIndex:0]]];
QLThumbnailRequestSetImage(thumbnail, image, properties);*/
}
else
{
NSLog(#"Failed to generate thumbnail!");
}
}
}
@end
Edit: Added screenshots.

You need to add the undocumented "IconFlavor" key to the properties dictionary that you supply to QLThumbnailRequestCreateContext() or QLThumbnailRequestSetXXX(), and give it the value 1 for minimal decoration.
See here for an example. At the top of that file are some other values I've discovered for "IconFlavor".
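Applied to the code in the question, that would look roughly like this (a sketch, not tested; the value 1 is the plain, undecorated flavor mentioned above, and since the key is undocumented it may change in a future OS release):
NSDictionary *properties = [NSDictionary dictionaryWithObjectsAndKeys:
    [NSNumber numberWithInt:size.width],  kQLPreviewPropertyWidthKey,
    [NSNumber numberWithInt:size.height], kQLPreviewPropertyHeightKey,
    [NSNumber numberWithInt:1],           @"IconFlavor", // undocumented: 1 = plain icon, no curled corner
    nil];
CGContext = QLThumbnailRequestCreateContext(thumbnail, size, TRUE, (__bridge CFDictionaryRef)properties);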

The appearance of your icons (curled corner or not) is chosen automatically by Quick Look, and there is no public way to customize it. What does your type conformance tree look like?
For more information on UTIs see Uniform Type Identifiers Overview. Note that your type conformance tree won't necessarily translate to what you want from Quick Look but at least you will have a sane starting point.
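If you want to check what the system actually thinks your type conforms to, you can log the UTI declaration from inside the generator; a small sketch using the public Launch Services calls already available through the CoreServices header the file includes (contentTypeUTI is the parameter your GeneratePreviewForURL receives):
// Dump the UTI and its declaration, including its "UTTypeConformsTo" entries
CFDictionaryRef declaration = UTTypeCopyDeclaration(contentTypeUTI);
NSLog(@"UTI: %@ declaration: %@", (__bridge NSString *)contentTypeUTI, (__bridge NSDictionary *)declaration);
if (declaration) CFRelease(declaration);
// Or test a specific conformance:
Boolean conformsToData = UTTypeConformsTo(contentTypeUTI, kUTTypeData);
NSLog(@"Conforms to public.data: %d", conformsToData);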

Related

Xcode Objective-C: Download a custom font and display it in a UILabel

First of all: Is it possible to download a custom font and display it in a UILabel?
I'm using this code to download a custom font from the internet and display it in a UILabel, but it does not work.
#import "ViewController.h"
@interface ViewController ()
@end
@implementation ViewController
@synthesize txt_UserName;
- (void)viewDidLoad {
[super viewDidLoad];
// 1
NSString *dataUrl = @"https://dl.dafont.com/dl/?f=stars_fighters";
NSURL *url = [NSURL URLWithString:dataUrl];
// 2
NSURLSessionDataTask *downloadTask = [[NSURLSession sharedSession] dataTaskWithURL:url completionHandler:^(NSData *data, NSURLResponse *response, NSError *error) {
// 4: Handle response here
if (error == nil) {
NSLog(#"no error!");
if (data != nil) {
NSLog(#"There is data!");
[self loadFont:data];
}
} else {
NSLog(#"%#", error.localizedDescription);
}
}];
// 3
[downloadTask resume];
}
- (void)loadFont:(NSData *)data
{
NSData *inData = data;
CFErrorRef error;
CGDataProviderRef provider = CGDataProviderCreateWithCFData((CFDataRef)data);
CGFontRef font = CGFontCreateWithDataProvider(provider);
if(!CTFontManagerRegisterGraphicsFont(font, &error)){
CFStringRef errorDescription = CFErrorCopyDescription(error);
NSLog(#"Failed to load font: %#", errorDescription);
// CFRelease(errorDescription);
}
// CFRelease(font);
// CFRelease(provider);
CTFontRef ctFont = CTFontCreateWithGraphicsFont(font, 30, NULL, NULL);
UIFont *uiFont = CFBridgingRelease(ctFont);
dispatch_async(dispatch_get_main_queue(), ^{
[self.txt_UserName setFont:uiFont];
});
[self fontTest];
}
- (void)fontTest
{
NSArray *fontFamilies = [UIFont familyNames];
for (int i = 0; i < [fontFamilies count]; i++) {
NSString *fontFamily = [fontFamilies objectAtIndex:i];
NSArray *fontNames = [UIFont fontNamesForFamilyName:[fontFamilies objectAtIndex:i]];
NSLog(@"%@: %@", fontFamily, fontNames);
}
}
@end
Anyway, I'm getting this error: Failed to load font: The operation couldn’t be completed. Invalid argument
Instead of the custom font, the label shows a default iOS font.
Of course it's possible to download a custom font and display it in a UILabel. Here is my code, which works well:
Step 1: assuming the font file has been saved in the sandbox at Documents/StarsFighters.ttf, load the font and get the font name (in YOUR_CLASS's .m file):
+ (NSString *)loadFontAndReturnFontName {
NSString *imgFilePath = [NSString stringWithFormat:@"%@/Documents/StarsFighters.ttf", NSHomeDirectory()];
NSURL *fontUrl = [NSURL fileURLWithPath:imgFilePath];
CGDataProviderRef fontDataProvider = CGDataProviderCreateWithURL((__bridge CFURLRef)fontUrl);
CGFontRef fontRef = CGFontCreateWithDataProvider(fontDataProvider);
CGDataProviderRelease(fontDataProvider);
CTFontManagerRegisterGraphicsFont(fontRef, NULL);
NSString *fontName = CFBridgingRelease(CGFontCopyPostScriptName(fontRef));
return fontName;
}
Step 2: use the custom font:
NSString *fontname = [YOUR_CLASS loadFontAndReturnFontName];
//fontname is @"StarsFighters"; best to store it in a global variable or save it
YOUR_LABEL.font = [UIFont fontWithName:fontname size:18.0];
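If you would rather stay with the NSData you already downloaded (closer to the original question), the same registration approach works on a data provider built directly from that data. A rough sketch with minimal error handling; the method name is just an example, and note that if the server returns something that is not a raw .ttf/.otf, CGFontCreateWithDataProvider returns NULL:
- (NSString *)registerFontFromData:(NSData *)data {
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    CGFontRef fontRef = CGFontCreateWithDataProvider(provider);
    CGDataProviderRelease(provider);
    if (fontRef == NULL) return nil; // the data was not a usable font file
    CFErrorRef error = NULL;
    if (!CTFontManagerRegisterGraphicsFont(fontRef, &error)) {
        CFStringRef description = CFErrorCopyDescription(error);
        NSLog(@"Failed to register font: %@", description); // "already registered" also ends up here
        CFRelease(description);
        CFRelease(error);
    }
    NSString *fontName = CFBridgingRelease(CGFontCopyPostScriptName(fontRef));
    CGFontRelease(fontRef);
    return fontName;
}
// Usage, back on the main thread:
// self.txt_UserName.font = [UIFont fontWithName:[self registerFontFromData:data] size:30.0];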

Access property inside a block objective c

- (IBAction)saveBtn:(id)sender {
UIImage* imageToSave = [self imageByCombiningImage:self.backgroundImage.image withImage:self.tempImage.image];
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
// Request to save the image to camera roll
[library writeImageToSavedPhotosAlbum:[imageToSave CGImage] orientation:(ALAssetOrientation)[imageToSave imageOrientation] completionBlock:^(NSURL *assetURL, NSError *error){
if (error) {
NSLog(#"error");
} else {
CGFloat compression = 0.0;
NSData *imageData = UIImageJPEGRepresentation(imageToSave, compression);
UIImage *compressedImage = [[UIImage alloc]initWithData:imageData];
NSMutableString *imageName = [[NSMutableString alloc] initWithCapacity:0];
CFUUIDRef theUUID = CFUUIDCreate(kCFAllocatorDefault);
if (theUUID) {
[imageName appendString:CFBridgingRelease(CFUUIDCreateString(kCFAllocatorDefault, theUUID))];
CFRelease(theUUID);
}
[imageName appendString:@".png"];
NSLog(@"Image name: %@", imageName);
//Image Data to web service
[self uploadImage:UIImageJPEGRepresentation(compressedImage, 1.0) filename:imageName];
_savedImageURL = assetURL;
[library assetForURL:_savedImageURL
resultBlock:resultblock
failureBlock:failureblock];
}];
}
ALAssetsLibraryAssetForURLResultBlock resultblock = ^(ALAsset *myasset)
{
ALAssetRepresentation *rep = [myasset defaultRepresentation];
CGImageRef iref = [rep fullResolutionImage];
if (iref) {
UIImage *largeimage = [UIImage imageWithCGImage:iref];
//image Property needs to be access here
}
};
I cannot access any property inside this block. For example, I have declared a UIImage property in the .h file, but I cannot access it inside the result block.
If the code you've posted is really the one you're using, then it's defining a variable which references a block. But that variable is not part of your class and you thus cannot reference any instance variables.
Instead, the variable is on the "C top-level", outside any class. You might want to turn it into an instance variable, for example like this:
@interface MyClass : SomeSuperClass
{
// Define an instance variable.
ALAssetsLibraryAssetForURLResultBlock resultblock;
}
@end
@implementation MyClass
- (void)someMethod
{
// Initialize the variable in `init`, `viewDidLoad` or whichever
// method suits you.
resultblock = ^(ALAsset *myasset)
{
ALAssetRepresentation *rep = [myasset defaultRepresentation];
CGImageRef iref = [rep fullResolutionImage];
if (iref) {
UIImage *largeimage = [UIImage imageWithCGImage:iref];
//image Property needs to be access here
}
};
}
@end
You could also use a property instead.
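A sketch of that property variant, for completeness (block properties should use copy semantics):
@interface MyClass : SomeSuperClass
@property (nonatomic, copy) ALAssetsLibraryAssetForURLResultBlock resultblock;
@end

@implementation MyClass
- (void)someMethod
{
    self.resultblock = ^(ALAsset *myasset)
    {
        ALAssetRepresentation *rep = [myasset defaultRepresentation];
        CGImageRef iref = [rep fullResolutionImage];
        if (iref) {
            UIImage *largeimage = [UIImage imageWithCGImage:iref];
            // instance properties are reachable here, e.g. self.yourImageProperty = largeimage;
            // (but see the note about capturing self further down)
        }
    };
}
@end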
Another option is to create a block-safe reference to the controller itself, like this:
__block MyViewController *blocksafeSelf = self;
and inside the block use that reference to access a property:
blocksafeSelf.myProperty;
or to call a method:
dispatch_async(dispatch_get_main_queue(), ^{
[blocksafeSelf processNewMessage:text];
});
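One caveat worth adding: under ARC a __block variable still holds a strong reference, so if the controller itself keeps the block around this can create a retain cycle. The weak/strong capture pattern is the safer choice; a sketch (yourImageProperty is a hypothetical property name):
__weak MyViewController *weakSelf = self;
resultblock = ^(ALAsset *myasset)
{
    MyViewController *strongSelf = weakSelf; // promote to strong for the duration of the block
    if (!strongSelf) return;                 // the controller has already been deallocated
    ALAssetRepresentation *rep = [myasset defaultRepresentation];
    CGImageRef iref = [rep fullResolutionImage];
    if (iref) {
        strongSelf.yourImageProperty = [UIImage imageWithCGImage:iref];
    }
};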

Getting frames though a stream and display on screen

I have a requirement to stream from a server and display the streamed content on the screen. Streaming is working fine using NSStream with NSInputStream and NSOutputStream. How can I display it on the screen?
The stream URL looks like @"http://191.168.143.41:1212/".
if(stream == inputStream) {
uint8_t buf[1024];
unsigned int len = 0;
len = [inputStream read:buf maxLength:1024];
if(len > 0) {
NSMutableData* datas=[[NSMutableData alloc] initWithLength:0];
[datas appendBytes: (const void *)buf length:len];
NSString *s = [[NSString alloc] initWithData:datas encoding:NSASCIIStringEncoding];
[self readIn:s];
NSLog(#"ss%#",s);
[self loadMovie:s]; //method for movie player
}
I tried to display this is in a movieplayer as below..
-(void)loadMovie:(NSString*)moviePrefix
{
NSString *path = [NSString stringWithFormat:@"%@.mjpg", moviePrefix];
NSURL *url = [NSURL fileURLWithPath:path];
if (url) {
_moviePlayer = [[MPMoviePlayerController alloc] initWithContentURL:url];
_moviePlayer.view.frame = CGRectMake(0, 70, 600, 450);
_moviePlayer.controlStyle = MPMovieControlStyleNone;
_moviePlayer.scalingMode = MPMovieScalingModeNone;
[dic setObject:_moviePlayer forKey:path];
}
[_moviePlayer prepareToPlay];
[self.view addSubview: _moviePlayer.view];
[self.view bringSubviewToFront:_moviePlayer.view];
[self.view addSubview: _moviePlayer.view];
[_moviePlayer play];
}
Is NSString *path = [NSString stringWithFormat:@"%@.mjpg", moviePrefix]; the correct way?
This displays a black screen. What is wrong?
If this way is not correct, is there any other way I can display those frames?
Can anyone help me to solve this...
MJPEG is just a series of JPEGs sent one after the other.
I worked on this a few years ago.
On one version of iOS (iOS 5?) it could easily be read with a UIWebView, but a later iOS update broke all of that (and with it all of my work at the time).
Maybe a UIWebView could do the trick again today.
Anyway, since it's just a bunch of JPEGs, you can detect the start/end of each JPEG in the byte stream, build the JPEG image from those bytes and show it in a UIImageView.
A workaround (not tested), but you should get the general idea:
//Properties
@property (nonatomic, strong) NSMutableData *data;
@property (nonatomic, weak) IBOutlet UIImageView *streamImageView;
//Initialize somewhere
_data = [[NSMutableData alloc] init];
//In the stream delegate method:
//Start JPG: FFD8 — End JPG: FFD9
UInt8 startJPEGBytes[2];
startJPEGBytes[0] = 0xFF;
startJPEGBytes[1] = 0xD8;
NSData *startData = [NSData dataWithBytes:&startJPEGBytes length:2];
UInt8 endJPEGBytes[2];
endJPEGBytes[0] = 0xFF;
endJPEGBytes[1] = 0xD9;
NSData *endData = [NSData dataWithBytes:&endJPEGBytes length:2];
[_data appendBytes: (const void *)buf length:len];
NSRange startRange = [_data rangeOfData:startData options:0 range:NSMakeRange(0, [_data length])];
if (startRange.location != NSNotFound) //We found the start of a JPEG
{
NSRange endRange = [_data rangeOfData:endData options:0 range:NSMakeRange(startRange.location, [_data length]-startRange.location)];
if (endRange.location != NSNotFound) //We found the end of a JPEG
{
NSRange imageRange = NSMakeRange(startRange.location, endRange.location+endRange.length-startRange.location);
NSData *imageData = [_data subdataWithRange:imageRange];
UIImage *streamImage = [UIImage imageWithData:imageData];
[_streamImageView setImage:streamImage];
[_data replaceBytesInRange:NSMakeRange(0, imageRange.location + imageRange.length) withBytes:NULL length:0]; //We remove everything up to the end of the JPEG frame. Start at 0, since there could be garbage at the start.
}
}
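For completeness, the snippet above is meant to run from the same NSStreamDelegate callback the question already uses, roughly like this (extractAndDisplayJPEGFrames is a hypothetical helper wrapping the start/end-marker search shown above):
- (void)stream:(NSStream *)stream handleEvent:(NSStreamEvent)eventCode
{
    if (eventCode == NSStreamEventHasBytesAvailable && stream == inputStream) {
        uint8_t buf[1024];
        NSInteger len = [inputStream read:buf maxLength:sizeof(buf)];
        if (len > 0) {
            [_data appendBytes:(const void *)buf length:(NSUInteger)len];
            [self extractAndDisplayJPEGFrames]; // runs the FFD8/FFD9 search shown above
        }
    }
}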
You are not adding moviePrefix to the string
NSString *path = [NSString stringWithFormat:#".mjpg", moviePrefix, #"movie"];
Change it to
NSString *path = [NSString stringWithFormat:#"%#.mjpg", moviePrefix, #"movie"];
https://github.com/horsson/mjpeg-iphone/tree/55251a85e2c2489014036ddf5a491783f9b1962d
Used this to get the stream and display it. It works.

How Can I Save This Array of Images?

I'm very new to programming, and I jumped right into a project (I know that's not the smartest thing to do, but I'm learning as I go). The app I'm writing has 10 UIImageViews that display a picture from the user's camera roll. The code I'm using needs each of the UIImageViews to have tags. I'm currently using NSData to save the array of images, and it works great, but I can't use this method anymore because NSData doesn't preserve the tags. I also can't use NSUserDefaults, because I can't save images to a plist. Here is how I'm attempting to do this (using the NSData method, which works, but I have to edit it so that my tags work).
This is my current code:
- (void)imagePickerController:(UIImagePickerController *)picker didFinishPickingImage:(UIImage *)img editingInfo:(NSDictionary *)editInfo {
if (imageView.image == nil) {
imageView.image = img;
[self.array addObject:imageView.image];
[picker dismissModalViewControllerAnimated:YES];
[self.popover dismissPopoverAnimated:YES];
return;
}
if (imageView2.image == nil) {
imageView2.image = img;
NSLog(#"The image is a %#", imageView);
[self.array addObject:imageView2.image];
[picker dismissModalViewControllerAnimated:YES];
[self.popover dismissPopoverAnimated:YES];
return;
}
...
- (void)applicationDidEnterBackground:(UIApplication*)application {
NSLog(#"Image on didenterbackground: %#", imageView);
[self.array addObject:imageView.image];
[self.array addObject:imageView2.image];
[self.user setObject:self.array forKey:#"images"];
[user synchronize];
}
- (void)viewDidLoad
{
self.user = [NSUserDefaults standardUserDefaults];
NSLog(#"It is %#", self.user);
self.array = [[self.user objectForKey:#"images"]mutableCopy];
imageView.image = [[self.array objectAtIndex:0] copy];
imageView2.image = [[self.array objectAtIndex:1] copy];
UIApplication *app = [UIApplication sharedApplication];
[[NSNotificationCenter defaultCenter] addObserver:self
selector:@selector(applicationDidEnterBackground:)
name:UIApplicationDidEnterBackgroundNotification
object:app];
[super viewDidLoad];
}
Any help or suggestions on how to edit this code so that I can save the images, while using tags is much appreciated, thanks!
EDIT: Here is my updated code:
-(IBAction)saveButtonPressed:(id)sender {
NSString *docsDir = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask,YES) objectAtIndex:0];
for (UIImageView *imageView in self.array) {
NSInteger tag = self.imageView.tag;
UIImage *image = self.imageView.image;
NSString *imageName = [NSString stringWithFormat:@"Image%i.png",tag];
NSString *imagePath = [docsDir stringByAppendingPathComponent:imageName];
[UIImagePNGRepresentation(image) writeToFile:imagePath atomically:YES];
}
NSLog(#"Saved Button Pressed");
}
- (void)applicationDidEnterBackground:(UIApplication*)application {
}
-(void)viewDidLoad {
NSString *docsDir = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,NSUserDomainMask,YES) objectAtIndex:0];
NSArray *docFiles = [[NSFileManager defaultManager]contentsOfDirectoryAtPath:docsDir error:NULL];
for (NSString *fileName in docFiles) {
if ([fileName hasSuffix:@".png"]) {
NSString *fullPath = [docsDir stringByAppendingPathComponent:fileName];
UIImage *loadedImage = [UIImage imageWithContentsOfFile:fullPath];
if (!imageView.image) {
imageView.image = loadedImage;
} else {
imageView2.image = loadedImage;
}
}
}
}
You need to use "Fast Enumeration" to iterate over the array's objects and write each one to disk sequentially. First, you're going to need to add the UIImageView objects to the array instead of the UIImage property of the UIImageView, so you can recover the tag. So instead of writing
[self.array addObject:imageView.image];
It will be
[self.array addObject:imageView];
Try to follow along with my code. I inserted comments on each line to help.
-(void)applicationDidEnterBackground:(UIApplication *)application {
//Obtain the documents directory
NSString *docsDir = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,NSUserDomainMask,YES) objectAtIndex:0];
//begin fast enumeration
//this is special to ObjC: it will iterate over any array one object at a time
//it's easier than using for (i=0;i<array.count;i++)
for (UIImageView *imageView in self.array) {
//get the imageView's tag to append to the filename
NSInteger tag = imageView.tag;
//get the image from the imageView;
UIImage *image = imageView.image;
//create a filename, in this case "ImageTAGNUM.png"
NSString *imageName = [NSString stringWithFormat:@"Image%i.png",tag];
//concatenate the docsDirectory and the filename
NSString *imagePath = [docsDir stringByAppendingPathComponent:imageName];
[UIImagePNGRepresentation(image) writeToFile:imagePath atomically:YES];
}
}
To load the images from disk, you'll have to look at your viewDidLoad method
-(void)viewDidLoad {
//get the contents of the docs directory
NSString *docsDir = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,NSUserDomainMask,YES) objectAtIndex:0];
//Get the list of files from the file manager
NSArray *docFiles = [[NSFileManager defaultManager]contentsOfDirectoryAtPath:docsDir error:NULL];
//use fast enumeration to iterate the list of files searching for .png extensions and load those
for (NSString *fileName in docFiles) {
//check to see if the file is a .png file
if ([fileName hasSuffix:@".png"]) {
NSString *fullPath = [docsDir stringByAppendingPathComponent:fileName];
UIImage *loadedImage = [UIImage imageWithContentsOfFile:fullPath];
//you'll have to sort out how to put these images in their proper place
if (!imageView1.image) {
imageView1.image = loadedImage;
} else {
imageView2.image = loadedImage;
}
}
}
}
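To actually put each image back in its proper place, one option (a sketch, assuming the image views keep the same tags between launches and the Image<tag>.png naming from above) is to parse the tag back out of the filename and look the view up with viewWithTag::
for (NSString *fileName in docFiles) {
    if (![fileName hasSuffix:@".png"]) continue;
    // "Image7.png" -> 7
    NSString *tagString = [[fileName stringByDeletingPathExtension]
                           stringByReplacingOccurrencesOfString:@"Image" withString:@""];
    NSInteger tag = [tagString integerValue];
    NSString *fullPath = [docsDir stringByAppendingPathComponent:fileName];
    UIImageView *target = (UIImageView *)[self.view viewWithTag:tag];
    if ([target isKindOfClass:[UIImageView class]]) {
        target.image = [UIImage imageWithContentsOfFile:fullPath];
    }
}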
Hope this helps
One thing you need to be aware of is that when an app enters the background it has about 5 seconds to clean up before it's suspended. The UIImagePNGRepresentation() function takes a significant amount of time and is not instantaneous. It would probably be better to do this work somewhere else, earlier than at app backgrounding. FWIW.
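If you do have to save in applicationDidEnterBackground:, you can at least ask the system for extra time with a background task so the PNG encoding is not cut off mid-write; a sketch (untested; saveAllImages is a hypothetical method containing the UIImagePNGRepresentation loop):
- (void)applicationDidEnterBackground:(UIApplication *)application {
    UIBackgroundTaskIdentifier taskId = [application beginBackgroundTaskWithExpirationHandler:^{
        // Time ran out anyway; nothing more we can do here.
    }];
    dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
        [self saveAllImages];
        [application endBackgroundTask:taskId];
    });
}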
You can store those images in the app's Documents directory (the main bundle is read-only).
To get the path:
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentsDirectory = [paths objectAtIndex:0];
First, there's still a problem in your for loop.
for (UIImageView *imageView in self.array) {
NSInteger tag = self.imageView.tag;
UIImage *image = self.imageView.image;
// ...
}
Before you make any other changes, you must understand why. imageView is your for loop control variable, which changes on each iteration through the loop. self.imageView is a different thing. It is the first of the 10 imageViews attached to your viewController. Every time this loop cycles, it looks at the first imageView, and only the first.
As for why saving doesn't work, it's probably because the arrays elsewhere aren't working. Add some logging to make sure there's something in the array, and that it has as many elements as you expect.
-(IBAction)saveButtonPressed:(id)sender {
NSString *docsDir = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask,YES) objectAtIndex:0];
// Log to make sure the views expected have previously been stored.
// If the array is empty, or shorter than expected, the problem is elsewhere.
NSLog(#"Image view array before saving = %#", self.array);
for (UIImageView *imageViewToSave in self.array) {
NSInteger tag = imageViewToSave.tag;
UIImage *image = imageViewToSave.image;
NSString *imageName = [NSString stringWithFormat:#"Image%i.png",tag];
NSString *imagePath = [docsDir stringByAppendingPathComponent:imageName];
// Log the image and path being saved. If either of these are nil, nothing will be written.
NSLog(#"Saving %# to %#", image, imagePath);
[UIImagePNGRepresentation(image) writeToFile:imagePath atomically:NO];
}
NSLog(#"Save Button Pressed");
}

Processing images using Leptonica in an Xcode project

In Xcode, I am trying to preprocess an image before sending it for OCR. The OCR engine, Tesseract, handles images via the Leptonica library.
As an example:
Take the Leptonica function pixConvertTo8(): is there a way to "transfer" the raw image data from UIImage -> PIX (see pix.h in the Leptonica library), perform pixConvertTo8(), and go back from PIX -> UIImage, preferably without saving to a file for the transition, all in memory?
- (void) processImage:(UIImage *) uiImage
{
NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
// preprocess the UIImage here, e.g. with pixConvertTo8()
CGSize imageSize = [uiImage size];
int bytes_per_line = (int)CGImageGetBytesPerRow([uiImage CGImage]);
int bytes_per_pixel = (int)CGImageGetBitsPerPixel([uiImage CGImage]) / 8.0;
CFDataRef data = CGDataProviderCopyData(CGImageGetDataProvider([uiImage CGImage]));
const UInt8 *imageData = CFDataGetBytePtr(data);
// this could take a while.
char* text = tess->TesseractRect(imageData,
bytes_per_pixel,
bytes_per_line,
0, 0,
imageSize.width, imageSize.height);
These two functions will do the trick:
- (void) startTesseract
{
//code from http://robertcarlsen.net/2009/12/06/ocr-on-iphone-demo-1043
NSString *dataPath =
[[self applicationDocumentsDirectory]stringByAppendingPathComponent:@"tessdata"];
/*
Set up the data in the docs dir
want to copy the data to the documents folder if it doesn't already exist
*/
NSFileManager *fileManager = [NSFileManager defaultManager];
// If the expected store doesn't exist, copy the default store.
if (![fileManager fileExistsAtPath:dataPath]) {
// get the path to the app bundle (with the tessdata dir)
NSString *bundlePath = [[NSBundle mainBundle] bundlePath];
NSString *tessdataPath = [bundlePath stringByAppendingPathComponent:@"tessdata"];
if (tessdataPath) {
[fileManager copyItemAtPath:tessdataPath toPath:dataPath error:NULL];
}
}
NSString *dataPathWithSlash = [[self applicationDocumentsDirectory] stringByAppendingString:@"/"];
setenv("TESSDATA_PREFIX", [dataPathWithSlash UTF8String], 1);
// init the tesseract engine.
tess = new tesseract::TessBaseAPI();
tess->Init([dataPath cStringUsingEncoding:NSUTF8StringEncoding], "eng");
}
- (NSString *) ocrImage: (UIImage *) uiImage
{
//code from http://robertcarlsen.net/2009/12/06/ocr-on-iphone-demo-1043
CGSize imageSize = [uiImage size];
double bytes_per_line = CGImageGetBytesPerRow([uiImage CGImage]);
double bytes_per_pixel = CGImageGetBitsPerPixel([uiImage CGImage]) / 8.0;
CFDataRef data = CGDataProviderCopyData(CGImageGetDataProvider([uiImage CGImage]));
const UInt8 *imageData = CFDataGetBytePtr(data);
imageThresholder = new tesseract::ImageThresholder();
imageThresholder->SetImage(imageData,(int) imageSize.width,(int) imageSize.height,(int)bytes_per_pixel,(int)bytes_per_line);
// this could take a while. maybe needs to happen asynchronously.
tess->SetImage(imageThresholder->GetPixRect());
char* text = tess->GetUTF8Text();
// Do something useful with the text!
NSLog(#"Converted text: %#",[NSString stringWithCString:text encoding:NSUTF8StringEncoding]);
return [NSString stringWithCString:text encoding:NSUTF8StringEncoding]
}
You will have to declare both tess and imageThresholder in the .h file
tesseract::TessBaseAPI *tess;
tesseract::ImageThresholder *imageThresholder;
I've found some good code snippets in the Tesseract OCR engine showing how to do this, notably in the class ImageThresholder inside thresholder.cpp (see the link below). I didn't test it yet, but here is a short description:
The interesting part for me is the else block where the depth is 32. There,
pixCreate()
pixGetData()
pixGetWpl()
do the actual work.
The thresholder.cpp from the Tesseract engine uses the above-mentioned calls.
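To make that concrete, here is a rough, untested sketch of the in-memory round trip using only basic Leptonica calls (pixCreate/pixSetPixel/pixGetPixel). It goes through an 8-bit grayscale CGBitmapContext, which is slower than writing whole rows via pixGetData()/pixGetWpl() the way the thresholder does, but easier to follow. The helper names are mine:
#include "allheaders.h"   // Leptonica umbrella header (adjust the include to your project setup)

// UIImage -> 8 bpp PIX, entirely in memory.
static PIX *PixFromUIImage(UIImage *uiImage)
{
    CGImageRef cgImage = uiImage.CGImage;
    size_t width  = CGImageGetWidth(cgImage);
    size_t height = CGImageGetHeight(cgImage);

    // Render into an 8-bit grayscale context so any source format ends up uniform.
    unsigned char *buffer = (unsigned char *)calloc(width * height, 1);
    CGColorSpaceRef gray = CGColorSpaceCreateDeviceGray();
    CGContextRef ctx = CGBitmapContextCreate(buffer, width, height, 8, width, gray, (CGBitmapInfo)kCGImageAlphaNone);
    CGContextDrawImage(ctx, CGRectMake(0, 0, width, height), cgImage);

    PIX *pix = pixCreate((l_int32)width, (l_int32)height, 8);
    for (size_t y = 0; y < height; y++) {
        for (size_t x = 0; x < width; x++) {
            pixSetPixel(pix, (l_int32)x, (l_int32)y, buffer[y * width + x]);
        }
    }

    CGContextRelease(ctx);
    CGColorSpaceRelease(gray);
    free(buffer);
    return pix; // caller is responsible for pixDestroy(&pix)
}

// 8 bpp PIX -> UIImage, entirely in memory.
static UIImage *UIImageFromPix(PIX *pix)
{
    l_int32 width = pixGetWidth(pix);
    l_int32 height = pixGetHeight(pix);
    unsigned char *buffer = (unsigned char *)malloc((size_t)width * (size_t)height);
    for (l_int32 y = 0; y < height; y++) {
        for (l_int32 x = 0; x < width; x++) {
            l_uint32 val = 0;
            pixGetPixel(pix, x, y, &val);
            buffer[y * width + x] = (unsigned char)val;
        }
    }
    CGColorSpaceRef gray = CGColorSpaceCreateDeviceGray();
    CGContextRef ctx = CGBitmapContextCreate(buffer, width, height, 8, width, gray, (CGBitmapInfo)kCGImageAlphaNone);
    CGImageRef cgImage = CGBitmapContextCreateImage(ctx);
    UIImage *result = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    CGContextRelease(ctx);
    CGColorSpaceRelease(gray);
    free(buffer);
    return result;
}

// Usage: PIX *pix8 = PixFromUIImage(uiImage); /* process pix8 */ UIImage *out = UIImageFromPix(pix8); pixDestroy(&pix8);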