I am trying to complete an app for a university project that will allow the user to take a photo containing a QR code and some specific colours that could change over time. My app should be able to detect the QR code and the changes in the colours. I am using React Native and OpenCV, concentrating on iOS first (I had hoped it would be easier). So far, using the brainhubeu/react-native-opencv-tutorial on GitHub, I am able to take a picture and check for blur, but when it comes to detecting and decoding the QR code, it doesn't seem to locate any QR code in the image.
#import "RNOpenCVLibrary.h"
#import <React/RCTLog.h>
@implementation RNOpenCVLibrary
- (dispatch_queue_t)methodQueue
{
return dispatch_get_main_queue();
}
RCT_EXPORT_MODULE()
RCT_EXPORT_METHOD(addEvent:(NSString *)name location:(NSString *)location)
{
RCTLogInfo(@"Pretending to create an event %@ at %@", name, location);
};
RCT_EXPORT_METHOD(checkForBlurryImage:(NSString *)imageAsBase64 callback:(RCTResponseSenderBlock)callback) {
RCTLog(@"%@", imageAsBase64);
UIImage* image = [self decodeBase64ToImage:imageAsBase64];
BOOL isImageBlurryResult = [self isImageBlurry:image];
id objects[] = { isImageBlurryResult ? @YES : @NO };
NSUInteger count = sizeof(objects) / sizeof(id);
NSArray *dataArray = [NSArray arrayWithObjects:objects
count:count];
callback(@[[NSNull null], dataArray]);
};
RCT_EXPORT_METHOD(checkForQRCode:(NSString *)imageAsBase64) {
RCTLog(@"%@", imageAsBase64);
UIImage* image = [self decodeBase64ToImage:imageAsBase64];
cv::QRCodeDetector qrDecoder = cv::QRCodeDetector();
BOOL isQRPresentResult = [self isQRPresent:image];
NSString *decodedQrData = [self decodeQRCode:image];
//BOOL isQRPresentResult = 1;
//std::string data = qrDecoder.detectAndDecode(matImage);
//std::string data = "testing";
//NSString* result = [NSString stringWithUTF8String:data.c_str()];
//NSString* result = @"test";
RCTLogInfo(@"Pretending to create an event %@", decodedQrData);
RCTLog(isQRPresentResult ? @"yes" : @"No");
};
-(BOOL)isQRPresent:(UIImage*)image{
cv::Mat matImage = [self convertUIImageToCVMat:image];
cv::Mat matImageGrey;
// converting image's color space (RGB) to grayscale
cv::cvtColor(matImage, matImageGrey, cv::COLOR_BGRA2GRAY);
std::vector<cv::Point> points;
cv::QRCodeDetector qrDecoder = cv::QRCodeDetector();
return qrDecoder.detect(matImageGrey, points);
};
-(NSString*)decodeQRCode:(UIImage*)image{
cv::Mat matImage = [self convertUIImageToCVMat:image];
cv::Mat matImageGrey;
// converting image's color space (RGB) to grayscale
cv::cvtColor(matImage, matImageGrey, cv::COLOR_BGRA2GRAY);
cv::QRCodeDetector qrDecoder = cv::QRCodeDetector();
std::string qrData;
qrData = qrDecoder.detectAndDecode(matImageGrey);
return [NSString stringWithUTF8String:qrData.c_str()];
};
- (cv::Mat)convertUIImageToCVMat:(UIImage *)image {
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels (color channels + alpha)
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
CGContextRelease(contextRef);
return cvMat;
};
- (UIImage *)decodeBase64ToImage:(NSString *)strEncodeData {
NSData *data = [[NSData alloc]initWithBase64EncodedString:strEncodeData options:NSDataBase64DecodingIgnoreUnknownCharacters];
return [UIImage imageWithData:data];
};
- (BOOL) isImageBlurry:(UIImage *) image {
// converting UIImage to OpenCV format - Mat
cv::Mat matImage = [self convertUIImageToCVMat:image];
cv::Mat matImageGrey;
// converting image's color space (RGB) to grayscale
cv::cvtColor(matImage, matImageGrey, cv::COLOR_BGRA2GRAY);
cv::Mat dst2 = [self convertUIImageToCVMat:image];
cv::Mat laplacianImage;
dst2.convertTo(laplacianImage, CV_8UC1);
// applying Laplacian operator to the image
cv::Laplacian(matImageGrey, laplacianImage, CV_8U);
cv::Mat laplacianImage8bit;
laplacianImage.convertTo(laplacianImage8bit, CV_8UC1);
unsigned char *pixels = laplacianImage8bit.data;
// 16777216 = 256*256*256
int maxLap = -16777216;
for (int i = 0; i < ( laplacianImage8bit.elemSize()*laplacianImage8bit.total()); i++) {
if (pixels[i] > maxLap) {
maxLap = pixels[i];
}
}
// one of the main parameters here: threshold sets the sensitivity for the blur check
// smaller number = less sensitive; default = 180
int threshold = 180;
return (maxLap <= threshold);
};
@end
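Two things I have not ruled out yet: checkForQRCode never returns anything to the JavaScript side (it has no callback or promise), and the decoded UIImage could be nil (for example, if the base64 string still carries a data URI prefix) or far larger than the detector copes with. Below is a rough sketch of how I plan to check both, reusing the helpers above; the callback signature and the downscale retry are my own additions, not part of the tutorial.
RCT_EXPORT_METHOD(checkForQRCode:(NSString *)imageAsBase64 callback:(RCTResponseSenderBlock)callback) {
  UIImage *image = [self decodeBase64ToImage:imageAsBase64];
  if (image == nil) {
    // A prefix such as "data:image/jpeg;base64," makes the NSData decode fail.
    callback(@[@"could not decode the base64 image", [NSNull null]]);
    return;
  }
  cv::Mat matImage = [self convertUIImageToCVMat:image];
  cv::Mat matImageGrey;
  cv::cvtColor(matImage, matImageGrey, cv::COLOR_BGRA2GRAY);
  cv::QRCodeDetector qrDecoder;
  std::string qrData = qrDecoder.detectAndDecode(matImageGrey);
  if (qrData.empty()) {
    // Very large camera frames sometimes defeat the detector; retry on a downscaled copy.
    cv::Mat small;
    cv::resize(matImageGrey, small, cv::Size(), 0.5, 0.5, cv::INTER_AREA);
    qrData = qrDecoder.detectAndDecode(small);
  }
  // Return the decoded string (possibly empty) to JavaScript instead of only logging it.
  callback(@[[NSNull null], [NSString stringWithUTF8String:qrData.c_str()]]);
}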
Related
How can I get the size (height/width) of an image from a URL in Objective-C? I want to size my container according to the image. I am using AFNetworking 3.0.
I could use SDWebImage if it fulfills my requirement.
Knowing the size of an image before actually loading it can be necessary in a number of cases. For example, setting the height of a tableView cell in the heightForRowAtIndexPath method while loading the actual image later in the cellForRowAtIndexPath (this is a very frequent catch 22).
One simple way to do it, is to read the image header from the server URL using the Image I/O interface:
#import <ImageIO/ImageIO.h>
NSMutableString *imageURL = [NSMutableString stringWithFormat:@"http://www.myimageurl.com/image.png"];
CGImageSourceRef source = CGImageSourceCreateWithURL((CFURLRef)[NSURL URLWithString:imageURL], NULL);
NSDictionary* imageHeader = (__bridge NSDictionary*) CGImageSourceCopyPropertiesAtIndex(source, 0, NULL);
NSLog(@"Image header %@", imageHeader);
NSLog(@"PixelHeight %@", [imageHeader objectForKey:@"PixelHeight"]);
Swift 4.x
Xcode 12.x
func sizeOfImageAt(url: URL) -> CGSize? {
// with CGImageSource we avoid loading the whole image into memory
guard let source = CGImageSourceCreateWithURL(url as CFURL, nil) else {
return nil
}
let propertiesOptions = [kCGImageSourceShouldCache: false] as CFDictionary
guard let properties = CGImageSourceCopyPropertiesAtIndex(source, 0, propertiesOptions) as? [CFString: Any] else {
return nil
}
if let width = properties[kCGImagePropertyPixelWidth] as? CGFloat,
let height = properties[kCGImagePropertyPixelHeight] as? CGFloat {
return CGSize(width: width, height: height)
} else {
return nil
}
}
Use the asynchronous mechanism in iOS (GCD) to download the image without blocking your main thread.
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
// Download IMAGE using URL
NSData *data = [[NSData alloc]initWithContentsOfURL:URL];
// COMPOSE IMAGE FROM NSData
UIImage *image = [[UIImage alloc]initWithData:data];
dispatch_async(dispatch_get_main_queue(), ^{
// UI update on main thread
// Calculate height & width of image
CGFloat height = image.size.height;
CGFloat width = image.size.width;
});
});
For Swift 4 use this:
let imageURL = URL(string: post.imageBigPath)!
let source = CGImageSourceCreateWithURL(imageURL as CFURL, nil)
let imageHeader = CGImageSourceCopyPropertiesAtIndex(source!, 0, nil)! as NSDictionary
print("Image header: \(imageHeader)")
The header would look like this:
Image header: {
ColorModel = RGB;
Depth = 8;
PixelHeight = 640;
PixelWidth = 640;
"{Exif}" = {
PixelXDimension = 360;
PixelYDimension = 360;
};
"{JFIF}" = {
DensityUnit = 0;
JFIFVersion = (
1,
0,
1
);
XDensity = 72;
YDensity = 72;
};
"{TIFF}" = {
Orientation = 0;
}; }
So you can get the width and height from it.
You can try it like this:
NSData *data = [[NSData alloc]initWithContentsOfURL:URL];
UIImage *image = [[UIImage alloc]initWithData:data];
CGFloat height = image.size.height;
CGFloat width = image.size.width;
I am trying to overlay a recorded video with some images, using AVAssetReader and AVAssetWriter. Following this tutorial, I am able to copy a video (and audio) into a new file. Now my objective is to overlay some of the initial video frames with some images with this code:
while ([assetWriterVideoInput isReadyForMoreMediaData] && !completedOrFailed)
{
// Get the next video sample buffer, and append it to the output file.
CMSampleBufferRef sampleBuffer = [assetReaderVideoOutput copyNextSampleBuffer];
CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
EAGLContext *eaglContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
CIContext *ciContext = [CIContext contextWithEAGLContext:eaglContext options:@{kCIContextWorkingColorSpace : [NSNull null]}];
UIFont *font = [UIFont fontWithName:@"Helvetica" size:40];
NSDictionary *attributes = @{NSFontAttributeName:font, NSForegroundColorAttributeName:[UIColor lightTextColor]};
UIImage *img = [self imageFromText:@"test" :attributes];
CIImage *filteredImage = [[CIImage alloc] initWithCGImage:img.CGImage];
[ciContext render:filteredImage toCVPixelBuffer:pixelBuffer bounds:[filteredImage extent] colorSpace:CGColorSpaceCreateDeviceRGB()];
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
if (sampleBuffer != NULL)
{
BOOL success = [assetWriterVideoInput appendSampleBuffer:sampleBuffer];
CFRelease(sampleBuffer);
sampleBuffer = NULL;
completedOrFailed = !success;
}
else
{
completedOrFailed = YES;
}
}
And to create image from text:
-(UIImage *)imageFromText:(NSString *)text :(NSDictionary *)attributes{
CGSize size = [text sizeWithAttributes:attributes];
UIGraphicsBeginImageContextWithOptions(size, NO, 0.0);
[text drawAtPoint:CGPointMake(0.0, 0.0) withAttributes:attributes];
UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return image;
}
The video and audio are copied, but there is no text on my video.
Question 1: Why is this code not working?
Moreover, I want to be able to check the timecode of the frame currently being read. For example, I would like to insert text with the current timecode into the video.
I tried this code, following this tutorial:
AVAsset *localAsset = [AVAsset assetWithURL:mURL];
NSError *localError;
AVAssetReader *assetReader = [[AVAssetReader alloc] initWithAsset:localAsset error:&localError];
BOOL success = (assetReader != nil);
// Create asset reader output for the first timecode track of the asset
if (success) {
AVAssetTrack *timecodeTrack = nil;
// Grab first timecode track, if the asset has them
NSArray *timecodeTracks = [localAsset tracksWithMediaType:AVMediaTypeTimecode];
if ([timecodeTracks count] > 0)
timecodeTrack = [timecodeTracks objectAtIndex:0];
if (timecodeTrack) {
AVAssetReaderTrackOutput *timecodeOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:timecodeTrack outputSettings:nil];
[assetReader addOutput:timecodeOutput];
} else {
NSLog(@"%@ has no timecode tracks", localAsset);
}
}
But I get the log:
[...] has no timecode tracks
Question 2: Why doesn't my video have any AVMediaTypeTimecode track? And how can I get the current frame's timecode?
Thanks for your help
I found the solutions:
To overlay video frames, you need to fix the decompression settings:
NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
NSDictionary* decompressionVideoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
// If there is a video track to read, set the decompression settings (32BGRA here, so Core Image can render into the pixel buffer) and create the asset reader output.
assetReaderVideoOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:assetVideoTrack outputSettings:decompressionVideoSettings];
To get the frame timestamp, you have to read the video information and then use a counter to compute the current timestamp:
durationSeconds = CMTimeGetSeconds(asset.duration);
timePerFrame = 1.0 / (Float64)assetVideoTrack.nominalFrameRate;
totalFrames = durationSeconds * assetVideoTrack.nominalFrameRate;
Then in this loop
while ([assetWriterVideoInput isReadyForMoreMediaData] && !completedOrFailed)
You can find the timestamp:
CMSampleBufferRef sampleBuffer = [assetReaderVideoOutput copyNextSampleBuffer];
if (sampleBuffer != NULL){
CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
if (pixelBuffer) {
Float64 secondsIn = ((float)counter/totalFrames)*durationSeconds;
CMTime imageTimeEstimate = CMTimeMakeWithSeconds(secondsIn, 600);
mergeTime = CMTimeGetSeconds(imageTimeEstimate);
counter++;
}
}
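To actually burn the timestamp into the frame, the imageFromText: helper from the question can be reused once the pixel format above is set to 32BGRA. This is only a rough sketch: ciContext is assumed to be created once outside the loop, and the text formatting and attributes are just an example.
// Inside the if (pixelBuffer) { ... } block above, once mergeTime has been computed:
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
NSDictionary *attributes = @{NSFontAttributeName: [UIFont fontWithName:@"Helvetica" size:40],
                             NSForegroundColorAttributeName: [UIColor lightTextColor]};
UIImage *timeImage = [self imageFromText:[NSString stringWithFormat:@"%.2f s", mergeTime] :attributes];
CIImage *overlay = [[CIImage alloc] initWithCGImage:timeImage.CGImage];
CGColorSpaceRef rgb = CGColorSpaceCreateDeviceRGB();
[ciContext render:overlay toCVPixelBuffer:pixelBuffer bounds:[overlay extent] colorSpace:rgb];
CGColorSpaceRelease(rgb);
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);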
I hope this helps!
I'm trying to read an NSImage into an NSBitmapImageRep in order to change the color of some pixels. The program should read the color value of each pixel and check whether it is equal to a color selected by a color well. I've got an ImageView named "MainImageView" and a ColorWell named "MainColor". That's my code:
- (IBAction)ScanImg:(id)sender {
NSBitmapImageRep *ImgRep = [[NSBitmapImageRep alloc]initWithData: MainImageView.image.TIFFRepresentation];
for (int pixelx = 0; pixelx < ImgRep.pixelsWide; pixelx ++) {
for (int pixely = 0; pixely < ImgRep.pixelsHigh;pixely ++){
if ([ImgRep colorAtX:pixelx y:pixely] == MainColor.color) {
[ImgRep setColor: [NSColor whiteColor] atX:pixelx y:pixely];
} else{
[ImgRep setColor: [NSColor blackColor] atX:pixelx y:pixely];
}
}
}
struct CGImage *CG = [ImgRep CGImage];
NSImage *Image = [[NSImage alloc] initWithCGImage:CG size:ImgRep.size];
MainImageView.image = Image;
}
But the code changes nothing in the picture! What's the problem? Or is there another way to change the pixels' color?
Thank you for your help!
DaRi
I have the same issue. A workaround I found is to use the -setPixel method, although this feels a bit clumsy:
NSUInteger pix[4]; pix[0] = 0; pix[1] = 0; pix[2] = 0; pix[3] = 255; //black
[ImgRep setPixel:pix atX:x y:y];
Note: The pixel data must be arranged according to the color space you are using; in this example, 0,0,0,255 is black in the RGBA color space.
But in the end, that's still better than copying and modifying the underlying (unsigned char*) data that is returned by [ImgRep bitmapData] ...
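One more thing worth checking in the original code: comparing the NSColor returned by colorAtX:y: to MainColor.color with == only compares object pointers, so it will almost never be true. A rough sketch of a component-wise comparison, assuming both colors can be converted to the same RGB color space (the helper name and tolerance value are arbitrary):
static BOOL ColorsRoughlyEqual(NSColor *a, NSColor *b) {
    // Convert both colors to a common color space before comparing their components.
    NSColor *ca = [a colorUsingColorSpace:[NSColorSpace genericRGBColorSpace]];
    NSColor *cb = [b colorUsingColorSpace:[NSColorSpace genericRGBColorSpace]];
    if (ca == nil || cb == nil) {
        return NO;
    }
    const CGFloat tolerance = 0.01; // allow for small rounding differences
    return fabs(ca.redComponent   - cb.redComponent)   < tolerance &&
           fabs(ca.greenComponent - cb.greenComponent) < tolerance &&
           fabs(ca.blueComponent  - cb.blueComponent)  < tolerance;
}
In the loop that would be used as if (ColorsRoughlyEqual([ImgRep colorAtX:pixelx y:pixely], MainColor.color)) { ... }.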
I'm using the ZXingObjC framework to create an EAN barcode image. Following the documentation, I'm doing it like this:
//in viewDidAppear
//ZXing: create matrix
NSString* eanString = @"1234567890123"; // something like that
ZXBitMatrix* result = [writer encode:eanString
format:kBarcodeFormatEan13
width:500
height:500
error:&error];
if (result) {
//ZXing: convert matrix to CGImageRef
CGImageRef imageRef = [[ZXImage imageWithMatrix:result] cgimage];
//CRASH LINE HERE!! (this is NOT in the ZXing documentation, but I cannot figure out the issue!)
UIImage* uiImage = [[UIImage alloc] initWithCGImage:imageRef]; //<--CRASH: EXC_BAD_ACCESS
if (uiImage != nil) {
//assigning image to ui
self.barCodeImageView.image = uiImage;
    }
}
It works if I step through this code using breakpoints! However, I think that at some point an image is not ready for use, but I cannot find the reason.
What I tried:
using imageRef and uiImage as local variables (EXC_BAD_ACCESS CRASH)
tried that operation in a background thread (EXC_BAD_ACCESS CRASH)
In both cases, everything worked if I used breakpoints and stepped through the code line by line. What is my mistake here? Any ideas? Thanks in advance!
After some trial-and-error programming, I was able to fix the issue by replacing the following lines
CGImageRef imageRef = [[ZXImage imageWithMatrix:result] cgimage];
UIImage* uiImage = [[UIImage alloc] initWithCGImage:imageRef]; //<--CRASH
with
UIImage* uiImage = [[UIImage alloc] initWithCGImage:[[ZXImage imageWithMatrix:result] cgimage]];
Still, I don't know why. I'm pretty sure something isn't being held in memory, or maybe the CGImageRef isn't ready when I try to convert it to a UIImage.
The problem is inside [ZXImage imageWithMatrix:result]: it creates a CGImage and releases it with CFRelease before assigning it to the ZXImage (which would increase its retain count).
To fix this issue, replace the + (ZXImage *)imageWithMatrix:(ZXBitMatrix *)matrix method with the implementation below.
+ (ZXImage *)imageWithMatrix:(ZXBitMatrix *)matrix {
int width = matrix.width;
int height = matrix.height;
int8_t *bytes = (int8_t *)malloc(width * height * 4);
for(int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
BOOL bit = [matrix getX:x y:y];
int8_t intensity = bit ? 0 : 255;
for(int i = 0; i < 3; i++) {
bytes[y * width * 4 + x * 4 + i] = intensity;
}
bytes[y * width * 4 + x * 4 + 3] = 255;
}
}
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef c = CGBitmapContextCreate(bytes, width, height, 8, 4 * width, colorSpace, kCGBitmapAlphaInfoMask & kCGImageAlphaPremultipliedLast);
CGImageRef image = CGBitmapContextCreateImage(c);
ZXImage *zxImage = [[ZXImage alloc] initWithCGImageRef:image];
CFRelease(colorSpace);
CFAutorelease(image);
CFRelease(c);
free(bytes);
return zxImage;
}
For people writing Swift: I hit the same problem that longilong described. The answer was in the ZXingObjC Objective-C example projects. Remember: this is for generating a barcode only.
internal func encodeBarcode(){
let writer : ZXMultiFormatWriter! = ZXMultiFormatWriter()
var result: ZXBitMatrix!
let hint: ZXEncodeHints! = ZXEncodeHints()
do {
hint.margin = 0
result = try writer.encode("Example String", format: kBarcodeFormatAztec, width: 500, height: 500, hints: hint)
let rawBarcode: ZXImage = ZXImage(matrix: result)
barcodeUIImage.image = UIImage(CGImage: rawBarcode.cgimage)
} catch {
print("failed to generate barcode")
}
}
I have created a program that allows me to display 3D objects, and now I want to make a cut relative to another object. Here is my result: screen.
The screenshot shows that we can see through the cut portion, so I decided to use a shader to fill in the cut part.
I tried to load a shader and then use it with glUniform3f, but that doesn't work. I did several searches on the internet, without results.
Here is my class to load a shader:
- (id)initWithVertexShaderFilename:(NSString *)vShaderFilename
fragmentShaderFilename:(NSString *)fShaderFilename
{
NSLog(@"%@", fShaderFilename);
if (self = [super init])
{
attributes = [[NSMutableArray alloc] init];
NSString *vertShaderPathname, *fragShaderPathname;
program = glCreateProgram();
vertShaderPathname =[[NSBundle mainBundle]
pathForResource:vShaderFilename
ofType:@"vsh"
inDirectory:@"Shader"];
if (![self compileShader:&vertShader
type:GL_VERTEX_SHADER
file:vertShaderPathname])
NSLog(@"Failed to compile vertex shader");
else
NSLog(@"Vertex Shader OK");
fragShaderPathname = [[NSBundle mainBundle]
pathForResource:fShaderFilename
ofType:@"fsh"
inDirectory:@"Shader"];
if (![self compileShader:&fragShader
type:GL_FRAGMENT_SHADER
file:fragShaderPathname])
NSLog(@"Failed to compile fragment shader");
else
NSLog(@"Fragment shader OK");
glAttachShader(program, vertShader);
glAttachShader(program, fragShader);
}
return self;
}
- (BOOL)compileShader:(GLuint *)shader
type:(GLenum)type
file:(NSString *)file
{
NSLog(@"hello");
GLint status;
const GLchar *source;
source =
(GLchar *)[[NSString stringWithContentsOfFile:file
encoding:NSUTF8StringEncoding
error:nil] UTF8String];
if (!source)
{
NSLog(@"Failed to load shader source");
return NO;
}
*shader = glCreateShader(type);
glShaderSource(*shader, 1, &source, NULL);
glCompileShader(*shader);
glGetShaderiv(*shader, GL_COMPILE_STATUS, &status);
NSString *intString = [NSString stringWithFormat:@"%d", status];
return status == GL_TRUE;
}
#pragma mark -
- (void)addAttribute:(NSString *)attributeName
{
if (![attributes containsObject:attributeName])
{
[attributes addObject:attributeName];
glBindAttribLocation(program,
[attributes indexOfObject:attributeName],
[attributeName UTF8String]);
}
}
- (GLuint)attributeIndex:(NSString *)attributeName
{
return [attributes indexOfObject:attributeName];
}
- (GLuint)uniformIndex:(NSString *)uniformName
{
return glGetUniformLocation(program, [uniformName UTF8String]);
}
#pragma mark -
- (BOOL)link
{
GLint status;
glLinkProgram(program);
glValidateProgram(program);
glGetProgramiv(program, GL_LINK_STATUS, &status);
if (status == GL_FALSE)
return NO;
if (vertShader)
glDeleteShader(vertShader);
if (fragShader)
glDeleteShader(fragShader);
return YES;
}
- (void)use
{
glUseProgram(program);
}
Here is the function to initialize my shaders in the main class:
- (void)setup
{
GLShader *theProgram = [[GLShader alloc] initWithVertexShaderFilename:@"shader"
fragmentShaderFilename:@"shader"];
self.program = theProgram;
[self.program addAttribute:@"position"];
[self.program addAttribute:@"textureCoordinates"];
if (![self.program link])
{
NSLog(@"Link failed");
NSString *progLog = [self.program programLog];
NSLog(@"Program Log: %@", progLog);
NSString *fragLog = [self.program fragmentShaderLog];
NSLog(@"Frag Log: %@", fragLog);
NSString *vertLog = [self.program vertexShaderLog];
NSLog(@"Vert Log: %@", vertLog);
//[(GLView *)self.view stopAnimation];
self.program = nil;
}
textureCoordinateAttribute = [program attributeIndex:@"textureCoordinates"];
colorUniform = [program uniformIndex:@"uColor"];
textureUniform = [program uniformIndex:@"texture"];
//colorUniform = glGetUniformLocation(self.program, "uColor");
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ZERO);
}
Here is the function where I want to use my shader:
-(void) draw
{
[self.program use];
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDepthMask(true);
glUniform3f(colorUniform, 1.0, 1.0, 0.5);
[self drawSubtraction:image1 with:image2];
}
Here is my fragment shader:
precision highp float;
uniform vec3 uColor;
uniform vec3 uLight;
varying vec3 vNormal;
const float alpha = 0.7;
void main(void) {
float val = dot( vNormal, uLight ) * 0.4 + 0.6;
gl_FragColor = vec4( uColor * val, alpha);
}
Am I doing something wrong? Does anyone have an idea that could help me?
Thanks.
Your shader code looks OK. Make sure you are passing the "textureCoordinates" attribute to the vertex shader correctly. You will need to call glEnableVertexAttribArray() and glVertexAttribPointer() at some point during initialization (a minimal sketch follows at the end of this answer). Depending on how you are rendering the models, you might also need to make sure you are passing in the vertices correctly.
Also, I'm assuming [self drawSubtraction:image1 with:image2] calls glDrawArrays() or glDrawElements() at some point.
The Xcode default OpenGL game project is a great place to start for loading shaders too if you want a project to compare against.
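A minimal sketch of what that attribute setup could look like, assuming an interleaved vertex buffer with 3 position floats followed by 2 texture-coordinate floats per vertex (the stride and offsets are placeholders and have to match your actual vertex data):
// After [self.program link] succeeds and the VBO holding the vertex data is bound:
GLuint positionAttribute = [self.program attributeIndex:@"position"];
GLuint texCoordAttribute = [self.program attributeIndex:@"textureCoordinates"];
GLsizei stride = (GLsizei)(5 * sizeof(GLfloat)); // 3 floats position + 2 floats texture coordinates

glEnableVertexAttribArray(positionAttribute);
glVertexAttribPointer(positionAttribute, 3, GL_FLOAT, GL_FALSE, stride, (const GLvoid *)0);

glEnableVertexAttribArray(texCoordAttribute);
glVertexAttribPointer(texCoordAttribute, 2, GL_FLOAT, GL_FALSE, stride,
                      (const GLvoid *)(3 * sizeof(GLfloat)));

// Then in draw, after [self.program use], the uniform can be set before drawing:
glUniform3f(colorUniform, 1.0f, 1.0f, 0.5f);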