QuadTree or KD Tree for objective c? [closed] - objective-c

Closed. This question does not meet Stack Overflow guidelines. It is not currently accepting answers.
Questions asking for code must demonstrate a minimal understanding of the problem being solved. Include attempted solutions, why they didn't work, and the expected results. See also: Stack Overflow question checklist
Closed 9 years ago.
I've been looking for a while for a decent piece of code to use in my app, based on one of those algorithms.
I found this example: http://rosettacode.org/wiki/K-d_tree#C
But when I put the code in Xcode, I get errors, for example:
"use of undeclared identifier", "expected ';' at the end of declaration".
I guess a header file is missing?

I copied the code from the link and made a minor edit which moved
"swap" from being an inline nested function to a static function.
Compiled with "gcc -std=c99 file.c" and it compiled OK. So, no, it doesn't
need an extra include file. Maybe you mis-pasted it.
If you are happy with this answer, you could accept it. Thanks.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#define MAX_DIM 3
struct kd_node_t{
double x[MAX_DIM];
struct kd_node_t *left, *right;
};
static inline double
dist(struct kd_node_t *a, struct kd_node_t *b, int dim)
{
double t, d = 0;
while (dim--) {
t = a->x[dim] - b->x[dim];
d += t * t;
}
return d;
}
static void swap(struct kd_node_t *x, struct kd_node_t *y) {
double tmp[MAX_DIM];
memcpy(tmp, x->x, sizeof(tmp));
memcpy(x->x, y->x, sizeof(tmp));
memcpy(y->x, tmp, sizeof(tmp));
}
/* see quickselect method */
struct kd_node_t*
find_median(struct kd_node_t *start, struct kd_node_t *end, int idx)
{
if (end <= start) return NULL;
if (end == start + 1)
return start;
struct kd_node_t *p, *store, *md = start + (end - start) / 2;
double pivot;
while (1) {
pivot = md->x[idx];
swap(md, end - 1);
for (store = p = start; p < end; p++) {
if (p->x[idx] < pivot) {
if (p != store)
swap(p, store);
store++;
}
}
swap(store, end - 1);
/* median has duplicate values */
if (store->x[idx] == md->x[idx])
return md;
if (store > md) end = store;
else start = store;
}
}
struct kd_node_t*
make_tree(struct kd_node_t *t, int len, int i, int dim)
{
struct kd_node_t *n;
if (!len) return 0;
if ((n = find_median(t, t + len, i))) {
i = (i + 1) % dim;
n->left = make_tree(t, n - t, i, dim);
n->right = make_tree(n + 1, t + len - (n + 1), i, dim);
}
return n;
}
/* global variable, so sue me */
int visited;
void nearest(struct kd_node_t *root, struct kd_node_t *nd, int i, int dim,
struct kd_node_t **best, double *best_dist)
{
double d, dx, dx2;
if (!root) return;
d = dist(root, nd, dim);
dx = root->x[i] - nd->x[i];
dx2 = dx * dx;
visited ++;
if (!*best || d < *best_dist) {
*best_dist = d;
*best = root;
}
/* if chance of exact match is high */
if (!*best_dist) return;
if (++i >= dim) i = 0;
nearest(dx > 0 ? root->left : root->right, nd, i, dim, best, best_dist);
if (dx2 >= *best_dist) return;
nearest(dx > 0 ? root->right : root->left, nd, i, dim, best, best_dist);
}
#define N 1000000
#define rand1() (rand() / (double)RAND_MAX)
#define rand_pt(v) { v.x[0] = rand1(); v.x[1] = rand1(); v.x[2] = rand1(); }
int main(void)
{
int i;
struct kd_node_t wp[] = {
{{2, 3}}, {{5, 4}}, {{9, 6}}, {{4, 7}}, {{8, 1}}, {{7, 2}}
};
struct kd_node_t this = {{9, 2}};
struct kd_node_t *root, *found, *million;
double best_dist;
root = make_tree(wp, sizeof(wp) / sizeof(wp[1]), 0, 2);
visited = 0;
found = 0;
nearest(root, &this, 0, 2, &found, &best_dist);
printf(">> WP tree\nsearching for (%g, %g)\n"
"found (%g, %g) dist %g\nseen %d nodes\n\n",
this.x[0], this.x[1],
found->x[0], found->x[1], sqrt(best_dist), visited);
million = calloc(N, sizeof(struct kd_node_t));
srand(time(0));
for (i = 0; i < N; i++) rand_pt(million[i]);
root = make_tree(million, N, 0, 3);
rand_pt(this);
visited = 0;
found = 0;
nearest(root, &this, 0, 3, &found, &best_dist);
printf(">> Million tree\nsearching for (%g, %g, %g)\n"
"found (%g, %g, %g) dist %g\nseen %d nodes\n",
this.x[0], this.x[1], this.x[2],
found->x[0], found->x[1], found->x[2],
sqrt(best_dist), visited);
/* search many random points in million tree to see average behavior.
tree size vs avg nodes visited:
10 ~ 7
100 ~ 16.5
1000 ~ 25.5
10000 ~ 32.8
100000 ~ 38.3
1000000 ~ 42.6
10000000 ~ 46.7 */
int sum = 0, test_runs = 100000;
for (i = 0; i < test_runs; i++) {
found = 0;
visited = 0;
rand_pt(this);
nearest(root, &this, 0, 3, &found, &best_dist);
sum += visited;
}
printf("\n>> Million tree\n"
"visited %d nodes for %d random findings (%f per lookup)\n",
sum, test_runs, sum/(double)test_runs);
// free(million);
return 0;
}

Related

Comparing Execution time with Time Complexity in Merge & Quick Sort

I have implemented Merge Sort and Quick Sort from the textbook I'm learning from, and it gives the time complexities of each sort as:
Merge Sort: O(n log n) / Quick Sort: O(n log n) on average and O(n²) in the worst case (if the key array is sorted).
So I executed the programs with two types of arrays, sorted and random, with different sizes.
Since I wanted to get the average time, I ran each case 10 times.
Here is the code of Merge & Quick Sort:
#include <iostream>
#include <ctime>
#include <vector>
#include <algorithm>
using namespace std;
void Merge(vector<int>& s, int low, int mid, int high) {
int i = low;
int j = mid + 1;
int k = low;
vector<int> u(s);
while (i <= mid && j <= high) {
if (s.at(i) < s.at(j)) {
u.at(k) = s.at(i);
i++;
} else {
u.at(k) = s.at(j);
j++;
}
k++;
}
if (i > mid) {
for (int a = j; a < high + 1; a++) {
u.at(k) = s.at(a);
k++;
}
} else {
for (int a = i; a < mid + 1; a++) {
u.at(k) = s.at(a);
k++;
}
}
for (int a = low; a < high + 1; a++)
s.at(a) = u.at(a);
}
void MergeSort(vector<int>& s, int low, int high) {
int mid;
if (low < high) {
mid = (low + high) / 2;
MergeSort(s, low, mid);
MergeSort(s, mid + 1, high);
Merge(s, low, mid, high);
}
}
void swap(int& a, int& b) {
int tmp = a;
a = b;
b = tmp;
}
void Partition(vector<int>& s, int low, int high, int& pvpoint) {
int j;
int pvitem;
pvitem = s.at(low);
j = low;
for (int i = low + 1; i <= high; i++) {
if (s.at(i) < pvitem) {
j++;
swap(s.at(i), s.at(j));
}
pvpoint = j;
swap(s.at(low), s.at(pvpoint));
}
}
void QuickSort(vector<int>& s, int low, int high) {
int pvpoint;
if (high > low) {
Partition(s, low, high, pvpoint);
QuickSort(s, low, pvpoint - 1);
QuickSort(s, pvpoint + 1, high);
}
}
Each of these main() functions prints the execution times for SORTED and RANDOM key arrays.
You can see the results by adding one of these main functions in Visual Studio (C++):
//Sorted key array
int main() {
int s;
for (int i = 1; i < 21; i++) { //Size is from 300 to 6000
s = i * 300;
vector<int> Arr(s);
cout << "N : " << s << "\n";
//Assign Random numbers to each elements
Arr.front() = rand() % Arr.size();
for (int j = 1; j < Arr.size(); j++) { Arr.at(j) = ((737 * Arr.at(j - 1) + 149) % (Arr.size() * 5)); }
sort(Arr.begin(), Arr.end());
//QuickSort(Arr, 0, Arr.size() - 1); <- you can switch using this instead of MergeSort(...) below
for (int i = 0; i < 10; i++) { //print 10 times of execution time
clock_t start, end;
start = clock();
MergeSort(Arr, 0, Arr.size() - 1);
end = clock() - start;
printf("%12.3f ", (double)end * 1000.0 / CLOCKS_PER_SEC);
}
cout << endl;
}
return 0;
}
//Random key array
int main() {
int s;
for (int i = 1; i < 21; i++) {
s = i * 3000;
vector<int> Arr(s);
cout << "N : " << s << "\n";
for (int i = 0; i < 10; i++) {
//Assign Random numbers to each elements
Arr.front() = rand() % Arr.size();
for (int j = 1; j < Arr.size(); j++) { Arr.at(j) = ((737 * Arr.at(j - 1) + 149) % (Arr.size() * 5)); }
//QuickSort(Arr, 0, Arr.size() - 1); <- you can switch using this instead of MergeSort(...) below
clock_t start, end;
start = clock();
MergeSort(Arr, 0, Arr.size() - 1);
end = clock() - start;
printf("%12.3f ", (double)end * 1000.0 / CLOCKS_PER_SEC);
}
cout << endl;
}
return 0;
}
And the THING is, the results do not match the time complexities. For example, Merge Sort on a RANDOM array of
size N = 3000 prints 20 ms, but size N = 60000 prints 1400~1600 ms!! It was supposed to print roughly 400 ms, because the average-case (not worst-case) time complexity is O(n log n), isn't it? I want to know what affects this time and how I could get the printed times I expected.
You posted the same code in this question: Calculate Execution Times in Sort algorithm and you did not take my answer into account.
Your MergeSort function has a flaw: you duplicate the whole array in Merge, causing a lot of overhead and quadratic time complexity. This innocent-looking definition, vector<int> u(s);, defines u as a vector initialized as a copy of s, the full array.
C++ is a very powerful language, often too powerful, littered with traps and pitfalls such as this. It is a very good thing you tried to verify that your program meets the expected performance from the known time complexity of the algorithm. Such a concern is alas too rare.
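One possible fix, as a minimal sketch of my own (not part of the original answer): size the temporary buffer to the sub-range being merged instead of copying the whole array. Same interface as the question's Merge(); only the scratch storage changes.
#include <vector>
#include <algorithm>

// Sketch: the scratch vector holds just the range [low, high] instead of a
// copy of the whole array, so the extra work per merge is O(high - low), not O(n).
void Merge(std::vector<int>& s, int low, int mid, int high) {
    std::vector<int> u;
    u.reserve(high - low + 1);
    int i = low, j = mid + 1;
    while (i <= mid && j <= high)
        u.push_back(s[i] < s[j] ? s[i++] : s[j++]);
    while (i <= mid)  u.push_back(s[i++]);
    while (j <= high) u.push_back(s[j++]);
    std::copy(u.begin(), u.end(), s.begin() + low);
}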
Here are some guidelines:
For getting execution time:
#include <stdio.h>
#include <sys/time.h>
int main()
{
struct timeval stop, start;
int arr[10000];
gettimeofday(&start, NULL);
mergeSort(arr, 0, 9999);
gettimeofday(&stop, NULL);
printf("Time taken for Quick sort is: %ld microseconds\n",
(stop.tv_sec-start.tv_sec)*1000000+stop.tv_usec-start.tv_usec);
}
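Note that gettimeofday is POSIX-only and not available under Visual Studio; a portable alternative is std::chrono. A minimal sketch (assuming the MergeSort from the question is in scope):
#include <chrono>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> arr(10000, 1);                  // sample input
    auto start = std::chrono::steady_clock::now();
    MergeSort(arr, 0, (int)arr.size() - 1);          // the question's MergeSort
    auto stop = std::chrono::steady_clock::now();
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(stop - start).count();
    std::printf("Time taken for Merge sort is: %lld microseconds\n", (long long)us);
    return 0;
}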

GNU Radio circular buffer manipulation

I encountered the following error
gr::log :WARN: tpb_thread_body - asynchronous message buffer overflowing, dropping message
By pure serendipity, I ran into this GNU Radio presentation on
YouTube.
The presenter mentioned an OOT block he called "buffer" that is capable of eliminating the "buffer overflowing" error. Apparently, this block plays with different sample rates and the so-called "circular buffers". I haven't worked with circular buffers myself. Any ideas on circular buffers or any hints on how to build this buffer block are welcome.
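For reference, a circular (ring) buffer is just a fixed-size array with read/write indices that wrap around. A minimal illustration of the idea (my own sketch, not the presenter's OOT block):
#include <cstddef>
#include <vector>

// Minimal fixed-capacity ring buffer: push() returns false when full, so a
// producer can throttle itself instead of having messages silently dropped.
template <typename T>
class RingBuffer {
public:
    explicit RingBuffer(std::size_t capacity) : buf_(capacity) {}

    bool push(const T& v) {                  // returns false when full
        if (count_ == buf_.size()) return false;
        buf_[(head_ + count_) % buf_.size()] = v;
        ++count_;
        return true;
    }

    bool pop(T& v) {                         // returns false when empty
        if (count_ == 0) return false;
        v = buf_[head_];
        head_ = (head_ + 1) % buf_.size();
        --count_;
        return true;
    }

private:
    std::vector<T> buf_;
    std::size_t head_ = 0;                   // index of the oldest element
    std::size_t count_ = 0;                  // number of stored elements
};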
EDIT
Below is the flowgraph that generates the error. As it was suggested in the comments, the culprits could be the message processing blocks (red-circled) namely generateCADU (for generating standard CCSDS frames) and processCADU (for extracting CADUs from a data stream).
The implementation file of the generateCADU block is given below
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <gnuradio/io_signature.h>
#include "generateCADU_impl.h"
#include "fec/ReedSolomon/ReedSolomon.h"
#include "fec/Scrambler/Scrambler.h"
namespace gr {
namespace ccsds {
generateCADU::sptr
generateCADU::make(int frameLength,std::string sync, int scramble, int rs, int intDepth)
{
return gnuradio::get_initial_sptr
(new generateCADU_impl(frameLength, sync, scramble, rs, intDepth));
}
/*
* The private constructor
*/
generateCADU_impl::generateCADU_impl(int frameLength,std::string sync, int scramble, int rs, int intDepth)
: gr::sync_block("generateCADU",
gr::io_signature::make(1, 1, sizeof(unsigned char)),
gr::io_signature::make(0, 0, 0)),
d_frameLength(frameLength),d_scramble(scramble == 1),d_rs(rs >= 1), d_basis(rs >= 2), d_intDepth(intDepth)
{
set_output_multiple(d_frameLength);
//Registering output port
message_port_register_out(pmt::mp("out"));
d_sync = parse_string(sync);
}
/*
* Our virtual destructor.
*/
generateCADU_impl::~generateCADU_impl()
{
}
unsigned char
generateCADU_impl::parse_hex(char c)
{
if ('0' <= c && c <= '9') return c - '0';
if ('A' <= c && c <= 'F') return c - 'A' + 10;
if ('a' <= c && c <= 'f') return c - 'a' + 10;
std::abort();
}
std::vector<unsigned char>
generateCADU_impl::parse_string(const std::string & s)
{
if (s.size() % 2 != 0) std::abort();
std::vector<unsigned char> result(s.size() / 2);
for (std::size_t i = 0; i != s.size() / 2; ++i)
result[i] = 16 * parse_hex(s[2 * i]) + parse_hex(s[2 * i + 1]);
return result;
}
int
generateCADU_impl::work(int noutput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items)
{
const unsigned char *in = (const unsigned char *) input_items[0];
//Reed-Solomon and Scrambler objects
ReedSolomon RS(16,d_intDepth,d_basis);// False = conventional, True = dual-basis
Scrambler S;
//Buffers
unsigned char *frameBuffer1 = (unsigned char*)malloc(d_frameLength*sizeof(unsigned char));
std::vector<unsigned char> frameBuffer2;
//The work function engine
for(int i = 0; (i + d_frameLength) < noutput_items; i += d_frameLength)
{
//Copying data from input stream
memcpy(frameBuffer1,in + i + d_frameLength,d_frameLength);
//Copying frame into std::vector buffer
frameBuffer2.insert(frameBuffer2.begin(),frameBuffer1, frameBuffer1 + d_frameLength);
//Optional scrambling and Reed-Solomon
if (d_rs) RS.Encode_RS(frameBuffer2);
if (d_scramble) S.Scramble(frameBuffer2);
//Insert sync word
frameBuffer2.insert(frameBuffer2.begin(), d_sync.begin(), d_sync.end());
//Transmitting PDU
pmt::pmt_t pdu(pmt::cons(pmt::PMT_NIL,pmt::make_blob(frameBuffer2.data(),frameBuffer2.size())));
message_port_pub(pmt::mp("out"), pdu);
//Clear buffer
frameBuffer2.clear();
}
// Tell runtime system how many output items we produced.
return noutput_items;
}
} /* namespace ccsds */
} /* namespace gr */
And here is the processCADU block. This block uses tags generated by the synchronizeCADU (which is simply a wrapper for the correlate_access_tag block) to extract CADUs
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <gnuradio/io_signature.h>
#include "processCADU_impl.h"
#include "fec/ReedSolomon/ReedSolomon.h"
#include "fec/Scrambler/Scrambler.h"
namespace gr {
namespace ccsds {
processCADU::sptr
processCADU::make(int frameLength, int scramble, int rs, int intDepth, std::string tagName)
{
return gnuradio::get_initial_sptr
(new processCADU_impl(frameLength, scramble, rs, intDepth, tagName));
}
/*
* The private constructor
*/
processCADU_impl::processCADU_impl(int frameLength, int scramble, int rs, int intDepth, std::string tagName)
: gr::sync_block("processCADU",
gr::io_signature::make(1, 1, sizeof(unsigned char)),
gr::io_signature::make(0, 0, 0)),
d_frameLength(frameLength),d_scramble(scramble == 1),d_rs(rs >= 1), d_basis(rs >= 2), d_intDepth(intDepth)
{
//Multiple input
set_output_multiple(d_frameLength * 8);
//Registering output port
message_port_register_out(pmt::mp("out"));
if (d_rs) d_frameLength += 32 * d_intDepth;
//Setting tag name
key = pmt::mp(tagName);
}
/*
* Our virtual destructor.
*/
processCADU_impl::~processCADU_impl()
{
delete d_pack;
}
int
processCADU_impl::work(int noutput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items)
{
const unsigned char *in = (const unsigned char *) input_items[0];
unsigned char *out = (unsigned char *) output_items[0];
void *msg_data = NULL;
unsigned char frame_data[d_frameLength];
unsigned char frame_len = 0;
std::vector<unsigned char> frameBuffer;
//Reed-Solomon and Scrambler objects
ReedSolomon RS(16,d_intDepth,d_basis);// False = conventional, True = dual-basis
std::vector<int> errors;//errors.push_back(0);
Scrambler S;
d_tags.clear();
d_pack = new blocks::kernel::pack_k_bits(8);
this->get_tags_in_window(d_tags, 0, 0, noutput_items,key);
for(d_tags_itr = d_tags.begin(); d_tags_itr != d_tags.end(); d_tags_itr++) {
// Check that we have enough data for a full frame
if ((d_tags_itr->offset - this->nitems_read(0)) > (noutput_items - (d_frameLength) * 8))
{
return (d_tags_itr->offset - this->nitems_read(0) - 1);
}
//Pack bits into bytes
d_pack->pack(frame_data, &in[d_tags_itr->offset - this->nitems_read(0)], d_frameLength);
//Copying frame into std::vector buffer
frameBuffer.insert(frameBuffer.begin(),frame_data, frame_data + d_frameLength);
//Optional scrambling and Reed-Solomon
if (d_scramble) S.Scramble(frameBuffer);
//if (d_rs) RS.Decode_RS(frameBuffer,errors);
//If there is Reed-Solomon decoding
if(d_rs)
{
RS.Decode_RS(frameBuffer,errors);
if (RS.Success(errors)) // Success
{
//std::cout << "Success" << std::endl;
pmt::pmt_t pdu(pmt::cons(pmt::PMT_NIL,pmt::make_blob(frameBuffer.data(),frameBuffer.size())));
message_port_pub(pmt::mp("out"), pdu);
/*for(int i=0; i < errors.size(); i++)
{
//std::cout << "Number of Errors : " << errors.at(i) << std::endl << std::endl;
}*/
}
else // Failure
{
std::cout << "RS failure" << std::endl;
}
}
else{
pmt::pmt_t pdu(pmt::cons(pmt::PMT_NIL,pmt::make_blob(frameBuffer.data(),frameBuffer.size())));
message_port_pub(pmt::mp("out"), pdu);
}
//Clear buffers
frameBuffer.clear();
errors.clear();
}
// Tell runtime system how many output items we produced.
return noutput_items;
}
} /* namespace ccsds */
} /* namespace gr */
Regards,
M
Thanks to @MarcusMüller's suggestion, using the tagged_stream paradigm instead of PDUs solved the problem. I was able to transmit 47 terabytes of data without any problems. Below is the code for the newly implemented block.
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <gnuradio/io_signature.h>
#include "genCADU_impl.h"
namespace gr {
namespace ccsds {
genCADU::sptr
genCADU::make(int frameLength,std::string sync, int scramble, int rs, int intDepth, std::string len_tag_key)
{
return gnuradio::get_initial_sptr
(new genCADU_impl(frameLength, sync, scramble, rs, intDepth, len_tag_key));
}
/*
* The private constructor
*/
genCADU_impl::genCADU_impl(int frameLength,std::string sync, int scramble, int rs, int intDepth, std::string len_tag_key)
: gr::tagged_stream_block("genCADU",
gr::io_signature::make(1, 1, sizeof(unsigned char)),
gr::io_signature::make(1, 1, sizeof(unsigned char)),len_tag_key),
d_frameLength(frameLength),d_scramble(scramble == 1),d_rs(rs >= 1), d_basis(rs >= 2), d_intDepth(intDepth)
{
//Synchronization pattern
d_sync = parse_string(sync);
//Reed-Solomon and Scrambler objects
RS = new ReedSolomon(16,d_intDepth,d_basis);// False = conventional, True = dual-basis
S = new Scrambler();
}
/*
* Our virtual destructor.
*/
genCADU_impl::~genCADU_impl()
{
delete RS;
delete S;
}
int
genCADU_impl::calculate_output_stream_length(const gr_vector_int &ninput_items)
{
int noutput_items = (d_rs) ? d_frameLength + 32*d_intDepth + d_sync.size() : d_frameLength + d_sync.size();
return noutput_items ;
}
unsigned char
genCADU_impl::parse_hex(char c)
{
if ('0' <= c && c <= '9') return c - '0';
if ('A' <= c && c <= 'F') return c - 'A' + 10;
if ('a' <= c && c <= 'f') return c - 'a' + 10;
std::abort();
}
std::vector<unsigned char>
genCADU_impl::parse_string(const std::string & s)
{
if (s.size() % 2 != 0) std::abort();
std::vector<unsigned char> result(s.size() / 2);
for (std::size_t i = 0; i != s.size() / 2; ++i)
result[i] = 16 * parse_hex(s[2 * i]) + parse_hex(s[2 * i + 1]);
return result;
}
int
genCADU_impl::work (int noutput_items,
gr_vector_int &ninput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items)
{
const unsigned char *in = (const unsigned char *) input_items[0];
unsigned char *out = (unsigned char *) output_items[0];
int total_len;
//Copy pdu from circular buffer to local buffer
buffer.insert(buffer.end(), in, in + d_frameLength);
//Optional scrambling and Reed-Solomon. TO DO: Turbo and LDPC
if (d_rs) RS->Encode_RS(buffer);
if (d_scramble) S->Scramble(buffer);
//Insert sync word
buffer.insert(buffer.begin(), d_sync.begin(), d_sync.end());
//Copy from local buffer to circular buffer
std::copy(buffer.begin(),buffer.end(),out);
//Clear the local buffer
total_len = buffer.size();
buffer.clear();
// Tell runtime system how many output items we produced.
return total_len;
}
} /* namespace ccsds */
} /* namespace gr */
Regards,
M.

Vulnerable program using strtol

I came across this question when I was reading about vulnerable C programs involving strtol. Below is the code
#include <stdio.h>
#include <stdlib.h>
void f (int i, int j) {
int a[50];
a[i] += j;
}
int main (int argc, char *argv[]) {
int x = 10, y, z;
if (argc > 1) {
y = strtol (argv[1], NULL, 10);
z = strtol (argv[2], NULL, 10);
f(y, z);
x = 20;
printf ("x=%d\n", x);
}
return 0;
}
The call strtol(s, NULL, 10) returns the integer written in the
string s in decimal notation. What inputs should one give to print the value of x as 10, and what is the idea behind those inputs?
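As background, a minimal standalone sketch of what strtol itself does; the endptr argument, which the code above passes as NULL, reports where parsing stopped:
#include <cstdio>
#include <cstdlib>

int main() {
    const char *s = "123abc";
    char *end = nullptr;
    long v = std::strtol(s, &end, 10);   // parses the leading decimal digits
    std::printf("value=%ld, parsing stopped at \"%s\"\n", v, end);
    // prints: value=123, parsing stopped at "abc"
    return 0;
}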

CUDA Thrust sort_by_key when the key is a tuple dealt with by zip_iterator's with custom comparison predicate

I've looked through a lot of questions here for something similar and there are quite a few, albeit with one minor change. I'm trying to sort values with a zip_iterator as a compound key.
Specifically, I have the following function:
void thrustSort(
unsigned int * primaryKey,
float * secondaryKey,
unsigned int * values,
unsigned int numberOfPoints)
{
thrust::device_ptr<unsigned int> dev_ptr_pkey = thrust::device_pointer_cast(primaryKey);
thrust::device_ptr<float> dev_ptr_skey = thrust::device_pointer_cast(secondaryKey);
thrust::device_ptr<unsigned int> dev_ptr_values = thrust::device_pointer_cast(values);
thrust::tuple<thrust::device_ptr<unsigned int>, thrust::device_ptr<float> > keytup_begin =
thrust::make_tuple<thrust::device_ptr<unsigned int>, thrust::device_ptr<float> >(dev_ptr_pkey, dev_ptr_skey);
thrust::zip_iterator<thrust::tuple<thrust::device_ptr<unsigned int>, thrust::device_ptr<float> > > first =
thrust::make_zip_iterator<thrust::tuple<thrust::device_ptr<unsigned int>, thrust::device_ptr<float> > >(keytup_begin);
thrust::sort_by_key(first, first + numberOfPoints, dev_ptr_values, ZipComparator());
}
and this custom predicate:
typedef thrust::device_ptr<unsigned int> tdp_uint ;
typedef thrust::device_ptr<float> tdp_float ;
typedef thrust::tuple<tdp_uint, tdp_float> tdp_uif_tuple ;
struct ZipComparator
{
__host__ __device__
inline bool operator() (const tdp_uif_tuple &a, const tdp_uif_tuple &b)
{
if(a.head < b.head) return true;
if(a.head == b.head) return a.tail < b.tail;
return false;
}
};
The errors I'm getting are:
Error 1 error : no instance of constructor "thrust::device_ptr::device_ptr [with T=unsigned int]" matches the argument list C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v4.0\include\thrust\detail\tuple.inl 309 1 ---
Error 2 error : no instance of constructor "thrust::device_ptr::device_ptr [with T=float]" matches the argument list C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v4.0\include\thrust\detail\tuple.inl 401 1 ---
Any ideas what might cause this / how do I write a predicate that indeed works?
Thanks in Advance,
Nathan
The comparator takes arguments of type const thrust::tuple<unsigned int, float>&. The const tdp_uif_tuple& type you defined expands to const thrust::tuple<thrust::device_ptr<unsigned int>, thrust::device_ptr<float> >&.
The code below compiles for me:
struct ZipComparator
{
__host__ __device__
inline bool operator() (const thrust::tuple<unsigned int, float> &a, const thrust::tuple<unsigned int, float> &b)
{
if(a.head < b.head) return true;
if(a.head == b.head) return a.tail < b.tail;
return false;
}
};
Hope it does for you as well :)
http://code.google.com/p/thrust/wiki/QuickStartGuide#zip_iterator has more details on the zip iterator.
Not required, but if you're looking to clean up the length of those templates, you can do this:
void thrustSort(
unsigned int * primaryKey,
float * secondaryKey,
unsigned int * values,
unsigned int numberOfPoints)
{
tdp_uint dev_ptr_pkey(primaryKey);
tdp_float dev_ptr_skey(secondaryKey);
tdp_uint dev_ptr_values(values);
thrust::tuple<tdp_uint, tdp_float> keytup_begin = thrust::make_tuple(dev_ptr_pkey, dev_ptr_skey);
thrust::zip_iterator<thrust::tuple<tdp_uint, tdp_float> > first =
thrust::make_zip_iterator(keytup_begin);
thrust::sort_by_key(first, first + numberOfPoints, dev_ptr_values, ZipComparator());
}
A lot of the template arguments can be inferred from the arguments.
This is a fully worked example of how to use sort_by_key when the key is a tuple handled by zip_iterators, with a customized comparison operator.
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include "Utilities.cuh"
// --- Defining tuple type
typedef thrust::tuple<int, int> Tuple;
/**************************/
/* TUPLE ORDERING FUNCTOR */
/**************************/
struct TupleComp
{
__host__ __device__ bool operator()(const Tuple& t1, const Tuple& t2)
{
if (t1.get<0>() < t2.get<0>())
return true;
if (t1.get<0>() > t2.get<0>())
return false;
return t1.get<1>() < t2.get<1>();
}
};
/********/
/* MAIN */
/********/
int main()
{
const int N = 8;
// --- Keys and values on the host: allocation and definition
int h_keys1[N] = { 1, 3, 3, 3, 2, 3, 2, 1 };
int h_keys2[N] = { 1, 5, 3, 8, 2, 8, 1, 1 };
float h_values[N] = { 0.3, 5.1, 3.2, -0.08, 2.1, 5.2, 1.1, 0.01};
printf("\n\n");
printf("Original\n");
for (int i = 0; i < N; i++) {
printf("%i %i %f\n", h_keys1[i], h_keys2[i], h_values[i]);
}
// --- Keys and values on the device: allocation
int *d_keys1; gpuErrchk(cudaMalloc(&d_keys1, N * sizeof(int)));
int *d_keys2; gpuErrchk(cudaMalloc(&d_keys2, N * sizeof(int)));
float *d_values; gpuErrchk(cudaMalloc(&d_values, N * sizeof(float)));
// --- Keys and values: host -> device
gpuErrchk(cudaMemcpy(d_keys1, h_keys1, N * sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_keys2, h_keys2, N * sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_values, h_values, N * sizeof(float), cudaMemcpyHostToDevice));
// --- From raw pointers to device_ptr
thrust::device_ptr<int> dev_ptr_keys1 = thrust::device_pointer_cast(d_keys1);
thrust::device_ptr<int> dev_ptr_keys2 = thrust::device_pointer_cast(d_keys2);
thrust::device_ptr<float> dev_ptr_values = thrust::device_pointer_cast(d_values);
// --- Declare outputs
thrust::device_vector<float> d_values_output(N);
thrust::device_vector<Tuple> d_keys_output(N);
auto begin_keys = thrust::make_zip_iterator(thrust::make_tuple(dev_ptr_keys1, dev_ptr_keys2));
auto end_keys = thrust::make_zip_iterator(thrust::make_tuple(dev_ptr_keys1 + N, dev_ptr_keys2 + N));
thrust::sort_by_key(begin_keys, end_keys, dev_ptr_values, TupleComp());
int *h_keys1_output = (int *)malloc(N * sizeof(int));
int *h_keys2_output = (int *)malloc(N * sizeof(int));
float *h_values_output = (float *)malloc(N * sizeof(float));
gpuErrchk(cudaMemcpy(h_keys1_output, d_keys1, N * sizeof(int), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_keys2_output, d_keys2, N * sizeof(int), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_values_output, d_values, N * sizeof(float), cudaMemcpyDeviceToHost));
printf("\n\n");
printf("Ordered\n");
for (int i = 0; i < N; i++) {
printf("%i %i %f\n", h_keys1_output[i], h_keys2_output[i], h_values_output[i]);
}
}

How to quickly find a image in another image using CUDA?

In my current project I need to find the pixel-exact position of an image contained in another image of larger size. The smaller image is never rotated or stretched (so it should match pixel by pixel), but it may have different brightness and some pixels in the image may be distorted. My first attempt was to do it on the CPU, but it was too slow. The calculations are very parallel, so I decided to use the GPU. I just started to learn CUDA and wrote my first CUDA app. My code works, but it is still too slow even on the GPU. When the larger image has a dimension of 1024x1280 and the smaller is 128x128, the program performs the calculations in 2000 ms on a GeForce GTX 560 Ti. I need to get results in less than 200 ms. In the future I'll probably need a more complex algorithm, so I'd rather have even more computational power in reserve. The question is: how can I optimize my code to achieve that speedup?
CUDAImageLib.dll:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cutil.h>
//#define SUPPORT_ALPHA
__global__ void ImageSearch_kernel(float* BufferOut, float* BufferB, float* BufferS, unsigned int bw, unsigned int bh, unsigned int sw, unsigned int sh)
{
unsigned int bx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int by = threadIdx.y + blockIdx.y * blockDim.y;
float diff = 0;
for (unsigned int y = 0; y < sh; ++y)
{
for (unsigned int x = 0; x < sw; ++x)
{
unsigned int as = (x + y * sw) * 4;
unsigned int ab = (x + bx + (y + by) * bw) * 4;
#ifdef SUPPORT_ALPHA
diff += ((abs(BufferS[as] - BufferB[ab]) + abs(BufferS[as + 1] - BufferB[ab + 1]) + abs(BufferS[as + 2] - BufferB[ab + 2])) * BufferS[as + 3] * BufferB[ab + 3]);
#else
diff += abs(BufferS[as] - BufferB[ab]);
diff += abs(BufferS[as + 1] - BufferB[ab + 1]);
diff += abs(BufferS[as + 2] - BufferB[ab + 2]);
#endif
}
}
BufferOut[bx + (by * (bw - sw))] = diff;
}
extern "C" int __declspec(dllexport) __stdcall ImageSearchGPU(float* BufferOut, float* BufferB, float* BufferS, int bw, int bh, int sw, int sh)
{
int aBytes = (bw * bh) * 4 * sizeof(float);
int bBytes = (sw * sh) * 4 * sizeof(float);
int cBytes = ((bw - sw) * (bh - sh)) * sizeof(float);
dim3 threadsPerBlock(32, 32);
dim3 numBlocks((bw - sw) / threadsPerBlock.x, (bh - sh) / threadsPerBlock.y);
float *dev_B = 0;
float *dev_S = 0;
float *dev_Out = 0;
unsigned int timer = 0;
float sExecutionTime = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_Out, cBytes);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_B, aBytes);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_S, bBytes);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_B, BufferB, aBytes, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_S, BufferS, bBytes, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cutCreateTimer(&timer);
cutStartTimer(timer);
// Launch a kernel on the GPU with one thread for each element.
ImageSearch_kernel<<<numBlocks, threadsPerBlock>>>(dev_Out, dev_B, dev_S, bw, bh, sw, sh);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cutStopTimer(timer);
sExecutionTime = cutGetTimerValue(timer);
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(BufferOut, dev_Out, cBytes, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_Out);
cudaFree(dev_B);
cudaFree(dev_S);
return (int)sExecutionTime;
}
extern "C" int __declspec(dllexport) __stdcall FindMinCPU(float* values, int count)
{
int minIndex = 0;
float minValue = 3.4e+38F;
for (int i = 0; i < count; ++i)
{
if (values[i] < minValue)
{
minValue = values[i];
minIndex = i;
}
}
return minIndex;
}
C# test app:
using System;
using System.Collections.Generic;
using System.Text;
using System.Diagnostics;
using System.Drawing;
namespace TestCUDAImageSearch
{
class Program
{
static void Main(string[] args)
{
using(Bitmap big = new Bitmap("Big.png"), small = new Bitmap("Small.png"))
{
Console.WriteLine("Big " + big.Width + "x" + big.Height + " Small " + small.Width + "x" + small.Height);
Stopwatch sw = new Stopwatch();
sw.Start();
Point point = CUDAImageLIb.ImageSearch(big, small);
sw.Stop();
long t = sw.ElapsedMilliseconds;
Console.WriteLine("Image found at " + point.X + "x" + point.Y);
Console.WriteLine("total time=" + t + "ms kernel time=" + CUDAImageLIb.LastKernelTime + "ms");
}
Console.WriteLine("Hit key");
Console.ReadKey();
}
}
}
//#define SUPPORT_HSB
using System;
using System.Collections.Generic;
using System.Text;
using System.Runtime.InteropServices;
using System.Drawing;
using System.Drawing.Imaging;
namespace TestCUDAImageSearch
{
public static class CUDAImageLIb
{
[DllImport("CUDAImageLib.dll")]
private static extern int ImageSearchGPU(float[] bufferOut, float[] bufferB, float[] bufferS, int bw, int bh, int sw, int sh);
[DllImport("CUDAImageLib.dll")]
private static extern int FindMinCPU(float[] values, int count);
private static int _lastKernelTime = 0;
public static int LastKernelTime
{
get { return _lastKernelTime; }
}
public static Point ImageSearch(Bitmap big, Bitmap small)
{
int bw = big.Width;
int bh = big.Height;
int sw = small.Width;
int sh = small.Height;
int mx = (bw - sw);
int my = (bh - sh);
float[] diffs = new float[mx * my];
float[] b = ImageToFloat(big);
float[] s = ImageToFloat(small);
_lastKernelTime = ImageSearchGPU(diffs, b, s, bw, bh, sw, sh);
int minIndex = FindMinCPU(diffs, diffs.Length);
return new Point(minIndex % mx, minIndex / mx);
}
public static List<Point> ImageSearch(Bitmap big, Bitmap small, float maxDeviation)
{
int bw = big.Width;
int bh = big.Height;
int sw = small.Width;
int sh = small.Height;
int mx = (bw - sw);
int my = (bh - sh);
int nDiff = mx * my;
float[] diffs = new float[nDiff];
float[] b = ImageToFloat(big);
float[] s = ImageToFloat(small);
_lastKernelTime = ImageSearchGPU(diffs, b, s, bw, bh, sw, sh);
List<Point> points = new List<Point>();
for(int i = 0; i < nDiff; ++i)
{
if (diffs[i] < maxDeviation)
{
points.Add(new Point(i % mx, i / mx));
}
}
return points;
}
#if SUPPORT_HSB
private static float[] ImageToFloat(Bitmap img)
{
int w = img.Width;
int h = img.Height;
float[] pix = new float[w * h * 4];
int i = 0;
for (int y = 0; y < h; ++y)
{
for (int x = 0; x < w; ++x)
{
Color c = img.GetPixel(x, y);
pix[i] = c.GetHue() / 360;
pix[i + 1] = c.GetSaturation();
pix[i + 2] = c.GetBrightness();
pix[i + 3] = c.A;
i += 4;
}
}
return pix;
}
#else
private static float[] ImageToFloat(Bitmap bmp)
{
int w = bmp.Width;
int h = bmp.Height;
int n = w * h;
float[] pix = new float[n * 4];
System.Diagnostics.Debug.Assert(bmp.PixelFormat == PixelFormat.Format32bppArgb);
Rectangle r = new Rectangle(0, 0, w, h);
BitmapData bmpData = bmp.LockBits(r, ImageLockMode.ReadOnly, bmp.PixelFormat);
System.Diagnostics.Debug.Assert(bmpData.Stride > 0);
int[] pixels = new int[n];
System.Runtime.InteropServices.Marshal.Copy(bmpData.Scan0, pixels, 0, n);
bmp.UnlockBits(bmpData);
int j = 0;
for (int i = 0; i < n; ++i)
{
pix[j] = (pixels[i] & 255) / 255.0f;
pix[j + 1] = ((pixels[i] >> 8) & 255) / 255.0f;
pix[j + 2] = ((pixels[i] >> 16) & 255) / 255.0f;
pix[j + 3] = ((pixels[i] >> 24) & 255) / 255.0f;
j += 4;
}
return pix;
}
#endif
}
}
Looks like what you are talking about is a well-known problem: template matching. The easiest way forward is to convolve the image (the bigger image) with the template (the smaller image). You could implement the convolution in one of two ways.
1) Modify the convolutions example from the CUDA SDK (similar to what you are doing anyway).
2) Use FFTs to implement the convolution. Ref. Convolution theorem. You will need to remember
% MATLAB format
L = size(A) + size(B) - 1;
conv2(A, B) = IFFT2(FFT2(A, L) .* FFT2(B, L));
You could use cuFFT to implement the 2-dimensional FFTs (after padding the inputs appropriately). You will need to write a kernel that does element-wise multiplication and then normalizes the result (because cuFFT does not normalize) before performing the inverse FFT.
For the sizes you mention (1024 x 1280 and 128 x 128), the inputs must be padded to at least ((1024 + 128 - 1) x (1280 + 128 - 1)) = 1151 x 1407. But FFTs are fastest when the (padded) inputs are powers of 2, so you will need to pad both the large and small images to size 2048 x 2048.
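As an illustration, the element-wise multiply-and-normalize kernel could look roughly like the sketch below. It assumes both padded spectra are stored as cufftComplex arrays of n = padded_width * padded_height elements; the kernel and pointer names are mine, not from cuFFT's examples.
#include <cufft.h>

// Sketch: C[k] = A[k] * B[k] * scale, i.e. element-wise complex multiplication
// of the two forward FFTs; scale = 1/n because cuFFT's inverse FFT is unnormalized.
__global__ void pointwiseMulAndScale(const cufftComplex *a, const cufftComplex *b,
                                     cufftComplex *c, int n, float scale)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k >= n) return;
    cufftComplex x = a[k], y = b[k];
    c[k].x = (x.x * y.x - x.y * y.y) * scale;   // real part
    c[k].y = (x.x * y.y + x.y * y.x) * scale;   // imaginary part
}

// launch example (assumes n and device pointers d_A, d_B, d_C are already set up):
//   int threads = 256;
//   pointwiseMulAndScale<<<(n + threads - 1) / threads, threads>>>(d_A, d_B, d_C, n, 1.0f / n);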
You could speed up your calculations by using faster memory access, for example by using
Texture Cache for the big image
Shared Memory or Constant Cache for the small image or parts of it.
But your real problem is the whole approach of your comparison. Comparing the images pixel by pixel at every possible location will never be efficient. There is just too much work to do. First you should think about finding ways to
Select the interesting image regions in the big image where the small image might be contained and only search in these
Find a faster comparison mechanism, using some representation of the images other than their raw pixel values. You should be able to compare the images by computing a representation with less data, e.g. a color histogram or integral images.