I am working on Google Colab and I want to use the libsvm library in my project. I downloaded libsvm and installed it. Now, when I use the !nvcc -o command and run the code using CUDA, I get errors like:
undefined reference to `svm_get_nr_class'
undefined reference to `svm_predict_probability'
undefined reference to `svm_free_and_destroy_model'
I guess the problem is that libsvm is not properly linked. I use -l with the proper flags when compiling with nvcc, but I don't know what to pass to -l to properly link libsvm and use it.
I downloaded libsvm using:
!git clone https://github.com/cjlin1/libsvm
%cd libsvm/
!make && make install
%cd /content/libsvm/python/
!make
import sys
sys.path.append('/content/libsvm/python')
%cd /content
Now, when I run this program:
%%cuda --name Blind_Deblurring_Cuda.cu
#include <iostream>
#include <fstream>
#include <iostream>
#include <fstream>
#include "/content/brisque.h"
#include "/content/libsvm/svm.h"
#include <vector>
#include <stdio.h>
#include "fstream"
#include "iostream"
#include <algorithm>
#include <iterator>
#include <cmath>
#include<stdlib.h>
#include <math.h>
#include <curand.h>
#include <opencv2/core/cuda.hpp>
#include <opencv2/core.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include <opencv2/core/core.hpp>
#include <iostream>
#include "opencv2/highgui.hpp"
#include <opencv2/core/utility.hpp>
//rescaling based on training data in libsvm
float rescale_vector[36][2];
using namespace std;
using namespace cv;
float computescore(string imagename);
void ComputeBrisqueFeature(Mat& orig, vector<double>& featurevector);
int read_range_file() {
//check if file exists
char buff[100];
int i;
string range_fname = "allrange";
FILE* range_file = fopen(range_fname.c_str(), "r");
if(range_file == NULL) return 1;
//assume standard file format for this program
fgets(buff, 100, range_file);
fgets(buff, 100, range_file);
//now we can fill the array
for(i = 0; i < 36; ++i) {
float a, b, c;
fscanf(range_file, "%f %f %f", &a, &b, &c);
rescale_vector[i][0] = b;
rescale_vector[i][1] = c;
}
return 0;
}
int main(int argc, char** argv)
{
if(argc < 2) {
cout << "Input Image argument not given." << endl;
return -1;
}
//read in the allrange file to setup internal scaling array
if(read_range_file()) {
cerr<<"unable to open allrange file"<<endl;
return -1;
}
float qualityscore;
qualityscore = computescore(argv[1]);
cout << "Quality Score: " << qualityscore << endl;
}
float computescore(string imagename) {
// pre-loaded vectors from allrange file
float min_[36] = {0.336999 ,0.019667 ,0.230000 ,-0.125959 ,0.000167 ,0.000616 ,0.231000 ,-0.125873 ,0.000165 ,0.000600 ,0.241000 ,-0.128814 ,0.000179 ,0.000386 ,0.243000 ,-0.133080 ,0.000182 ,0.000421 ,0.436998 ,0.016929 ,0.247000 ,-0.200231 ,0.000104 ,0.000834 ,0.257000 ,-0.200017 ,0.000112 ,0.000876 ,0.257000 ,-0.155072 ,0.000112 ,0.000356 ,0.258000 ,-0.154374 ,0.000117 ,0.000351};
float max_[36] = {9.999411, 0.807472, 1.644021, 0.202917, 0.712384, 0.468672, 1.644021, 0.169548, 0.713132, 0.467896, 1.553016, 0.101368, 0.687324, 0.533087, 1.554016, 0.101000, 0.689177, 0.533133, 3.639918, 0.800955, 1.096995, 0.175286, 0.755547, 0.399270, 1.095995, 0.155928, 0.751488, 0.402398, 1.041992, 0.093209, 0.623516, 0.532925, 1.042992, 0.093714, 0.621958, 0.534484};
double qualityscore;
int i;
struct svm_model* model; // create svm model object
Mat orig = imread(imagename, 1); // read image (color mode)
vector<double> brisqueFeatures; // feature vector initialization
ComputeBrisqueFeature(orig, brisqueFeatures); // compute brisque features
// use the pre-trained allmodel file
string modelfile = "allmodel";
//if((model=svm_load_model(modelfile.c_str()))==0) {
//fprintf(stderr,"can't open model file allmodel\n");
// exit(1);
//}
// float min_[37];
// float max_[37];
struct svm_node x[37];
// rescale the brisqueFeatures vector from -1 to 1
// also convert vector to svm node array object
for(i = 0; i < 36; ++i) {
float min = min_[i];
float max = max_[i];
x[i].value = -1 + (2.0/(max - min) * (brisqueFeatures[i] - min));
x[i].index = i + 1;
}
x[36].index = -1;
int nr_class=svm_get_nr_class(model);
double *prob_estimates = (double *) malloc(nr_class*sizeof(double));
// predict quality score using libsvm class
qualityscore = svm_predict_probability(model,x,prob_estimates);
free(prob_estimates);
svm_free_and_destroy_model(&model);
return qualityscore;
}
void ComputeBrisqueFeature(Mat& orig, vector<double>& featurevector)
{
Mat orig_bw_int(orig.size(), CV_64F, 1);
// convert to grayscale
cvtColor(orig, orig_bw_int, COLOR_BGR2GRAY);
// create a copy of original image
Mat orig_bw(orig_bw_int.size(), CV_64FC1, 1);
orig_bw_int.convertTo(orig_bw, 1.0/255);
orig_bw_int.release();
// orig_bw now contains the grayscale image normalized to the range 0,1
int scalenum = 2; // number of times to scale the image
for (int itr_scale = 1; itr_scale<=scalenum; itr_scale++)
{
// resize image
Size dst_size(orig_bw.cols/cv::pow((double)2, itr_scale-1), orig_bw.rows/pow((double)2, itr_scale-1));
Mat imdist_scaled;
resize(orig_bw, imdist_scaled, dst_size, 0, 0, INTER_CUBIC); // INTER_CUBIC
imdist_scaled.convertTo(imdist_scaled, CV_64FC1, 1.0/255.0);
// calculating MSCN coefficients
// compute mu (local mean)
Mat mu(imdist_scaled.size(), CV_64FC1, 1);
GaussianBlur(imdist_scaled, mu, Size(7, 7), 1.166);
Mat mu_sq;
cv::pow(mu, double(2.0), mu_sq);
//compute sigma (local sigma)
Mat sigma(imdist_scaled.size(), CV_64FC1, 1);
cv::multiply(imdist_scaled, imdist_scaled, sigma);
GaussianBlur(sigma, sigma, Size(7, 7), 1.166);
cv::subtract(sigma, mu_sq, sigma);
cv::pow(sigma, double(0.5), sigma);
add(sigma, Scalar(1.0/255), sigma); // to avoid DivideByZero Error
Mat structdis(imdist_scaled.size(), CV_64FC1, 1);
subtract(imdist_scaled, mu, structdis);
divide(structdis, sigma, structdis); // structdis is MSCN image
// Compute AGGD fit to MSCN image
double lsigma_best, rsigma_best, gamma_best;
structdis = AGGDfit(structdis, lsigma_best, rsigma_best, gamma_best);
featurevector.push_back(gamma_best);
featurevector.push_back((lsigma_best*lsigma_best + rsigma_best*rsigma_best)/2);
// Compute paired product images
// indices for orientations (H, V, D1, D2)
int shifts[4][2]={{0,1},{1,0},{1,1},{-1,1}};
for(int itr_shift=1; itr_shift<=4; itr_shift++)
{
// select the shifting index from the 2D array
int* reqshift = shifts[itr_shift-1];
// declare shifted_structdis as pairwise image
Mat shifted_structdis(imdist_scaled.size(), CV_64F, 1);
// create copies of the images using BwImage constructor
// utility constructor for better subscript access (for pixels)
BwImage OrigArr(structdis);
BwImage ShiftArr(shifted_structdis);
// create pair-wise product for the given orientation (reqshift)
for(int i=0; i<structdis.rows; i++)
{
for(int j=0; j<structdis.cols; j++)
{
if(i+reqshift[0]>=0 && i+reqshift[0]<structdis.rows && j+reqshift[1]>=0 && j+reqshift[1]<structdis.cols)
{
ShiftArr[i][j]=OrigArr[i + reqshift[0]][j + reqshift[1]];
}
else
{
ShiftArr[i][j]=0;
}
}
}
// Mat structdis_pairwise;
shifted_structdis = ShiftArr.equate(shifted_structdis);
// calculate the products of the pairs
multiply(structdis, shifted_structdis, shifted_structdis);
// fit the pairwise product to AGGD
shifted_structdis = AGGDfit(shifted_structdis, lsigma_best, rsigma_best, gamma_best);
double constant = sqrt(tgamma(1/gamma_best))/sqrt(tgamma(3/gamma_best));
double meanparam = (rsigma_best-lsigma_best)*(tgamma(2/gamma_best)/tgamma(1/gamma_best))*constant;
// push the calculated parameters from AGGD fit to pair-wise products
featurevector.push_back(gamma_best);
featurevector.push_back(meanparam);
featurevector.push_back(cv::pow(lsigma_best,2));
featurevector.push_back(cv::pow(rsigma_best,2));
}
}
}
// function to compute best fit parameters from AGGDfit
Mat AGGDfit(Mat structdis, double& lsigma_best, double& rsigma_best, double& gamma_best)
{
// create a copy of an image using BwImage constructor (brisque.h - more info)
BwImage ImArr(structdis);
long int poscount=0, negcount=0;
double possqsum=0, negsqsum=0, abssum=0;
for(int i=0;i<structdis.rows;i++)
{
for (int j =0; j<structdis.cols; j++)
{
double pt = ImArr[i][j]; // BwImage provides [][] access
if(pt>0)
{
poscount++;
possqsum += pt*pt;
abssum += pt;
}
else if(pt<0)
{
negcount++;
negsqsum += pt*pt;
abssum -= pt;
}
}
}
lsigma_best = cv::pow(negsqsum/negcount, 0.5);
rsigma_best = cv::pow(possqsum/poscount, 0.5);
double gammahat = lsigma_best/rsigma_best;
long int totalcount = (structdis.cols)*(structdis.rows);
double rhat = cv::pow(abssum/totalcount, static_cast<double>(2))/((negsqsum + possqsum)/totalcount);
double rhatnorm = rhat*(cv::pow(gammahat,3) +1)*(gammahat+1)/pow(pow(gammahat,2)+1,2);
double prevgamma = 0;
double prevdiff = 1e10;
float sampling = 0.001;
for (float gam=0.2; gam<10; gam+=sampling) //possible to coarsen sampling to quicken the code, with some loss of accuracy
{
double r_gam = tgamma(2/gam)*tgamma(2/gam)/(tgamma(1/gam)*tgamma(3/gam));
double diff = abs(r_gam-rhatnorm);
if(diff> prevdiff) break;
prevdiff = diff;
prevgamma = gam;
}
gamma_best = prevgamma;
return structdis.clone();
}
I then try to compile it using:
!nvcc -o /content/src/Blind_Deblurring_Cuda /content/src/Blind_Deblurring_Cuda.cu -lopencv_core -lopencv_imgcodecs -lopencv_imgproc -lopencv_highgui -lopencv_ml
It gives the following error
/tmp/tmpxft_00003d8d_00000000-10_Blind_Deblurring_Cuda.o: In function `computescore(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >)':
tmpxft_00003d8d_00000000-5_Blind_Deblurring_Cuda.cudafe1.cpp:(.text+0x9bc): undefined reference to `svm_get_nr_class'
tmpxft_00003d8d_00000000-5_Blind_Deblurring_Cuda.cudafe1.cpp:(.text+0x9fd): undefined reference to `svm_predict_probability'
tmpxft_00003d8d_00000000-5_Blind_Deblurring_Cuda.cudafe1.cpp:(.text+0xa27): undefined reference to `svm_free_and_destroy_model'
collect2: error: ld returned 1 exit status
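One thing I am considering trying (a sketch only, not something I have verified on Colab): the missing symbols are all defined in libsvm's svm.cpp, so compiling that file together with the .cu file, instead of relying on a -l flag, should satisfy the linker:
!nvcc -o /content/src/Blind_Deblurring_Cuda /content/src/Blind_Deblurring_Cuda.cu /content/libsvm/svm.cpp -lopencv_core -lopencv_imgcodecs -lopencv_imgproc -lopencv_highgui -lopencv_ml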
Following the same steps as the CUDA samples to launch a kernel and sync across the grid using cooperative_groups::this_grid().sync() causes every CUDA API call to fail, while using
cooperative_groups::this_thread_block().sync() works fine and gives correct results.
I used the following code and CMakeLists.txt (CMake version 3.11.1) to test it with CUDA 10 on a TITAN V GPU (driver version 410.73) under Ubuntu 16.04.5 LTS. The code is also available on GitHub to make it easy to reproduce the error.
The code reads an array and then reverses it (from [0 1 2 ... 9] to [9 8 7 ... 0]). To do this, each thread reads a single element from the array, syncs, and then writes its element to the right destination. The code can easily be modified to check that this_thread_block().sync() works fine: simply change arr_size to be less than 1024 and use cg::thread_block barrier = cg::this_thread_block(); instead.
test_cg.cu
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <stdint.h>
#include <cstdint>
#include <numeric>
#include <cuda.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
//********************** CUDA_ERROR
inline void HandleError(cudaError_t err, const char *file, int line) {
//Error handling micro, wrap it around function whenever possible
if (err != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err), file, line);
#ifdef _WIN32
system("pause");
#else
exit(EXIT_FAILURE);
#endif
}
}
#define CUDA_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//******************************************************************************
//********************** cg kernel
__global__ void testing_cg_grid_sync(const uint32_t num_elements,
uint32_t *d_arr){
uint32_t tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < num_elements){
uint32_t my_element = d_arr[tid];
//to sync across the whole grid
cg::grid_group barrier = cg::this_grid();
//to sync within a single block
//cg::thread_block barrier = cg::this_thread_block();
//wait for all reads
barrier.sync();
uint32_t tar_id = num_elements - tid - 1;
d_arr[tar_id] = my_element;
}
}
//******************************************************************************
//********************** execute
void execute_test(const int sm_count){
//host array
const uint32_t arr_size = 1 << 20; //1M
uint32_t* h_arr = (uint32_t*)malloc(arr_size * sizeof(uint32_t));
//fill with sequential numbers
std::iota(h_arr, h_arr + arr_size, 0);
//device array
uint32_t* d_arr;
CUDA_ERROR(cudaMalloc((void**)&d_arr, arr_size*sizeof(uint32_t)));
CUDA_ERROR(cudaMemcpy(d_arr, h_arr, arr_size*sizeof(uint32_t),
cudaMemcpyHostToDevice));
//launch config
const int threads = 512;
//following the same steps done in conjugateGradientMultiBlockCG.cu
//cuda sample to launch kernel that sync across grid
//https://github.com/NVIDIA/cuda-samples/blob/master/Samples/conjugateGradientMultiBlockCG/conjugateGradientMultiBlockCG.cu#L436
int num_blocks_per_sm = 0;
CUDA_ERROR(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm,
(void*)testing_cg_grid_sync, threads, 0));
dim3 grid_dim(sm_count * num_blocks_per_sm, 1, 1), block_dim(threads, 1, 1);
if(arr_size > grid_dim.x*block_dim.x){
printf("\n The grid size (numBlocks*numThreads) is less than array size.\n");
exit(EXIT_FAILURE);
}
printf("\n Launching %d blocks, each containing %d threads", grid_dim.x,
block_dim.x);
//argument passed to the kernel
void *kernel_args[] = {
(void *)&arr_size,
(void *)&d_arr, };
//finally launch the kernel
cudaLaunchCooperativeKernel((void*)testing_cg_grid_sync,
grid_dim, block_dim, kernel_args);
//make sure everything went okay
CUDA_ERROR(cudaGetLastError());
CUDA_ERROR(cudaDeviceSynchronize());
//get results on the host
CUDA_ERROR(cudaMemcpy(h_arr, d_arr, arr_size*sizeof(uint32_t),
cudaMemcpyDeviceToHost));
//validate
for (uint32_t i = 0; i < arr_size; i++){
if (h_arr[i] != arr_size - i - 1){
printf("\n Result mismatch in h_arr[%u] = %u\n", i, h_arr[i]);
exit(EXIT_FAILURE);
}
}
}
//******************************************************************************
int main(int argc, char**argv) {
//set to Titan V
uint32_t device_id = 0;
cudaSetDevice(device_id);
//get sm count
cudaDeviceProp devProp;
CUDA_ERROR(cudaGetDeviceProperties(&devProp, device_id));
int sm_count = devProp.multiProcessorCount;
//execute
execute_test(sm_count);
printf("\n Mission accomplished \n");
return 0;
}
CMakeLists.txt
cmake_minimum_required(VERSION 3.8 FATAL_ERROR)
set(PROJECT_NAME "test_cg")
project(${PROJECT_NAME} LANGUAGES CXX CUDA)
#default build type is Release
if (CMAKE_BUILD_TYPE STREQUAL "")
set(CMAKE_BUILD_TYPE Release)
endif ()
SET(CUDA_SEPARABLE_COMPILATION ON)
########## Libraries/flags Starts Here ######################
find_package(CUDA REQUIRED)
include_directories("${CUDA_INCLUDE_DIRS}")
set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS}; -lineinfo; -std=c++11; -expt-extended-lambda; -O3; -use_fast_math; -rdc=true;)
set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS};-gencode=arch=compute_70,code=sm_70) #for TITAN V
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m64 -Wall -std=c++11")
########## Libraries/flags Ends Here ######################
########## inc/libs/exe/features Starts Here ######################
set(CMAKE_INCLUDE_CURRENT_DIR ON)
CUDA_ADD_EXECUTABLE(${PROJECT_NAME} test_cg.cu)
target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_11)
set_target_properties(${PROJECT_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON)
set_target_properties(${PROJECT_NAME} PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
target_link_libraries(${PROJECT_NAME} ${CUDA_LIBRARIES} ${CUDA_cudadevrt_LIBRARY})
########## inc/libs/exe/features Ends Here ######################
Running this code gives:
unknown error in /home/ahdhn/test_cg/test_cg.cu at line 67
This is the first line that uses cudaMalloc. I made sure that the code is compiled for the correct architecture by querying __CUDA_ARCH__ from the device (along the lines of the sketch below), and the result is 700. Kindly let me know if you spot me doing something wrong in the code or the CMakeLists.txt file.
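(For completeness, this is the kind of sanity check I mean; a small stand-alone sketch, not part of test_cg.cu, with a hypothetical print_arch kernel plus a query of the cooperative-launch attribute:)
#include <cstdio>
#include <cuda_runtime_api.h>
//hypothetical helper: print the architecture the kernel was actually compiled for
__global__ void print_arch() {
#if defined(__CUDA_ARCH__)
    printf("__CUDA_ARCH__ = %d\n", __CUDA_ARCH__);
#endif
}
int main() {
    //also check that the device supports cooperative launch at all
    int coop = 0;
    cudaDeviceGetAttribute(&coop, cudaDevAttrCooperativeLaunch, 0);
    printf("cooperativeLaunch supported = %d\n", coop);
    print_arch<<<1, 1>>>();
    cudaDeviceSynchronize();
    return 0;
}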
With external help, the solution that got the code working is to add string(APPEND CMAKE_CUDA_FLAGS " -gencode arch=compute_70,code=sm_70 --cudart shared") after the second set(CUDA_NVCC_FLAGS ...) line. There are two reasons. First, I only have libcudadevrt.a under my /usr/local/cuda-10.0/lib64/, so I have to tell CUDA to link against the shared/dynamic runtime library, since the default is to link against the static one. Second, the sm_70 flag was otherwise not being passed to the linker properly.
Additionally, using only CUDA_NVCC_FLAGS passes the sm_70 information to the compiler but not to the linker, while using only CMAKE_CUDA_FLAGS reports the error: namespace "cooperative_groups" has no member "grid_group".
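For clarity, this is roughly where the extra line sits in the CMakeLists.txt above (only the flags section is shown; the rest of the file is unchanged):
set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS}; -lineinfo; -std=c++11; -expt-extended-lambda; -O3; -use_fast_math; -rdc=true;)
set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS};-gencode=arch=compute_70,code=sm_70) #for TITAN V
#pass the arch and the shared CUDA runtime to the device link step as well
string(APPEND CMAKE_CUDA_FLAGS " -gencode arch=compute_70,code=sm_70 --cudart shared")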
Sorry for my ignorance, but I am very new to FTDI chip Linux software development.
I have a module based on the FT245RL chip, programmed as a 4-port output (relays) and 4-port opto-isolated input unit.
I found a C program on the Internet that turns the relays connected to outputs D0 to D3 on and off. After compiling, it works properly. Below is a draft of this working program:
/* switch4.c
* # gcc -o switch4 switch4.c -L. -lftd2xx -Wl,-rpath,/usr/local/lib
* Usage
* # switch4 [0-15], for example # switch4 1
* */
#include <stdio.h>
#include <stdlib.h>
#include "./ftd2xx.h"
int main(int argc, char *argv[])
{
FT_STATUS ftStatus;
FT_HANDLE ftHandle0;
int parametr;
LPVOID pkod;
DWORD nBufferSize = 0x0001;
DWORD dwBytesWritten;
if(argc > 1) {
sscanf(argv[1], "%d", &parametr);
}
else {
parametr = 0;
}
FT_SetVIDPID(0x5555,0x0001); // id from lsusb
FT_Open(0,&ftHandle0);
FT_SetBitMode(ftHandle0,15,1);
pkod=&parametr;
ftStatus = FT_Write(ftHandle0,pkod,nBufferSize,&dwBytesWritten);
ftStatus = FT_Close(ftHandle0);
}
My question is: how can I read, in the same program, the status of pins D4 to D7, which are programmed as inputs? I mean printing to stdout a number representing the status (zero or one) of the input pins (or of all input/output pins).
Can anybody help a newbie?
UPDATE-1
This is my program with FT_GetBitMode
// # gcc -o read5 read5.c -L. -lftd2xx -Wl,-rpath,/usr/local/lib
#include <stdio.h>
#include <stdlib.h>
#include "./ftd2xx.h"
int main(int argc, char *argv[])
{
FT_STATUS ftStatus;
FT_HANDLE ftHandle0;
UCHAR BitMode;
FT_SetVIDPID(0x5555,0x0001); // id from lsusb
ftStatus = FT_Open(0,&ftHandle0);
if(ftStatus != FT_OK) {
printf("FT_Open failed");
return 1;
}
FT_SetBitMode(ftHandle0,15,1);
ftStatus = FT_GetBitMode(ftHandle0, &BitMode);
if (ftStatus == FT_OK) {
printf("BitMode contains - %d",BitMode);
}
else {
printf("FT_GetBitMode FAILED!");
}
ftStatus = FT_Close(ftHandle0);
}
But it returns "FT_GetBitMode FAILED!" instead of the value of BitMode.
FT_GetBitMode returns the instantaneous value of the pins. A single byte will be
returned containing the current values of the pins, both those which are inputs and
those which are outputs.
Source.
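Since FT_GetBitMode hands back all eight pin states in one byte, the D4-D7 inputs can be picked out with plain bit masking. Here is a small sketch along the lines of read5.c above (same VID/PID and the same 15/1 bit-bang mask; not verified on the actual hardware):
// read_inputs.c - sketch: print the state of input pins D4..D7
// # gcc -o read_inputs read_inputs.c -L. -lftd2xx -Wl,-rpath,/usr/local/lib
#include <stdio.h>
#include "./ftd2xx.h"
int main(void)
{
    FT_HANDLE ftHandle0;
    UCHAR pins;
    FT_SetVIDPID(0x5555,0x0001); // id from lsusb
    if(FT_Open(0,&ftHandle0) != FT_OK) {
        printf("FT_Open failed\n");
        return 1;
    }
    FT_SetBitMode(ftHandle0,15,1); // D0-D3 outputs, D4-D7 inputs
    if(FT_GetBitMode(ftHandle0, &pins) == FT_OK) {
        int i;
        for(i = 4; i <= 7; i++) // bits 4..7 hold the input pins
            printf("D%d = %d\n", i, (pins >> i) & 1);
    }
    else {
        printf("FT_GetBitMode FAILED!\n");
    }
    FT_Close(ftHandle0);
    return 0;
}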
Finally, I found out what was going wrong: I had used an incorrect version of the FTDI library. The correct version, dedicated to the x86_64 platform, is located here:
Link to FTDI library
I connected my Ultimate GPS Breakout to my Raspberry Pi using the USB-to-TTL serial cable. Using C code I can easily connect to and read NMEA sentences from the GPS. But when I write configuration commands such as PMTK220 to set the update rate, they are ignored. I should get back a PMTK_ACK reporting success or failure, but it is not forthcoming. The problem also occurs when I use terminal windows, i.e. I run:
while (true) do cat -A /dev/ttyUSB0 ; done
in one terminal, and get a stream of $GPGGA, $GPGSA, $GPRMC etc messages. In another terminal I run:
echo "$PMTK220,200*2C\r\n" > /dev/ttyUSB0
The NMEA messages continue but there is no PMTK001 coming back. Any ideas?
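One detail worth checking in that echo line: inside double quotes the shell expands $PMTK220 as an ordinary (unset) variable, and bash's built-in echo does not interpret \r\n without -e, so the port may not be receiving the sentence you think. A possible alternative, assuming the same device path, would be:
printf '$PMTK220,200*2C\r\n' > /dev/ttyUSB0
That said, the code below shows a full working exchange.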
Ok, here's some code (inspired by gpsd source) that demonstrates sending a configuration message and getting an acknowledgement:
#define _BSD_SOURCE
#include <stdio.h>
#include <stdarg.h>
#include <sys/ioctl.h>
#include <termios.h>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <semaphore.h>
#include <string.h>
#ifndef CRTSCTS
# ifdef CNEW_RTSCTS
# define CRTSCTS CNEW_RTSCTS
# else
# define CRTSCTS 0
# endif /* CNEW_RTSCTS */
#endif /* !CRTSCTS */
int main (int argc, char **argv) {
int const baudr = B9600, mode = O_RDWR | O_NONBLOCK | O_NOCTTY;
int const fd = open("/dev/ttyUSB0", mode);
assert (fd != -1);
ioctl(fd, (unsigned long)TIOCEXCL);
struct termios ttyOld, ttyNew;
assert(tcgetattr(fd, &ttyOld) != -1);
ttyNew = ttyOld;
ttyNew.c_cflag &= ~(CSIZE | PARENB | PARODD | CRTSCTS | CSTOPB);
ttyNew.c_cflag |= CREAD | CLOCAL | CS8;
ttyNew.c_iflag = ttyNew.c_oflag = 0;
ttyNew.c_lflag = ICANON;
cfsetispeed(&ttyNew, baudr);
cfsetospeed(&ttyNew, baudr);
assert(tcsetattr(fd, TCSANOW, &ttyNew) != -1);
tcflush(fd, TCIOFLUSH);
usleep(200000);
tcflush(fd, TCIOFLUSH);
int const oldfl = fcntl(fd, F_GETFL);
assert (oldfl != -1);
fcntl(fd, F_SETFL, oldfl & ~O_NONBLOCK);
tcdrain(fd);
printf("port opened baudr=%d mode=%d i=%d o=%d c=%d l=%d fl=%d\n", baudr, mode, ttyNew.c_iflag, ttyNew.c_oflag, ttyNew.c_cflag, ttyNew.c_lflag, oldfl);
unsigned char buf[2048];
int count = 0;
while(++count < 20) {
if (count == 4) {
char const *const cmd = "$PMTK220,200*2C\r\n";
int const n = strlen(cmd);
assert(write(fd, cmd, n) == n);
tcdrain(fd);
printf("wrote command %d: %s\n", n, cmd);
}
int const n = read(fd, buf, sizeof(buf));
buf[(n >= sizeof(buf)) ? (sizeof(buf) - 1) : n] = 0;
printf("%s", buf); /* print the buffer literally; don't use received data as a format string */
}
tcsetattr(fd, TCSANOW, &ttyOld);
close(fd);
}
with results:
pi@pi01 ~/c $ ./test
port opened baudr=13 mode=2306 i=0 o=0 c=3261 l=2 fl=2050
$GPGGA,175748.089,,,,,0,00,,,M,,M,,*71
$GPGSA,A,1,,,,,,,,,,,,,,,*1E
$GPRMC,175748.089,V,,,,,0.00,0.00,260715,,,N*43
wrote command 17: $PMTK220,200*2C
$GPVTG,0.00,T,,M,0.00,N,0.00,K,N*32
$PMTK001,220,2*31
$GPGGA,175749.089,,,,,0,00,,,M,,M,,*70
$GPGSA,A,1,,,,,,,,,,,,,,,*1E
$GPGSV,1,1,01,11,,,31*7A
$GPRMC,175749.089,V,,,,,0.00,0.00,260715,,,N*42
$GPVTG,0.00,T,,M,0.00,N,0.00,K,N*32
$GPGGA,175750.089,,,,,0,00,,,M,,M,,*78
$GPGSA,A,1,,,,,,,,,,,,,,,*1E
$GPRMC,175750.089,V,,,,,0.00,0.00,260715,,,N*4A
$GPVTG,0.00,T,,M,0.00,N,0.00,K,N*32
$GPGGA,175751.089,,,,,0,00,,,M,,M,,*79
$GPGSA,A,1,,,,,,,,,,,,,,,*1E
$GPRMC,175751.089,V,,,,,0.00,0.00,260715,,,N*4B
$GPVTG,0.00,T,,M,0.00,N,0.00,K,N*32
$GPGGA,175752.089,,,,,0,00,,,M,,M,,*7A
pi@pi01 ~/c $
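The *2C at the end of the PMTK sentence is simply the XOR of every character between the $ and the *. If you want to send other PMTK commands, a small helper like this (a sketch, not part of the program above) computes the checksum for you:
#include <stdio.h>

/* XOR of all characters between '$' and '*' gives the NMEA checksum */
static unsigned char nmea_checksum(const char *body)
{
    unsigned char sum = 0;
    while (*body)
        sum ^= (unsigned char)*body++;
    return sum;
}

int main(void)
{
    const char *body = "PMTK220,200";
    char sentence[128];
    snprintf(sentence, sizeof(sentence), "$%s*%02X\r\n", body, nmea_checksum(body));
    printf("%s", sentence); /* prints: $PMTK220,200*2C */
    return 0;
}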
I'm new to OpenMP, and I was trying to parallelize a Gaussian elimination, but I'm having trouble with performance. I'm compiling the code below using:
gcc -o gaussian_elimination gaussian_elimination.c -lm -lgsl -lgslcblas -fopenmp -Wall
and setting the number of threads in the terminal with export OMP_NUM_THREADS.
My problem is that the parallel version of this code runs much slower than the serial version. I believe this is because I declared #pragma omp parallel for inside the outer loop, which forces OpenMP to create and destroy the threads at every iteration, which would be incredibly costly, but I haven't seen any other clear way to do the same kind of operation, and I don't think I can swap the outer loop with the inner parallel ones (see the sketch at the end of this post).
I'm probably missing something, but I have not found any other forum threads here commenting on this particular problem. As far as correctness goes, my code seems to work fine; the problem is purely performance.
Thanks in Advance
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include <stdbool.h>
#include <time.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_rng.h>
#define DEBUG_MODE false
int random_matrix(double *A, int N,long long int seed);
int print_matrix(double *A, int N);
int print_vector(float *b,int N);
int main(int argc, char **argv){
int N=1000;
int i,j,k,l,i_p,s,err,D=N+1;
long long int seed=9089123498274; // just a fixed seed only not to bother
double *A,pivot,sw,tmp,begin,end,time_spent;
double *Aref,*bref;
gsl_matrix_view gsl_m;
gsl_vector_view gsl_b;
gsl_vector *gsl_x;
gsl_permutation *gsl_p;
/* Input */
//scanf("%d",&N);
A = (double*)malloc(N*(N+1)*sizeof(double));
if(A==NULL){
printf("Matrix A not allocated\n");
return 1;
}
Aref = (double*)malloc(N*N*sizeof(double));
if(Aref==NULL){
printf("Matrix A not allocated\n");
return 1;
}
bref = (double*)malloc(N*sizeof(double));
if(bref==NULL){
printf("Vector B not allocated\n");
return 2;
}
/*
for(i=0;i<N;i+=1)
for(j=0;j<N;j+=1)
scanf("%f",&(A[i*N+j]));
for(i=0;i<N;i+=1)
scanf("%f",&(b[i]));
*/
/*
for(i=0;i<N*N;i++)
A[i]=(float) a_data[i];
for(i=0;i<N;i+=1)
b[i]=(float) b_data[i]; */
err= random_matrix(A,N,seed);
if(err!=0)
return err;
for(i=0;i<N;i++)
for(j=0;j<N;j+=1)
Aref[i*N+j]= A[i*D+j];
for(i=0;i<N;i+=1)
bref[i]= A[i*D+N];//b[i];
printf("GSL reference:\n");
gsl_m = gsl_matrix_view_array (Aref, N, N);
gsl_b = gsl_vector_view_array (bref, N);
gsl_x = gsl_vector_alloc (N);
gsl_p = gsl_permutation_alloc(N);
begin = clock();
gsl_linalg_LU_decomp(&gsl_m.matrix, gsl_p, &s);
gsl_linalg_LU_solve(&gsl_m.matrix, gsl_p, &gsl_b.vector, gsl_x);
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("gsl matrix solver: %lf s\n",time_spent);
if(DEBUG_MODE==true)
gsl_vector_fprintf(stdout,gsl_x,"%f");
gsl_permutation_free(gsl_p);
gsl_vector_free(gsl_x);
begin = omp_get_wtime();
for(i=0;i<N;i+=1){
i_p = i;
pivot = fabs(A[i*D+i]);
for(j=i;j<N;j+=1)
if(pivot<fabs(A[j*D+i])){
pivot = fabs(A[j*D+i]);
i_p = j;
}
#pragma omp parallel for shared(i,N,A,i_p) private(j,sw)
for(j=i;j<D;j+=1){
sw = A[i*D+j];
A[i*D+j] = A[i_p*D+j];
A[i_p*D+j] = sw;
}
pivot=A[i*D+i];
#pragma omp parallel for shared(i,D,pivot,A) private(j)
for(j=0;j<D;j++)
A[i*D+j]=A[i*D+j]/pivot;
#pragma omp parallel for shared(i,A,N,D) private(tmp,j,k,l)
for(j=i+1;j<N+i;j++){
k=j%N;
tmp=A[k*D+i];
for(l=0;l<D;l+=1)
A[k*D+l]=A[k*D+l]-tmp*A[i*D+l];
}
}
end = omp_get_wtime();
time_spent = (end - begin);
printf("omp matrix solver: %lf s\n",time_spent);
/* Output */
if(DEBUG_MODE==true){
printf("\nCalculated: \n");
for(i=0;i<N;i+=1)
printf("%.6f \n",A[i*(N+1)+N]);
printf("\n");
}
free(A);
return 0;
}
int random_matrix(double *A, int N,long long int seed){
int i,j;
const gsl_rng_type * T;
gsl_rng *r;
gsl_rng_env_setup();
T = gsl_rng_default;
r = gsl_rng_alloc (T);
for(i=0;i<N;i++)
for(j=0;j<=N;j++)
A[i*(N+1)+j]= gsl_rng_uniform (r);
gsl_rng_free (r);
return 0;
}
int print_matrix(double *A, int N){
int i,j;
for(i=0;i<N;i++)
for(j=0;j<=N+1;j++){
if(j==0 || j==N || j==N+1)
printf(" | ");
printf("%.2f ",A[i*(N+1)+j]);
if(j==N+1)
printf("\n");
}
return 0;
}
int print_vector(float *b,int N){
int i;
for(i=0;i<N;i+=1)
printf("%f\n", b[i]);
return 0;
}
I updated the code above to use omp_get_wtime(), and now the measured wall time decreases as I add more and more threads, so it does behave as it should, although not as cleanly as I would like.
For 1000 x 1000 matrices I get 0.25 s for the GSL lib, 4.4 s for the serial omp run and 1.5 s for the 4-thread run.
For 3000 x 3000 matrices, I get ~ 9s for the GSL lib, ~ 117 s for the serial omp run and ~ 44 s for the 4 thread-run, thus at least adding more threads indeed speeds up the program!
Thanks a lot everyone
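For reference, here is the kind of restructuring referred to above: the parallel region is created once, outside the outer elimination loop, so the thread team is not re-built at every iteration. This is only a sketch of the idea (the function name gaussian_elimination_omp and the single-threaded pivot handling are my own choices), not the code that produced the timings above:
#include <math.h>
#include <omp.h>

/* A is the N x (N+1) augmented matrix, stored row-major as in the code above */
void gaussian_elimination_omp(double *A, int N)
{
    const int D = N + 1;
    #pragma omp parallel
    {
        for (int i = 0; i < N; i++) {
            /* pivot search, row swap and pivot-row normalization are O(N),
               so let one thread do them; the implicit barrier at the end of
               "single" keeps the team in step */
            #pragma omp single
            {
                int i_p = i;
                double pivot = fabs(A[i * D + i]);
                for (int j = i; j < N; j++)
                    if (fabs(A[j * D + i]) > pivot) {
                        pivot = fabs(A[j * D + i]);
                        i_p = j;
                    }
                for (int j = i; j < D; j++) {
                    double sw = A[i * D + j];
                    A[i * D + j] = A[i_p * D + j];
                    A[i_p * D + j] = sw;
                }
                double p = A[i * D + i];
                for (int j = 0; j < D; j++)
                    A[i * D + j] /= p;
            } /* implicit barrier */

            /* the O(N^2) elimination update is the part worth sharing */
            #pragma omp for
            for (int j = i + 1; j < N + i; j++) {
                int k = j % N;
                double tmp = A[k * D + i];
                for (int l = 0; l < D; l++)
                    A[k * D + l] -= tmp * A[i * D + l];
            } /* implicit barrier */
        }
    }
}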