I use the 3D convex hull implementation in CGAL to compute the convex polyhedron of a set of points. The code is shown below:
#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/point_generators_3.h>
#include <CGAL/algorithm.h>
#include <CGAL/Polyhedron_3.h>
#include <CGAL/convex_hull_3.h>
#include <vector>
#include <iostream>
// Kernel with exact predicates (robust orientation tests) but inexact,
// double-precision constructions -- the usual choice for convex_hull_3.
typedef CGAL::Exact_predicates_inexact_constructions_kernel Kernel;
typedef Kernel::Point_3 Point_3;
typedef CGAL::Polyhedron_3<Kernel> Polyhedron;
typedef Polyhedron::Facet_iterator Facet_iterator;
// Circulator over the halfedges bounding a single facet.
typedef Polyhedron::Halfedge_around_facet_circulator Halfedge_facet_circulator;
// Builds the convex hull of eight hard-coded points and prints, for each
// facet, its vertex count followed by the indices of its vertices
// (index = position in the polyhedron's vertex list).
int main()
{
std::vector<Point_3> points;
points.push_back(Point_3(2.0f, 3.535533905932738f, 3.535533905932737f));
points.push_back(Point_3(4.0f, 2.0f, 0.0f));
points.push_back(Point_3(0.0f, 2.0f, 0.0f));
points.push_back(Point_3(1.0f, 0.0f, 0.0f));
points.push_back(Point_3(4.0f, 1.414213562373095f, 1.414213562373095f));
points.push_back(Point_3(0.0f, 1.414213562373095f, 1.414213562373095f));
points.push_back(Point_3(3.0f, 0.0f, 0.0f));
points.push_back(Point_3(2.0f, 5.0f, 0.0f));
Polyhedron P;
// Compute the hull; as observed below, the resulting polyhedron has
// triangular facets (coplanar faces come out triangulated).
CGAL::convex_hull_3(points.begin(), points.end(), P);
std::cout << "- Number of vertices = " << P.size_of_vertices() << std::endl;
std::cout << "- Number of edges = " << P.size_of_halfedges()/2 << std::endl;
std::cout << "- Number of faces = " << P.size_of_facets() << std::endl;
// Walk every facet and emit "<count> <v0> <v1> ..." on one line each.
for ( Facet_iterator i = P.facets_begin(); i != P.facets_end(); ++i)
{
Halfedge_facet_circulator j = i->facet_begin();
// Any facet of a valid polyhedron is bounded by at least three halfedges.
CGAL_assertion( CGAL::circulator_size(j) >= 3);
std::cout << CGAL::circulator_size(j) << ' ';
do{
// Map the vertex handle to an index by its distance from the first
// vertex iterator -- O(V) per lookup, fine for a small hull.
std::cout << ' ' << std::distance(P.vertices_begin(), j->vertex());
}while ( ++j != i->facet_begin());
std::cout << std::endl;
}
return 0;
}
However, the output is a triangulation of faces of the polyhedron:
Is there a way to force CGAL to represent a face with a polygon not a set of triangles?
There is no way to do it out of the box. You need to test, for each edge, whether the four incident points are coplanar.
Related
This question already has an answer here:
Should I ever use a `vec3` inside of a uniform buffer or shader storage buffer object?
(1 answer)
Closed 1 year ago.
I encounter an issue when I transmit uniform buffer data from the CPU to the GPU side. It seems to have something to do with memory alignment between the CPU and GPU. I define a uniform buffer object as follows:
const int MAX_LIGHTS_NUM = 32;
// CPU-side mirror of the GLSL `Light` struct used inside the std140 uniform
// block. std140 places every vec3 in a 16-byte-aligned slot, while glm::vec3
// has only 4-byte alignment by default -- without the alignas qualifiers the
// C++ offsets drift from the GPU offsets after the first vec3, which is
// exactly the "color third channel is wrong" symptom described below.
struct Lights {
    alignas(16) glm::vec3 Position; // std140 offset 0
    alignas(16) glm::vec3 Color;    // std140 offset 16 (vec3 slot rounds up to 16)
    alignas(4) float Linear;        // offset 28 -- scalars only need 4-byte alignment
    alignas(4) float Quadratic;     // offset 32
    alignas(4) float intensity;     // offset 36; struct pads to 48 (alignment 16),
                                    // matching the std140 array element stride
};
// CPU-side mirror of the shader's `UniformBufferObject` uniform block.
// Vulkan GLSL uniform blocks use std140 layout by default, so vec3 members
// must sit on 16-byte boundaries and array element strides are multiples of
// 16; the alignas qualifiers pin the C++ layout to those rules.
struct UniformBufferObject {
    alignas(16) glm::mat4 mvp;     // offset 0
    alignas(16) glm::mat4 model;   // offset 64
    alignas(16) glm::vec3 camPos;  // offset 128 -- vec3 occupies a 16-byte slot
    alignas(4) int RealLightsNum;  // offset 140 -- packs into camPos's padding
    Lights lights[MAX_LIGHTS_NUM]; // offset 144; stride = sizeof(Lights), which
                                   // must be a multiple of 16 to match std140
};
and in shader code, it defines as:
// std140 uniform block (the default layout for Vulkan GLSL uniform blocks):
// each vec3 occupies a 16-byte-aligned slot and array elements have a
// 16-byte-multiple stride, so the CPU-side struct must match these offsets.
// NOTE(review): the element type is `Light` here but the C++ struct is named
// `Lights` -- presumably the GLSL `Light` struct is declared elsewhere; verify
// that its member order matches the C++ side.
layout(binding = 0) uniform UniformBufferObject {
mat4 mvp;
mat4 mMatrix;
vec3 CameraPosition;
int RealLightsNum; // packs into the 4 bytes of padding after the vec3
Light lights[MAX_LIGHTS_NUM];
} ubo;
I define MAX_LIGHTS_NUM as 32. Before copying the memory on the CPU side, I print all the ubo data and confirm that all the data on the CPU side is correct:
// Dumps the whole UniformBufferObject to the log so the CPU-side values can
// be compared field-by-field against a RenderDoc capture of the GPU buffer.
// Only the first `RealLightsNum` lights are printed.
void printUBO(UniformBufferObject &ubo) {
    LOGD("%s:%d Model matrix:%s", __FUNCTION__, __LINE__, glm::to_string(ubo.model).c_str());
    LOGD("%s:%d MVP matrix:%s", __FUNCTION__, __LINE__, glm::to_string(ubo.mvp).c_str());
    LOGD("%s:%d camPos:%s", __FUNCTION__, __LINE__, glm::to_string(ubo.camPos).c_str());
    // Fixed: sizeof yields size_t, which must be printed with %zu -- passing
    // it through %d is undefined behavior on LP64 targets.
    LOGD("%s:%d ubo size:%zu, RealLightsNum:%d", __FUNCTION__, __LINE__, sizeof(ubo), ubo.RealLightsNum);
    for (int i = 0; i < ubo.RealLightsNum; i++) {
        LOGD("%s:%d Light[%d].pos: %s",
             __FUNCTION__, __LINE__, i, glm::to_string(ubo.lights[i].Position).c_str());
        LOGD("%s:%d Light[%d].color: %s",
             __FUNCTION__, __LINE__, i, glm::to_string(ubo.lights[i].Color).c_str());
        // Fixed the unbalanced "(" in the label text.
        LOGD("%s:%d Light[%d] (intensity/linear/quadratic): %f, %f, %f",
             __FUNCTION__, __LINE__, i, ubo.lights[i].intensity, ubo.lights[i].Linear, ubo.lights[i].Quadratic);
    }
}
// Uploads the current CPU-side `ubo` into the uniform buffer that belongs to
// this node/primitive for the swapchain image being recorded.
void VulkanHal::drawPrimitive(int nodeId, unsigned int index, size_t offset, int mode, int count,
int type) {
if (initialized == false) return; // Vulkan objects not created yet -- nothing to do
// LOGD("%s:%d nodeId:%d, index:%u offset:%u, mode:%d, count:%d, type:%d",
// __FUNCTION__, __LINE__, nodeId, index, offset, mode, count, type);
// LOGD("%s:%d -", __FUNCTION__, __LINE__);
// LOGD("%s:%d ubo size:%d, RealLightsNum:%d", __FUNCTION__, __LINE__, sizeof(ubo), ubo.RealLightsNum);
printUBO(ubo); // debug: log CPU-side values for comparison with RenderDoc
int primIndex = getPrimitiveIndex(nodeId, index);
void* data;
// Map the buffer, copy the struct verbatim, unmap. sizeof(ubo) is the C++
// layout size -- it must equal the std140 size the shader expects, which is
// why the member alignment matters.
// NOTE(review): assumes the memory is HOST_VISIBLE|HOST_COHERENT (no flush
// here) -- confirm against the allocation; a persistent mapping would also
// avoid the map/unmap per draw.
vkMapMemory(device, uniformBuffersMemory[imageIndex][primIndex], 0, sizeof(ubo), 0, &data);
memcpy(data, &ubo, sizeof(ubo));
vkUnmapMemory(device, uniformBuffersMemory[imageIndex][primIndex]);
}
I captured the RenderDoc UBO data and found that the data up to RealLightsNum is correct, but of the Light data only the first entry is even partially correct: its position is right, but the third channel of its color is wrong.
I suspect this is a memory alignment issue, but I can't figure it out. Could somebody help with this? Thanks in advance.
Logs:
renderdoc capture:
I got the answer on link Vulkan Memory Alignment for Uniform Buffers. It says for each elements on CPU side, we should set alignment like:
an int/float: an alignment of 4
a vec2: an alignment of 8
a vec3, vec4, or mat4: an alignment of 16
It works after I set as follows:
const int MAX_LIGHTS_NUM = 32;
// Light struct matching the GLSL std140 layout: each vec3 occupies a
// 16-byte-aligned slot; the trailing scalars only need 4-byte alignment.
struct Lights {
alignas(16) glm::vec3 Position; // std140 offset 0
alignas(16) glm::vec3 Color;    // std140 offset 16
alignas(4) float Linear;        // offset 28
alignas(4) float Quadratic;     // offset 32
alignas(4) float intensity;     // offset 36; struct pads to 48 (alignment 16)
};
// UBO struct matching the shader's std140 block: mat4/vec3 members pinned to
// 16-byte boundaries so CPU offsets equal the GPU offsets.
struct UniformBufferObject {
alignas(16) glm::mat4 mvp;     // offset 0
alignas(16) glm::mat4 model;   // offset 64
alignas(16) glm::vec3 camPos;  // offset 128
alignas(4) int RealLightsNum;  // offset 140 -- fills the vec3's padding
Lights lights[MAX_LIGHTS_NUM]; // offset 144, element stride 48
};
Using linux and g++.
This works:
stringstream ss;
for (int k = 1; k < 1000; k++){
}
This should also work, but results in a "segmentation fault":
for (int k = 1; k <1000; k++){
stringstream ss;
}
Why?
Thank you Antonio Perez for your reply.
Actually my code was exactly this:
// WARNING: this #pragma pack(1) applies to everything that follows,
// including the standard-library headers. It changes the layout of the
// classes they declare (std::stringstream among them), while the prebuilt
// libstdc++ binary was compiled with the default layout -- an ABI mismatch
// and undefined behavior, observed here as a segfault when the stringstream
// is constructed/destroyed.
#pragma pack(1)
#include <sstream>
#include <iostream>
int main(){
for (int i = 0; i < 2; i++){
std::stringstream ss;
}
}
Amazingly if I displace the #pragma pack(1) like this:
#include <sstream>
// With <sstream> included first, the stream classes keep their default
// layout, so the crash disappears. Packing what <iostream> declares is
// still undefined behavior -- it merely happens to be harmless here.
#pragma pack(1)
#include <iostream>
int main(){
for (int i = 0; i < 2; i++){
std::stringstream ss;
}
}
...then no error occurs!
Is there a possible (non-bug) reason for why sstream does not permit packing of its structure?
I am trying to solve this problem on codeforces using dynamic programming. I have made the recurrence which is of O(N^2) complexity but it is timing out. I know that the complexity of this solution can be reduced to O(N) via Convex hull optimization which is explained here. But I am not able to reduce the complexity. Please help.
This is my code.
#include <bits/stdc++.h>
using namespace std;
#define MAX 100005
typedef long long ll;

// dp[i] = minimum cost to finish tree i; recurrence
//   dp[i] = min over j < i of (dp[j] + a[i] * b[j]).
// Each j contributes the line y = b[j] * x + dp[j]; dp[i] is the lower
// envelope of those lines evaluated at x = a[i]. The problem (CF 319C)
// guarantees a[] is increasing and b[] is decreasing, so lines arrive in
// decreasing-slope order and queries in increasing-x order -- the monotone
// convex hull trick answers each query in amortized O(1), total O(n).
ll a[MAX],b[MAX],dp[MAX];

// Lower-envelope hull: parallel slope/intercept arrays plus a forward-only
// query pointer (valid because query x values never decrease).
static vector<ll> hm, hc;
static size_t hp = 0;

// True when the middle line l2 is dominated everywhere by l1 and the new
// line (m3, c3): their intersection lies at or left of the l1/l2 one.
// __int128 avoids overflow (dp up to ~1e18 times slopes up to ~1e9).
static bool dominated(size_t l1, size_t l2, ll m3, ll c3) {
    return (__int128)(c3 - hc[l1]) * (hm[l1] - hm[l2])
        <= (__int128)(hc[l2] - hc[l1]) * (hm[l1] - m3);
}

// Insert a line; slopes must arrive in non-increasing order.
static void addLine(ll m, ll c) {
    if (!hm.empty() && hm.back() == m) {   // equal slopes: keep the lower line
        if (hc.back() <= c) return;
        hm.pop_back(); hc.pop_back();
    }
    while (hm.size() >= 2 && dominated(hm.size() - 2, hm.size() - 1, m, c)) {
        hm.pop_back(); hc.pop_back();
    }
    hm.push_back(m); hc.push_back(c);
    if (hp >= hm.size()) hp = hm.size() - 1;
}

// Minimum of all inserted lines at x; x values must be non-decreasing.
static ll query(ll x) {
    while (hp + 1 < hm.size()
           && hm[hp + 1] * x + hc[hp + 1] <= hm[hp] * x + hc[hp]) ++hp;
    return hm[hp] * x + hc[hp];
}

int main()
{
    int n;
    cin >> n;
    for (int i = 0; i < n; i++) cin >> a[i];
    for (int i = 0; i < n; i++) cin >> b[i];
    dp[0] = 0;                 // the first tree costs nothing to start with
    addLine(b[0], dp[0]);
    for (int i = 1; i < n; i++) {
        dp[i] = query(a[i]);   // min_j (dp[j] + a[i]*b[j]) in O(1) amortized
        addLine(b[i], dp[i]);
    }
    cout << dp[n-1];
}
In my application, I have to display YUV frames received from the server; the YUV image format is I420.
I have subclassed NSOpenGLView like this:
#interface RTCNSGLVideoView : NSOpenGLView <RTCVideoRenderer>
and the OpenGL view is created on the parent view like this:
// Lazily create the OpenGL-backed video view the first time it is needed.
if ( bUsingOpenGL && !pVideoRTCGLView){
// Request a double-buffered, 24-bit-depth, OpenGL 3.2 Core profile context.
NSOpenGLPixelFormatAttribute attributes[] = {
NSOpenGLPFADoubleBuffer,
NSOpenGLPFADepthSize, 24,
NSOpenGLPFAOpenGLProfile,
NSOpenGLProfileVersion3_2Core,
0
};
// Take over the placeholder image view's frame and sizing behavior.
NSRect frameRect = [pImageView frame];
NSOpenGLPixelFormat* pixelFormat =
[[NSOpenGLPixelFormat alloc] initWithAttributes:attributes];
pVideoRTCGLView = [[RTCNSGLVideoView alloc] initWithFrame:frameRect
pixelFormat:pixelFormat];
[pVideoRTCGLView setAutoresizingMask:[pImageView autoresizingMask]];
[pVideoRTCGLView setTranslatesAutoresizingMaskIntoConstraints:NO];
[pVideoRTCGLView setYUVFrameDelegate:self];
[[[self window] contentView] addSubview:pVideoRTCGLView];
// NOTE(review): -prepareOpenGL is normally called by AppKit once the
// context is made current; invoking it manually is unusual -- verify it
// is actually required here.
[pVideoRTCGLView prepareOpenGL];
}
I am displaying the I420 frame directly on the screen, and for that the vertex and fragment shader programs are like this:
#define RTC_PIXEL_FORMAT GL_RED
#define SHADER_VERSION "#version 150\n"
#define VERTEX_SHADER_IN "in"
#define VERTEX_SHADER_OUT "out"
#define FRAGMENT_SHADER_IN "in"
#define FRAGMENT_SHADER_OUT "out vec4 fragColor;\n"
#define FRAGMENT_SHADER_COLOR "fragColor"
#define FRAGMENT_SHADER_TEXTURE "texture"
#endif
// Vertex shader doesn't do anything except pass coordinates through: the
// quad is already in clip space and the texture coordinate is forwarded to
// the fragment stage unchanged.
static const char kVertexShaderSource[] =
SHADER_VERSION
VERTEX_SHADER_IN " vec2 position;\n"
VERTEX_SHADER_IN " vec2 texcoord;\n"
VERTEX_SHADER_OUT " vec2 v_texcoord;\n"
"void main() {\n"
"    gl_Position = vec4(position.x, position.y, 0.0, 1.0);\n"
"    v_texcoord = texcoord;\n"
"}\n";
// Fragment shader converts YUV values from input textures into a final RGB
// pixel. The conversion formula is from http://www.fourcc.org/fccyvrgb.php.
// It is the full-range BT.601-style formula; if the decoder emits
// limited-range (16..235) video the output will look slightly washed out --
// TODO confirm the source range.
// NOTE(review): the bottom/right corruption described in the question is
// typically caused by texture upload stride rather than by this shader --
// for odd frame widths the U/V plane rows are not multiples of 4 bytes, so
// glPixelStorei(GL_UNPACK_ALIGNMENT, 1) must be set before uploading the
// planes; verify the texture upload path.
static const char kFragmentShaderSource[] =
SHADER_VERSION
"precision highp float;"
FRAGMENT_SHADER_IN " vec2 v_texcoord;\n"
// One single-channel (GL_RED) texture per plane of the I420 frame.
"uniform lowp sampler2D s_textureY;\n"
"uniform lowp sampler2D s_textureU;\n"
"uniform lowp sampler2D s_textureV;\n"
FRAGMENT_SHADER_OUT
"void main() {\n"
"    float y, u, v, r, g, b;\n"
"    y = " FRAGMENT_SHADER_TEXTURE "(s_textureY, v_texcoord).r;\n"
"    u = " FRAGMENT_SHADER_TEXTURE "(s_textureU, v_texcoord).r;\n"
"    v = " FRAGMENT_SHADER_TEXTURE "(s_textureV, v_texcoord).r;\n"
"    u = u - 0.5;\n"
"    v = v - 0.5;\n"
"    r = y + 1.403 * v;\n"
"    g = y - 0.344 * u - 0.714 * v;\n"
"    b = y + 1.770 * u;\n"
"    " FRAGMENT_SHADER_COLOR " = vec4(r, g, b, 1.0);\n"
"  }\n";
// Compiles a shader of the given |type| with GLSL source |source| and returns
// the shader handle or 0 on error.
// Compiles a shader of the given |type| from GLSL |source|.
// Returns the shader handle, or 0 if creation or compilation fails.
GLuint CreateShader(GLenum type, const GLchar* source) {
  GLuint handle = glCreateShader(type);
  if (handle == 0)
    return 0;
  glShaderSource(handle, 1, &source, NULL);
  glCompileShader(handle);
  GLint ok = GL_FALSE;
  glGetShaderiv(handle, GL_COMPILE_STATUS, &ok);
  if (ok == GL_FALSE) {
    // Compilation failed: destroy the handle and report failure.
    glDeleteShader(handle);
    return 0;
  }
  return handle;
}
// Links a shader program with the given vertex and fragment shaders and
// returns the program handle or 0 on error.
// Links a program from the given vertex and fragment shaders.
// Returns the program handle, or 0 if either shader is invalid or the link
// step fails.
GLuint CreateProgram(GLuint vertexShader, GLuint fragmentShader) {
  if (vertexShader == 0 || fragmentShader == 0)
    return 0;
  GLuint prog = glCreateProgram();
  if (prog == 0)
    return 0;
  glAttachShader(prog, vertexShader);
  glAttachShader(prog, fragmentShader);
  glLinkProgram(prog);
  GLint ok = GL_FALSE;
  glGetProgramiv(prog, GL_LINK_STATUS, &ok);
  if (ok == GL_FALSE) {
    // Link failed: release the program object and report failure.
    glDeleteProgram(prog);
    return 0;
  }
  return prog;
}
In the subclassed view, I am setting the viewport size like this:
// Keeps the GL viewport in sync with the view's size. The context is shared
// with the render thread, so the GL call is bracketed by CGL locks.
- (void)reshape {
  [super reshape];
  NSRect frame = [self frame];
  // Fixed: the '#' characters were markdown artifacts for '@' -- the literal
  // must be an NSString (@"...") and the format specifier %@ for the NSString
  // returned by NSStringFromRect.
  NSLog(@"reshaping viewport (%@) ", NSStringFromRect(frame));
  CGLLockContext([[self openGLContext] CGLContextObj]);
  // NOTE(review): on a Retina display the viewport should use backing-store
  // pixels ([self convertRectToBacking:frame]) rather than points -- confirm,
  // as a too-small viewport can also produce edge artifacts.
  glViewport(0, 0, frame.size.width, frame.size.height);
  CGLUnlockContext([[self openGLContext] CGLContextObj]);
}
What I am seeing is that the bottom and right side are a bit corrupted; please refer to the image.
Please help me — I am not able to figure out what is going wrong.
If you look at the image, the red rectangles I have drawn at the bottom and on the right side mark where the unwanted content appears.
I am having difficulty making two processes communicate through pipes and subtract a number alternately.
Output should be like:
process1: 9
process2: 8
process1: 7...
What I've done so far:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

/*
 * One round-trip of a counter between a parent and its child over two pipes:
 * the parent seeds 9, the child prints and decrements it and sends it back,
 * then the parent prints and decrements it once more.
 *
 * NOTE(review): each process performs a single read/write, so the countdown
 * stops after two steps -- looping in both processes is required to reach 0.
 */
int main() {
    int p2c[2]; /* parent -> child */
    int c2p[2]; /* child -> parent */
    int n = 9;
    pipe(p2c);
    pipe(c2p);
    /* Seed the exchange so the child's first read has data to consume. */
    write(p2c[1], &n, sizeof(int));
    if (fork() == 0) {
        read(p2c[0], &n, sizeof(int));
        printf("Got from parent: %d", n);
        n--;
        write(c2p[1], &n, sizeof(int));
        close(p2c[0]);
        close(p2c[1]);
        close(c2p[0]);
        close(c2p[1]);
        exit(0);
    }
    else {
        read(c2p[0], &n, sizeof(int));
        printf("Got from child: %d", n);
        n--;
        /* Fixed: the ';' between the write() arguments was a typo for ','
         * (the original line did not compile). */
        write(p2c[1], &n, sizeof(int));
        close(p2c[0]);
        close(p2c[1]);
        close(c2p[0]);
        close(c2p[1]);
    }
    return 0;
}
With the output:
Got from parent:9
Got from child:8
What's the proper way to get these two processes to subtract the number down to 0?
It makes sense that you're only getting "Got from parent:9 Got from child:8" as a result: you need a while or for loop in both the child and the parent process to get what you're expecting, and the stop conditions for those loops are n < 0 after decrementing n, or the write end of the pipe being closed:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
// Parent and child alternately decrement n over two pipes until it drops
// below 0; each side loops, reading, printing, decrementing, and writing back.
int main() {
int p2c[2]; // parent -> child pipe
int c2p[2]; // child -> parent pipe
int n = 9;
pipe(p2c);
pipe(c2p);
// this is important to prevent a deadlock situation: at least one of the two
// processes must write before entering its loop, otherwise both reads block
// and each process waits forever for the other to write something to the pipe
write(p2c[1], &n, sizeof(int));
if(fork() == 0) {
int readStatus;
while(1){
readStatus=read(p2c[0], &n, sizeof(int));
// when read returns 0 the write end of the pipe was closed, so break the
// loop: no more data will be received
if(readStatus == 0) break;
printf("Got from parent: %d\n", n);
n--;
// if n dropped below 0 the countdown is finished
if(n < 0) break;
write(c2p[1], &n, sizeof(int));
}
close(p2c[0]);
close(p2c[1]);
close(c2p[0]);
close(c2p[1]);
exit(0);
}
else{
int readStatus;
while(1){
readStatus= read(c2p[0], &n, sizeof(int));
if(readStatus == 0) break;
printf("Got from child: %d\n", n);
n--;
if(n < 0) break;
write(p2c[1], &n, sizeof(int));
}
// closing both ends also unblocks the peer's pending read (it returns 0)
close(p2c[0]);
close(p2c[1]);
close(c2p[0]);
close(c2p[1]);
}
return 0;
}