Related
I'm trying to rewrite this first triangle example in Vala and failing. This is what I have so far:
public class MyApp : Gtk.Application {
private MyAppWindow? myAppWindow;
public string appName;
public MyApp () {
Object (
application_id: "com.github.myusername.myreponame",
flags: ApplicationFlags.FLAGS_NONE
);
appName = "My App";
}
protected override void activate () {
myAppWindow = new MyAppWindow (this);
add_window (myAppWindow);
myAppWindow.show_all ();
}
public static int main (string[] args) {
var myApp = new MyApp ();
return myApp.run (args);
}
}
public class MyAppWindow : Gtk.ApplicationWindow {
public MyApp myApp { get; construct set; }
private Gtk.HeaderBar headerBar;
private Gtk.GLArea glArea;
private Gtk.Frame frame;
// An array of 3 vectors which represents 3 vertices
private GLES2.GLfloat[] g_vertex_buffer_data = { -1.0f, -1.0f, 0.0f, 1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 0.0f };
// This will identify our vertex buffer
private GLES2.GLuint vertexbuffer;
private GLES2.GLuint programID;
public MyAppWindow(MyApp myApp) {
Object(myApp: myApp);
}
construct {
set_default_size (480, 640);
headerBar = new Gtk.HeaderBar ();
headerBar.set_show_close_button (true);
headerBar.set_title (myApp.appName);
glArea = new Gtk.GLArea ();
glArea.margin = 10;
frame = new Gtk.Frame ("GL Area");
frame.margin = 10;
// Generate 1 buffer, put the resulting identifier in vertexbuffer
GLES2.glGenBuffers(1, &vertexbuffer);
// The following commands will talk about our 'vertexbuffer' buffer
GLES2.glBindBuffer(GLES2.GL_ARRAY_BUFFER, vertexbuffer);
// Give our vertices to OpenGL.
GLES2.glBufferData(GLES2.GL_ARRAY_BUFFER, 9*4, g_vertex_buffer_data, GLES2.GL_STATIC_DRAW);
LoadShaders (out programID);
GLES2.glClearColor (0.0f, 0.0f, 0.0f, 0.0f);
glArea.render.connect (() => {
GLES2.glClear (GLES2.GL_COLOR_BUFFER_BIT | GLES2.GL_DEPTH_BUFFER_BIT);
GLES2.glUseProgram(programID);
// 1st attribute buffer : vertices
GLES2.glEnableVertexAttribArray(0);
GLES2.glBindBuffer(GLES2.GL_ARRAY_BUFFER, vertexbuffer);
GLES2.glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GLES2.GL_FLOAT, // type
GLES2.GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Draw the triangle !
GLES2.glDrawArrays(GLES2.GL_TRIANGLES, 0, 3); // Starting from vertex 0; 3 vertices total -> 1 triangle
GLES2.glDisableVertexAttribArray(0);
GLES2.glFlush ();
return true;
});
set_titlebar (headerBar);
frame.add (glArea);
add (frame);
}
void LoadShaders(out GLES2.GLuint ProgramID) {
GLES2.GLint Result = GLES2.GL_FALSE;
int InfoLogLength = 0;
// create vertex shader
GLES2.GLuint VertexShaderID = GLES2.glCreateShader(GLES2.GL_VERTEX_SHADER);
string VertexSource = "#version 330 core
layout(location = 0) in vec3 vertexPosition_modelspace;
void main(){
gl_Position.xyz = vertexPosition_modelspace;
gl_Position.w = 1.0;
}";
// compile vertex shader
GLES2.glShaderSource(VertexShaderID, 1, out VertexSource, null);
GLES2.glCompileShader(VertexShaderID);
// check vertex shader
GLES2.glGetShaderiv(VertexShaderID, GLES2.GL_COMPILE_STATUS, &Result);
GLES2.glGetShaderiv(VertexShaderID, GLES2.GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ) {
GLES2.GLchar[InfoLogLength+1] VertexShaderErrorMessage;
GLES2.glGetShaderInfoLog(VertexShaderID, InfoLogLength, null, &VertexShaderErrorMessage[0]);
}
// create fragment shader
GLES2.GLuint FragmentShaderID = GLES2.glCreateShader(GLES2.GL_FRAGMENT_SHADER);
string FragmentSource = "#version 330 core
out vec3 color;
void main(){
color = vec3(1,0,0);
}";
// compile fragment shader
GLES2.glShaderSource(FragmentShaderID, 1, out FragmentSource, null);
GLES2.glCompileShader(FragmentShaderID);
// check fragment shader
GLES2.glGetShaderiv(FragmentShaderID, GLES2.GL_COMPILE_STATUS, &Result);
GLES2.glGetShaderiv(FragmentShaderID, GLES2.GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
GLES2.GLchar[InfoLogLength+1] FragmentShaderErrorMessage;
GLES2.glGetShaderInfoLog(FragmentShaderID, InfoLogLength, null, &FragmentShaderErrorMessage[0]);
}
ProgramID = GLES2.glCreateProgram();
GLES2.glAttachShader(ProgramID, VertexShaderID);
GLES2.glAttachShader(ProgramID, FragmentShaderID);
GLES2.glLinkProgram(ProgramID);
// Check the program
GLES2.glGetProgramiv(ProgramID, GLES2.GL_LINK_STATUS, &Result);
GLES2.glGetProgramiv(ProgramID, GLES2.GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
GLES2.GLchar[InfoLogLength+1] ProgramErrorMessage;
GLES2.glGetProgramInfoLog(ProgramID, InfoLogLength, null, &ProgramErrorMessage[0]);
}
GLES2.glDetachShader(ProgramID, VertexShaderID);
GLES2.glDetachShader(ProgramID, FragmentShaderID);
GLES2.glDeleteShader(VertexShaderID);
GLES2.glDeleteShader(FragmentShaderID);
}
}
I compile it with valac --pkg gtk+-3.0 --vapidir=. --pkg gles2 valagl.vala -o valagl --Xcc=-lGLESv2. I have a gles2.vapi in the same folder.
There are two [-Wincompatible-pointer-types] warnings at compile time, in glShaderSource, for the shader source string. That could be where the problem is, but I do not know how to fix it:
expected ‘const GLchar * const* {aka const char * const*}’ but argument is of type ‘gchar ** {aka char **}’
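A minimal sketch of one possible fix, assuming the custom gles2.vapi can be edited; the declaration shown is an illustration of such a binding, not the actual contents of that vapi. The idea is to bind glShaderSource to a string array and pass the source wrapped in an array instead of with out:
// In gles2.vapi (hypothetical declaration; adjust to whatever is there now):
// public static void glShaderSource (GLuint shader, GLsizei count,
//     [CCode (array_length = false, type = "const GLchar* const*")] string[] sources,
//     [CCode (array_length = false)] GLint[]? length = null);

// In LoadShaders, for the vertex shader (and likewise for the fragment shader):
string[] vertex_sources = { VertexSource };
GLES2.glShaderSource (VertexShaderID, 1, vertex_sources, null);
Whether this exact declaration is right depends on how the vapi is currently written, but passing an array of sources is the usual shape for glShaderSource bindings.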
The example has a glfwSwapBuffers() step, and I'm not sure what needs to be done there. I use GLES2.glFlush (), but I do not understand how it connects to the GLArea I just created.
Also, valadoc goes on about an on_realize signal where shaders need to be initialized, but I can't seem to find an on_realize signal at all.
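For reference, a minimal sketch of what that hookup could look like, assuming the buffer and shader setup is moved out of construct: in the Vala bindings the signal is simply called realize (on_realize is just the handler name used in the documentation), and Gtk.GLArea presents the finished frame itself after the render handler returns, so nothing like glfwSwapBuffers is called by hand.
glArea.realize.connect (() => {
    glArea.make_current ();
    if (glArea.get_error () != null) {
        return;
    }
    // The GL context exists from this point on, so buffer and shader
    // creation is safe here rather than in construct.
    GLES2.glGenBuffers (1, &vertexbuffer);
    GLES2.glBindBuffer (GLES2.GL_ARRAY_BUFFER, vertexbuffer);
    GLES2.glBufferData (GLES2.GL_ARRAY_BUFFER, 9 * 4, g_vertex_buffer_data, GLES2.GL_STATIC_DRAW);
    LoadShaders (out programID);
});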
How do I draw a simple triangle in a GTK3 window using GLES2?
The program runs and shows a black GLArea. The only thing that works is the color of that area: I can change it by changing GLES2.glClearColor (0.0f, 0.0f, 0.0f, 0.0f).
Is it possible to change the color of an individual pixel with OpenGL ES 2.0? Right now, I have found that I can manage it using a vertex. I've used this method to draw it:
GLES20.glDrawArrays(GLES20.GL_POINTS, 0, 1);
The point size was set to the minimum so that only a single pixel is painted.
All good, until I needed to draw 3 to 4 million of them! It takes 5-6 seconds to initialize a single frame, which is far too slow given that the pixels will be updated constantly. The refresh rate should ideally be as close as possible to 60 fps.
How can I paint them in a more efficient way?
Note: the pixels must be painted individually!
My attempt is here (for a screen of 1440x2560 px):
package com.example.ctelescu.opengl_pixel_draw;
import android.opengl.GLES20;
import android.opengl.GLSurfaceView;
import android.opengl.Matrix;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
import javax.microedition.khronos.egl.EGLConfig;
import javax.microedition.khronos.opengles.GL10;
public class PixelDrawRenderer implements GLSurfaceView.Renderer {
private float[] mModelMatrix = new float[16];
private float[] mViewMatrix = new float[16];
private float[] mProjectionMatrix = new float[16];
private float[] mMVPMatrix = new float[16];
private final FloatBuffer mVerticesBuffer;
private int mMVPMatrixHandle;
private int mPositionHandle;
private int mColorHandle;
private final int mBytesPerFloat = 4;
private final int mStrideBytes = 7 * mBytesPerFloat;
private final int mPositionOffset = 0;
private final int mPositionDataSize = 3;
private final int mColorOffset = 3;
private final int mColorDataSize = 4;
public PixelDrawRenderer() {
// Define the vertices.
// final float[] vertices = {
// // X, Y, Z,
// // R, G, B, A
// -1f, 1f, 0.0f,
// 1.0f, 0.0f, 0.0f, 1.0f,
//
// -0.9f, 1.2f, 0.0f,
// 0.0f, 0.0f, 1.0f, 1.0f,
//
// -0.88f, 1.2f, 0.0f,
// 0.0f, 1.0f, 0.0f, 1.0f,
//
// -0.87f, 1.2f, 0.0f,
// 0.0f, 1.0f, 0.0f, 1.0f,
//
// -0.86f, 1.2f, 0.0f,
// 0.0f, 1.0f, 0.0f, 1.0f,
//
// -0.85f, 1.2f, 0.0f,
// 0.0f, 1.0f, 0.0f, 1.0f};
// Initialize the buffers.
mVerticesBuffer = ByteBuffer.allocateDirect(22579200 * mBytesPerFloat)
.order(ByteOrder.nativeOrder()).asFloatBuffer();
// mVerticesBuffer.put(vertices);
}
@Override
public void onSurfaceCreated(GL10 glUnused, EGLConfig config) {
// Set the background clear color to gray.
GLES20.glClearColor(0.5f, 0.5f, 0.5f, 0.5f);
// Position the eye behind the origin.
final float eyeX = 0.0f;
final float eyeY = 0.0f;
final float eyeZ = 1.5f;
// We are looking toward the distance
final float lookX = 0.0f;
final float lookY = 0.0f;
final float lookZ = -5.0f;
// Set our up vector. This is where our head would be pointing were we holding the camera.
final float upX = 0.0f;
final float upY = 1.0f;
final float upZ = 0.0f;
// Set the view matrix. This matrix can be said to represent the camera position.
// NOTE: In OpenGL 1, a ModelView matrix is used, which is a combination of a model and
// view matrix. In OpenGL 2, we can keep track of these matrices separately if we choose.
Matrix.setLookAtM(mViewMatrix, 0, eyeX, eyeY, eyeZ, lookX, lookY, lookZ, upX, upY, upZ);
final String vertexShader =
"uniform mat4 u_MVPMatrix; \n" // A constant representing the combined model/view/projection matrix.
+ "attribute vec4 a_Position; \n" // Per-vertex position information we will pass in.
+ "attribute vec4 a_Color; \n" // Per-vertex color information we will pass in.
+ "varying vec4 v_Color; \n" // This will be passed into the fragment shader.
+ "void main() \n" // The entry point for our vertex shader.
+ "{ \n"
+ " v_Color = a_Color; \n" // Pass the color through to the fragment shader.
// It will be interpolated across the vertex.
+ " gl_Position = u_MVPMatrix \n" // gl_Position is a special variable used to store the final position.
+ " * a_Position; \n" // Multiply the vertex by the matrix to get the final point in
+ " gl_PointSize = 0.1; \n"
+ "} \n"; // normalized screen coordinates.
final String fragmentShader =
"#ifdef GL_FRAGMENT_PRECISION_HIGH \n"
+ "precision highp float; \n"
+ "#else \n"
+ "precision mediump float; \n" // Set the default precision to medium. We don't need as high of a
// precision in the fragment shader.
+ "#endif \n"
+ "varying vec4 v_Color; \n" // This is the color from the vertex shader interpolated across the
// vertex per fragment.
+ "void main() \n" // The entry point for our fragment shader.
+ "{ \n"
+ " gl_FragColor = v_Color; \n" // Pass the color directly through the pipeline.
+ "} \n";
// Load in the vertex shader.
int vertexShaderHandle = GLES20.glCreateShader(GLES20.GL_VERTEX_SHADER);
if (vertexShaderHandle != 0) {
// Pass in the shader source.
GLES20.glShaderSource(vertexShaderHandle, vertexShader);
// Compile the shader.
GLES20.glCompileShader(vertexShaderHandle);
// Get the compilation status.
final int[] compileStatus = new int[1];
GLES20.glGetShaderiv(vertexShaderHandle, GLES20.GL_COMPILE_STATUS, compileStatus, 0);
// If the compilation failed, delete the shader.
if (compileStatus[0] == 0) {
GLES20.glDeleteShader(vertexShaderHandle);
vertexShaderHandle = 0;
}
}
if (vertexShaderHandle == 0) {
throw new RuntimeException("Error creating vertex shader.");
}
// Load in the fragment shader shader.
int fragmentShaderHandle = GLES20.glCreateShader(GLES20.GL_FRAGMENT_SHADER);
if (fragmentShaderHandle != 0) {
// Pass in the shader source.
GLES20.glShaderSource(fragmentShaderHandle, fragmentShader);
// Compile the shader.
GLES20.glCompileShader(fragmentShaderHandle);
// Get the compilation status.
final int[] compileStatus = new int[1];
GLES20.glGetShaderiv(fragmentShaderHandle, GLES20.GL_COMPILE_STATUS, compileStatus, 0);
// If the compilation failed, delete the shader.
if (compileStatus[0] == 0) {
GLES20.glDeleteShader(fragmentShaderHandle);
fragmentShaderHandle = 0;
}
}
if (fragmentShaderHandle == 0) {
throw new RuntimeException("Error creating fragment shader.");
}
// Create a program object and store the handle to it.
int programHandle = GLES20.glCreateProgram();
if (programHandle != 0) {
// Bind the vertex shader to the program.
GLES20.glAttachShader(programHandle, vertexShaderHandle);
// Bind the fragment shader to the program.
GLES20.glAttachShader(programHandle, fragmentShaderHandle);
// Bind attributes
GLES20.glBindAttribLocation(programHandle, 0, "a_Position");
GLES20.glBindAttribLocation(programHandle, 1, "a_Color");
// Link the two shaders together into a program.
GLES20.glLinkProgram(programHandle);
// Get the link status.
final int[] linkStatus = new int[1];
GLES20.glGetProgramiv(programHandle, GLES20.GL_LINK_STATUS, linkStatus, 0);
// If the link failed, delete the program.
if (linkStatus[0] == 0) {
GLES20.glDeleteProgram(programHandle);
programHandle = 0;
}
}
if (programHandle == 0) {
throw new RuntimeException("Error creating program.");
}
// Set program handles. These will later be used to pass in values to the program.
mMVPMatrixHandle = GLES20.glGetUniformLocation(programHandle, "u_MVPMatrix");
mPositionHandle = GLES20.glGetAttribLocation(programHandle, "a_Position");
mColorHandle = GLES20.glGetAttribLocation(programHandle, "a_Color");
// Tell OpenGL to use this program when rendering.
GLES20.glUseProgram(programHandle);
}
@Override
public void onSurfaceChanged(GL10 glUnused, int width, int height) {
// Set the OpenGL viewport to the same size as the surface.
GLES20.glViewport(0, 0, width, height);
// Create a new perspective projection matrix. The height will stay the same
// while the width will vary as per aspect ratio.
final float ratio = (float) width / height;
final float left = -ratio;
final float right = ratio;
final float bottom = -1.0f;
final float top = 1.0f;
final float near = 1.0f;
final float far = 10.0f;
Matrix.frustumM(mProjectionMatrix, 0, left, right, bottom, top, near, far);
float[] vertices = new float[22579200];
int counter = 0;
for (float i = -width / 2; i < width / 2; i++) {
for (float j = height / 2; j > -height / 2; j--) {
// Initialize the buffers.
vertices[counter++] = 2f * i * (1f / width); //X
vertices[counter++] = 2f * j * (1.5f / height); //Y
vertices[counter++] = 0; //Z
vertices[counter++] = 1f; //red
vertices[counter++] = 1f; //green
vertices[counter++] = 0f; //blue
vertices[counter++] = 1f; //alpha
}
}
mVerticesBuffer.put(vertices);
mVerticesBuffer.clear();
}
@Override
public void onDrawFrame(GL10 glUnused) {
GLES20.glClear(GLES20.GL_DEPTH_BUFFER_BIT | GLES20.GL_COLOR_BUFFER_BIT);
// Draw the vertices facing straight on.
Matrix.setIdentityM(mModelMatrix, 0);
drawVertices(mVerticesBuffer);
}
private void drawVertices(final FloatBuffer aVertexBuffer) {
// Pass in the position information
aVertexBuffer.position(mPositionOffset);
GLES20.glVertexAttribPointer(mPositionHandle, mPositionDataSize, GLES20.GL_FLOAT, false,
mStrideBytes, aVertexBuffer);
GLES20.glEnableVertexAttribArray(mPositionHandle);
// Pass in the color information
aVertexBuffer.position(mColorOffset);
GLES20.glVertexAttribPointer(mColorHandle, mColorDataSize, GLES20.GL_FLOAT, false,
mStrideBytes, aVertexBuffer);
GLES20.glEnableVertexAttribArray(mColorHandle);
// This multiplies the view matrix by the model matrix, and stores the result in the MVP matrix
// (which currently contains model * view).
Matrix.multiplyMM(mMVPMatrix, 0, mViewMatrix, 0, mModelMatrix, 0);
// This multiplies the modelview matrix by the projection matrix, and stores the result in the MVP matrix
// (which now contains model * view * projection).
Matrix.multiplyMM(mMVPMatrix, 0, mProjectionMatrix, 0, mMVPMatrix, 0);
GLES20.glUniformMatrix4fv(mMVPMatrixHandle, 1, false, mMVPMatrix, 0);
GLES20.glDrawArrays(GLES20.GL_POINTS, 0, 3225600);
}
}
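For the efficiency question above, one commonly used alternative to issuing one GL_POINTS vertex per pixel is to keep the pixel colors in a byte buffer, upload it to a texture once per frame, and draw a single textured quad. A rough sketch (pixelBuffer, texId, width and height are illustrative names that do not appear in the code above):
// Once per frame: copy the current width x height RGBA pixel array into a
// texture. texId is assumed to have been created earlier with
// GLES20.glGenTextures and sized with GLES20.glTexImage2D.
pixelBuffer.position(0);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, texId);
GLES20.glTexSubImage2D(GLES20.GL_TEXTURE_2D, 0, 0, 0, width, height,
        GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, pixelBuffer);
// Then draw one full-screen quad (two triangles) whose fragment shader
// samples this texture, instead of issuing millions of GL_POINTS vertices.
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
Each texel still maps to one screen pixel, so the pixels are still set individually, but only one draw call is issued per frame.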
I'm learning OpenGL ES 2.0 on Android and have some questions. I hope you can help me out.
According to the Android Developers website, "...OpenGL ES allows you to define drawn objects using coordinates in three-dimensional space." Is this strictly true, or are they "hiding" the "w" coordinate? Normal space has 3 coordinates, but clip space has 4 (x, y, z, w), right?
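For reference, the step that reconciles the two views: the vertex shader writes a four-component clip-space position to gl_Position, and the hardware then divides by w to obtain normalized device coordinates,
\[(x_c,\; y_c,\; z_c,\; w_c) \;\longmapsto\; \left(\tfrac{x_c}{w_c},\; \tfrac{y_c}{w_c},\; \tfrac{z_c}{w_c}\right).\]
With w = 1.0, as in the simple examples here, the divide changes nothing, which is why the documentation can speak of plain three-dimensional coordinates.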
Why is the GLES20.glBindBuffer(arg0, arg1) function only defined for int values if my triangle is defined in a float array?
Even if I don't include the call glBindBuffer(GL_ARRAY_BUFFER, myPositionBufferObject);, my program still works. How can this work if I'm not binding my buffer to the OpenGL target GL_ARRAY_BUFFER?
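A short sketch contrasting the two paths, using the names from the Triangle class below: the int that glBindBuffer takes is a buffer object name created by glGenBuffers (a handle, not the vertex data), and the code works without it because the FloatBuffer overload of glVertexAttribPointer reads the data from client memory on every draw call instead of from a bound buffer object.
// Client-memory path (what the Triangle class below uses): the positions stay
// in a java.nio.FloatBuffer and are re-read from it at every draw call.
GLES20.glVertexAttribPointer(0, 4, GLES20.GL_FLOAT, false, 0, positionBufferObject);

// Buffer-object path: glGenBuffers returns an int name, the float data is
// uploaded once with glBufferData, and glVertexAttribPointer then takes a byte
// offset into the currently bound buffer instead of a Java Buffer.
int[] vbo = new int[1];
GLES20.glGenBuffers(1, vbo, 0);
GLES20.glBindBuffer(GLES20.GL_ARRAY_BUFFER, vbo[0]);
GLES20.glBufferData(GLES20.GL_ARRAY_BUFFER, vertexPositions.length * 4,
        positionBufferObject, GLES20.GL_STATIC_DRAW);
GLES20.glVertexAttribPointer(0, 4, GLES20.GL_FLOAT, false, 0, 0);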
This is the code for my Triangle class. It's reduced to the bare minimum needed to render the simplest triangle.
public class Triangle {
ByteBuffer myByteBuffer;
FloatBuffer positionBufferObject;
int mProgram;
public Triangle() {
float vertexPositions[] = {
0.75f, 0.75f, 0.0f, 1.0f,
0.75f, -0.75f, 0.0f, 1.0f,
-0.75f, -0.75f, 0.0f, 1.0f
};
myByteBuffer = ByteBuffer.allocateDirect(vertexPositions.length * 4);
myByteBuffer.order(ByteOrder.nativeOrder());
positionBufferObject = myByteBuffer.asFloatBuffer();
positionBufferObject.put(vertexPositions);
positionBufferObject.position(0);
String vertexShaderCode =
"attribute vec4 vPosition;" +
"void main() {" +
" gl_Position = vPosition;" +
"}";
String fragmentShaderCode =
"precision mediump float;" +
"uniform vec4 vColor;" +
"void main() {" +
" gl_FragColor = vColor;" +
"}";
int myVertexShader = GLES20.glCreateShader(GLES20.GL_VERTEX_SHADER);
GLES20.glShaderSource(myVertexShader, vertexShaderCode);
GLES20.glCompileShader(myVertexShader);
int myFragmentShader = GLES20.glCreateShader(GLES20.GL_FRAGMENT_SHADER);
GLES20.glShaderSource(myFragmentShader, fragmentShaderCode);
GLES20.glCompileShader(myFragmentShader);
mProgram = GLES20.glCreateProgram();
GLES20.glAttachShader(mProgram, myVertexShader);
GLES20.glAttachShader(mProgram, myFragmentShader);
GLES20.glLinkProgram(mProgram);
}
public void draw() {
GLES20.glUseProgram(mProgram);
GLES20.glEnableVertexAttribArray(0);
GLES20.glVertexAttribPointer(0, 4, GLES20.GL_FLOAT, false, 0, positionBufferObject);
GLES20.glDrawArrays(GLES20.GL_TRIANGLES, 0, 3);
}
}
I'm developing a 2D game using libGDX and Box2D. The camera follows a body, and I want the background (a 3200x480 PNG) to scroll as well. The camera works fine in the Box2D world, but the background is not moving. I have searched and tried many solutions, but nothing helped. What is the best solution for this? Maybe two cameras, one for the physics world and a second for the libGDX scene? Has anyone had a similar problem? Thanks for the help.
Here are parts of my code:
float w = 320;
float h = 480;
@Override
public void show() {
camera = new OrthographicCamera();
camera.setToOrtho(false, w/2, h/2);
world = new World(new Vector2(0, -20), true);
box2DDebugRenderer = new Box2DDebugRenderer();
background = new Texture(Gdx.files.internal("gfx/forklift/background.png"));
bg = new Sprite(background);
......}
@Override
public void render(float delta) {
Gdx.gl.glClearColor(1f, 1f, 1f, 1f);
Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT);
moveCamera(dynamicBody.getPosition().x);
moveBody();
batch.begin();
batch.draw(background, 0, 0, 3200, 480);
batch.end();
Gdx.app.log("", ""+dynamicBody.getPosition());
world.step(1/60f, 6, 2);
box2DDebugRenderer.render(world, camera.combined);}
public void moveCamera(float x){
camera.position.set(x, dynamicBody.getPosition().y, 0);
camera.update();
}
@Override
public void resize(int w, int h) {
camera = new OrthographicCamera(w, h);
camera.translate(h/2, w/2, 0);
super.resize(w, h);
}
Use batch.setProjectionMatrix(camera.combined) before batch.begin().
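In context, that would look roughly like this in the render() method posted above (a sketch against the posted code, keeping its existing calls):
moveCamera(dynamicBody.getPosition().x);
moveBody();
// Make the batch use the camera's view, so the background is drawn in the
// same world coordinates the Box2D debug renderer already uses.
batch.setProjectionMatrix(camera.combined);
batch.begin();
batch.draw(background, 0, 0, 3200, 480);
batch.end();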
I'm creating a small drawing program in Mono GTK# using the Cairo graphics library. I'm coding and compiling on a Mac OS X system. I have a drawable object whose contents I copy into a Pixbuf at a certain point and later draw back into the drawable. The idea is to take a "snapshot" of the image in the drawable and then draw on top of it.
The problem is that when I put the Pixbuf back into the drawable it looks wrong: all yellow with stripes, and a portion of the image appears to be missing.
UPDATE: I ran the program on my Linux and Windows machines and it works flawlessly there, so this error only occurs on Mac OS X.
Here's the code:
// use: gmcs -pkg:gtk-sharp-2.0 -pkg:mono-cairo ttv1.cs
using Gtk;
using Cairo;
using System;
public class Draw : Window
{
DrawingArea canvas;
public Gdk.Pixbuf pixbuf;
public Draw() : base("teikniteink")
{
canvas = new DrawingArea();
canvas.ExposeEvent += canvasExposed;
DeleteEvent += delegate { Application.Quit();};
KeyPressEvent += onKey;
SetDefaultSize(400,400);
SetPosition(WindowPosition.Center);
Add(canvas);
ShowAll();
}
private void onKey(object o, KeyPressEventArgs args)
{
switch (args.Event.Key)
{
case Gdk.Key.w:
Console.WriteLine("Key Pressed {0}", args.Event.Key);
// Send to Pixbuf
pixbuf = Gdk.Pixbuf.FromDrawable(canvas.GdkWindow, Gdk.Colormap.System,0,0,0,0,400,400);
// Save to output.png
pixbuf.Save ("output.png", "png");
break;
case Gdk.Key.e:
Console.WriteLine("Key Pressed {0}", args.Event.Key);
Gdk.GC g = new Gdk.GC(canvas.GdkWindow);
// Retrive from pixbuf
canvas.GdkWindow.DrawPixbuf (g,pixbuf,0,0,0,0,-1,-1,Gdk.RgbDither.Normal,0,0);
break;
}
}
private void canvasExposed(object o, ExposeEventArgs args)
{
using (Cairo.Context ctx = Gdk.CairoHelper.Create(canvas.GdkWindow))
{
PointD start = new PointD(100,100);
PointD end = new PointD(300,300);
double width = Math.Abs(start.X - end.X);
double height = Math.Abs(start.Y - end.Y);
double xcenter = start.X + (end.X - start.X) / 2.0;
double ycenter = start.Y + (end.Y - start.Y) / 2.0;
ctx.Save();
ctx.Translate(xcenter, ycenter);
ctx.Scale(width/2.0, height/2.0);
ctx.Arc(0.0, 0.0, 1.0, 0.0, 2*Math.PI);
ctx.Restore();
ctx.Stroke();
}
}
public static void Main()
{
Application.Init();
new Draw();
Application.Run();
}
}
It would be very much appreciated if someone knows what's going on here and can point me in the right direction to fix it.
I triggered the same problem in this manner:
gw = gtk_widget_get_window(GTK_WIDGET(GLOBALS->mainwindow));
if(gw)
{
gdk_drawable_get_size(gw, &w, &h);
cm = gdk_drawable_get_colormap(gw);
if(cm)
{
dest = gdk_pixbuf_new(GDK_COLORSPACE_RGB, FALSE, 8, w, h);
if(dest)
{
dest2 = gdk_pixbuf_get_from_drawable(dest, gw, cm, 0, 0, 0, 0, w, h);
if(dest2)
{
succ = gdk_pixbuf_save (dest2, *GLOBALS->fileselbox_text, "png", &err, NULL);
}
}
}
}
The gdk_pixbuf_get_from_drawable() function has issues when the source drawable is a Quartz window, specifically in how _gdk_quartz_image_copy_to_image() services it. In short, 256-bit vertical strips are converted, but the conversion routine assumes the pixels are 24-bit RGB rather than 32-bit RGBA. The following patch fixed the problem for me:
--- gtk+/gdk/quartz/gdkimage-quartz.c 2011-12-03 14:24:03.000000000 -0600
+++ gtk+664894/gdk/quartz/gdkimage-quartz.c 2013-10-15 18:52:24.000000000 -0500
@@ -150,6 +150,10 @@ _gdk_quartz_image_copy_to_image (GdkDraw
data = [rep bitmapData];
size = [rep size];
+ int bpr = [rep bytesPerRow];
+ int wid = size.width;
+ int bpx = bpr/wid;
+
for (y = 0; y < size.height; y++)
{
guchar *src = data + y * [rep bytesPerRow];
@@ -158,12 +162,15 @@ _gdk_quartz_image_copy_to_image (GdkDraw
{
gint32 pixel;
+ if (bpx == 4) // fix gdk_pixbuf_get_from_drawable "yellow stripes"
+ pixel = src[0] << 16 | src[1] << 8 | src[2];
+ else
if (image->byte_order == GDK_LSB_FIRST)
pixel = src[0] << 8 | src[1] << 16 |src[2] << 24;
else
pixel = src[0] << 16 | src[1] << 8 |src[2];
- src += 3;
+ src += bpx;
gdk_image_put_pixel (image, dest_x + x, dest_y + y, pixel);
}
I don't know if this was fixed in later versions of the GTK OSX source. I build from my own copy to produce gtkwave binaries, as I have some necessary patches that were seemingly never integrated into the jhbuild source tree long ago.
-Tony