简体   繁体   中英

Why do I get two different point array renders on laptop, integrated and 1050Ti, python opengl code?

I currently use Asus TUF FX505G that has both integrated and 1050Ti graphic unit. While trying to render array of points to screen, I found out that I would get two different results depeding on which GPU is in use. On 1050Ti the code works not intended. Python version 3.10.8 1050Ti 上的不正确行为,已插入电源 使用主板 gpu 时的正确行为,拔下电源

Main.py

import glfw
import Shader
from OpenGL.GL import *
from Math_3d import Vector3f


class Window:
    def __init__(self, width: int, height: int, title: str):
        # Exit keyboard flag
        self.exitNow = False

        # Vertex Buffer Object
        # Create point vertex data
        self.v3f = Vector3f.data

        if not glfw.init():
            raise Exception("glfw can not be initialized")

        # OpenGL 4.0 version for shader subroutines and Dynamically uniform expression support
        glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
        glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 2)
        glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)

        self._win = glfw.create_window(width, height, title, None, None)

        if not self._win:
            glfw.terminate()
            raise Exception("glfw window can not be created")

        glfw.set_window_pos(self._win, 400, 200)
        glfw.make_context_current(self._win)
        # Enable key events
        # glfw.set_input_mode(self._win, glfw.STICKY_KEYS, GL_TRUE)
        # Enable key event callback from keyboard
        # glfw.set_key_callback(self._win, self.onKeyboard)
        self.program = glCreateProgram()
        self.buffer = glGenBuffers(1)

    def initialize(self):
        # Request program and shader slots from the GPU

        vertex = glCreateShader(GL_VERTEX_SHADER)
        fragment = glCreateShader(GL_FRAGMENT_SHADER)

        # Set shader sources
        glShaderSource(vertex, Shader.vertex_code)
        glShaderSource(fragment, Shader.fragment_code)
        # Compile shaders and check that they have compiled
        glCompileShader(vertex)
        glCompileShader(fragment)
        if not glGetShaderiv(vertex, GL_COMPILE_STATUS):
            report_shader = glGetShaderInfoLog(vertex)
            print(report_shader)
            raise RuntimeError("Vertex shader compilation error")

        if not glGetShaderiv(fragment, GL_COMPILE_STATUS):
            report_frag = glGetShaderInfoLog(fragment)
            print(report_frag)
            raise RuntimeError("Fragment shader compilation error")

        # Link shaders to program
        glAttachShader(self.program, vertex)
        glAttachShader(self.program, fragment)
        glLinkProgram(self.program)

        if not glGetProgramiv(self.program, GL_LINK_STATUS):
            print(glGetProgramInfoLog(self.program))
            raise RuntimeError('Linking error')
        # Get rid of shaders
        glDetachShader(self.program, vertex)
        glDetachShader(self.program, fragment)
        # Make default program to run
        glUseProgram(self.program)

        # Request a buffer slot from GPU
        # buffer = glGenBuffers(1)
        # Make this buffer the default one
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer)
        # Vertex Array Buffer
        vao = glGenVertexArrays(1)
        glBindVertexArray(vao)

        loc = glGetAttribLocation(self.program, 'position')
        glEnableVertexAttribArray(loc)
        glVertexAttribPointer(loc, self.v3f.itemsize, GL_FLOAT, False, self.v3f.data.strides[0], None)
        glBufferData(GL_ARRAY_BUFFER, self.v3f.data.nbytes, self.v3f.data, GL_DYNAMIC_DRAW)

    def renderscene(self):
        while not glfw.window_should_close(self._win) and not self.exitNow:
            glClear(GL_COLOR_BUFFER_BIT)
            glPointSize(30)
            glDrawArrays(GL_POINTS, 0, self.v3f.itemsize)

            glfw.swap_buffers(self._win)
            # Do not refresh screen before receiving event
            glfw.wait_events()
        glDisableVertexAttribArray(0)
        glUseProgram(0)
        glfw.terminate()


if __name__ == '__main__':
    win = Window(1024, 768, "GLFW Window - Rotate glPoints")
    win.initialize()  # Create and initialize shaders and initialize Vertex Buffer Object
    win.renderscene()  # Swap buffer and render scene

Shader.py

vertex_code = """
    attribute vec3 position;
    void main()
    {
        gl_Position = vec4(position, 1.0);
    }   
    """

fragment_code = """
    void main() 
    { 
        gl_FragColor = vec4(0.5, 0.5, 0.5, 1.0); 
    }
    """

Math_3d.py

from OpenGL.GL import *
import numpy as np
np.set_printoptions(precision=3, suppress=True)


class Vector3f:
    data = np.array([[0.5, 0.5, 0],
                    [-0.5, 0.5, 0],
                    [-0.5, -0.5, 0],
                    [0.5, -0.5, 0]], dtype=np.float32)

    def Rotate(self):
        pass

if __name__ == '__main__':
    vec3 = Vector3f
    print(vec3.data)
    print("Item size: ", vec3.data.itemsize)
    print("Strides: ", vec3.data.strides[0])
    print("Bytes:", vec3.data.nbytes)

Ran the code while using both gpus. There should be no apparent buffer overflow.

The problem is here:

 glVertexAttribPointer(loc, self.v3f.itemsize, GL_FLOAT, False, self.v3f.data.strides[0], None)

The 2nd argument of glVertexAttribPointer is the number of components of the attribute (x, y, z), which is 3. However, itemsize is the number of bytes of an item, which is 4 for an item of type float .

Correct is:

glVertexAttribPointer(loc, 3, GL_FLOAT, False, self.v3f.data.strides[0], None)

The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM