I have a class called Texture. It is responsible for managing textures. At program startup the OpenGL context is already properly initialized (which sets this question apart from most questions about unexpected glGenTextures behaviour). Texture obtains a texture name from glGenTextures(), then loads the image data into memory and binds it to that name, all inside the function texGLInit().
This works correctly, and the texture is displayed as expected.

However, I also want my Texture to be able to change the texture it displays when the user clicks a button and picks a new image file from the HDD via an OpenFileDialog. The function for this is called reloadTexture(). It is meant to delete the old image/pixel data from memory and replace it with the new data from the file the user selected.
When this happens, it deletes the texture name with glDeleteTextures, then uses the texGLInit() function to allocate a new texture name and load the new pixel data into memory. But the texture name it gets back is exactly the same as the one before, 100% of the time (usually "1").

The image displayed after this happens is strange. It has the new image's dimensions but still shows the old image's pixels; in short, it stretches the old image to the new image's size. It is still drawing with pixel data that is supposedly deleted. What should happen is that the new image file is displayed on screen. I believe this has something to do with the texture name not being unique.

The code is included below:

Texture::Texture(string filename)//---Constructor loads in the initial image. Works fine!
{
    textureID[0]=0;

    const char* fnPtr = filename.c_str(); //our image loader accepts a ptr to a char, not a string
    //printf(fnPtr);

    lodepng::load_file(buffer, fnPtr);//load the file into a buffer

    unsigned error = lodepng::decode(image,w,h,buffer);//lodepng's decode function will load the pixel data into image vector from the buffer
    //display any errors with the texture
    if(error)
    {
        cout << "\ndecoder error " << error << ": " << lodepng_error_text(error) <<endl;
    }
    //execute the code that'll throw exceptions to do with the images size
    checkPOT(w);
    checkPOT(h);


    //image now contains our pixeldata. All ready for OpenGL to do its thing

    //let's get this texture up in the video memory
    texGLInit();

    Draw_From_Corner = CENTER;
}

void Texture::reloadTexture(string filename)//Reload texture replaces the texture name and image/pixel data bound to this texture
{
    //first and foremost clear the image and buffer vectors back down to nothing so we can start afresh
    buffer.clear();
    image.clear();
    w = 0;
    h = 0;
    //also delete the texture name we were using before
    glDeleteTextures(1, &textureID[0]);




    const char* fnPtr = filename.c_str(); //our image loader accepts a ptr to a char, not a string
    //printf(fnPtr);

    lodepng::load_file(buffer, fnPtr);//load the file into a buffer

    unsigned error = lodepng::decode(image,w,h,buffer);//lodepng's decode function will load the pixel data into image vector from the buffer
    //display any errors with the texture
    if(error)
    {
        cout << "\ndecoder error " << error << ": " << lodepng_error_text(error) <<endl;
    }
    //execute the code that'll throw exceptions to do with the images size
    checkPOT(w);
    checkPOT(h);

    //image now contains our pixeldata. All ready for OpenGL to do its thing

    //let's get this texture up in the video memory
    texGLInit();

    Draw_From_Corner = CENTER;
}

void Texture::texGLInit()//Actually gets the new texture name and loads the pixel data into OpenGL
{
    glGenTextures(1, &textureID[0]);
    ////printf("\ntextureID = %u", textureID[0]);
    glBindTexture(GL_TEXTURE_2D, textureID[0]);//everything we're about to do is about this texture
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    //glDisable(GL_COLOR_MATERIAL);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8,w,h,0, GL_RGBA, GL_UNSIGNED_BYTE, &image[0]);
    //we COULD free the image vector's memory right about now, but we do it only when there's a need to - at the beginning of the reloadTexture func - which makes sure it happens when we need it to.



}

For what it's worth, here is the draw function in the Texture class.
void Texture::draw(point* origin, ANCHOR drawFrom)
{
    //let us set the DFC enum here.
    Draw_From_Corner = drawFrom;

    glEnable(GL_TEXTURE_2D);
    //printf("\nDrawing texture at (%f, %f)",centerPoint.x, centerPoint.y);
    glBindTexture(GL_TEXTURE_2D, textureID[0]);//bind the texture
    //create a quick vertex array for the primitive we're going to bind the texture to
    ////printf("TexID = %u",textureID[0]);
    GLfloat vArray[8];

#pragma region anchor switch
    switch (Draw_From_Corner)
    {
    case CENTER:

        vArray[0] = origin->x-(w/2); vArray[1] = origin->y-(h/2);//bottom left i0
        vArray[2] = origin->x-(w/2); vArray[3] = origin->y+(h/2);//top left i1
        vArray[4] = origin->x+(w/2); vArray[5] = origin->y+(h/2);//top right i2
        vArray[6] = origin->x+(w/2); vArray[7] = origin->y-(h/2);//bottom right i3
        break;

    case BOTTOMLEFT:

        vArray[0] = origin->x; vArray[1] = origin->y;//bottom left i0
        vArray[2] = origin->x; vArray[3] = origin->y+h;//top left i1
        vArray[4] = origin->x+w; vArray[5] = origin->y+h;//top right i2
        vArray[6] = origin->x+w; vArray[7] = origin->y;//bottom right i3

        break;

    case TOPLEFT:

        vArray[0] = origin->x; vArray[1] = origin->y-h;//bottom left i0
        vArray[2] = origin->x; vArray[3] = origin->y;//top left i1
        vArray[4] = origin->x+w; vArray[5] = origin->y;//top right i2
        vArray[6] = origin->x+w; vArray[7] = origin->y-h;//bottom right i3

        break;

    case TOPRIGHT:

        vArray[0] = origin->x-w; vArray[1] = origin->y-h;//bottom left i0
        vArray[2] = origin->x-w; vArray[3] = origin->y;//top left i1
        vArray[4] = origin->x; vArray[5] = origin->y;//top right i2
        vArray[6] = origin->x; vArray[7] = origin->y-h;//bottom right i3

        break;

    case BOTTOMRIGHT:

        vArray[0] = origin->x-w; vArray[1] = origin->y;//bottom left i0
        vArray[2] = origin->x-w; vArray[3] = origin->y+h;//top left i1
        vArray[4] = origin->x; vArray[5] = origin->y+h;//top right i2
        vArray[6] = origin->x; vArray[7] = origin->y;//bottom right i3

        break;

    default: //same as center

        vArray[0] = origin->x-(w/2); vArray[1] = origin->y-(h/2);//bottom left i0
        vArray[2] = origin->x-(w/2); vArray[3] = origin->y+(h/2);//top left i1
        vArray[4] = origin->x+(w/2); vArray[5] = origin->y+(h/2);//top right i2
        vArray[6] = origin->x+(w/2); vArray[7] = origin->y-(h/2);//bottom right i3

        break;


    }
#pragma  endregion

    //create a quick texture array (we COULD create this on the heap rather than creating/destroying it every cycle)
    GLfloat tArray[8] =
    {
        //this has been tinkered with from my normal order. I think LodePNG is bringing the PD upside down. SO A QUICK FIX HERE WAS NECESSARY.
        0.0f,1.0f,//0
        0.0f,0.0f,//1
        1.0f,0.0f,//2
        1.0f,1.0f//3
    };

    //and finally.. the index array...remember, we draw in triangles....(and we'll go CW)
    GLubyte iArray[6] =
    {
        0,1,2,
        0,2,3
    };

    //Activate arrays
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);

    //Give openGL a pointer to our vArray and tArray
    glVertexPointer(2, GL_FLOAT, 0, &vArray[0]);
    glTexCoordPointer(2, GL_FLOAT, 0, &tArray[0]);

    //Draw it all
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_BYTE, &iArray[0]);

    //glDrawArrays(GL_TRIANGLES,0,6);

    //Disable the vertex arrays
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisable(GL_TEXTURE_2D);
}

Can anyone tell me why OpenGL won't load and draw with the new pixel data I've loaded into it? Like I said, I suspect it has something to do with glGenTextures not giving me a new texture name.

Best Answer

You are calling glTexImage2D and passing it a pointer to client memory. Beware, the documentation says:

    "If a non-zero named buffer object is bound to the GL_PIXEL_UNPACK_BUFFER target (see glBindBuffer) while a texture image is specified, data is treated as a byte offset into the buffer object's data store."



To be safe, you may want to call glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0) to unbind any buffer object first.
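Following that advice, here is a minimal sketch of what texGLInit() could look like with the defensive unbind added (assuming buffer objects / GL_PIXEL_UNPACK_BUFFER are available in your GL version and loader, and that you do not actually intend to upload through a PBO):

void Texture::texGLInit()//same as before, but defensively unbinds any pixel unpack buffer
{
    //if a pixel-unpack buffer object were still bound here, glTexImage2D would treat
    //the last argument as an offset into that buffer instead of a client-memory pointer
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

    glGenTextures(1, &textureID[0]);
    glBindTexture(GL_TEXTURE_2D, textureID[0]);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);

    //now the data really is read from the image vector in client memory
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, &image[0]);
}

If nothing in the program ever binds a buffer to GL_PIXEL_UNPACK_BUFFER, the extra call is a harmless no-op, so it costs nothing to add.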

