glUseProgram fails on RTX 4080 (WGL + MFC, Windows)

I have code that seems to work on Intel GPUs and older NVIDIA cards up to the GTX 1080, but it fails on an RTX 4080.

I am using glad to load the OpenGL (4.5) and WGL symbols.
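
For context, the WGL setup in the MFC view is along these lines (simplified sketch; member and helper names are approximate, not copied verbatim from my code):

void OpenGLView::createGLContext()
{
	PIXELFORMATDESCRIPTOR pfd = {};
	pfd.nSize = sizeof(pfd);
	pfd.nVersion = 1;
	pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
	pfd.iPixelType = PFD_TYPE_RGBA;
	pfd.cColorBits = 32;
	pfd.cDepthBits = 24;

	// m_hWnd is the MFC view's window handle
	m_hdc = ::GetDC(m_hWnd);
	int pf = ChoosePixelFormat(m_hdc, &pfd);
	SetPixelFormat(m_hdc, pf, &pfd);

	m_hRC = wglCreateContext(m_hdc);
	wglMakeCurrent(m_hdc, m_hRC);

	// glad 1-style loader; the exact call depends on how glad was generated
	gladLoadGL();
}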

The shader program compiles and links fine:

{
    static const char* vs_source[] = { R"_(
    #version 450 core

    uniform highp mat4 projection; 

    out vec2 texCoord;

    void main(void)
    {
        // using GL_TRIANGLE_FAN
        mat4 vertices = {
            {-1.0, -1.0, 0.0, 1},
            {-1.0,  1.0, 0.0, 1},
            { 1.0,  1.0, 0.0, 1},
            { 1.0, -1.0, 0.0, 1},
        };

        mat4x2 texCoords = {
            {0.,0.},
            {0.,1.},
            {1.,1.},
            {1.,0.}
        };

        texCoord = texCoords[gl_VertexID];
        gl_Position = projection * vertices[gl_VertexID];
    }
 )_" };

	static const char* fs_source[] = { R"(
    #version 450 core

    // the swizzle matrix broadcasts single-channel (Red) image data
    // to the Green and Blue channels;
    // an identity matrix is used when full-color data is loaded
    uniform mat4 swizzle;
    uniform float zoom;
    uniform vec2 corner;
    uniform lowp vec2 contrast_range;

    layout (binding = 0) uniform sampler2D image;
    layout (binding = 1) uniform sampler1D offsets;
    layout (binding = 2) uniform sampler2D colormap;

    in vec2 texCoord;

    ivec2 getSampleCoordinates(void)
    {
        //vec2 texc = mat2(0,1,1,0) * texCoord;
        ivec2 dimensions = textureSize(image, 0);
        //int row_offset =  int(texelFetch(offsets, int(texc.t * dimensions.y), 0).r);
        ivec2 coordinates = ivec2(texCoord.s * dimensions.x /*+ row_offset*/, texCoord.t * dimensions.y);
        return coordinates;
    }

    void main(void)
    {
        vec4 color = texelFetch(image, getSampleCoordinates(), 0);
        // swizzle matrix has converted to grayscale
        //        color.r = (color.r - contrast_range.x)  / (contrast_range.y - contrast_range.x);
        color.g = color.r;
        color.b = color.r;
        gl_FragColor = color;
    }
)" };
    // initialize
    // ...
		m_program = glCreateProgram();
		GLuint fs = glCreateShader(GL_FRAGMENT_SHADER);
		glShaderSource(fs, 1, fs_source, NULL);
		glCompileShader(fs);
		CHECK_ERRORS();

		GLuint vs = glCreateShader(GL_VERTEX_SHADER);
		glShaderSource(vs, 1, vs_source, NULL);
		glCompileShader(vs);
		CHECK_ERRORS();

		glAttachShader(m_program, vs);
		glAttachShader(m_program, fs);

		glLinkProgram(m_program);
		CHECK_ERRORS();
}
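
For what it's worth, "compiles and links fine" is based on explicit status queries, roughly like the following (simplified sketch; the helper names are illustrative, not my exact code):

static void logShaderStatus(GLuint shader)
{
	GLint ok = GL_FALSE;
	glGetShaderiv(shader, GL_COMPILE_STATUS, &ok);
	if (ok != GL_TRUE)
	{
		GLchar log[1024];
		GLsizei len = 0;
		glGetShaderInfoLog(shader, sizeof(log), &len, log);
		std::cerr << "shader compile failed: " << std::string(log, len) << "\n";
	}
}

static void logProgramStatus(GLuint program)
{
	GLint ok = GL_FALSE;
	glGetProgramiv(program, GL_LINK_STATUS, &ok);
	if (ok != GL_TRUE)
	{
		GLchar log[1024];
		GLsizei len = 0;
		glGetProgramInfoLog(program, sizeof(log), &len, log);
		std::cerr << "program link failed: " << std::string(log, len) << "\n";
	}
}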

On the RTX 4080, every draw command then fails. CHECK_ERRORS is defined as follows:

#define CHECK_ERRORS() { \
auto error = glGetError(); \
if (error != GL_NO_ERROR) \
{ \
    stringstream ss; \
    ss << __FUNCTION__ << "::" << __LINE__ << "::" << this_thread::get_id() << " OpenGL error: 0x" << std::hex << error; \
    std::cerr << ss.str() << "\n"; \
}}

The relevant part of the paint handler (OnPaint) looks like this:

	if (!m_dirty && m_dirtyImage.empty()) return;

	wglMakeCurrent(m_hdc, m_hRC);
	////std::cerr << this_thread::get_id() << " - " << wglMakeCurrent(m_hdc, m_hRC) << "\n";
	//CHECK_ERRORS();
	glClearColor(0.0f, 0.05f, 0.8f, 1.0f);
	CHECK_ERRORS();
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	CHECK_ERRORS();

	if (m_program == 0) return;

	// FAILS HERE (only on the NVIDIA RTX 4080):
	glUseProgram(m_program);
	CHECK_ERRORS();
	// CHECK_ERRORS prints: OpenGLView::OnPaint::565::11716 OpenGL error: 0x502
	// 0x502 is GL_INVALID_OPERATION

	if (!m_dirtyImage.empty()) updateImage(m_dirtyImage);
	CHECK_ERRORS();
	
	...
	
	SwapBuffers(m_hdc);
	//CHECK_ERRORS();

	m_dirty = false;
	
	glUseProgram(0);

	//// Allow other rendering contexts to coexist
	wglMakeCurrent(NULL, NULL);
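
updateImage just re-uploads the pending image data into the image texture, roughly like this (sketch; the Image type and the member names are stand-ins for my actual ones, and the pixel format/type depend on the loaded file):

void OpenGLView::updateImage(const Image& img)
{
	glBindTexture(GL_TEXTURE_2D, m_imageTexture);
	// single-channel data; the swizzle uniform handles the grayscale broadcast
	glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, img.width, img.height,
	                GL_RED, GL_UNSIGNED_SHORT, img.pixels);
	CHECK_ERRORS();
}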

I have tried three different driver versions; all of them fail the same way.

The same approach works on the RTX 4080 if I use GLFW3 to create the window (i.e. the same shader program works under GLFW3 but not under MFC).
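
The GLFW3 test was roughly the following (sketch; I am not claiming these are the exact window hints I used):

// minimal GLFW3 window/context used for the comparison test
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 5);
GLFWwindow* window = glfwCreateWindow(800, 600, "shader test", nullptr, nullptr);
glfwMakeContextCurrent(window);
gladLoadGL();  // same glad loader as in the MFC build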

I need MFC to support a legacy project. Can someone help me figure out a way to make this shader program run successfully on the 4080?

OS is Windows 10 64-bit, NVIDIA driver 536.99.