384.69 libglx Xorg strtol SIGSEGV: the thread context (FS base) is changed during GLX init.

Since systemtap is not working for me due to some unclear bugs, I went another way.
Here is a piece of Linux kernel instrumentation in
arch/x86/kernel/process_64.c

do_arch_prctl_64

	case ARCH_SET_FS: {
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		unsigned long base;
		if (arg2 >= TASK_SIZE_MAX)
			return -EPERM;

		/* instrumentation for the nvidia-spoiled thread context */
		if (!strncmp(current->comm, "Xorg.nz", 7)) {
			rdmsrl(MSR_FS_BASE, base);
			if (base != 0) {
				printk("SET_FS_BASE %p old %p pid %d tid %d ptrace %x\n",
				       (void *)arg2, (void *)base,
				       task_tgid_vnr(current), task_pid_vnr(current),
				       current->ptrace);
				dump_stack();
				if (current->ptrace & PT_PTRACED) {
					printk("send signal\n");
					send_sig(SIGTRAP, current, 0);
				}
			}
		}
		/* end of instrumentation */
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = arg2;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, arg2);
		}
		put_cpu();
		break;
	}

gdb --args /usr/libexec/Xorg.nz :8 -config /usr/local/etc/bumblebee/xorg.conf.nvidia -configdir /usr/local/etc/bumblebee/xorg.conf.d -sharevts -nolisten tcp -noreset -verbose 3 -isolateDevice PCI:01:00:0 -modulepath /usr/lib64/xorg/modules/extensions/nvidia,/usr/lib64/xorg/modules,/usr/lib64/modules/extensions,/usr/lib64/xorg/modules/input

Program received signal SIGTRAP, Trace/breakpoint trap.
0x00007ffff21e98ea in _nv006tls () from /lib64/tls/libnvidia-tls.so.384.69
Missing separate debuginfos, use: dnf debuginfo-install audit-libs-2.7.7-1.fc26.x86_64 bzip2-libs-1.0.6-22.fc26.x86_64 dbus-libs-1.11.16-1.fc26.x86_64 freetype-2.7.1-9.fc26.x86_64 libXau-1.0.8-7.fc26.x86_64 libXdmcp-1.1.2-6.fc26.x86_64 libXfont2-2.0.1-3.fc26.x86_64 libbsd-0.8.3-3.fc26.x86_64 libcap-2.25-5.fc26.x86_64 libcap-ng-0.7.8-3.fc26.x86_64 libdrm-2.4.82-1.fc26.x86_64 libfontenc-1.1.3-4.fc26.x86_64 libgcc-7.1.1-3.fc26.x86_64 libgcrypt-1.7.8-1.fc26.x86_64 libgpg-error-1.25-2.fc26.x86_64 libpciaccess-0.13.4-4.fc26.x86_64 libpng-1.6.28-2.fc26.x86_64 libselinux-2.6-7.fc26.x86_64 libunwind-1.2-1.fc26.x86_64 libxshmfence-1.2-4.fc26.x86_64 lz4-libs-1.8.0-1.fc26.x86_64 openssl-libs-1.1.0f-7.fc26.x86_64 pcre-8.41-1.fc26.x86_64 pixman-0.34.0-3.fc26.x86_64 systemd-libs-233-6.fc26.x86_64 xz-libs-5.2.3-2.fc26.x86_64 zlib-1.2.11-2.fc26.x86_64
(gdb) bt
#0 0x00007ffff21e98ea in _nv006tls () from /lib64/tls/libnvidia-tls.so.384.69
#1 0x00007ffff2a4b456 in ?? () from /usr/lib64/xorg/modules/extensions/nvidia/libglx.so
#2 0x00007ffff2d14f05 in ?? () from /usr/lib64/xorg/modules/extensions/nvidia/libglx.so
#3 0x000000000082bb48 in pushToken ()
#4 0x000000000000001c in ?? ()
#5 0x0000000000000200 in ?? ()
#6 0x0000000000000000 in ?? ()

(gdb) disass _nv006tls
Dump of assembler code for function _nv006tls:
0x00007ffff21e9860 <+0>: push %rbx
0x00007ffff21e9861 <+1>: mov %rdx,%rbx
0x00007ffff21e9864 <+4>: mov %edi,%edx
0x00007ffff21e9866 <+6>: sub $0x20,%rsp
0x00007ffff21e986a <+10>: cmpl $0xffffffff,0x2024f7(%rip) # 0x7ffff23ebd68
0x00007ffff21e9871 <+17>: mov %esi,(%rsp)
0x00007ffff21e9874 <+20>: mov %rbx,0x8(%rsp)
0x00007ffff21e9879 <+25>: je 0x7ffff21e9900 <_nv006tls+160>
0x00007ffff21e987f <+31>: test %dl,%dl
0x00007ffff21e9881 <+33>: je 0x7ffff21e98a0 <_nv006tls+64>
0x00007ffff21e9883 <+35>: mov 0x2024df(%rip),%eax # 0x7ffff23ebd68
0x00007ffff21e9889 <+41>: mov $0xffffffffffffffff,%rsi
0x00007ffff21e9890 <+48>: test %eax,%eax
0x00007ffff21e9892 <+50>: je 0x7ffff21e98a0 <_nv006tls+64>
0x00007ffff21e9894 <+52>: mov %rsi,%rdx
0x00007ffff21e9897 <+55>: add $0x20,%rsp
0x00007ffff21e989b <+59>: mov %rdx,%rax
0x00007ffff21e989e <+62>: pop %rbx
0x00007ffff21e989f <+63>: retq
0x00007ffff21e98a0 <+64>: mov $0x300,%esi
0x00007ffff21e98a5 <+69>: mov $0x1,%edi
0x00007ffff21e98aa <+74>: callq 0x7ffff21e9800 <calloc@plt>
0x00007ffff21e98af <+79>: xor %edx,%edx
0x00007ffff21e98b1 <+81>: test %rax,%rax
0x00007ffff21e98b4 <+84>: je 0x7ffff21e9897 <_nv006tls+55>
0x00007ffff21e98b6 <+86>: mov %rax,%rsi
0x00007ffff21e98b9 <+89>: movq $0x0,0xa0(%rax)
0x00007ffff21e98c4 <+100>: movl $0x0,0xa8(%rax)
0x00007ffff21e98ce <+110>: mov %rax,(%rsi)
0x00007ffff21e98d1 <+113>: mov %ebx,%eax
0x00007ffff21e98d3 <+115>: mov $0x1002,%edi
0x00007ffff21e98d8 <+120>: and $0x3ff,%eax
0x00007ffff21e98dd <+125>: mov %eax,0x2f8(%rsi)
0x00007ffff21e98e3 <+131>: mov $0x9e,%eax
0x00007ffff21e98e8 <+136>: syscall
=> 0x00007ffff21e98ea <+138>: test %eax,%eax
0x00007ffff21e98ec <+140>: jns 0x7ffff21e9894 <_nv006tls+52>
0x00007ffff21e98ee <+142>: mov %rsi,%rdi
0x00007ffff21e98f1 <+145>: callq 0x7ffff21e97f0 <free@plt>
0x00007ffff21e98f6 <+150>: xor %esi,%esi
0x00007ffff21e98f8 <+152>: jmp 0x7ffff21e9894 <_nv006tls+52>
0x00007ffff21e98fa <+154>: nopw 0x0(%rax,%rax,1)
0x00007ffff21e9900 <+160>: lea 0x18(%rsp),%rsi
0x00007ffff21e9905 <+165>: mov $0x9e,%eax
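From this dump, _nv006tls allocates a 0x300-byte block with calloc, points the first word at itself (the usual ELF TLS TCB layout), and then issues syscall 0x9e (158 = arch_prctl) with %edi = 0x1002 (ARCH_SET_FS) to move the FS base onto that block; on failure it frees the block. A rough C reconstruction of that path (all names here are my own guesses, not symbols from the library):

#include <stdlib.h>
#include <sys/syscall.h>   /* SYS_arch_prctl == 0x9e == 158 */
#include <unistd.h>

#define ARCH_SET_FS 0x1002   /* the value loaded into %edi above */

void *nv_tls_alloc_guess(unsigned tag)
{
	/* 0x300-byte block whose first word points to itself,
	   like an ELF TLS thread control block */
	void **tcb = calloc(1, 0x300);
	if (!tcb)
		return NULL;

	tcb[0] = tcb;                                     /* mov %rax,(%rsi)      */
	*(unsigned *)((char *)tcb + 0x2f8) = tag & 0x3ff; /* mov %eax,0x2f8(%rsi) */

	/* this is the syscall that moves the FS base */
	if (syscall(SYS_arch_prctl, ARCH_SET_FS, tcb) < 0) {
		free(tcb);
		return NULL;
	}
	return tcb;
}

The info threads output below is consistent with this: the new thread pointer 0x8f1400 is exactly the value that later shows up as the new FS base in the dmesg trace.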

info thread
Id Target Id Frame

* 1 Thread 0x8f1400 (LWP 2256) "Xorg.nz" 0x00007ffff21e98ea in _nv006tls ()
    from /lib64/tls/libnvidia-tls.so.384.69

dmesg

[ 230.768057] SET_FS_BASE 00000000008f1400 old 00007ffff7f0ea40 pid 2256 tid 2256 ptrace 8001f9
[ 230.768061] CPU: 5 PID: 2256 Comm: Xorg.nz Tainted: G O 4.12.9-300.fc26.nz.x86_64 #5
[ 230.768063] Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./Z270 Gaming-ITX/ac, BIOS P2.30 07/14/2017
[ 230.768065] Call Trace:
[ 230.768070] dump_stack+0x8e/0xcd
[ 230.768074] do_arch_prctl_64+0x26a/0x2a0
[ 230.768078] SyS_arch_prctl+0x2a/0x50
[ 230.768082] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 230.768084] RIP: 0033:0x7ffff21e98ea
[ 230.768086] RSP: 002b:00007fffffffdea0 EFLAGS: 00003246 ORIG_RAX: 000000000000009e
[ 230.768089] RAX: ffffffffffffffda RBX: 00007ffff5842b38 RCX: 00007ffff21e98ea
[ 230.768091] RDX: 0000000000000000 RSI: 00000000008f1400 RDI: 0000000000001002
[ 230.768093] RBP: 00007ffff5842ae0 R08: 00000000008f1400 R09: 00000000000005cb
[ 230.768094] R10: 000000000000002a R11: 0000000000003246 R12: 00007ffff5842b38
[ 230.768096] R13: 0000000000000750 R14: 00007ffff5842b38 R15: 000000000000270e
[ 230.768101] send signal

What the $$$$?
libnvidia-tls.so.384.69 changes the thread context (FS base)
and doesn't restore it?

Your program seems to have crashed already before the following line in your backtrace.
#6 0x0000000000000000 in ?? ()

Have you investigated why the program jumps to 0x0?

gdb --args /usr/libexec/Xorg.nz :8 -config /usr/local/etc/bumblebee/xorg.conf.nvidia -configdir /usr/local/etc/bumblebee/xorg.conf.d -sharevts -nolisten tcp -noreset -verbose 3 -isolateDevice PCI:01:00:0 -modulepath /usr/lib64/xorg/modules/extensions/nvidia,/usr/lib64/xorg/modules,/usr/lib64/modules/extensions,/usr/lib64/xorg/modules/input

(gdb) b main
Breakpoint 1 at 0x423590: file stubmain.c, line 34.
(gdb) run
Starting program: /usr/libexec/Xorg.nz :8 -config /usr/local/etc/bumblebee/xorg.conf.nvidia -configdir /usr/local/etc/bumblebee/xorg.conf.d -sharevts -nolisten tcp -noreset -verbose 3 -isolateDevice PCI:01:00:0 -modulepath /usr/lib64/xorg/modules/extensions/nvidia,/usr/lib64/xorg/modules,/usr/lib64/modules/extensions,/usr/lib64/xorg/modules/input
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/lib64/libthread_db.so.1".

Breakpoint 1, main (argc=16, argv=0x7fffffffe0d8, envp=0x7fffffffe160) at stubmain.c:34
34 return dix_main(argc, argv, envp);

(gdb) b miinitext.c:360 if !strcmp(ext->name,"GLX")
Breakpoint 2 at 0x4a8c30: file ../../../mi/miinitext.c, line 360.

So I set a breakpoint just before the first call into the nvidia libglx.so code.

(gdb) c
Continuing.

Breakpoint 2, InitExtensions (argc=argc@entry=16, argv=argv@entry=0x7fffffffe0d8) at ../../../mi/miinitext.c:360
360 (ext->initFunc) ();
(gdb) bt
#0 InitExtensions (argc=argc@entry=16, argv=argv@entry=0x7fffffffe0d8) at ../../../mi/miinitext.c:360
#1 0x00000000004396af in dix_main (argc=16, argv=0x7fffffffe0d8, envp=<optimized out>) at main.c:201
#2 0x00007ffff549750a in __libc_start_main (main=0x423590 <main>, argc=16, argv=0x7fffffffe0d8, init=<optimized out>,
fini=<optimized out>, rtld_fini=<optimized out>, stack_end=0x7fffffffe0c8) at ../csu/libc-start.c:295
#3 0x00000000004235ca in _start ()

The stack is OK.

(gdb) list
355 if (ext->initFunc != NULL &&
356 (ext->disablePtr == NULL || !*ext->disablePtr)) {
357 ret1 = syscall(syscallno, code, (unsigned long) & fs_base );
358 if (ret1 != 0)
359 fs_base =-1;
360 (ext->initFunc) (); // ←
361 if ( fs_base != -1 ) {
362 ret2 = syscall(158, 0x1003, & fs_base_2);
363 if ( ret2==0 && fs_base != fs_base_2 ) {
364 ret2 = syscall(158, 0x1002, & fs_base);
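For reference, the instrumentation visible in this listing follows roughly the pattern below (a reconstruction from the partial listing, since the tail after line 364 is cut off; syscall 158 is arch_prctl, 0x1002/0x1003 are ARCH_SET_FS/ARCH_GET_FS, and the helper name call_with_fs_guard is my own):

#include <sys/syscall.h>   /* SYS_arch_prctl == 158 */
#include <unistd.h>

#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003

static void call_with_fs_guard(void (*initFunc)(void))
{
	unsigned long fs_base = 0, fs_base_2 = 0;

	/* remember the FS base glibc set up for this thread */
	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fs_base) != 0)
		fs_base = (unsigned long)-1;

	initFunc();   /* e.g. the nvidia libglx.so extension init */

	if (fs_base != (unsigned long)-1) {
		/* if the callee left a different FS base behind, restore
		   the original one before glibc touches TLS again */
		if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fs_base_2) == 0 &&
		    fs_base != fs_base_2)
			syscall(SYS_arch_prctl, ARCH_SET_FS, fs_base);
	}
}

In the patched Xorg this logic sits inline around (ext->initFunc)() in InitExtensions, as the listing shows.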

(gdb) sia
0x00007ffff2d14d70 in ?? () from /usr/lib64/xorg/modules/extensions/nvidia/libglx.so
Dump of assembler code from 0x7ffff2d14d70 to 0x7ffff2d14d80:
=> 0x00007ffff2d14d70: push %rbp
0x00007ffff2d14d71: lea 0x1ffa68(%rip),%rdi # 0x7ffff2f147e0
0x00007ffff2d14d78: push %rbx
0x00007ffff2d14d79: sub $0x28,%rsp
0x00007ffff2d14d7d: callq 0x7ffff2a2dd20 <LoaderSymbol@plt>
End of assembler dump.

(gdb) bt
#0 0x00007ffff2d14d70 in ?? () from /usr/lib64/xorg/modules/extensions/nvidia/libglx.so
#1 0x00000000004a8c34 in InitExtensions (argc=argc@entry=16, argv=argv@entry=0x7fffffffe0d8) at ../../../mi/miinitext.c:360
#2 0x00000000004396af in dix_main (argc=16, argv=0x7fffffffe0d8, envp=<optimized out>) at main.c:201
#3 0x00007ffff549750a in __libc_start_main (main=0x423590 <main>, argc=16, argv=0x7fffffffe0d8, init=<optimized out>,
fini=<optimized out>, rtld_fini=<optimized out>, stack_end=0x7fffffffe0c8) at ../csu/libc-start.c:295
#4 0x00000000004235ca in _start ()

The stack is still OK.

(gdb) sia
0x00007ffff2d14d71 in ?? () from /usr/lib64/xorg/modules/extensions/nvidia/libglx.so
Dump of assembler code from 0x7ffff2d14d71 to 0x7ffff2d14d81:
=> 0x00007ffff2d14d71: lea 0x1ffa68(%rip),%rdi # 0x7ffff2f147e0
0x00007ffff2d14d78: push %rbx
0x00007ffff2d14d79: sub $0x28,%rsp
0x00007ffff2d14d7d: callq 0x7ffff2a2dd20 <LoaderSymbol@plt>
End of assembler dump.

(sia is a user-defined gdb command: stepi followed by disassemble $rip, +16.)

After the first push:

(gdb) bt
#0 0x00007ffff2d14d71 in ?? () from /usr/lib64/xorg/modules/extensions/nvidia/libglx.so
#1 0x00000000000002b8 in ?? ()   <-- artefact
#2 0x00000000004a8c34 in InitExtensions (argc=argc@entry=16, argv=argv@entry=0x7fffffffe0d8) at ../../../mi/miinitext.c:360
#3 0x00000000004396af in dix_main (argc=16, argv=0x7fffffffe0d8, envp=<optimized out>) at main.c:201
#4 0x00007ffff549750a in __libc_start_main (main=0x423590 <main>, argc=16, argv=0x7fffffffe0d8, init=<optimized out>,
fini=<optimized out>, rtld_fini=<optimized out>, stack_end=0x7fffffffe0c8) at ../csu/libc-start.c:295
#5 0x00000000004235ca in _start ()

gdb starts to show a strange stack frame.

I have no great desire to dig into that; there is no debuginfo for libglx.so.
The stack gets a garbage entry after the first push instruction in the first function called from libglx.so.

I think that is not the relevant question.
Any real crash shows up in gdb as some signal, and that doesn't happen here.
If there were debuginfo for libglx.so, gdb would show more reasonable stack frames.

The real fact is that nvidia userspace code changes fs_base and then calls a glibc function.
A glibc function may use a TLS variable, and TLS access depends on fs_base,
so the glibc function raises SIGSEGV. (If fs_base is changed, the data used by glibc would have to be copied into the new block.)
It is no surprise when nvidia userspace code does strange things.
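To make that mechanism concrete, here is a minimal standalone demo (my own sketch, not code from the driver or from Xorg) of why a moved FS base kills the next glibc call that touches thread-local storage:

/* my own demo: glibc addresses errno, __thread variables and the
   per-thread locale relative to the FS base, so pointing FS elsewhere
   (as libnvidia-tls effectively does) breaks the next TLS access */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003

static __thread int tls_var = 42;   /* lives in the FS-based TLS block */

int main(void)
{
	unsigned long old_fs = 0;
	void *bogus;

	syscall(SYS_arch_prctl, ARCH_GET_FS, &old_fs);
	printf("fs base %#lx, &tls_var %p, &errno %p\n",
	       old_fs, (void *)&tls_var, (void *)&errno);

	/* point FS at a random zeroed heap block and then call a glibc
	   function that goes through TLS */
	bogus = calloc(1, 0x300);
	syscall(SYS_arch_prctl, ARCH_SET_FS, bogus);

	strtol("123", NULL, 0);   /* typically SIGSEGVs about here */

	/* if it survives, put the original FS base back before returning */
	syscall(SYS_arch_prctl, ARCH_SET_FS, old_fs);
	return 0;
}

This matches the symptom in the thread title: the crash lands in strtol, which in glibc goes through the thread's locale data in TLS.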

Have you tried Valgrind?

As Aaron suggested, it may provide useful information to investigate the stack corruption.

valgrind log, Xorg not instrumented:
valgrind.1.log (19.7 KB)

valgrind log, Xorg instrumented:
valgrind.2.log (16.1 KB)

The valgrind log is nearly clean (for the instrumented Xorg).
The msync call is unlikely to spoil anything, and valgrind has no idea about the nvidia ioctl 0x6443.
So the valgrind log does not give any valuable information about the root cause
that ends up changing fs_base.
It looks as if an error code path is not handled correctly and fs_base is
left unrestored.

On which versions of the Linux kernel and Xorg is the nvidia driver tested before
a release is made?

With 384.90 nothing changed; the same problem.

I found a similar issue in a Debian bug report:
https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=883705

It seems to be a regression bug in glibc, and it may crash under certain conditions.

It is not a glibc bug!!!
It is an nvidia driver bug!!!
Otherwise you did not understand the investigation at all.
The core reason: nvidia userspace code changes the FS base and then calls a glibc function.
Any glibc function that uses thread-local storage MUST get SIGSEGV,
because the FS base is changed and the thread context is invalid.
Of course the FS base cannot be changed by userspace code directly;
it is done by a syscall, and that syscall sits in libnvidia-tls.
So the problem is that the nvidia code MUST restore the FS base
to the right thread context on every code path before calling into glibc.

Whether the bug reproduces on one version of glibc and not on another
does not matter at all.
In my case the bug reproduces 100% on two boxes:
an IBM Lenovo W520
and an ASRock Mini-ITX Fatal1ty.
The Lenovo W520 did work in the past (bumblebee/primus),
when there was an older Linux kernel, an older glibc
and an older nvidia driver.

The reason I did this investigation is that I have met similar bug reports many times,
but nobody made a serious attempt to get to the core of the problem.

Anyway, I made a special version of Xorg which works around this issue.
Even so, the nvidia driver doesn't work.
Though I did not try a native Xorg; this Xorg was only used with bumblebee/primus.

Possibly I will repeat the experiment with the latest nvidia driver, the latest kernel (4.13.16)
and the glibc I have, using a native Xorg, i.e. the nvidia card driving the monitor
directly without bumblebee/primus.

This isn't an NVIDIA bug: your driver isn't installed correctly. You're using the wrong version of libnvidia-tls.so. You need to use the ELF one.
Please install using our .run installer, that should fix the problem; and in any case make sure that you’re copying the correct libnvidia-tls.so file.

FYI, the following command would help to check whether the libnvidia-tls.so libraries are installed correctly or not.
$ /sbin/ldconfig -p |grep nvidia-tls

The result (debian and debian’s NVIDIA packages) is:
libnvidia-tls.so.387.34 (libc6,x86-64, hwcap: 0x8000000000000000, OS ABI: Linux 2.3.99) => /usr/lib/x86_64-linux-gnu/tls/libnvidia-tls.so.387.34
libnvidia-tls.so.387.34 (libc6,x86-64, OS ABI: Linux 2.2.5) => /usr/lib/x86_64-linux-gnu/libnvidia-tls.so.387.34

If I extract (--extract-only) the nvidia driver package, there are two libnvidia-tls.so.384.98 files:
-rwxr-xr-x. 13080 Oct 27 00:39 ./libnvidia-tls.so.384.98
-rwxr-xr-x. 14480 Oct 27 00:39 ./tls/libnvidia-tls.so.384.98
I don't want the standard paths used by the installer.
I want /usr/lib64/nvidia/384.98 (the top-level ./libnvidia-tls.so.384.98 is copied here)
and I am fine with /usr/lib64/tls (./tls/libnvidia-tls.so.384.98 is copied here),
because there is no file conflict there.

Some libraries go to /usr/lib64/xorg/modules:
/usr/lib64/xorg/modules/drivers
nvidia_drv.so → nvidia_drv.so.384.98
/usr/lib64/xorg/modules/extensions/nvidia
libglx.so → libglx.so.384.98

The question is which libnvidia-tls.* (./ or ./tls/)
should go to
/usr/lib64/nvidia/384.98/
and which to
/usr/lib64/tls

Here is an excerpt from README.txt:
The nvidia-tls libraries ('/usr/lib/libnvidia-tls.so.384.98' and
'/usr/lib/tls/libnvidia-tls.so.384.98'); these files provide thread
local storage support for the NVIDIA OpenGL libraries (libGL,
libnvidia-glcore, and libglx). Each nvidia-tls library provides support
for a particular thread local storage model (such as ELF TLS), and the
one appropriate for your system will be loaded at run time.

And
export LD_LIBRARY_PATH=/usr/lib64/nvidia/384.98
ldd -r /usr/bin/glxgears
linux-vdso.so.1 (0x00007fff756cc000)
libGL.so.1 => /usr/lib64/nvidia/384.98/libGL.so.1 (0x00007fb0d4c58000)
libm.so.6 => /lib64/libm.so.6 (0x00007fb0d4942000)
libX11.so.6 => /lib64/libX11.so.6 (0x00007fb0d4604000)
libc.so.6 => /lib64/libc.so.6 (0x00007fb0d422f000)
libdl.so.2 => /lib64/libdl.so.2 (0x00007fb0d402b000)
libGLX.so.0 => /usr/lib64/nvidia/384.98/libGLX.so.0 (0x00007fb0d3dfb000)
libGLdispatch.so.0 => /usr/lib64/nvidia/384.98/libGLdispatch.so.0 (0x00007fb0d3b2d000)
/lib64/ld-linux-x86-64.so.2 (0x00007fb0d5102000)
libxcb.so.1 => /lib64/libxcb.so.1 (0x00007fb0d3905000)
libXext.so.6 => /lib64/libXext.so.6 (0x00007fb0d36f3000)
libXau.so.6 => /lib64/libXau.so.6 (0x00007fb0d34ef000)
If I make
/usr/lib64/nvidia/384.98/libnvidia-tls.so.384.98
a symlink to
/usr/lib/tls/libnvidia-tls.so.384.98 (which matches ./tls/ in the original nvidia archive),
then it seems my test case
gdb --args /usr/libexec/Xorg :8 -config /usr/local/etc/bumblebee/xorg.conf.nvidia -configdir /usr/local/etc/bumblebee/xorg.conf.d -sharevts -nolisten tcp -noreset -verbose 3 -isolateDevice PCI:01:00:0 -modulepath /usr/lib64/xorg/modules/extensions/nvidia,/usr/lib64/xorg/modules,/usr/lib64/modules/extensions,/usr/lib64/xorg/modules/input
no longer gets a SIGSEGV.

Finally I was able to run bumblebee/primus glxinfo and glxgears with the non-glvnd variant of the nvidia
userspace libraries, nvidia version 384.98.

Why is this information not in the README file?
It would save a lot of time.

nvidia-installer has somewhat strange behavior.
I have something like this:

#!/usr/bin/bash

nvidiaversion=384.98

optlibglvnd='--no-libglx-indirect --no-install-libglvnd --no-glvnd-glx-client --no-glvnd-egl-client'
driverdir=/mnt/d/distr/nvidia/NVIDIA-Linux-x86_64-384.98
installerdir=$driverdir

if [ ! -d /opt/nvidia/$nvidiaversion ] ; then
  echo "mkdir /opt/nvidia/$nvidiaversion"
  mkdir -p /opt/nvidia/$nvidiaversion
fi

root32=/opt/nvidia/$nvidiaversion/32
if [ ! -d /opt/nvidia/$nvidiaversion/32 ] ; then
  echo "mkdir /opt/nvidia/$nvidiaversion/32"
  mkdir -p /opt/nvidia/$nvidiaversion/32
fi

lib64=/opt/nvidia/$nvidiaversion/lib64
if [ ! -d $lib64 ] ; then
  echo "mkdir $lib64"
  mkdir -p $lib64
fi

if [ ! -d /opt/nvidia/$nvidiaversion/share ] ; then
  echo "mkdir /opt/nvidia/$nvidiaversion/share"
  mkdir -p /opt/nvidia/$nvidiaversion/share
fi

if [ ! -d /opt/nvidia/$nvidiaversion/bin ] ; then
  echo "mkdir /opt/nvidia/$nvidiaversion/bin"
  mkdir -p /opt/nvidia/$nvidiaversion/bin
fi

$installerdir/nvidia-installer --accept-license \
  --no-kernel-module \
  --force-tls=new \
  --no-x-check \
  --no-nouveau-check \
  --skip-module-unload \
  --no-nvidia-modprobe \
  --no-check-for-alternate-installs \
  --no-libglx-indirect --no-install-libglvnd --no-glvnd-glx-client --no-glvnd-egl-client \
  --x-prefix=/opt/nvidia/$nvidiaversion \
  --x-module-path=/opt/nvidia/$nvidiaversion \
  --x-library-path=/opt/nvidia/$nvidiaversion \
  --opengl-prefix=/opt/nvidia/$nvidiaversion \
  --opengl-libdir=lib64 \
  --utility-prefix=/opt/nvidia/$nvidiaversion \
  --application-profile-path=/opt/nvidia/$nvidiaversion/share \
  --documentation-prefix=/opt/nvidia/$nvidiaversion \
  --install-compat32-libs \
  --compat32-chroot=/opt/nvidia/$nvidiaversion/32 \
  --compat32-libdir=lib32

Why does it detect some existing nvidia installation?
I copied the files from /opt/nvidia
to
/usr/lib64/nvidia/$nvidiaversion
/usr/lib/nvidia/$nvidiaversion
/usr/lib64/xorg/modules/drivers
/usr/lib64/xorg/modules/extensions/nvidia
and then deleted the content of the /opt/nvidia directories
in order to install again with different options, for example
the libglvnd variant of the installation.

All the nvidia driver files went into /opt/nvidia/$nvidiaversion,
and that directory is now empty apart from some empty subdirectories.

The installer breaks the rpm mesa, libglvnd and xorg packages by changing some of their files,
so I need to reinstall them in order to get glxinfo working again with the intel driver.
Why the hell does it make changes in the /usr/lib64 and /usr/lib64/xorg directories?
Why the hell does it delete /usr/lib64/primus/libGLX.so.1?

What I want is to install the nvidia driver userspace library files into a separate root directory,
for example /opt/nvidia,
without any changes to my /usr/lib64 and /usr/lib directories.