I was installing Chat with RTX on my local system, and I don't know why I am getting the error "Mistral 7B INT4 Not Installed". I have also tried installing it with my antivirus disabled, but the problem persists. I have also tried running "app_launch.bat" from the C drive, and as you can see in the log below, my model is not installing. It would be helpful if anyone could share a solution to fix this.
Environment path found: C:\Users\gaura\AppData\Local\NVIDIA\ChatWithRTX\env_nvd_rag
App running with config
{
“models”: {
“supported”: [
{
“name”: “Mistral 7B int4”,
“installed”: false,
“metadata”: {
“model_path”: “model\mistral\mistral7b_int4_engine”,
“engine”: “llama_float16_tp1_rank0.engine”,
“tokenizer_path”: “model\mistral\mistral7b_hf”,
“max_new_tokens”: 1024,
“max_input_token”: 7168,
“temperature”: 0.1
}
},
{
“name”: “Llama 2 13B int4”,
“installed”: false,
“metadata”: {
“model_path”: “model\llama\llama13_int4_engine”,
“engine”: “llama_float16_tp1_rank0.engine”,
“tokenizer_path”: “model\llama\llama13_hf”,
“max_new_tokens”: 1024,
“max_input_token”: 3900,
“temperature”: 0.1
}
}
],
“selected”: “Mistral 7B int4”
},
“sample_questions”: [
{
“query”: “How does NVIDIA ACE generate emotional responses?”
},
{
“query”: “What is Portal prelude RTX?”
},
{
“query”: “What is important about Half Life 2 RTX?”
},
{
“query”: “When is the launch date for Ratchet & Clank: Rift Apart on PC?”
}
],
“dataset”: {
“sources”: [
“directory”,
“nodataset”
],
“selected”: “directory”,
“path”: “dataset”,
“isRelative”: true
},
“strings”: {
“directory”: “Folder Path”,
“nodataset”: “AI model default”
}
}
Traceback (most recent call last):
File “C:\Users\gaura\AppData\Local\NVIDIA\ChatWithRTX\RAG\trt-llm-rag-windows-main\app.py”, line 28, in
from trt_llama_api import TrtLlmAPI
File “C:\Users\gaura\AppData\Local\NVIDIA\ChatWithRTX\RAG\trt-llm-rag-windows-main\trt_llama_api.py”, line 42, in
from utils import (DEFAULT_HF_MODEL_DIRS, DEFAULT_PROMPT_TEMPLATES,
File “C:\Users\gaura\AppData\Local\NVIDIA\ChatWithRTX\RAG\trt-llm-rag-windows-main\utils.py”, line 22, in
import tensorrt_llm
File "C:\Users\gaura\AppData\Local\NVIDIA\ChatWithRTX\env_nvd_rag\lib\site-packages\tensorrt_llm\__init__.py", line 18, in <module>
import tensorrt_llm.runtime as runtime
File "C:\Users\gaura\AppData\Local\NVIDIA\ChatWithRTX\env_nvd_rag\lib\site-packages\tensorrt_llm\runtime\__init__.py", line 22, in <module>
from .model_runner import ModelRunner
File “C:\Users\gaura\AppData\Local\NVIDIA\ChatWithRTX\env_nvd_rag\lib\site-packages\tensorrt_llm\runtime\model_runner.py”, line 24, in
from … import profiler
File “C:\Users\gaura\AppData\Local\NVIDIA\ChatWithRTX\env_nvd_rag\lib\site-packages\tensorrt_llm\profiler.py”, line 46, in
elif pynvml.__version__ < '11.5.0':
AttributeError: module 'pynvml' has no attribute '__version__'
Press any key to continue . . .