To accelerate the whole model inference pipeline, I have tried to use nvJPEG
to decode my entire dataset. Following the official samples provided here, I wrote the following code:
// create library handle
nvjpegJpegState_t nvjpeg_state;
nvjpegHandle_t nvjpeg_handle;
nvjpegCreateSimple(&nvjpeg_handle);                  // step 1
// create bitstream object
nvjpegJpegStateCreate(nvjpeg_handle, &nvjpeg_state); // step 2
// init output descriptors
std::vector<nvjpegImage_t> out(img_names.size());
for (size_t i = 0; i < out.size(); i++) {
    for (int c = 0; c < NVJPEG_MAX_COMPONENT; c++) {
        out[i].channel[c] = nullptr;
        out[i].pitch[c] = 0;
    }
}
int widths[NVJPEG_MAX_COMPONENT];
int heights[NVJPEG_MAX_COMPONENT];
int channels;
nvjpegChromaSubsampling_t subsampling;
for (size_t i = 0; i < img_names.size(); i++) {
    std::tuple<size_t, std::vector<char>> raw_len_data = read_next_batch(img_names[i]);
    auto raw_len = std::get<0>(raw_len_data);
    auto raw_data = std::get<1>(raw_len_data);
    std::cout << raw_len << std::endl;
    assert(raw_data.size() > 0);
    NVJPEG_CHECK(nvjpegGetImageInfo(nvjpeg_handle, (unsigned char *)raw_data.data(), raw_len,
                                    &channels, &subsampling, widths, heights)); // step 3
    int ori_width = widths[0];
    int ori_height = heights[0];
    std::cout << "width: " << ori_width << " ";
    std::cout << "height: " << ori_height << std::endl;
    NVJPEG_CHECK(nvjpegDecode(nvjpeg_handle, nvjpeg_state,
                              (unsigned char *)raw_data.data(),
                              raw_len, NVJPEG_OUTPUT_BGRI, &out[i],
                              mCudaStream)); // step 4
    assert(out[i].channel[0] != nullptr);
}
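For completeness, read_next_batch simply reads one JPEG file from disk into a host buffer and returns its byte count together with the bytes. It is roughly equivalent to the following simplified sketch (the real helper may differ in details such as error handling):

// Simplified sketch of read_next_batch: load one JPEG file into a host
// buffer and return (byte count, bytes).
#include <fstream>
#include <string>
#include <tuple>
#include <vector>

std::tuple<size_t, std::vector<char>> read_next_batch(const std::string &name) {
    std::ifstream file(name, std::ios::binary | std::ios::ate);
    size_t len = static_cast<size_t>(file.tellg());
    std::vector<char> data(len);
    file.seekg(0, std::ios::beg);
    file.read(data.data(), len);
    return std::make_tuple(len, std::move(data));
}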
However, the program fails at runtime with "assertion failed", which means out[i].channel[0] is still equal to nullptr after the decode call. At the same time, the image info (width and height) is retrieved successfully by nvjpegGetImageInfo.
I have been struggling with this problem for a couple of days. Any hints or solutions would be highly appreciated.