ERROR: model_1/conv1/convolution: kernel weights has count 576 but 55296 was expected


I’m trying to run TensorRT on Windows on a TensorFlow model I created. I converted the model to a UFF file successfully; however, while trying to parse the file, I got the following error:

ERROR: model_1/conv1/convolution: kernel weights has count 576 but 55296 was expected
ERROR: UFFParser: Parser error: model_1/conv1/BiasAdd: The input to the Scale Layer is required to have a minimum of 3 dimensions.
ERROR: siamese_vggish: Fail to parse uff file

The number of weights for the model_1/conv1 layer is correct: it has 3×3×64 = 576 parameters. The number 55296 is 576 multiplied by 96, which is the first dimension of the registered input tensor.
However, I don’t make any reshape operation on the input tensor.
My code is attached below:

#include <cuda_runtime_api.h>
#include <sys/stat.h>

#include <cassert>
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "NvInfer.h"
#include "NvUffParser.h"
#include "NvUtils.h"

using namespace nvuffparser;
using namespace nvinfer1;
#include "common.h"

// Global TensorRT logger handed to the builder and parser.
static Logger gLogger;
// DLA core to use; -1 disables DLA (the selection call in main is commented out).
static int gUseDLACore{ -1 };

// Maximum scratch workspace the builder may use while optimizing (1 GiB).
#define MAX_WORKSPACE (1 << 30)

// Logs "siamese_vggish: <message>" at the given severity through gLogger and
// returns `ret` from the enclosing function. The forum paste lost the `do {`
// opener and the backslash line-continuations; restored here. The do/while(0)
// wrapper makes the macro expand to a single statement, so it is safe inside
// an un-braced if/else.
#define RETURN_AND_LOG(ret, severity, message)                                     \
    do                                                                             \
    {                                                                              \
        std::string error_message = "siamese_vggish: " + std::string(message);     \
        gLogger.log(ILogger::Severity::k##severity, error_message.c_str());        \
        return (ret);                                                              \
    } while (0)

inline int64_t volume(const Dims& d)
int64_t v = 1;
for (int64_t i = 0; i < d.nbDims; i++)
v *= d.d[i];
return v;

// Size in bytes of a single element of the given TensorRT data type.
// Returns 0 for an unrecognized type. (Braces restored after forum mangling.)
inline unsigned int elementSize(DataType t)
{
    switch (t)
    {
    case DataType::kINT32:
        // Fallthrough: kINT32 is 4 bytes, same as kFLOAT.
    case DataType::kFLOAT: return 4;
    case DataType::kHALF: return 2;
    case DataType::kINT8: return 1;
    }
    return 0;
}

void* safeCudaMalloc(size_t memSize)
void* deviceMem;
CHECK(cudaMalloc(&deviceMem, memSize));
if (deviceMem == nullptr)
std::cerr << “Out of memory” << std::endl;
return deviceMem;

std::vector<std::pair<int64_t, DataType>> calculateBindingBufferSizes(const ICudaEngine& engine, int nbBindings, int batchSize)
std::vector<std::pair<int64_t, DataType>> sizes;
for (int i = 0; i < nbBindings; ++i)
Dims dims = engine.getBindingDimensions(i);
DataType dtype = engine.getBindingDataType(i);

	int64_t eltCount = volume(dims) * batchSize;
	sizes.push_back(std::make_pair(eltCount, dtype));

return sizes;


// Parses the given UFF file into a network and builds a CUDA engine from it.
// Returns nullptr (after logging) on parse or build failure.
// NOTE(review): on the early-return failure paths, builder/network are leaked;
// TensorRT objects use destroy() rather than destructors, so a full fix would
// wrap them in RAII guards.
ICudaEngine* loadModelAndCreateEngine(const char* uffFile, int maxBatchSize, IUffParser* parser)
{
    /* Define a Builder */
    IBuilder* builder = createInferBuilder(gLogger);
    /* Define a Network from the Builder */
    INetworkDefinition* network = builder->createNetwork();

    if (!parser->parse(uffFile, *network, DataType::kFLOAT))
        RETURN_AND_LOG(nullptr, ERROR, "Fail to parse uff file");

    /* Create the engine */
    // Bug fix: the original never applied these settings, leaving the
    // maxBatchSize parameter and the MAX_WORKSPACE constant silently unused.
    builder->setMaxBatchSize(maxBatchSize);
    builder->setMaxWorkspaceSize(MAX_WORKSPACE);
    //samplesCommon::enableDLA(builder, gUseDLACore);

    ICudaEngine* engine = builder->buildCudaEngine(*network);
    if (!engine)
        RETURN_AND_LOG(nullptr, ERROR, "Unable to create engine");

    // The network and builder are no longer needed once the engine is built.
    network->destroy();
    builder->destroy();
    return engine;
}

// Creates an execution context and allocates a device buffer for every
// engine binding (assumed to be exactly one input and one output).
// NOTE(review): as posted, the function is truncated — it allocates buffers
// but never enqueues inference, copies results back, or frees the context and
// device memory. The loop below allocates ALL bindings (matching the NVIDIA
// sampleUffMNIST sample this code is based on); the forum paste's indentation
// is ambiguous about what was inside the `if` — confirm against the original.
void execute(ICudaEngine& engine)
{
    IExecutionContext* context = engine.createExecutionContext();
    int batchSize = 1;

    int nbBindings = engine.getNbBindings();
    assert(nbBindings == 2);

    std::vector<void*> buffers(nbBindings);
    auto buffersSizes = calculateBindingBufferSizes(engine, nbBindings, batchSize);

    int bindingIdxInput = 0;
    for (int i = 0; i < nbBindings; ++i)
    {
        if (engine.bindingIsInput(i))
            bindingIdxInput = i;
        auto bufferSizesOutput = buffersSizes[i];
        buffers[i] = safeCudaMalloc(bufferSizesOutput.first * elementSize(bufferSizesOutput.second));
    }
    // ... inference, device-to-host copies, and cleanup were cut off in the
    // original post.
    (void)context;
    (void)bindingIdxInput;
}

int main(int argc, char** argv)
//gUseDLACore = samplesCommon::parseDLA(argc, argv);
std::string fileName = “D:/Projects/Whisperer/Siamese-Vggish/weights/vggish_siamese.uff”;
std::cout << fileName << std::endl;

int maxBatchSize = 1;
auto parser = createUffParser();

/* Register tensorflow input */
parser->registerInput("input_left", Dims3(96, 64, 1), UffInputOrder::kNHWC); 
parser->registerInput("input_right", Dims3(96, 64, 1), UffInputOrder::kNHWC);

ICudaEngine* engine = loadModelAndCreateEngine(fileName.c_str(), maxBatchSize, parser);




I also uploaded my pb and uff weight files to Google Drive. The link is:

Thank you.


Did you fix this bug? I am facing the same issue.