How to run inference in PyTorch from an NVIDIA Modulus .pth trained net

I have run some of the examples (wave_1d, helmholtz) in NVIDIA Modulus. In the outputs directory of each example there is a .pth file (e.g. wave_network.pth) which I can read in as an OrderedDict in a Jupyter Notebook, and I can see that it holds the weights, biases and gradients. All the examples I have found for creating an inferencer from a .pt or .pth file require the class definition of the network. I can see the sizes of the tensors, but I am at a loss as to how to code the class definition for the inference network. Does anyone know where I can find suitable torch.nn.Module based definitions that will load the .pth file from the NVIDIA Modulus examples? Thank you in advance for any help.
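For reference, this is roughly how I have been inspecting the file so far (the path is just where I saved my copy):

import torch as pt

# Load the checkpoint on the CPU and list every entry with its shape.
state = pt.load(r'C:\Users\IMSIR\Documents\ModulusTrainedModels\wave_network.pth', map_location='cpu')
for key, value in state.items():
    shape = tuple(value.shape) if hasattr(value, 'shape') else type(value)
    print(key, shape)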

I have since been able to load “some” of the model by creating a class definition for it, basically linear layers plus a swish activation. But I still have an issue: the weights in the .pth file are stored as .weight and .weight_g, which I think means the model was saved with weight normalization. How do I either reconstitute weight and weight_g into un-normalized weights, OR get Linear to load the weights as a direction and magnitude (weight and weight_g)?
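My current understanding (which may well be wrong) is that weight normalization stores a direction tensor and a per-output-row magnitude, and that the full weight can be rebuilt as weight_g * weight / ||weight||, with the norm taken along each row. A rough sketch of what I mean, assuming the magnitude sits under the same key with a _g suffix and that the norm is over dim 1:

import torch as pt

state = pt.load(r'C:\Users\IMSIR\Documents\ModulusTrainedModels\wave_network.pth', map_location='cpu')

plain = {}
for key, value in state.items():
    if key.endswith('.weight_g'):
        continue                              # folded into the matching '.weight' below
    if key.endswith('.weight') and key + '_g' in state:
        v = state[key]                        # direction, shape (out_features, in_features)
        g = state[key + '_g']                 # magnitude, shape (out_features, 1)
        plain[key] = g * v / v.norm(dim=1, keepdim=True)
    else:
        plain[key] = value                    # biases and anything not weight-normalized

If that is right, the plain dict could be loaded into ordinary nn.Linear layers (i.e. without the nn.utils.weight_norm wrappers I use below), or alternatively the .weight entries could be renamed to .weight_v so that a weight-normalized model accepts them. I have not managed to verify either route yet.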

Thanks in advance for any advice

Below is the code I wrote in Python (Modulus is not installed in this environment), using PyTorch. It runs, but the result is just wrong.

import numpy as np
import torch as pt
import torch.nn as nn
import torch.optim as optim
import collections as coll
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt

pt.Tensor.ndim = property(lambda self: len(self.shape))

swish = nn.SiLU()

class MyNet(nn.Module):
    def __init__(self):
        super(MyNet, self).__init__()
        self.layer0 = nn.utils.weight_norm(nn.Linear(2, 512))
        self.layer1 = nn.utils.weight_norm(nn.Linear(512, 512))
        self.layer2 = nn.utils.weight_norm(nn.Linear(512, 512))
        self.layer3 = nn.utils.weight_norm(nn.Linear(512, 512))
        self.layer4 = nn.utils.weight_norm(nn.Linear(512, 512))
        self.layer5 = nn.utils.weight_norm(nn.Linear(512, 512))
        self.final_layer = nn.utils.weight_norm(nn.Linear(512, 1))

    def forward(self, x):
        x = self.layer0(x)
        a = swish(x)
        x = self.layer1(a)
        a = swish(x)
        x = self.layer2(a)
        a = swish(x)
        x = self.layer3(a)
        a = swish(x)
        x = self.layer4(a)
        a = swish(x)
        x = self.layer5(a)
        a = swish(x)
        x = self.final_layer(a)
        return x

model = MyNet()
print(model)

originalOD = pt.load(r'C:\Users\IMSIR\Documents\ModulusTrainedModels\wave_network.pth')
model.load_state_dict(originalOD, strict=False)
model.eval()

my2dspace = pt.tensor(np.linspace([0.0, 0.0], [3.1415926535, 0.0], num=101), requires_grad=False)
t = 0.0
for i in range(1, 101):
    t = t + 0.01 * 3.1415926535
    my2dspace = pt.cat((my2dspace, pt.tensor(np.linspace([0.0, t], [3.1415926535, t], num=101), requires_grad=False)), 0)
myOutput = model(my2dspace.float())
print(my2dspace)
print(myOutput)

print('x:', my2dspace.select(1, 0))
print('t:', my2dspace.select(1, 1))
print('u:', myOutput.squeeze().detach().numpy())
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter(my2dspace.select(1, 0), my2dspace.select(1, 1), myOutput.squeeze().detach().numpy())
plt.show()
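One thing I still want to check is which keys actually get matched when loading with strict=False; as far as I know load_state_dict returns the missing and unexpected keys, so something like this should show whether anything from the Modulus file was picked up at all:

result = model.load_state_dict(originalOD, strict=False)
print('missing keys:   ', result.missing_keys)     # parameters the model expected but did not find in the file
print('unexpected keys:', result.unexpected_keys)  # entries in the file that nothing in the model consumed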