Unet on SDK3.1

Hi,
I am working with Clara Train SDK 3.1. I want to try using Unet for segmentation of lung tumors.
I modified the config_train.json of SegAhnet to use Unet, but I got the following error messages. Could anyone help me?

2021-07-09 07:59:05.890732: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1320] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 30129 MB memory) -> physical GPU (device: 0, name: Tesla V100-DGXS-32GB, pci bus id: 0000:0e:00.0, compute capability: 7.0)
Traceback (most recent call last):
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/tensor_util.py", line 541, in make_tensor_proto
    str_values = [compat.as_bytes(x) for x in proto_values]
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/tensor_util.py", line 541, in <listcomp>
    str_values = [compat.as_bytes(x) for x in proto_values]
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/util/compat.py", line 71, in as_bytes
    (bytes_or_text,))
TypeError: Expected binary or unicode string, got -1

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
    "__main__", mod_spec)
  File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "apps/train.py", line 47, in <module>
  File "apps/train.py", line 30, in main
  File "utils/train_conf.py", line 54, in train_mmar
  File "workflows/trainers/supervised_trainer.py", line 279, in train
  File "workflows/builders/tf_builder.py", line 137, in build
  File "components/models/model.py", line 76, in build
  File "components/models/unet.py", line 94, in get_predictions
  File "libs/models/unet.py", line 216, in model
  File "libs/models/unet.py", line 184, in batch_norm_relu
  File "operations/normalization.py", line 85, in group_norm
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/array_ops.py", line 131, in reshape
    result = gen_array_ops.reshape(tensor, shape, name)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/gen_array_ops.py", line 8115, in reshape
    "Reshape", tensor=tensor, shape=shape, name=name)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/op_def_library.py", line 531, in _apply_op_helper
    raise err
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/op_def_library.py", line 528, in _apply_op_helper
    preferred_dtype=default_dtype)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py", line 1297, in internal_convert_to_tensor
    ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/constant_op.py", line 286, in _constant_tensor_conversion_function
    return constant(v, dtype=dtype, name=name)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/constant_op.py", line 227, in constant
    allow_broadcast=True)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/constant_op.py", line 265, in _constant_impl
    allow_broadcast=allow_broadcast))
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/tensor_util.py", line 545, in make_tensor_proto
    "supported type." % (type(values), values))
TypeError: Failed to convert object of type <class 'list'> to Tensor. Contents: [-1, None, None, None, 4, 8]. Consider casting elements to a supported type.

I have attached the config_train.json below as well.

{
  "epochs": 1250,
  "num_training_epoch_per_valid": 20,
  "learning_rate": 1e-4,
  "multi_gpu": false,
  "determinism": {
    "python_seed": "20191201",
    "random_seed": 123456,
    "numpy_seed": 654321,
    "tf_seed": 11111
  },
  "use_amp": true,
  "dynamic_input_shape": true,
  "train": {
    "loss": {
      "name": "Dice",
      "args": {
        "jaccard": false
      }
    },
    "optimizer": {
      "name": "Adam"
    },
    "model": {
      "name": "Unet",
      "args": {
        "num_classes": 2,
        "final_activation": "softmax"
      }
    },
    "pre_transforms": [
      {
        "name": "LoadNifti",
        "args": {
          "fields": [
            "image",
            "label"
          ]
        }
      },
      {
        "name": "ConvertToChannelsFirst",
        "args": {
          "fields": [
            "image",
            "label"
          ]
        }
      },
      {
        "name": "ReplaceLabels",
        "args": {
          "fields": "label",
            "input_labels":  [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],
            "output_labels": [0,0,0,0,0,0,0,0,1,1, 1, 1, 1, 1, 1, 1]
          }
      },
      {
        "name": "ScaleIntensityRange",
        "args": {
          "fields": "image",
          "a_min": -1500,
          "a_max": 500,
          "b_min": 0.0,
          "b_max": 1.0,
          "clip": true
        }
      },
      {
        "name": "FastCropByPosNegRatio",
        "args": {
          "size": [160, 160, 160],
          "fields": "image",
          "label_field": "label",
          "pos": 1,
          "neg": 9,
          "batch_size": 3
        }
      },
      {
        "name": "RandomAxisFlip",
        "args": {
          "fields": ["image", "label"],
          "probability": 0.0
        }
      },
      {
        "name": "RandomRotate3D",
        "args": {
          "fields": ["image", "label"],
          "probability": 0.0
        }
      },
      {
        "name": "ScaleIntensityOscillation",
        "args": {
          "fields": "image",
          "magnitude": 0.1
        }
      }
    ],
    "image_pipeline": {
      "name": "SegmentationImagePipelineWithCache",
      "args": {
        "data_list_file_path": "{DATASET_JSON}",
        "data_file_base_dir": "{DATA_ROOT}",
        "data_list_key": "training",
        "output_crop_size": [160, 160, 160],
        "output_batch_size": 3,
        "batched_by_transforms": true,
        "num_workers": 1,
        "prefetch_size": 6,
        "num_cache_objects": 40,
        "replace_percent": 0.25
      }
    }
  },
  "validate": {
    "metrics": [
      {
        "name": "ComputeAverageDice",
        "args": {
          "name": "mean_dice",
          "is_key_metric": true,
          "field": "model",
          "label_field": "label"
        }
      }
    ],
    "pre_transforms": [
      {
        "name": "LoadNifti",
        "args": {
          "fields": [
            "image",
            "label"
          ]
        }
      },
      {
        "name": "ConvertToChannelsFirst",
        "args": {
          "fields": [
            "image",
            "label"
          ]
        }
      },
      {
        "name": "ReplaceLabels",
        "args": {
          "fields": "label",
            "input_labels":  [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],
            "output_labels": [0,0,0,0,0,0,0,0,1,1, 1, 1, 1, 1, 1, 1]
          }
      },
      {
        "name": "ScaleIntensityRange",
        "args": {
          "fields": "image",
          "a_min": -1500,
          "a_max": 500,
          "b_min": 0.0,
          "b_max": 1.0,
          "clip": true
        }
      }
    ],
    "image_pipeline": {
      "name": "SegmentationImagePipeline",
      "args": {
        "data_list_file_path": "{DATASET_JSON}",
        "data_file_base_dir": "{DATA_ROOT}",
        "data_list_key": "validation",
        "output_crop_size": [160, 160, 160],
        "output_batch_size": 1,
        "num_workers": 1,
        "prefetch_size": 0
      }
    },
    "inferer": {
      "name": "TFScanWindowInferer",
      "args": {
        "roi_size": [224, 224, 224]
      }
    }
  }
}

Best,
Takashi

Hi
Thanks for your interest in Clara Train SDK.
Please refer to the Clara Train notebooks for guidance: clara-train-examples/Tensorflow-Deprecated at master · NVIDIA/clara-train-examples · GitHub

However, it seems you are just starting with Clara Train, so I would strongly recommend moving to V4, which is based on MONAI and uses PyTorch. Please check out these notebooks to get started: clara-train-examples/PyTorch/NoteBooks at master · NVIDIA/clara-train-examples · GitHub

Hope this helps