model_config {
  arch: "efficientnet_b1"
  # These parameters are set to match the template downloaded from NGC.
  use_batch_norm: True
  all_projections: False
  freeze_blocks: 0
  freeze_blocks: 1
  resize_interpolation_method: BICUBIC
  input_image_size: "3,224,224"
}
train_config {
  train_dataset_path: "/workspace/tao-experiments/data/split/train"
  val_dataset_path: "/workspace/tao-experiments/data/split/val"
  pretrained_model_path: "/workspace/tao-experiments/classification/pre_trained_efficientnet_b1_swish/pretrained_classification_vefficientnet_b1_swish/efficientnet_b1_swish.hdf5"
  optimizer {
    sgd {
      lr: 0.01
      decay: 0.0
      momentum: 0.9
      nesterov: False
    }
  }
  batch_size_per_gpu: 32
  n_epochs: 100
  n_workers: 16
  preprocess_mode: "caffe"
  enable_random_crop: True
  enable_center_crop: True
  label_smoothing: 0.0
  mixup_alpha: 0.1
  # regularizer
  reg_config {
    type: "L2"
    scope: "Conv2D,Dense"
    weight_decay: 0.00005
  }
  # learning_rate
  lr_config {
    step {
      learning_rate: 0.006
      step_size: 10
      gamma: 0.1
    }
  }
}
eval_config {
  eval_dataset_path: "/workspace/tao-experiments/data/split/test"
  model_path: "/workspace/tao-experiments/classification/output/weights/efficientnet_b1_081.tlt"
  top_k: 3
  batch_size: 256
  n_workers: 8
  enable_center_crop: True
}
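With the spec saved to a file, training can be launched through the TAO launcher (TAO Toolkit 3.x/4.x syntax). The spec filename and results directory below are assumptions chosen to match the paths in the spec, not values taken from it; $KEY is your NGC encryption key.

tao classification train -e /workspace/tao-experiments/classification/classification_spec.cfg \
                         -r /workspace/tao-experiments/classification/output \
                         -k $KEY \
                         --gpus 1

The -r directory above lines up with the model_path referenced in eval_config, so the checkpoints written during training (for example efficientnet_b1_081.tlt) can be evaluated afterward with tao classification evaluate using the same spec and key.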