Installing DriveWorks within a Docker image

build_driveworks.sh

#!/bin/bash
# Build the base CUDA 9.0 / cuDNN 7 / Ubuntu 16.04 docker image used as the
# DriveWorks development environment.
#
# Requires: nvidia-docker, and the build context at ~/scripts/docker
# containing Dockerfile_ubuntu16.
set -euo pipefail

echo "Building base docker image..."

# Explicit failure check (not relying on set -e alone) so we can emit a
# descriptive message before exiting.
if ! nvidia-docker build \
              -f ~/scripts/docker/Dockerfile_ubuntu16 \
              -t cu90-dnn7-u16:base \
              ~/scripts/docker; then
    # Diagnostics belong on stderr, not stdout.
    echo "ERROR: failed building base docker image" >&2
    exit 1
fi

echo "Done"

# Start an interactive container from the freshly built base image.
# NOTE(review): the /home/user_name/* host paths must exist and should be
# adjusted to the actual host user before running.
docker run -it --name v4l_5050a \
  --privileged \
  -h "v4l_5050a" \
  -v /dev/bus/usb:/dev/bus/usb \
  -v /home/user_name/NVIDIA:/home/nvidia/NVIDIA \
  -v /home/user_name/dev:/home/nvidia/dev \
  -v /tmp/.X11-unix:/tmp/.X11-unix \
  -e DISPLAY="$DISPLAY" \
  -v /usr/lib/x86_64-linux-gnu:/usr/lib/nvidia-390 \
  -e LD_LIBRARY_PATH=/usr/lib/nvidia-390 \
  --runtime=nvidia \
  cu90-dnn7-u16:base \
  /bin/bash

Dockerfile_ubuntu16:

# Select OS basis: CUDA 9.0 + cuDNN 7 devel image on Ubuntu 16.04
FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04
#FROM nvidia/cuda:8.0-cudnn5-devel-ubuntu16.04
LABEL Description="Ubuntu 16.04 dev env for MW" Version="1.0"
ARG DEBIAN_FRONTEND=noninteractive
ARG DEBIAN_PRIORITY=critical
ENV TERM=xterm-color

# Needed to be able to install resolvconf without complaints about /etc/resolv.conf not being accessible
# See: https://github.com/moby/moby/issues/1297#issuecomment-115458690
RUN echo "resolvconf resolvconf/linkify-resolvconf boolean false" | debconf-set-selections

# Enable 32-bit packages (lib32*/i386) required by the SDK tooling
RUN dpkg --add-architecture i386

# Install prerequisites.
# Update, install, autoremove, clean, and purge the apt lists in a SINGLE
# layer: a separate cleanup RUN would not shrink the image, because the
# cached lists would remain baked into the earlier install layer.
RUN apt-get -y update && apt-get install -y --no-install-recommends \
        p7zip-full dpkg-dev lib32z1 tftp lib32ncurses5 lib32stdc++6 \
        isc-dhcp-server nfs-kernel-server minicom sudo gcc-multilib \
        software-properties-common x11-utils libgtk2.0-0 libcanberra-gtk* \
        cmake unzip expect usbutils dmidecode curl tree libreadline-dev \
        build-essential libncurses5-dev git vim-gtk libtbb-dev libglew-dev \
        libtiff5-dev libjpeg-dev libpng12-dev libjasper-dev libavcodec-dev \
        libavformat-dev libavutil-dev libpostproc-dev libswscale-dev \
        libeigen3-dev pkg-config policykit-1 psmisc lsof cpio \
        ca-certificates chromium-browser kmod lsb-core resolvconf tzdata \
        udev xterm \
    && apt-get -y autoremove \
    && apt-get -y clean \
    && rm -rf /var/lib/apt/lists/*

# Set up time and locale
# /etc/localtime is replaced by an actual file instead of symbolic link,
# otherwise DriveInstall can end up trying to copy the same file to itself
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
    locale-gen && \
    rm /etc/localtime && \
    cp /usr/share/zoneinfo/Etc/UTC /etc/localtime
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8

# Create nvidia user (uid/gid 1000), no password, add it to sudoers
RUN export uid=1000 gid=1000 && mkdir -p /home/nvidia && echo "nvidia:x:${uid}:${gid}:nvidia,,,:/home/nvidia:/bin/bash" >> /etc/passwd && echo "nvidia:x:${uid}:" >> /etc/group && echo "nvidia ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/nvidia && chmod 0440 /etc/sudoers.d/nvidia && chown ${uid}:${gid} -R /home/nvidia

# Switch to nvidia user
USER nvidia
ENV HOME /home/nvidia
# Escape \$PATH so it is expanded when the shell starts inside the container,
# not at build time (an unescaped $PATH would freeze the build-time PATH
# into .bashrc).
RUN touch /home/nvidia/.bashrc && echo "export PATH=/usr/local/cuda-9.0/bin/:\$PATH" >> /home/nvidia/.bashrc

# Create mount point inside nvidia home directory to share files between host and container
RUN mkdir /home/nvidia/shared
VOLUME /home/nvidia/shared

# Set locales
#RUN sudo apt-get -y install locales
#RUN sudo locale-gen en_US.UTF-8
#ENV LANG en_US.UTF-8
#ENV LANGUAGE en_US:en
#ENV LC_ALL en_US.UTF-8

# Install PDK
# Copy Release into the Docker image
# COPY sources.list /etc/apt/sources.list
#COPY installPDK.sh /home/nvidia/V4L/installPDK.sh
#COPY Release /home/nvidia/V4L
#RUN sudo chown -R nvidia:nvidia /home/nvidia/V4L
#WORKDIR /home/nvidia/V4L

CMD ["bash"]

Please refer to the link below:
https://devtalk.nvidia.com/default/topic/1027885/driveworks/installing-driveworks-within-a-docker-image/