diff --git a/.gitignore b/.gitignore index 572c5c07a..5e08b188b 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,7 @@ build # codecov htmlcov/ + +# PyTorch +lightning_logs +tb_logs diff --git a/.travis.yml b/.travis.yml index 991f8cfe7..dc07b88b0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,37 +1,33 @@ --- -language: python -# this is required for python 3.7 -dist: xenial -# This list should match the versions listed in setup.py -python: -- 3.6 -- 3.7 +# travis pipeline +dist: focal os: - linux + - osx install: - - sudo apt-get update -qq - - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh + - if [ "$TRAVIS_OS_NAME" = "linux" ]; then + sudo apt-get update -qq; + wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; + fi + - if [ "$TRAVIS_OS_NAME" = "osx" ]; then + wget https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh; + fi - bash miniconda.sh -b -p $HOME/miniconda - - export PATH="$HOME/miniconda/bin:$PATH" + - source "$HOME/miniconda/etc/profile.d/conda.sh" - hash -r - conda config --set always_yes yes --set changeps1 no + - conda config --append channels conda-forge - conda update -q conda + # Useful for debugging any issues with conda - conda info -a - - conda create -q -n test-environment-$TRAVIS_PYTHON_VERSION python=$TRAVIS_PYTHON_VERSION - - source activate test-environment-$TRAVIS_PYTHON_VERSION - - pip install --upgrade pip - - hash -r - - conda info -a - - pip install -e .[tf] - - pip install -e .[pc] - - pip install -e .[dev] - - pip install -e .[ci] - - pip install -e .[mm1] - - echo "pip freeze virtualenv=test-environment python=${TRAVIS_PYTHON_VERSION}" - - pip freeze > test-environment-${TRAVIS_PYTHON_VERSION}.txt - - cat test-environment-${TRAVIS_PYTHON_VERSION}.txt + - if [ "$TRAVIS_OS_NAME" = "osx" ]; then conda env create -f install/envs/mac.yml ; fi + - if [ "$TRAVIS_OS_NAME" = "linux" ]; then conda env create -f install/envs/ubuntu.yml ; fi + - conda activate donkey + - pip install -e . + - conda env export > environment.yml + - cat environment.yml script: - pytest -v donkeycar/tests --cov=./ @@ -65,7 +61,6 @@ jobs: - stage: lint name: black language: python - python: 3.6 install: - pip install --upgrade pip - hash -r diff --git a/README.md b/README.md index 85d14a105..03e88519d 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ community contributions. 
#### Quick Links * [Donkeycar Updates & Examples](http://donkeycar.com) * [Build instructions and Software documentation](http://docs.donkeycar.com) -* [Slack / Chat](https://donkey-slackin.herokuapp.com/) +* [Discord / Chat](https://discord.gg/PN6kFeA) ![donkeycar](./docs/assets/build_hardware/donkey2.png) @@ -37,7 +37,7 @@ The donkey car is controlled by running a sequence of events import time from donkeycar import Vehicle from donkeycar.parts.cv import CvCam -from donkeycar.parts.datastore import TubWriter +from donkeycar.parts.datastore_v2 import TubWriter V = Vehicle() IMAGE_W = 160 @@ -53,9 +53,7 @@ while cam.run() is None: time.sleep(1) #add tub part to record images -tub = TubWriter(path='./dat', - inputs=['image'], - types=['image_array']) +tub = TubWriter(path='./dat', inputs=['image'], types=['image_array']) V.add(tub, inputs=['image'], outputs=['num_records']) #start the drive loop at 10 Hz diff --git a/docs/guide/host_pc/setup_mac.md b/docs/guide/host_pc/setup_mac.md index ba9e8d3ff..9aa27fd77 100644 --- a/docs/guide/host_pc/setup_mac.md +++ b/docs/guide/host_pc/setup_mac.md @@ -37,6 +37,7 @@ conda env create -f install/envs/mac.yml conda activate donkey pip install -e .[pc] ``` +Note: if you are using ZSH (you'll know if you are), you won't be able to run `pip install -e .[pc]`. You'll need to escape the brackets and run `pip install -e .\[pc\]`. * Tensorflow GPU diff --git a/docs/guide/host_pc/setup_ubuntu.md b/docs/guide/host_pc/setup_ubuntu.md index 317030277..ddedfbfd4 100644 --- a/docs/guide/host_pc/setup_ubuntu.md +++ b/docs/guide/host_pc/setup_ubuntu.md @@ -42,19 +42,30 @@ conda env create -f install/envs/ubuntu.yml conda activate donkey pip install -e .[pc] ``` +Note: if you are using ZSH (you'll know if you are), you won't be able to run `pip install -e .[pc]`. You'll need to escape the brackets and run `pip install -e .\[pc\]`. * Optional Install Tensorflow GPU - only for NVidia Graphics cards You should have an NVidia GPU with the latest drivers. Conda will handle installing the correct cuda and cudnn libraries for the version of tensorflow you are using. ```bash -conda install tensorflow-gpu==1.13.1 +conda install tensorflow-gpu==2.2.0 ``` * Optional Install Coral edge tpu compiler If you have a Google Coral edge tpu, you may wish to compile models. You will need to install the edgetpu_compiler executable. Follow [their instructions](https://coral.withgoogle.com/docs/edgetpu/compiler/). +* Optionally configure PyTorch to use GPU - only for NVidia Graphics cards + +If you have an NVidia card, you should update to the latest drivers and [install Cuda SDK](https://www.tensorflow.org/install/gpu#windows_setup). + +```bash +conda install cudatoolkit=<CUDA Version> -c pytorch +``` + +You should replace `<CUDA Version>` with your CUDA version. Any version above 10.0 should work. You can find out your CUDA version by running `nvcc --version` or `nvidia-smi`. If the versions given by these two commands don't match, go with the version given by `nvidia-smi`. + * Create your local working dir: ```bash diff --git a/docs/guide/host_pc/setup_windows.md b/docs/guide/host_pc/setup_windows.md index 22ff44495..0100a1a20 100644 --- a/docs/guide/host_pc/setup_windows.md +++ b/docs/guide/host_pc/setup_windows.md @@ -1,4 +1,14 @@ -## Install Donkeycar on Windows +# Windows +
+Windows provides a few different methods for setting up and installing Donkey Car. + +1. Miniconda +2. Native +3. 
Windows Subsystem for Linux (WSL) - Experimental + +If you are unsure which method to use to install Donkey Car, please use option 1 above. + +## Install Donkeycar on Windows (miniconda) ![donkey](/assets/logos/windows_logo.png) @@ -33,19 +43,30 @@ conda env remove -n donkey * Create the Python anaconda environment ```bash -conda env create -f install\envs\windows.yml +conda env create -f install\envs\ubuntu.yml conda activate donkey pip install -e .[pc] ``` +Note: if you are using ZSH (you'll know if you are), you won't be able to run `pip install -e .[pc]`. You'll need to escape the brackets and run `pip install -e .\[pc\]`. * Optionally Install Tensorflow GPU - only for NVidia Graphics cards If you have an NVidia card, you should update to the latest drivers and [install Cuda SDK](https://www.tensorflow.org/install/gpu#windows_setup). ```bash -conda install tensorflow-gpu==1.13.1 +conda install tensorflow-gpu==2.2.0 ``` +* Optionally configure PyTorch to use GPU - only for NVidia Graphics cards + +If you have an NVidia card, you should update to the latest drivers and [install Cuda SDK](https://www.tensorflow.org/install/gpu#windows_setup). + +```bash +conda install cudatoolkit=<CUDA Version> -c pytorch +``` + +You should replace `<CUDA Version>` with your CUDA version. Any version above 10.0 should work. You can find out your CUDA version by running `nvcc --version` or `nvidia-smi`. + * Create your local working dir: ```bash @@ -57,5 +78,123 @@ donkey createcar --path ~/mycar > Python libraries ---- +### Next let's [install software on Donkeycar](/guide/install_software/#step-2-install-software-on-donkeycar) + +--- + +## Install Donkeycar on Windows (native) + +![donkey](/assets/logos/windows_logo.png) + +* Install [Python 3.6 (or later)](https://www.python.org/downloads/) + +* Install [Git Bash](https://gitforwindows.org/). During install make sure you check Git to run 'from command line and also from 3rd-party software'. + +* Open Command Prompt as Administrator via the Start Menu (cmd.exe | right-click | run as administrator) + +* Change to a folder that you would like to use for all your projects + +```shell +mkdir projects +cd projects +``` + +* Get the latest donkey from Github. + +```bash +git clone https://github.com/autorope/donkeycar +cd donkeycar +git checkout master +``` + +> NOTE: The `dev` branch has the latest (unstable) version of donkeycar with experimental features. + +* Install Donkeycar into Python + +``` +pip3 install -e .[pc] +``` + +* Recommended for GPU Users: Install Tensorflow GPU - only for NVIDIA Graphics cards + +If you have an NVIDIA card, you should update to the latest drivers and [install Cuda SDK](https://www.tensorflow.org/install/gpu#windows_setup). + +```bash +pip3 install tensorflow +``` + +* Create your local working dir: + +```bash +donkey createcar --path \Users\<username>\projects\mycar --template complete +``` + +> **Templates** +> There are a number of different templates to choose from in Donkey Car. 
+ > basic | complete > You can find all the templates in the [donkeycar/donkeycar/templates](https://github.com/autorope/donkeycar/tree/dev/donkeycar/templates) folder + +--- +### Next let's [install software on Donkeycar](/guide/install_software/#step-2-install-software-on-donkeycar) +--- + + +## Install Donkeycar on Windows (WSL) + +The Windows Subsystem for Linux (WSL) lets developers run a GNU/Linux environment -- including most command-line tools, utilities, and applications -- directly on Windows, unmodified, without the overhead of a traditional virtual machine or dual-boot setup. + +* Install [Windows Subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10). + 1. Turn on Windows 10 "Windows Subsystem for Linux" Feature (Settings > Apps > Programs and Features > Turn Windows features on or off) + 2. Download a Linux Distribution from the Microsoft Store (recommend [Ubuntu](https://www.microsoft.com/en-us/p/ubuntu/9nblggh4msv6?activetab=pivot:overviewtab) Latest) + 3. Open the Ubuntu App and configure. + +* Open the Ubuntu App to get a prompt window via Start Menu | Ubuntu + +* Install `git` using `sudo apt install git` + +* Install `python3` using `sudo apt install python3` + +* Change to a directory that you would like to use as the head of all your projects. + +```bash +mkdir projects +cd projects +``` + +* Get the latest donkey from Github. + +```bash +git clone https://github.com/autorope/donkeycar +cd donkeycar +git checkout master +``` + +> NOTE: The `dev` branch has the latest (unstable) version of donkeycar with experimental features. + +* Install Donkeycar into Python + +``` +pip3 install -e .[pc] +``` + +* Experimental Support - GPU Users: Install Tensorflow GPU - only for NVIDIA Graphics cards + +If you have an NVIDIA card, you should update to the latest drivers and [install Cuda SDK](https://www.tensorflow.org/install/gpu#windows_setup). + +```bash +pip3 install tensorflow +``` + +* Create your local working dir: + +```bash +donkey createcar --path /path/to/projects/mycar --template complete +``` + +> **Templates** +> There are a number of different templates to choose from in Donkey Car. +> basic | complete +> You can find all the templates in the [donkeycar/donkeycar/templates](https://github.com/autorope/donkeycar/tree/dev/donkeycar/templates) folder +--- ### Next let's [install software on Donkeycar](/guide/install_software/#step-2-install-software-on-donkeycar) diff --git a/docs/guide/robot_sbc/setup_jetson_nano.md b/docs/guide/robot_sbc/setup_jetson_nano.md index 6eb426067..2de45d2b0 100644 --- a/docs/guide/robot_sbc/setup_jetson_nano.md +++ b/docs/guide/robot_sbc/setup_jetson_nano.md @@ -16,10 +16,50 @@ Visit the official [Nvidia Jetson Nano Getting Started Guide](https://developer. ssh into your vehicle. Use the terminal for Ubuntu or Mac. [Putty](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) for Windows. +Note: you can either proceed with this tutorial, or if you have Jetpack 4.4 installed, you can use a script to automate the setup. The script is located in `donkeycar/install/nano/install-jp44.sh`. You will need to edit line #3 of the script and replace the default password with your password. This script will install all Git repositories into a ~/projects directory. If you wish to use a different directory, you will need to change this as well (replace all instances of ~/projects with your desired folder path). + +First install some packages with `apt-get`. 
```bash -sudo apt-get update -sudo apt-get upgrade -sudo apt-get install build-essential python3 python3-dev python3-pip python3-pandas python3-h5py libhdf5-serial-dev hdf5-tools nano ntp +sudo apt-get update -y +sudo apt-get upgrade -y +sudo apt-get install -y libhdf5-serial-dev hdf5-tools libhdf5-dev zlib1g-dev zip libjpeg8-dev liblapack-dev libblas-dev gfortran +sudo apt-get install -y python3-dev python3-pip +sudo apt-get install -y libxslt1-dev libxml2-dev libffi-dev libcurl4-openssl-dev libssl-dev libpng-dev libopenblas-dev +sudo apt-get install -y git +sudo apt-get install -y openmpi-doc openmpi-bin libopenmpi-dev libopenblas-dev +``` + +Next, you will need to install packages with `pip`: +```bash +sudo -H pip3 install -U pip testresources setuptools +sudo -H pip3 install -U futures==3.1.1 protobuf==3.12.2 pybind11==2.5.0 +sudo -H pip3 install -U cython==0.29.21 +sudo -H pip3 install -U numpy==1.19.0 +sudo -H pip3 install -U future==0.18.2 mock==4.0.2 h5py==2.10.0 keras_preprocessing==1.1.2 keras_applications==1.0.8 gast==0.3.3 +sudo -H pip3 install -U grpcio==1.30.0 absl-py==0.9.0 py-cpuinfo==7.0.0 psutil==5.7.2 portpicker==1.3.1 six requests==2.24.0 astor==0.8.1 termcolor==1.1.0 wrapt==1.12.1 google-pasta==0.2.0 +sudo -H pip3 install -U scipy==1.4.1 +sudo -H pip3 install -U pandas==1.0.5 +sudo -H pip3 install -U gdown + +# This will install tensorflow as a system package +sudo -H pip3 install --pre --extra-index-url https://developer.download.nvidia.com/compute/redist/jp/v44 tensorflow==2.2.0+nv20.6 +``` + +Finally, you can install PyTorch: +```bash +# Install PyTorch v1.7 - torchvision v0.8.1 +wget https://nvidia.box.com/shared/static/wa34qwrwtk9njtyarwt5nvo6imenfy26.whl -O torch-1.7.0-cp36-cp36m-linux_aarch64.whl +sudo -H pip3 install ./torch-1.7.0-cp36-cp36m-linux_aarch64.whl + +# Install PyTorch Vision +sudo apt-get install libjpeg-dev zlib1g-dev libpython3-dev libavcodec-dev libavformat-dev libswscale-dev + +# You can replace the following line with wherever you want to store your Git repositories +mkdir -p ~/projects; cd ~/projects +git clone --branch v0.8.1 https://github.com/pytorch/vision torchvision +cd torchvision +export BUILD_VERSION=0.8.1 +sudo python3 setup.py install ``` Optionally, you can install RPi.GPIO clone for Jetson Nano from [here](https://github.com/NVIDIA/jetson-gpio). This is not required for default setup, but can be useful if using LED or other GPIO driven devices. @@ -50,19 +90,6 @@ git checkout master pip install -e .[nano] ``` -For Jetpack 4.2, -``` -pip install --extra-index-url https://developer.download.nvidia.com/compute/redist/jp/v42 tensorflow-gpu==1.15.0+nv19.11 -``` - -For Jetpack 4.4, -``` -pip install --pre --extra-index-url https://developer.download.nvidia.com/compute/redist/jp/v44 'tensorflow<2' -``` - - -Note: This last command can take some time to compile grpcio. 
- ## Step 5: Install PyGame (Optional) If you plan to use a USB camera, you will also want to set up pygame: diff --git a/docs/guide/robot_sbc/setup_raspberry_pi.md b/docs/guide/robot_sbc/setup_raspberry_pi.md index 5621bc163..57ba4097f 100644 --- a/docs/guide/robot_sbc/setup_raspberry_pi.md +++ b/docs/guide/robot_sbc/setup_raspberry_pi.md @@ -219,8 +219,11 @@ git clone https://github.com/autorope/donkeycar cd donkeycar git checkout master pip install -e .[pi] -pip install tensorflow==1.13.1 pip install numpy --upgrade +wget "https://raw.githubusercontent.com/PINTO0309/Tensorflow-bin/master/tensorflow-2.3.1-cp37-none-linux_armv7l_download.sh" +chmod u+x tensorflow-2.3.1-cp37-none-linux_armv7l_download.sh +./tensorflow-2.3.1-cp37-none-linux_armv7l_download.sh +pip install tensorflow-2.3.1-cp37-none-linux_armv7l.whl ``` You can validate your tensorflow install with diff --git a/donkeycar/__init__.py b/donkeycar/__init__.py index 6ca995d8b..de73f1d8c 100644 --- a/donkeycar/__init__.py +++ b/donkeycar/__init__.py @@ -1,14 +1,19 @@ -__version__ = '3.1.5' +import sys +from pyfiglet import Figlet -print('using donkey v{} ...'.format(__version__)) +__version__ = '4.1.0-dev' +f = Figlet(font='speed') -import sys +print(f.renderText('Donkey Car')) +print(f'using donkey v{__version__} ...') -if sys.version_info.major < 3: - msg = 'Donkey Requires Python 3.4 or greater. You are using {}'.format(sys.version) +if sys.version_info < (3, 6): + msg = f'Donkey Requires Python 3.6 or greater. You are using {sys.version}' raise ValueError(msg) -from . import parts +# The default recursion limits in CPython are too small. +sys.setrecursionlimit(10**5) + from .vehicle import Vehicle from .memory import Memory from . import utils diff --git a/donkeycar/benchmarks/tub.py b/donkeycar/benchmarks/tub.py new file mode 100644 index 000000000..c22ce0d24 --- /dev/null +++ b/donkeycar/benchmarks/tub.py @@ -0,0 +1,44 @@ +import json +import os +import shutil +import timeit +from pathlib import Path + +import numpy as np + +from donkeycar.parts.datastore import Tub + + +def benchmark(): + # Change to a non-SSD storage path + path = Path('/media/rahulrav/Cruzer/tub') + if os.path.exists(path.absolute().as_posix()): + shutil.rmtree(path) + + inputs = ['input'] + types = ['int'] + tub = Tub(path.absolute().as_posix(), inputs, types) + write_count = 1000 + for i in range(write_count): + record = {'input': i} + tub.put_record(record) + + # old tub starts counting at 1 + deletions = set(np.random.randint(1, write_count + 1, 100)) + for index in deletions: + index = int(index) + tub.remove_record(index) + + files = path.glob('*.json') + for record_file in files: + contents = record_file.read_text() + if contents: + contents = json.loads(contents) + print('Record %s' % contents) + + +if __name__ == "__main__": + timer = timeit.Timer(benchmark) + time_taken = timer.timeit(number=1) + print('Time taken %s seconds' % time_taken) + print('\nDone.') diff --git a/donkeycar/benchmarks/tub_v2.py b/donkeycar/benchmarks/tub_v2.py new file mode 100644 index 000000000..492c1110a --- /dev/null +++ b/donkeycar/benchmarks/tub_v2.py @@ -0,0 +1,42 @@ +import os +import shutil +import timeit +from pathlib import Path + +import numpy as np + +from donkeycar.parts.tub_v2 import Tub + + +def benchmark(): + # Change to a non-SSD storage path + path = Path('/media/rahulrav/Cruzer/benchmark') + + # Recreate paths + if os.path.exists(path.absolute().as_posix()): + shutil.rmtree(path) + + inputs = ['input'] + types = ['int'] + 
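# max_catalog_len caps how many records go into a single catalog file before the tub rolls over to a new one +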
tub = Tub(path.as_posix(), inputs, types, max_catalog_len=1000) + write_count = 1000 + for i in range(write_count): + record = {'input': i} + tub.write_record(record) + + deletions = np.random.randint(0, write_count, 100) + for index in deletions: + index = int(index) + tub.delete_record(index) + + for record in tub: + print('Record %s' % record) + + tub.close() + + +if __name__ == "__main__": + timer = timeit.Timer(benchmark) + time_taken = timer.timeit(number=1) + print('Time taken %s seconds' % time_taken) + print('\nDone.') diff --git a/donkeycar/config.py b/donkeycar/config.py index 4c64ee159..4f0919139 100644 --- a/donkeycar/config.py +++ b/donkeycar/config.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Sep 13 21:27:44 2017 @@ -7,11 +6,11 @@ """ import os import types - + + class Config: - def from_pyfile(self, filename, silent=False): - #filename = os.path.join(self.root_path, filename) + def from_pyfile(self, filename): d = types.ModuleType('config') d.__file__ = filename try: @@ -26,14 +25,13 @@ def from_pyfile(self, filename, silent=False): def from_object(self, obj): for key in dir(obj): if key.isupper(): - #self[key] = getattr(obj, key) setattr(self, key, getattr(obj, key)) def __str__(self): result = [] for key in dir(self): if key.isupper(): - result.append((key, getattr(self,key))) + result.append((key, getattr(self, key))) return str(result) def show(self): @@ -42,7 +40,6 @@ def show(self): print(attr, ":", getattr(self, attr)) - def load_config(config_path=None, myconfig="myconfig.py"): if config_path is None: @@ -58,29 +55,20 @@ def load_config(config_path=None, myconfig="myconfig.py"): cfg = Config() cfg.from_pyfile(config_path) - #look for the optional myconfig.py in the same path. - print("myconfig", myconfig) + # look for the optional myconfig.py in the same path. 
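+ # e.g. '/path/to/mycar/config.py' -> '/path/to/mycar/myconfig.py'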
personal_cfg_path = config_path.replace("config.py", myconfig) if os.path.exists(personal_cfg_path): print("loading personal config over-rides from", myconfig) personal_cfg = Config() personal_cfg.from_pyfile(personal_cfg_path) - #personal_cfg.show() - cfg.from_object(personal_cfg) - #print("final settings:") - #cfg.show() else: print("personal config: file not found ", personal_cfg_path) - - - #derivative settings + + # derived settings if hasattr(cfg, 'IMAGE_H') and hasattr(cfg, 'IMAGE_W'): - cfg.TARGET_H = cfg.IMAGE_H - cfg.ROI_CROP_TOP - cfg.ROI_CROP_BOTTOM + cfg.TARGET_H = cfg.IMAGE_H cfg.TARGET_W = cfg.IMAGE_W cfg.TARGET_D = cfg.IMAGE_DEPTH - print() - - print('config loaded') return cfg diff --git a/donkeycar/management/base.py b/donkeycar/management/base.py index c49bedc25..ede7f4052 100644 --- a/donkeycar/management/base.py +++ b/donkeycar/management/base.py @@ -1,23 +1,23 @@ -import shutil import argparse -import json -import uuid - -from socket import * import os -from threading import Thread +import shutil +import socket +import stat +import sys +from socket import * +from pathlib import Path +from progress.bar import IncrementalBar import donkeycar as dk -from donkeycar.parts.datastore import Tub -from donkeycar.utils import * -from donkeycar.management.tub import TubManager from donkeycar.management.joystick_creator import CreateJoystick -import numpy as np +from donkeycar.management.tub import TubManager +from donkeycar.utils import * PACKAGE_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) TEMPLATES_PATH = os.path.join(PACKAGE_PATH, 'templates') + def make_dir(path): real_path = os.path.expanduser(path) print('making dir ', real_path) @@ -58,7 +58,6 @@ def parse_args(self, args): parser.add_argument('--path', default=None, help='path where to create car folder') parser.add_argument('--template', default=None, help='name of car template to use') parser.add_argument('--overwrite', action='store_true', help='should replace existing files') - parsed_args = parser.parse_args(args) return parsed_args @@ -66,18 +65,16 @@ def run(self, args): args = self.parse_args(args) self.create_car(path=args.path, template=args.template, overwrite=args.overwrite) - def create_car(self, path, template='complete', overwrite=False): + def create_car(self, path, template='basic', overwrite=False): """ This script sets up the folder structure for donkey to work. It must run without donkey installed so that people installing with docker can build the folder structure for docker to mount to. 
""" - #these are neeeded incase None is passed as path + # these are neeeded incase None is passed as path path = path or '~/mycar' - template = template or 'complete' - - + template = template or 'basic' print("Creating car folder: {}".format(path)) path = make_dir(path) @@ -87,7 +84,7 @@ def create_car(self, path, template='complete', overwrite=False): for fp in folder_paths: make_dir(fp) - #add car application and config files if they don't exist + # add car application and config files if they don't exist app_template_path = os.path.join(TEMPLATES_PATH, template+'.py') config_template_path = os.path.join(TEMPLATES_PATH, 'cfg_' + template + '.py') myconfig_template_path = os.path.join(TEMPLATES_PATH, 'myconfig.py') @@ -97,36 +94,40 @@ def create_car(self, path, template='complete', overwrite=False): car_config_path = os.path.join(path, 'config.py') mycar_config_path = os.path.join(path, 'myconfig.py') train_app_path = os.path.join(path, 'train.py') - calibrate_app_path = os.path.join(path, 'calibrate.py') + calibrate_app_path = os.path.join(path, 'calibrate.py') if os.path.exists(car_app_path) and not overwrite: print('Car app already exists. Delete it and rerun createcar to replace.') else: print("Copying car application template: {}".format(template)) shutil.copyfile(app_template_path, car_app_path) - + os.chmod(car_app_path, stat.S_IRWXU) + if os.path.exists(car_config_path) and not overwrite: print('Car config already exists. Delete it and rerun createcar to replace.') else: print("Copying car config defaults. Adjust these before starting your car.") shutil.copyfile(config_template_path, car_config_path) - + if os.path.exists(train_app_path) and not overwrite: print('Train already exists. Delete it and rerun createcar to replace.') else: print("Copying train script. Adjust these before starting your car.") shutil.copyfile(train_template_path, train_app_path) - + os.chmod(train_app_path, stat.S_IRWXU) + if os.path.exists(calibrate_app_path) and not overwrite: print('Calibrate already exists. Delete it and rerun createcar to replace.') else: print("Copying calibrate script. Adjust these before starting your car.") shutil.copyfile(calibrate_template_path, calibrate_app_path) + os.chmod(calibrate_app_path, stat.S_IRWXU) if not os.path.exists(mycar_config_path): print("Copying my car config overrides") shutil.copyfile(myconfig_template_path, mycar_config_path) - #now copy file contents from config to myconfig, with all lines commented out. + # now copy file contents from config to myconfig, with all lines + # commented out. 
cfg = open(car_config_path, "rt") mcfg = open(mycar_config_path, "at") copy = False @@ -162,7 +163,6 @@ class FindCar(BaseCommand): def parse_args(self, args): pass - def run(self, args): print('Looking up your computer IP address...') s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) @@ -175,9 +175,8 @@ def run(self, args): cmd = "sudo nmap -sP " + ip + "/24 | awk '/^Nmap/{ip=$NF}/B8:27:EB/{print ip}'" print("Your car's ip address is:" ) os.system(cmd) - - - + + class CalibrateCar(BaseCommand): def parse_args(self, args): @@ -253,6 +252,7 @@ def parse_args(self, args): parser.add_argument('--start', type=int, default=0, help='first frame to process') parser.add_argument('--end', type=int, default=-1, help='last frame to process') parser.add_argument('--scale', type=int, default=2, help='make image frame output larger by X mult') + parser.add_argument('--draw-user-input', default=True, action='store_false', help='show user input on the video') parsed_args = parser.parse_args(args) return parsed_args, parser @@ -269,147 +269,6 @@ def run(self, args): mm.run(args, parser) -class TubCheck(BaseCommand): - def parse_args(self, args): - parser = argparse.ArgumentParser(prog='tubcheck', usage='%(prog)s [options]') - parser.add_argument('tubs', nargs='+', help='paths to tubs') - parser.add_argument('--fix', action='store_true', help='remove problem records') - parser.add_argument('--delete_empty', action='store_true', help='delete tub dir with no records') - parsed_args = parser.parse_args(args) - return parsed_args - - def check(self, tub_paths, fix=False, delete_empty=False): - ''' - Check for any problems. Looks at tubs and find problems in any records or images that won't open. - If fix is True, then delete images and records that cause problems. - ''' - cfg = load_config('config.py') - tubs = gather_tubs(cfg, tub_paths) - - for tub in tubs: - tub.check(fix=fix) - if delete_empty and tub.get_num_records() == 0: - import shutil - print("removing empty tub", tub.path) - shutil.rmtree(tub.path) - - def run(self, args): - args = self.parse_args(args) - self.check(args.tubs, args.fix, args.delete_empty) - - -class ShowHistogram(BaseCommand): - - def parse_args(self, args): - parser = argparse.ArgumentParser(prog='tubhist', usage='%(prog)s [options]') - parser.add_argument('--tub', nargs='+', help='paths to tubs') - parser.add_argument('--record', default=None, help='name of record to create histogram') - parser.add_argument('--out', default=None, help='path where to save histogram end with .png') - parsed_args = parser.parse_args(args) - return parsed_args - - def show_histogram(self, tub_paths, record_name, out): - ''' - Produce a histogram of record type frequency in the given tub - ''' - from matplotlib import pyplot as plt - from donkeycar.parts.datastore import TubGroup - - output = out or os.path.basename(tub_paths) - tg = TubGroup(tub_paths=tub_paths) - - if record_name is not None: - tg.df[record_name].hist(bins=50) - else: - tg.df.hist(bins=50) - - try: - if out is not None: - filename = output - else: - if record_name is not None: - filename = output + '_hist_%s.png' % record_name.replace('/', '_') - else: - filename = output + '_hist.png' - plt.savefig(filename) - print('saving image to:', filename) - except Exception as e: - print(e) - plt.show() - - def run(self, args): - args = self.parse_args(args) - args.tub = ','.join(args.tub) - self.show_histogram(args.tub, args.record, args.out) - - -class ConSync(BaseCommand): - ''' - continuously rsync data - ''' - - def parse_args(self, 
args): - parser = argparse.ArgumentParser(prog='consync', usage='%(prog)s [options]') - parser.add_argument('--dir', default='./cont_data/', help='paths to tubs') - parser.add_argument('--delete', default='y', help='remove files locally that were deleted remotely y=yes n=no') - parsed_args = parser.parse_args(args) - return parsed_args - - def run(self, args): - args = self.parse_args(args) - cfg = load_config('config.py') - dest_dir = args.dir - del_arg = "" - - if args.delete == 'y': - reply = input('WARNING:this rsync operation will delete data in the target dir: %s. ok to proceeed? [y/N]: ' % dest_dir) - - if reply != 'y' and reply != "Y": - return - del_arg = "--delete" - - if not dest_dir[-1] == '/' and not dest_dir[-1] == '\\': - print("Desination dir should end with a /") - return - - try: - os.mkdir(dest_dir) - except: - pass - - while True: - command = "rsync -aW --progress %s@%s:%s/data/ %s %s" %\ - (cfg.PI_USERNAME, cfg.PI_HOSTNAME, cfg.PI_DONKEY_ROOT, dest_dir, del_arg) - - os.system(command) - time.sleep(5) - - -class ConTrain(BaseCommand): - ''' - continuously train data - ''' - - def parse_args(self, args): - parser = argparse.ArgumentParser(prog='contrain', usage='%(prog)s [options]') - parser.add_argument('--tub', default='./cont_data/*', help='paths to tubs') - parser.add_argument('--model', default='./models/drive.h5', help='path to model') - parser.add_argument('--transfer', default=None, help='path to transfer model') - parser.add_argument('--type', default='categorical', help='type of model (linear|categorical|rnn|imu|behavior|3d)') - parser.add_argument('--aug', action="store_true", help='perform image augmentation') - parsed_args = parser.parse_args(args) - return parsed_args - - def run(self, args): - args = self.parse_args(args) - cfg = load_config('config.py') - import sys - sys.path.append('.') - from train import multi_train - continuous = True - multi_train(cfg, args.tub, args.model, args.transfer, args.type, continuous, args.aug) - - class ShowCnnActivations(BaseCommand): def __init__(self): @@ -428,7 +287,7 @@ def get_activations(self, image_path, model_path, cfg): image_path = os.path.expanduser(image_path) model = load_model(model_path, compile=False) - image = load_scaled_image_arr(image_path, cfg)[None, ...] + image = load_image(image_path, cfg)[None, ...] 
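+        # the [None, ...] above adds a leading batch dimension of 1, since the model expects a batch of images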
conv_layer_names = self.get_conv_layers(model) input_layer = model.get_layer(name='img_in').input @@ -499,21 +358,23 @@ def plot_predictions(self, cfg, tub_paths, model_path, limit, model_type): model_type = cfg.DEFAULT_MODEL_TYPE model.load(model_path) - records = gather_records(cfg, tub_paths) user_angles = [] user_throttles = [] pilot_angles = [] pilot_throttles = [] + from donkeycar.parts.tub_v2 import Tub + from pathlib import Path + + base_path = Path(os.path.expanduser(tub_paths)).absolute().as_posix() + tub = Tub(base_path) + records = list(tub) records = records[:limit] - num_records = len(records) - print('processing %d records:' % num_records) - - for record_path in records: - with open(record_path, 'r') as fp: - record = json.load(fp) - img_filename = os.path.join(tub_paths, record['cam/image_array']) - img = load_scaled_image_arr(img_filename, cfg) + bar = IncrementalBar('Inferencing', max=len(records)) + + for record in records: + img_filename = os.path.join(base_path, Tub.images(), record['cam/image_array']) + img = load_image(img_filename, cfg) user_angle = float(record["user/angle"]) user_throttle = float(record["user/throttle"]) pilot_angle, pilot_throttle = model.run(img) @@ -522,6 +383,7 @@ def plot_predictions(self, cfg, tub_paths, model_path, limit, model_type): user_throttles.append(user_throttle) pilot_angles.append(pilot_angle) pilot_throttles.append(pilot_throttle) + bar.next() angles_df = pd.DataFrame({'user_angle': user_angles, 'pilot_angle': pilot_angles}) throttles_df = pd.DataFrame({'user_throttle': user_throttles, 'pilot_throttle': pilot_throttles}) @@ -546,7 +408,7 @@ def plot_predictions(self, cfg, tub_paths, model_path, limit, model_type): def parse_args(self, args): parser = argparse.ArgumentParser(prog='tubplot', usage='%(prog)s [options]') parser.add_argument('--tub', nargs='+', help='The tub to make plot from') - parser.add_argument('--model', default=None, help='name of record to create histogram') + parser.add_argument('--model', default=None, help='model for predictions') parser.add_argument('--limit', type=int, default=1000, help='how many records to process') parser.add_argument('--type', default=None, help='model type') parser.add_argument('--config', default='./config.py', help='location of config file to use. default: ./config.py') @@ -558,51 +420,36 @@ def run(self, args): args.tub = ','.join(args.tub) cfg = load_config(args.config) self.plot_predictions(cfg, args.tub, args.model, args.limit, args.type) - -class TubAugment(BaseCommand): + +class Train(BaseCommand): + def parse_args(self, args): - parser = argparse.ArgumentParser(prog='tubaugment', - usage='%(prog)s [options]') - parser.add_argument('tubs', nargs='+', help='paths to tubs') - parser.add_argument('--inplace', dest='inplace', action='store_true', - help='If tub should be changed in place or new ' - 'tub will be created') - parser.set_defaults(inplace=False) + parser = argparse.ArgumentParser(prog='train', usage='%(prog)s [options]') + parser.add_argument('--tub', nargs='+', help='tub data for training') + parser.add_argument('--model', default=None, help='output model name') + parser.add_argument('--type', default=None, help='model type') + parser.add_argument('--config', default='./config.py', help='location of config file to use. default: ./config.py') + parser.add_argument('--framework', choices=['tensorflow', 'pytorch', None], required=False, help='the AI framework to use (tensorflow|pytorch). 
Defaults to config.DEFAULT_AI_FRAMEWORK') + parser.add_argument('--checkpoint', type=str, help='location of checkpoint to resume training from') parsed_args = parser.parse_args(args) return parsed_args - def augment(self, tub_paths, inplace=False): - """ - :param tub_paths: path list to tubs - :param inplace: if tub should be changed or copied - :return: None - """ - cfg = load_config('config.py') - tubs = gather_tubs(cfg, tub_paths) - - for tub in tubs: - if inplace: - tub.augment_images() - else: - tub_path = tub.path - # remove trailing slash if exits - if tub_path[-1] == '/': - tub_path = tub_path[:-1] - # create new tub path by inserting '_aug' after 'tub_XY' - head, tail = os.path.split(tub_path) - tail_list = tail.split('_') - tail_list.insert(2, 'aug') - new_tail = '_'.join(tail_list) - new_path = os.path.join(head, new_tail) - # copy whole tub to new location and run augmentation - shutil.copytree(tub.path, new_path) - new_tub = Tub(new_path) - new_tub.augment_images() - def run(self, args): args = self.parse_args(args) - self.augment(args.tubs, args.inplace) + args.tub = ','.join(args.tub) + cfg = load_config(args.config) + framework = args.framework if args.framework else cfg.DEFAULT_AI_FRAMEWORK + + if framework == 'tensorflow': + from donkeycar.pipeline.training import train + train(cfg, args.tub, args.model, args.type) + elif framework == 'pytorch': + from donkeycar.parts.pytorch.torch_train import train + train(cfg, args.tub, args.model, args.type, + checkpoint_path=args.checkpoint) + else: + print("Unrecognized framework: {}. Please specify one of 'tensorflow' or 'pytorch'".format(framework)) def execute_from_command_line(): @@ -610,21 +457,17 @@ def execute_from_command_line(): This is the function linked to the "donkey" terminal command. 
""" commands = { - 'createcar': CreateCar, - 'findcar': FindCar, - 'calibrate': CalibrateCar, - 'tubclean': TubManager, - 'tubhist': ShowHistogram, - 'tubplot': ShowPredictionPlots, - 'tubcheck': TubCheck, - 'tubaugment': TubAugment, - 'makemovie': MakeMovieShell, - 'createjs': CreateJoystick, - 'consync': ConSync, - 'contrain': ConTrain, - 'cnnactivations': ShowCnnActivations, - 'update': UpdateCar - } + 'createcar': CreateCar, + 'findcar': FindCar, + 'calibrate': CalibrateCar, + 'tubclean': TubManager, + 'tubplot': ShowPredictionPlots, + 'makemovie': MakeMovieShell, + 'createjs': CreateJoystick, + 'cnnactivations': ShowCnnActivations, + 'update': UpdateCar, + 'train': Train, + } args = sys.argv[:] diff --git a/donkeycar/management/joystick_creator.py b/donkeycar/management/joystick_creator.py index 482c6674e..87963d227 100644 --- a/donkeycar/management/joystick_creator.py +++ b/donkeycar/management/joystick_creator.py @@ -5,7 +5,6 @@ import time import math -from donkeycar.parts.datastore import Tub from donkeycar.utils import * from donkeycar.parts.controller import JoystickCreatorController diff --git a/donkeycar/management/makemovie.py b/donkeycar/management/makemovie.py index 0d20b7a17..3135ee120 100755 --- a/donkeycar/management/makemovie.py +++ b/donkeycar/management/makemovie.py @@ -12,7 +12,7 @@ raise Exception("Please install keras-vis: pip install git+https://github.com/autorope/keras-vis.git") import donkeycar as dk -from donkeycar.parts.datastore import Tub +from donkeycar.parts.tub_v2 import Tub from donkeycar.utils import * @@ -54,26 +54,30 @@ def run(self, args, parser): return self.tub = Tub(args.tub) - self.index = self.tub.get_index(shuffled=False) + start = args.start - self.end = args.end if args.end != -1 else len(self.index) - if self.end >= len(self.index): - self.end = len(self.index) - 1 - num_frames = self.end - start - self.iRec = start + self.end_index = args.end if args.end != -1 else len(self.tub) + num_frames = self.end_index - start + + # Move to the correct offset + self.current = 0 + self.iterator = self.tub.__iter__() + while self.current < start: + self.iterator.next() + self.current += 1 + self.scale = args.scale self.keras_part = None self.do_salient = False + self.user = args.draw_user_input if args.model is not None: self.keras_part = get_model_by_type(args.type, cfg=self.cfg) self.keras_part.load(args.model) - self.keras_part.compile() if args.salient: self.do_salient = self.init_salient(self.keras_part.model) print('making movie', args.out, 'from', num_frames, 'images') - clip = mpy.VideoClip(self.make_frame, - duration=((num_frames - 1) / self.cfg.DRIVE_LOOP_HZ)) + clip = mpy.VideoClip(self.make_frame, duration=((num_frames - 1) / self.cfg.DRIVE_LOOP_HZ)) clip.write_videofile(args.out, fps=self.cfg.DRIVE_LOOP_HZ) def draw_user_input(self, record, img): @@ -101,34 +105,32 @@ def draw_user_input(self, record, img): # user is green, pilot is blue cv2.line(img, p1, p11, (0, 255, 0), 2) - def draw_model_prediction(self, record, img): - ''' + def draw_model_prediction(self, img): + """ query the model for it's prediction, draw the predictions as a blue line on the image - ''' + """ if self.keras_part is None: return import cv2 - - expected = self.keras_part.model.inputs[0].shape[1:] - actual = img.shape - # normalize image before prediction - pred_img = img.astype(np.float32) / 255.0 + expected = tuple(self.keras_part.get_input_shape()[1:]) + actual = img.shape - # check input depth + # if model expects grey-scale but got rgb, covert if expected[2] == 
1 and actual[2] == 3: - pred_img = rgb2gray(pred_img) - pred_img = pred_img.reshape(pred_img.shape + (1,)) - actual = pred_img.shape + # convert the rgb image to grey-scale + grey_img = rgb2gray(img) + img = grey_img.reshape(grey_img.shape + (1,)) + actual = img.shape if expected != actual: print("expected input dim", expected, "didn't match actual dim", actual) return - pilot_angle, pilot_throttle = self.keras_part.run(pred_img) - height, width, _ = pred_img.shape + pilot_angle, pilot_throttle = self.keras_part.run(img) + height, width, _ = img.shape length = height a2 = pilot_angle * 45.0 @@ -143,7 +145,7 @@ def draw_model_prediction(self, record, img): # user is green, pilot is blue cv2.line(img, p2, p22, (0, 0, 255), 2) - def draw_steering_distribution(self, record, img): + def draw_steering_distribution(self, img): ''' query the model for its prediction, draw the distribution of steering choices ''' @@ -154,7 +156,8 @@ def draw_steering_distribution(self, record, img): import cv2 - pred_img = img.reshape((1,) + img.shape) + pred_img = normalize_image(img) + pred_img = pred_img.reshape((1,) + pred_img.shape) angle_binned, _ = self.keras_part.model.predict(pred_img) x = 4 @@ -211,17 +214,16 @@ def draw_salient(self, img): import cv2 alpha = 0.004 beta = 1.0 - alpha - expected = self.keras_part.model.inputs[0].shape[1:] actual = img.shape - pred_img = img.astype(np.float32) / 255.0 - # check input depth + # check input depth and convert to grey to match expected model input if expected[2] == 1 and actual[2] == 3: - pred_img = rgb2gray(pred_img) - pred_img = pred_img.reshape(pred_img.shape + (1,)) + grey_img = rgb2gray(img) + img = grey_img.reshape(grey_img.shape + (1,)) - salient_mask = self.compute_visualisation_mask(pred_img) + norm_img = normalize_image(img) + salient_mask = self.compute_visualisation_mask(norm_img) z = np.zeros_like(salient_mask) salient_mask_stacked = np.dstack((z, z)) salient_mask_stacked = np.dstack((salient_mask_stacked, salient_mask)) @@ -236,25 +238,22 @@ def make_frame(self, t): a frame counter. This assumes sequential access. 
''' - if self.iRec >= self.end or self.iRec >= len(self.index): + if self.current >= self.end_index: return None - rec_ix = self.index[self.iRec] - rec = self.tub.get_record(rec_ix) - image = rec['cam/image_array'] - - if self.cfg.ROI_CROP_TOP != 0 or self.cfg.ROI_CROP_BOTTOM != 0: - image = img_crop(image, self.cfg.ROI_CROP_TOP, self.cfg.ROI_CROP_BOTTOM) + rec = next(self.iterator) + img_path = os.path.join(self.tub.images_base_path, rec['cam/image_array']) + image = img_to_arr(Image.open(img_path)) if self.do_salient: image = self.draw_salient(image) image = image * 255 image = image.astype('uint8') - self.draw_user_input(rec, image) + if self.user: self.draw_user_input(rec, image) if self.keras_part is not None: - self.draw_model_prediction(rec, image) - self.draw_steering_distribution(rec, image) + self.draw_model_prediction(image) + self.draw_steering_distribution(image) if self.scale != 1: import cv2 @@ -262,6 +261,6 @@ def make_frame(self, t): dsize = (w * self.scale, h * self.scale) image = cv2.resize(image, dsize=dsize, interpolation=cv2.INTER_CUBIC) - self.iRec += 1 + self.current += 1 # returns an 8-bit RGB array return image diff --git a/donkeycar/management/tub.py b/donkeycar/management/tub.py index be39e3858..022d54bb9 100644 --- a/donkeycar/management/tub.py +++ b/donkeycar/management/tub.py @@ -4,10 +4,16 @@ Manage tubs ''' -import os, sys, time import json +import os +import sys +import time +from pathlib import Path +from stat import S_ISREG, ST_ATIME, ST_CTIME, ST_MODE, ST_MTIME + import tornado.web -from stat import S_ISREG, ST_MTIME, ST_MODE, ST_CTIME, ST_ATIME + +from donkeycar.parts.tub_v2 import Tub class TubManager: @@ -25,8 +31,6 @@ def __init__(self, data_path): this_dir = os.path.dirname(os.path.realpath(__file__)) static_file_path = os.path.join(this_dir, 'tub_web', 'static') - - handlers = [ (r"/", tornado.web.RedirectHandler, dict(url="/tubs")), (r"/tubs", TubsView, dict(data_path=data_path)), @@ -70,47 +74,44 @@ def get(self, tub_id): class TubApi(tornado.web.RequestHandler): def initialize(self, data_path): - self.data_path = data_path - - def image_path(self, tub_path, frame_id): - return os.path.join(tub_path, str(frame_id) + "_cam-image_array_.jpg") - - def record_path(self, tub_path, frame_id): - return os.path.join(tub_path, "record_" + frame_id + ".json") + path = Path(os.path.expanduser(data_path)) + self.data_path = path.absolute() def clips_of_tub(self, tub_path): - seqs = [ int(f.split("_")[0]) for f in os.listdir(tub_path) if f.endswith('.jpg') ] - seqs.sort() - - entries = ((os.stat(self.image_path(tub_path, seq))[ST_ATIME], seq) for seq in seqs) + tub = Tub(tub_path) - (last_ts, seq) = next(entries) - clips = [[seq]] - for next_ts, next_seq in entries: - #if next_ts - last_ts > 100: #greater than 1s apart - # clips.append([next_seq]) - #else: - # clips[-1].append(next_seq) - clips[-1].append(next_seq) - last_ts = next_ts + clips = [] + for record in tub: + index = record['_index'] + images_relative_path = os.path.join(Tub.images(), record['cam/image_array']) + record['cam/image_array'] = images_relative_path + clips.append(record) - return clips + return [clips] def get(self, tub_id): - clips = self.clips_of_tub(os.path.join(self.data_path, tub_id)) - + base_path = os.path.join(self.data_path, tub_id) + clips = self.clips_of_tub(base_path) self.set_header("Content-Type", "application/json; charset=UTF-8") self.write(json.dumps({'clips': clips})) def post(self, tub_id): tub_path = os.path.join(self.data_path, tub_id) + tub = Tub(tub_path) 
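+        # compare the stored records against the posted clips by '_index' to find the frames to delete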
old_clips = self.clips_of_tub(tub_path) new_clips = tornado.escape.json_decode(self.request.body) import itertools old_frames = list(itertools.chain(*old_clips)) + old_indexes = set() + for frame in old_frames: + old_indexes.add(frame['_index']) + new_frames = list(itertools.chain(*new_clips['clips'])) - frames_to_delete = [str(item) for item in old_frames if item not in new_frames] + new_indexes = set() + for frame in new_frames: + new_indexes.add(frame['_index']) + + frames_to_delete = [index for index in old_indexes if index not in new_indexes] for frm in frames_to_delete: - os.remove(self.record_path(tub_path, frm)) - os.remove(self.image_path(tub_path, frm)) + tub.delete_record(frm) diff --git a/donkeycar/management/tub_web/static/tub.js b/donkeycar/management/tub_web/static/tub.js index 94422166e..0b39c417e 100644 --- a/donkeycar/management/tub_web/static/tub.js +++ b/donkeycar/management/tub_web/static/tub.js @@ -50,21 +50,20 @@ $(document).ready(function(){ // UI elements update var updateStreamImg = function() { var curFrame = selectedClip().frames[currentFrameIdx]; - $('#img-preview').attr('src', '/tub_data/' + tubId + '/' + curFrame + '_cam-image_array_.jpg'); - $('#cur-frame').text(curFrame); - $.getJSON('/tub_data/' + tubId + '/' + 'record_' + curFrame + '.json', function(data) { - var angle = data["user/angle"]; - var steeringPercent = Math.round(Math.abs(angle) * 100) + '%'; - var steeringRounded = angle.toFixed(2) - - $('.steering-bar .progress-bar').css('width', '0%').html(''); - if(angle < 0) { - $('#angle-bar-backward').css('width', steeringPercent).html(steeringRounded) - } - if (angle > 0) { - $('#angle-bar-forward').css('width', steeringPercent).html(steeringRounded) - } - }); + var frameIndex = curFrame['_index']; + var imagePath = curFrame['cam/image_array']; + $('#img-preview').attr('src', '/tub_data/' + tubId + '/' + imagePath); + $('#cur-frame').text(frameIndex); + var angle = curFrame["user/angle"]; + var steeringPercent = Math.round(Math.abs(angle) * 100) + '%'; + var steeringRounded = angle.toFixed(2) + $('.steering-bar .progress-bar').css('width', '0%').html(''); + if(angle < 0) { + $('#angle-bar-backward').css('width', steeringPercent).html(steeringRounded) + } + if (angle > 0) { + $('#angle-bar-forward').css('width', steeringPercent).html(steeringRounded) + } }; var updateStreamControls = function() { @@ -105,7 +104,9 @@ $(document).ready(function(){ return Math.round(frames.length/16*i); }) .map(function(frameIdx) { - return ''; + var frame = frames[frameIdx]; + var imagePath = frame['cam/image_array']; + return ''; }) .join(''); diff --git a/donkeycar/parts/__init__.py b/donkeycar/parts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/donkeycar/parts/augment.py b/donkeycar/parts/augment.py index 8c0b3d784..0800f4e0a 100644 --- a/donkeycar/parts/augment.py +++ b/donkeycar/parts/augment.py @@ -1,117 +1,36 @@ -''' - File: augment.py - Author : Tawn Kramer - Date : July 2017 -''' -import random -from PIL import Image -from PIL import ImageEnhance -import glob +import time import numpy as np -import math - -''' - find_coeffs and persp_transform borrowed from: - https://stackoverflow.com/questions/14177744/how-does-perspective-transformation-work-in-pil -''' -ONE_BY_255 = 1.0 / 255.0 - - -def find_coeffs(pa, pb): - matrix = [] - for p1, p2 in zip(pa, pb): - matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0]*p1[0], -p2[0]*p1[1]]) - matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1]*p1[0], -p2[1]*p1[1]]) - - A = np.matrix(matrix, dtype=np.float) 
- B = np.array(pb).reshape(8) - - res = np.dot(np.linalg.inv(A.T * A) * A.T, B) - return np.array(res).reshape(8) - - -def rand_persp_transform(img): - width, height = img.size - new_width = math.floor(float(width) * random.uniform(0.9, 1.1)) - xshift = math.floor(float(width) * random.uniform(-0.2, 0.2)) - coeffs = find_coeffs( - [(0, 0), (256, 0), (256, 256), (0, 256)], - [(0, 0), (256, 0), (new_width, height), (xshift, height)]) - - return img.transform((width, height), Image.PERSPECTIVE, coeffs, Image.BICUBIC) - - -def augment_image(np_img, shadow_images=None, do_warp_persp=False): - """ - :param np_img: numpy image - input image in numpy normalised format - :param shadow_images: list of 2-tuples of PIL images - shadow vector as prepared by load_shadow_images - :param do_warp_persp: bool - apply warping - :return: numpy image - output image in numpy normalised format - """ - # denormalise image to 8int - conv_img = np_img * 255.0 - conv_img = conv_img.astype(np.uint8) - # convert to PIL and apply transformation - img = Image.fromarray(conv_img) - img = augment_pil_image(img, shadow_images, do_warp_persp) - # transform back to normalised numpy format - img_out = np.array(img).astype(np.float) * ONE_BY_255 - return img_out - - -def augment_pil_image(img, shadow_images=None, do_warp_persp=False): - """ - :param img: PIL image - input image in PIL format - :param do_warp_persp: bool - apply warping - :param shadow_images: list of 2-tuples of PIL images - shadow vector as prepared by load_shadow_images - :return: PIL image - augmented image - """ - # change the coloration, sharpness, and composite a shadow - factor = random.uniform(0.5, 2.0) - img = ImageEnhance.Brightness(img).enhance(factor) - factor = random.uniform(0.5, 1.0) - img = ImageEnhance.Contrast(img).enhance(factor) - factor = random.uniform(0.5, 1.5) - img = ImageEnhance.Sharpness(img).enhance(factor) - factor = random.uniform(0.0, 2.0) - img = ImageEnhance.Color(img).enhance(factor) - # optionally composite a shadow, prepared from load_shadow_images - if shadow_images is not None: - iShad = random.randrange(0, len(shadow_images)) - top, mask = shadow_images[iShad] - theta = random.randrange(-35, 35) - mask.rotate(theta) - top.rotate(theta) - mask = ImageEnhance.Brightness(mask).enhance(random.uniform(0.3, 1.0)) - offset = (random.randrange(-128, 128), random.randrange(-128, 128)) - img.paste(top, offset, mask) - # optionally warp perspective - if do_warp_persp: - img = rand_persp_transform(img) - return img - - -def load_shadow_images(path_mask): - shadow_images = [] - filenames = glob.glob(path_mask) - for filename in filenames: - shadow = Image.open(filename) - shadow.thumbnail((256, 256)) - channels = shadow.split() - if len(channels) != 4: - continue - r, g, b, a = channels - top = Image.merge("RGB", (r, g, b)) - mask = Image.merge("L", (a,)) - shadow_images.append((top, mask)) - return shadow_images - - +from donkeycar.pipeline.augmentations import ImageAugmentation +from donkeycar.config import Config + + +class ImageAugmentationPart: + """ Image augmentation as a part for the vehicle """ + def __init__(self, cfg: Config, delay: float = 0.002) -> None: + """ + :param cfg: donkey config + :param delay: time spent in the threaded part; if augmentation takes + longer, the last augmented image will be delivered + """ + self.augmenter = ImageAugmentation(cfg) + self.img_arr = None + self.aug_img_arr = None + self.delay = delay + self.on = True + + def run(self, img_arr: np.ndarray) -> np.ndarray: + return 
self.augmenter.augment(img_arr) + + def update(self) -> None: + while self.on: + if self.img_arr is not None: + self.aug_img_arr = self.augmenter.augment(self.img_arr) + self.img_arr = None + + def run_threaded(self, img_arr: np.ndarray) -> np.ndarray: + self.img_arr = img_arr + time.sleep(self.delay) + return self.aug_img_arr + + def shutdown(self) -> None: + self.on = False diff --git a/donkeycar/parts/camera.py b/donkeycar/parts/camera.py index e1ba0fb05..1c0363130 100644 --- a/donkeycar/parts/camera.py +++ b/donkeycar/parts/camera.py @@ -35,7 +35,6 @@ def __init__(self, image_w=160, image_h=120, image_d=3, framerate=20, vflip=Fals print('PiCamera loaded.. .warming camera') time.sleep(2) - def run(self): f = next(self.stream) frame = f.array @@ -68,6 +67,7 @@ def shutdown(self): self.rawCapture.close() self.camera.close() + class Webcam(BaseCamera): def __init__(self, image_w=160, image_h=120, image_d=3, framerate = 20, iCam = 0): import pygame @@ -193,6 +193,7 @@ def shutdown(self): time.sleep(.5) del(self.camera) + class V4LCamera(BaseCamera): ''' uses the v4l2capture library from this fork for python3 support: https://github.com/atareao/python3-v4l2capture @@ -233,7 +234,6 @@ def init_video(self): # Start the device. This lights the LED if it's a camera that has one. self.video.start() - def update(self): import select from donkeycar.parts.image import JpgToImgArr @@ -247,13 +247,11 @@ def update(self): image_data = self.video.read_and_queue() self.frame = jpg_conv.run(image_data) - def shutdown(self): self.running = False time.sleep(0.5) - class MockCamera(BaseCamera): ''' Fake camera. Returns only a single static frame @@ -270,11 +268,12 @@ def update(self): def shutdown(self): pass + class ImageListCamera(BaseCamera): ''' Use the images from a tub as a fake camera output ''' - def __init__(self, path_mask='~/mycar/data/**/*.jpg'): + def __init__(self, path_mask='~/mycar/data/**/images/*.jpg'): self.image_filenames = glob.glob(os.path.expanduser(path_mask), recursive=True) def get_image_index(fnm): diff --git a/donkeycar/parts/controller.py b/donkeycar/parts/controller.py index 3bd5ba835..e5ef3fa4d 100644 --- a/donkeycar/parts/controller.py +++ b/donkeycar/parts/controller.py @@ -518,13 +518,8 @@ def __init__(self, *args, **kwargs): self.axis_names = { 0x00: 'left_stick_horz', - 0x01: 'left_stick_vert', - 0x02: 'right_stick_horz', - 0x05: 'right_stick_vert', - 0x09: 'right_trigger', - 0x0a: 'left_trigger', - 0x10: 'dpad_horz', - 0x11: 'dpad_vert', + 0x02: 'left_trigger', + 0x05: 'right_trigger' } self.button_names = { @@ -786,8 +781,8 @@ def set_tub(self, tub): def erase_last_N_records(self): if self.tub is not None: try: - self.tub.erase_last_n_records(self.num_records_to_erase) - print('erased last %d records.' % self.num_records_to_erase) + self.tub.delete_last_n_records(self.num_records_to_erase) + print('deleted last %d records.' % self.num_records_to_erase) except: print('failed to erase') @@ -1520,30 +1515,12 @@ def get_js_controller(cfg): return ctr if __name__ == "__main__": - ''' - publish ps3 controller - when running from ubuntu 16.04, it will interfere w mouse until: - xinput set-prop "Sony PLAYSTATION(R)3 Controller" "Device Enabled" 0 - ''' - print("You may need:") - print('xinput set-prop "Sony PLAYSTATION(R)3 Controller" "Device Enabled" 0') - #p = JoyStickPub() - - - #Ps4 pygame controller test. 
- import donkeycar - v = donkeycar.vehicle.Vehicle() - p = PyGamePS4JoystickController() - v.add(p, inputs=['cam/image_array'], - outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'], - threaded=True) - v.start(max_loop_count = 100) - - ''' - j = PyGamePS4Joystick(which_js=0) - i = 0 - while i < 100: - j.poll() - time.sleep(0.1) - i += 1 - ''' + # Testing the XboxOneJoystickController + js = XboxOneJoystick('/dev/input/js0') + js.init() + + while True: + button, button_state, axis, axis_val = js.poll() + if (button is not None or axis is not None): + print(button, button_state, axis, axis_val) + time.sleep(0.1) diff --git a/donkeycar/parts/datastore.py b/donkeycar/parts/datastore.py index 0011744a6..9c6a50ec5 100644 --- a/donkeycar/parts/datastore.py +++ b/donkeycar/parts/datastore.py @@ -1,25 +1,24 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- + +# Archived, will get removed once all the functionality has been ported over. """ Created on Tue Jul 4 12:32:53 2017 @author: wroscoe """ +import datetime +import glob +import json import os +import random import sys import time -import json -import datetime -import random -import glob + import numpy as np import pandas as pd - from PIL import Image -from donkeycar.parts.augment import augment_pil_image -from donkeycar.utils import arr_to_img - class Tub(object): """ @@ -341,27 +340,6 @@ def include_index(self, index): except: pass - def augment_images(self): - # Get all record's index - index = self.get_index(shuffled=False) - # Go through index - count = 0 - for ix in index: - data = self.get_record(ix) - for key, val in data.items(): - typ = self.get_input_type(key) - if typ == 'image_array': - # here val is an img_arr - img = arr_to_img(val) - # then augment and denormalise - img_aug = augment_pil_image(img) - name = self.make_file_name(key, ext='.jpg', ix=ix) - try: - img_aug.save(os.path.join(self.path, name)) - count += 1 - except IOError as err: - print(err) - print('Augmenting', count, 'images in', self.path) def write_exclude(self): if 0 == len(self.exclude): @@ -372,65 +350,6 @@ def write_exclude(self): with open(self.exclude_path, 'w') as f: json.dump(list(self.exclude), f) - def get_record_gen(self, record_transform=None, shuffle=True, df=None): - - if df is None: - df = self.get_df() - - while True: - for _, row in self.df.iterrows(): - if shuffle: - record_dict = df.sample(n=1).to_dict(orient='record')[0] - else: - record_dict = row - - if record_transform: - record_dict = record_transform(record_dict) - - record_dict = self.read_record(record_dict) - yield record_dict - - def get_batch_gen(self, keys, record_transform=None, batch_size=128, shuffle=True, df=None): - - record_gen = self.get_record_gen(record_transform, shuffle=shuffle, df=df) - - if keys is None: - keys = list(self.df.columns) - - while True: - record_list = [] - for _ in range(batch_size): - record_list.append(next(record_gen)) - - batch_arrays = {} - for i, k in enumerate(keys): - arr = np.array([r[k] for r in record_list]) - batch_arrays[k] = arr - - yield batch_arrays - - def get_train_gen(self, X_keys, Y_keys, batch_size=128, record_transform=None, df=None): - - batch_gen = self.get_batch_gen(X_keys + Y_keys, - batch_size=batch_size, record_transform=record_transform, df=df) - - while True: - batch = next(batch_gen) - X = [batch[k] for k in X_keys] - Y = [batch[k] for k in Y_keys] - yield X, Y - - def get_train_val_gen(self, X_keys, Y_keys, batch_size=128, record_transform=None, train_frac=.8): - train_df = 
train=self.df.sample(frac=train_frac,random_state=200) - val_df = self.df.drop(train_df.index) - - train_gen = self.get_train_gen(X_keys=X_keys, Y_keys=Y_keys, batch_size=batch_size, - record_transform=record_transform, df=train_df) - - val_gen = self.get_train_gen(X_keys=X_keys, Y_keys=Y_keys, batch_size=batch_size, - record_transform=record_transform, df=val_df) - - return train_gen, val_gen class TubWriter(Tub): diff --git a/donkeycar/parts/datastore_v2.py b/donkeycar/parts/datastore_v2.py new file mode 100644 index 000000000..c851d62cf --- /dev/null +++ b/donkeycar/parts/datastore_v2.py @@ -0,0 +1,382 @@ +import json +import mmap +import os +import time +from pathlib import Path + + +NEWLINE = '\n' +NEWLINE_STRIP = '\r\n' + + +class Seekable(object): + ''' + A seekable file reader, writer which deals with newline delimited records. \n + This reader maintains an index of line lengths, so seeking a line is a O(1) operation. + ''' + + def __init__(self, file, read_only=False, line_lengths=list()): + self.line_lengths = list() + self.cumulative_lengths = list() + self.method = 'r' if read_only else 'a+' + self.file = open(file, self.method, newline=NEWLINE) + if self.method == 'r': + # If file is read only improve performance by memory mappping the file. + self.file = mmap.mmap(self.file.fileno(), length=0, access=mmap.ACCESS_READ) + self.total_length = 0 + if len(line_lengths) <= 0: + self._read_contents() + else: + self.line_lengths.extend(line_lengths) + for line_length in self.line_lengths: + self.total_length += line_length + self.cumulative_lengths.append(self.total_length) + + def _read_contents(self): + self.line_lengths.clear() + self.cumulative_lengths.clear() + self.total_length = 0 + self.file.seek(0) + contents = self.file.readline() + while len(contents) > 0: + line_length = len(contents) + self.line_lengths.append(line_length) + self.total_length += line_length + self.cumulative_lengths.append(self.total_length) + contents = self.file.readline() + self.seek_end_of_file() + + def __enter__(self): + return self + + def writeline(self, contents): + if self.method == 'r': + raise RuntimeError(f'Seekable {self.file} is read-only.') + + has_newline = contents[-1] == NEWLINE + if has_newline: + line = contents + else: + line = f'{contents}{NEWLINE}' + + offset = len(line) + self.total_length += offset + self.line_lengths.append(offset) + self.cumulative_lengths.append(self.total_length) + self.file.write(line) + self.file.flush() + + def _line_start_offset(self, line_number): + return self._offset_until(line_number - 1) + + def _line_end_offset(self, line_number): + return self._offset_until(line_number) + + def _offset_until(self, line_index): + end_index = line_index - 1 + return self.cumulative_lengths[end_index] if end_index >= 0 and end_index < len(self.cumulative_lengths) else 0 + + def readline(self): + contents = self.file.readline() + # When Seekable is a memory mapped file, readline() returns a `bytes` + if isinstance(contents, bytes): + contents = contents.decode(encoding='utf-8') + return contents.rstrip(NEWLINE_STRIP) + + def seek_line_start(self, line_number): + self.file.seek(self._line_start_offset(line_number)) + + def seek_end_of_file(self): + self.file.seek(self.total_length) + + def truncate_until_end(self, line_number): + self.line_lengths = self.line_lengths[:line_number] + self.cumulative_lengths = self.cumulative_lengths[:line_number] + self.total_length = self.cumulative_lengths[-1] if len(self.cumulative_lengths) > 0 else 0 + self.seek_end_of_file() + 
self.file.truncate() + + def read_from(self, line_number): + current_offset = self.file.tell() + self.seek_line_start(line_number) + lines = list() + contents = self.readline() + while len(contents) > 0: + lines.append(contents) + contents = self.readline() + + self.file.seek(current_offset) + return lines + + def update_line(self, line_number, contents): + lines = self.read_from(line_number) + length = len(lines) + self.truncate_until_end(line_number - 1) + self.writeline(contents) + if length > 1: + for line in lines[1:]: + self.writeline(line) + + def lines(self): + return len(self.line_lengths) + + def has_content(self): + return self.lines() > 0 + + def close(self): + self.file.close() + + def __exit__(self, type, value, traceback): + self.close() + + +class Catalog(object): + ''' + A new line delimited file that has records delimited by newlines. \n + + [ json object record ] \n + [ json object record ] \n + ... + ''' + def __init__(self, path, read_only=False, start_index=0): + self.path = Path(os.path.expanduser(path)) + self.manifest = CatalogMetadata(self.path, read_only=read_only, start_index=start_index) + self.seekable = Seekable(self.path.as_posix(), line_lengths=self.manifest.line_lengths(), read_only=read_only) + + def _exit_handler(self): + self.close() + + def write_record(self, record): + # Add record and update manifest + contents = json.dumps(record, allow_nan=False, sort_keys=True) + self.seekable.writeline(contents) + line_lengths = self.seekable.line_lengths + self.manifest.update_line_lengths(line_lengths) + + def close(self): + self.manifest.close() + self.seekable.close() + + +class CatalogMetadata(object): + ''' + Manifest for a Catalog + ''' + def __init__(self, catalog_path, read_only=False, start_index=0): + path = Path(catalog_path) + manifest_name = '%s.catalog_manifest' % (path.stem) + self.manifest_path = Path(os.path.join(path.parent.as_posix(), manifest_name)) + self.seekeable = Seekable(self.manifest_path, read_only=read_only) + has_contents = False + if os.path.exists(self.manifest_path) and self.seekeable.has_content(): + self.seekeable.seek_line_start(1) + contents = self.seekeable.readline() + if contents: + self.contents = json.loads(contents) + has_contents = True + + if not has_contents: + # New catalog metadata entry + self.contents = dict() + self.contents['path'] = self.manifest_path.name + created_at = time.time() + self.contents['created_at'] = created_at + self.contents['start_index'] = start_index + self.contents['line_lengths'] = list() + self._update() + + def update_line_lengths(self, new_lengths): + self.contents['line_lengths'] = new_lengths + self._update() + + def line_lengths(self): + return self.contents['line_lengths'] + + def start_index(self): + return self.contents['start_index'] + + def _update(self): + contents = json.dumps(self.contents, allow_nan=False, sort_keys=True) + self.seekeable.truncate_until_end(0) + self.seekeable.writeline(contents) + + def close(self): + self.seekeable.close() + + +class Manifest(object): + ''' + A newline delimited file, with the following format. 
+ + [ json array of inputs ]\n + [ json array of types ]\n + [ json object with user metadata ]\n + [ json object with manifest metadata ]\n + [ json object with catalog metadata ]\n + ''' + + def __init__(self, base_path, inputs=[], types=[], metadata=[], + max_len=1000, read_only=False): + self.base_path = Path(os.path.expanduser(base_path)).absolute() + self.manifest_path = Path(os.path.join(self.base_path, 'manifest.json')) + self.inputs = inputs + self.types = types + self._read_metadata(metadata) + self.manifest_metadata = dict() + self.max_len = max_len + self.read_only = read_only + self.current_catalog = None + self.current_index = 0 + self.catalog_paths = list() + self.catalog_metadata = dict() + self.deleted_indexes = set() + has_catalogs = False + + if self.manifest_path.exists(): + self.seekeable = Seekable(self.manifest_path, read_only=self.read_only) + if self.seekeable.has_content(): + self._read_contents() + has_catalogs = len(self.catalog_paths) > 0 + else: + created_at = time.time() + self.manifest_metadata['created_at'] = created_at + if not self.base_path.exists(): + self.base_path.mkdir(parents=True, exist_ok=True) + print('Created a new datastore at %s' % (self.base_path.as_posix())) + self.seekeable = Seekable(self.manifest_path, read_only=self.read_only) + + if not has_catalogs: + self._write_contents() + self._add_catalog() + else: + last_known_catalog = os.path.join(self.base_path, self.catalog_paths[-1]) + print('Using catalog %s' % (last_known_catalog)) + self.current_catalog = Catalog(last_known_catalog, read_only=self.read_only, start_index=self.current_index) + + def write_record(self, record): + new_catalog = self.current_index > 0 and (self.current_index % self.max_len) == 0 + if new_catalog: + self._add_catalog() + + self.current_catalog.write_record(record) + self.current_index += 1 + # Update metadata to keep track of the last index + self._update_catalog_metadata(update=True) + + def delete_record(self, record_index): + # Does not actually delete the record, but marks it as deleted. 
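+        # Soft delete: the index is only added to deleted_indexes and
+        # persisted via the manifest; ManifestIterator consults this set and
+        # skips the record, so the catalog file is never rewritten.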
+ self.deleted_indexes.add(record_index) + self._update_catalog_metadata(update=True) + + def _add_catalog(self): + current_length = len(self.catalog_paths) + catalog_name = 'catalog_%s.catalog' % (current_length) + catalog_path = os.path.join(self.base_path, catalog_name) + current_catalog = self.current_catalog + self.current_catalog = Catalog(catalog_path, start_index=self.current_index, read_only=self.read_only) + # Store relative paths + self.catalog_paths.append(catalog_name) + self._update_catalog_metadata(update=True) + if current_catalog: + current_catalog.close() + + def _read_metadata(self, metadata=[]): + self.metadata = dict() + for (key, value) in metadata: + self.metadata[key] = value + + def _read_contents(self): + self.seekeable.seek_line_start(1) + self.inputs = json.loads(self.seekeable.readline()) + self.types = json.loads(self.seekeable.readline()) + self.metadata = json.loads(self.seekeable.readline()) + self.manifest_metadata = json.loads(self.seekeable.readline()) + # Catalog metadata + catalog_metadata = json.loads(self.seekeable.readline()) + self.catalog_paths = catalog_metadata['paths'] + self.current_index = catalog_metadata['current_index'] + self.max_len = catalog_metadata['max_len'] + self.deleted_indexes = set(catalog_metadata['deleted_indexes']) + + def _write_contents(self): + self.seekeable.truncate_until_end(0) + self.seekeable.writeline(json.dumps(self.inputs)) + self.seekeable.writeline(json.dumps(self.types)) + self.seekeable.writeline(json.dumps(self.metadata)) + self.seekeable.writeline(json.dumps(self.manifest_metadata)) + self._update_catalog_metadata(update=False) + + def _update_catalog_metadata(self, update=True): + if update: + self.seekeable.truncate_until_end(4) + # Catalog metadata + catalog_metadata = dict() + catalog_metadata['paths'] = self.catalog_paths + catalog_metadata['current_index'] = self.current_index + catalog_metadata['max_len'] = self.max_len + catalog_metadata['deleted_indexes'] = list(self.deleted_indexes) + self.catalog_metadata = catalog_metadata + self.seekeable.writeline(json.dumps(catalog_metadata)) + + def close(self): + self.current_catalog.close() + self.seekeable.close() + + def __iter__(self): + return ManifestIterator(self) + + def __len__(self): + # current_index is already pointing to the next index + return self.current_index - len(self.deleted_indexes) + + +class ManifestIterator(object): + ''' + An iterator for the Manifest type. \n + + Returns catalog entries lazily when a consumer calls __next__(). + ''' + def __init__(self, manifest): + self.manifest = manifest + self.has_catalogs = len(self.manifest.catalog_paths) > 0 + self.current_index = 0 + self.current_catalog_index = 0 + self.current_catalog = None + + def __next__(self): + if not self.has_catalogs: + raise StopIteration('No catalogs') + + if self.current_catalog_index >= len(self.manifest.catalog_paths): + raise StopIteration('No more catalogs') + + if self.current_catalog is None: + current_catalog_path = os.path.join(self.manifest.base_path, self.manifest.catalog_paths[self.current_catalog_index]) + self.current_catalog = Catalog(current_catalog_path, read_only=self.manifest.read_only) + self.current_catalog.seekable.seek_line_start(1) + + contents = self.current_catalog.seekable.readline() + + if contents is not None and len(contents) > 0: + # Check for current_index when we are ready to advance the underlying iterator. 
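+            # Records soft-deleted via Manifest.delete_record() still occupy
+            # a line in the catalog; they are skipped below rather than
+            # removed from disk.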
+ current_index = self.current_index + self.current_index += 1 + if current_index in self.manifest.deleted_indexes: + # Skip over index, because it has been marked deleted + return self.__next__() + else: + try: + record = json.loads(contents) + return record + except Exception: + print('Ignoring record at index %s' % (current_index)) + return self.__next__() + else: + self.current_catalog = None + self.current_catalog_index += 1 + return self.__next__() + + next = __next__ + + def __len__(self): + return self.manifest.__len__() diff --git a/donkeycar/parts/dgym.py b/donkeycar/parts/dgym.py index e5e387454..e651f3ea6 100644 --- a/donkeycar/parts/dgym.py +++ b/donkeycar/parts/dgym.py @@ -24,7 +24,7 @@ def __init__(self, sim_path, host="127.0.0.1", port=9091, headless=0, env_name=" conf['guid'] = 0 self.env = gym.make(env_name, conf=conf) self.frame = self.env.reset() - self.action = [0.0, 0.0] + self.action = [0.0, 0.0, 0.0] self.running = True self.info = { 'pos' : (0., 0., 0.)} self.delay = float(delay) @@ -33,13 +33,15 @@ def update(self): while self.running: self.frame, _, _, self.info = self.env.step(self.action) - def run_threaded(self, steering, throttle): + def run_threaded(self, steering, throttle, brake=None): if steering is None or throttle is None: steering = 0.0 throttle = 0.0 + if brake is None: + brake = 0.0 if self.delay > 0.0: time.sleep(self.delay / 1000.0) - self.action = [steering, throttle] + self.action = [steering, throttle, brake] return self.frame def shutdown(self): diff --git a/donkeycar/parts/fast_stretch.py b/donkeycar/parts/fast_stretch.py new file mode 100644 index 000000000..2fbc16c25 --- /dev/null +++ b/donkeycar/parts/fast_stretch.py @@ -0,0 +1,97 @@ +import cv2 +import numpy as np +from pathlib import Path +import time + +Mx = 128 # Natural mean +C = 0.007 # Base line fraction +Ts = 0.02 # Tunable amplitude +Tr = 0.7 # Threshold +T = -0.3 # Gamma boost +Epsilon = 1e-07 # Epsilon + + +def fast_stretch(image, debug=False): + hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) + (h, s, v) = cv2.split(hsv) + input = v + shape = input.shape + rows = shape[0] + cols = shape[1] + size = rows * cols + output = np.empty_like(input) + if debug: + start = time.time() + mean = np.mean(input) + t = (mean - Mx) / Mx + Sl = 0. + Sh = 0. + if t <= 0: + Sl = C + Sh = C - (Ts * t) + else: + Sl = C + (Ts * t) + Sh = C + + gamma = 1. 
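+    # For dark images (t at or below the threshold T), gamma drops below 1
+    # to brighten the output; the floor Tr keeps the boost bounded.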
+ if t <= T: + gamma = max((1 + (t - T)), Tr) + + if debug: + time_taken = (time.time() - start) * 1000 + print('Preprocessing time %s' % time_taken) + start = time.time() + + histogram = cv2.calcHist([input], [0], None, [256], [0, 256]) + # Walk histogram + Xl = 0 + Xh = 255 + targetFl = Sl * size + targetFh = Sh * size + + count = histogram[Xl] + while count < targetFl: + count += histogram[Xl] + Xl += 1 + + count = histogram[Xh] + while count < targetFh: + count += histogram[Xh] + Xh -= 1 + + if debug: + time_taken = (time.time() - start) * 1000 + print('Histogram Binning %s' % time_taken) + start = time.time() + + # Vectorized ops + output = np.where(input <= Xl, 0, input) + output = np.where(output >= Xh, 255, output) + output = np.where(np.logical_and(output > Xl, output < Xh), np.multiply( + 255, np.power(np.divide(np.subtract(output, Xl), np.max([np.subtract(Xh, Xl), Epsilon])), gamma)), output) + # max to 255 + output = np.where(output > 255., 255., output) + output = np.asarray(output, dtype='uint8') + output = cv2.merge((h, s, output)) + output = cv2.cvtColor(output, cv2.COLOR_HSV2RGB) + + if debug: + time_taken = (time.time() - start) * 1000 + print('Vector Ops %s' % time_taken) + start = time.time() + + return output + + +if __name__ == "__main__": + path = Path('images/Lenna.jpg') + image = cv2.imread(path.as_posix()) + # Ensure BGR + bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + image_data = np.asarray(bgr, dtype=np.uint8) + + stretched = fast_stretch(image_data, debug=True) + cv2.imshow('Original', image) + cv2.imshow('Contrast Stretched', stretched) + cv2.waitKey(0) + cv2.destroyAllWindows() diff --git a/donkeycar/parts/image.py b/donkeycar/parts/image.py index 80fc3d155..942127d55 100644 --- a/donkeycar/parts/image.py +++ b/donkeycar/parts/image.py @@ -1,8 +1,9 @@ -import os -import io + from PIL import Image import numpy as np -from donkeycar.utils import img_to_binary, binary_to_img, arr_to_img, img_to_arr +from donkeycar.utils import img_to_binary, binary_to_img, arr_to_img, \ + img_to_arr, normalize_image + class ImgArrToJpg(): @@ -16,6 +17,7 @@ def run(self, img_arr): except: return None + class JpgToImgArr(): def run(self, jpg): @@ -25,6 +27,7 @@ def run(self, jpg): img_arr = img_to_arr(image) return img_arr + class StereoPair: ''' take two images and put together in a single image @@ -70,13 +73,13 @@ def run(self, img_arr): def shutdown(self): pass - class ImgStack: """ - Stack N previous images into a single N channel image, after converting each to grayscale. - The most recent image is the last channel, and pushes previous images towards the front. + Stack N previous images into a single N channel image, after converting + each to grayscale. The most recent image is the last channel, and pushes + previous images towards the front. """ def __init__(self, num_channels=3): self.img_arr = None @@ -84,7 +87,8 @@ def __init__(self, num_channels=3): def rgb2gray(self, rgb): ''' - take a numpy rgb image return a new single channel image converted to greyscale + take a numpy rgb image return a new single channel image converted to + greyscale ''' return np.dot(rgb[...,:3], [0.299, 0.587, 0.114]) diff --git a/donkeycar/parts/keras.py b/donkeycar/parts/keras.py index 25ae25a5a..d18c71acd 100644 --- a/donkeycar/parts/keras.py +++ b/donkeycar/parts/keras.py @@ -1,65 +1,66 @@ -''' +""" -pilots.py +keras.py -Methods to create, use, save and load pilots. Pilots -contain the highlevel logic used to determine the angle -and throttle of a vehicle. 
Pilots can include one or more
-models to help direct the vehicles motion.

+Methods to create, use, save and load pilots. Pilots contain the high-level
+logic used to determine the angle and throttle of a vehicle. Pilots can
+include one or more models to help direct the vehicle's motion.

-'''
+"""

-
-
-
-import os
+from abc import ABC, abstractmethod
 import numpy as np
-
-import tensorflow as tf
-from tensorflow.python import keras
-from tensorflow.python.keras.layers import Input, Dense
-from tensorflow.python.keras.models import Model, Sequential
-from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
-from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Cropping2D, Lambda
-from tensorflow.python.keras.layers.merge import concatenate
-from tensorflow.python.keras.layers import LSTM
-from tensorflow.python.keras.layers.wrappers import TimeDistributed as TD
-from tensorflow.python.keras.layers import Conv3D, MaxPooling3D, Cropping3D, Conv2DTranspose
-
+from typing import Dict, Any, Tuple, Optional, Union
 import donkeycar as dk
+from donkeycar.utils import normalize_image, linear_bin
+from donkeycar.pipeline.types import TubRecord

-if tf.__version__ == '1.13.1':
-    from tensorflow import ConfigProto, Session
-
-    # Override keras session to work around a bug in TF 1.13.1
-    # Remove after we upgrade to TF 1.14 / TF 2.x.
-    config = ConfigProto()
-    config.gpu_options.allow_growth = True
-    session = Session(config=config)
-    keras.backend.set_session(session)
-
-
-class KerasPilot(object):
-    '''
-    Base class for Keras models that will provide steering and throttle to guide a car.
-    '''
-    def __init__(self):
-        self.model = None
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras.layers import Input, Dense
+from tensorflow.keras.layers import Convolution2D, MaxPooling2D, \
+    BatchNormalization
+from tensorflow.keras.layers import Activation, Dropout, Flatten
+from tensorflow.keras.layers import LSTM
+from tensorflow.keras.layers import TimeDistributed as TD
+from tensorflow.keras.layers import Conv3D, MaxPooling3D, Conv2DTranspose
+from tensorflow.keras.backend import concatenate
+from tensorflow.keras.models import Model, Sequential
+from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
+from tensorflow.keras.optimizers import Optimizer
+
+ONE_BYTE_SCALE = 1.0 / 255.0
+
+# type of x
+XY = Union[float, np.ndarray, Tuple[float, ...], Tuple[np.ndarray, ...]]
+
+
+class KerasPilot(ABC):
+    """
+    Base class for Keras models that will provide steering and throttle to
+    guide a car.
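+
+    Concrete pilots implement inference() and the x/y transform hooks used
+    by the training pipeline. A pilot is wired into the vehicle loop like
+    any other part; a minimal sketch (model path and output names are
+    illustrative):
+
+        kl = KerasLinear()
+        kl.load(model_path='mypilot.h5')
+        V.add(kl, inputs=['cam/image_array'],
+              outputs=['pilot/angle', 'pilot/throttle'])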
+ """ + def __init__(self) -> None: + self.model: Optional[Model] = None self.optimizer = "adam" - - def load(self, model_path): + print(f'Created {self}') + + def load(self, model_path: str) -> None: self.model = keras.models.load_model(model_path, compile=False) - def load_weights(self, model_path, by_name=True): + def load_weights(self, model_path: str, by_name: bool = True) -> None: + assert self.model, 'Model not set' self.model.load_weights(model_path, by_name=by_name) - def shutdown(self): + def shutdown(self) -> None: pass - def compile(self): + def compile(self) -> None: pass - def set_optimizer(self, optimizer_type, rate, decay): + def set_optimizer(self, optimizer_type: str, + rate: float, decay: float) -> None: + assert self.model, 'Model not set' if optimizer_type == "adam": self.model.optimizer = keras.optimizers.Adam(lr=rate, decay=decay) elif optimizer_type == "sgd": @@ -68,173 +69,317 @@ def set_optimizer(self, optimizer_type, rate, decay): self.model.optimizer = keras.optimizers.RMSprop(lr=rate, decay=decay) else: raise Exception("unknown optimizer type: %s" % optimizer_type) - - def train(self, train_gen, val_gen, - saved_model_path, epochs=100, steps=100, train_split=0.8, - verbose=1, min_delta=.0005, patience=5, use_early_stop=True): - + + def get_input_shape(self) -> tf.TensorShape: + assert self.model, 'Model not set' + return self.model.inputs[0].shape + + def run(self, img_arr: np.ndarray, other_arr: np.ndarray = None) \ + -> Tuple[Union[float, np.ndarray], ...]: """ - train_gen: generator that yields an array of images an array of - + Donkeycar parts interface to run the part in the loop. + + :param img_arr: uint8 [0,255] numpy array with image data + :param other_arr: numpy array of additional data to be used in the + pilot, like IMU array for the IMU model or a + state vector in the Behavioural model + :return: tuple of (angle, throttle) """ + norm_arr = normalize_image(img_arr) + return self.inference(norm_arr, other_arr) - #checkpoint to save model after each epoch - save_best = keras.callbacks.ModelCheckpoint(saved_model_path, - monitor='val_loss', - verbose=verbose, - save_best_only=True, - mode='min') - - #stop training if the validation error stops improving. 
-        early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
-                                                   min_delta=min_delta,
-                                                   patience=patience,
-                                                   verbose=verbose,
-                                                   mode='auto')
-
-        callbacks_list = [save_best]
+    @abstractmethod
+    def inference(self, img_arr: np.ndarray, other_arr: np.ndarray) \
+            -> Tuple[Union[float, np.ndarray], ...]:
+        """
+        Virtual method to be implemented by child classes for inferencing

-        if use_early_stop:
-            callbacks_list.append(early_stop)
-
-        hist = self.model.fit_generator(
-                        train_gen,
-                        steps_per_epoch=steps,
-                        epochs=epochs,
-                        verbose=1,
-                        validation_data=val_gen,
-                        callbacks=callbacks_list,
-                        validation_steps=steps*(1.0 - train_split))
-        return hist
+        :param img_arr: float32 [0,1] numpy array with normalized image data
+        :param other_arr: numpy array of additional data to be used in the
+                          pilot, like IMU array for the IMU model or a
+                          state vector in the Behavioural model
+        :return: tuple of (angle, throttle)
+        """
+        pass
+
+    def train(self,
+              model_path: str,
+              train_data: 'BatchSequence',
+              train_steps: int,
+              batch_size: int,
+              validation_data: 'BatchSequence',
+              validation_steps: int,
+              epochs: int,
+              verbose: int = 1,
+              min_delta: float = .0005,
+              patience: int = 5) -> tf.keras.callbacks.History:
+        """
+        trains the model
+        """
+        model = self._get_train_model()
+        self.compile()
+
+        callbacks = [
+            EarlyStopping(monitor='val_loss',
+                          patience=patience,
+                          min_delta=min_delta),
+            ModelCheckpoint(monitor='val_loss',
+                            filepath=model_path,
+                            save_best_only=True,
+                            verbose=verbose)]
+
+        history = model.fit(
+            x=train_data,
+            steps_per_epoch=train_steps,
+            batch_size=batch_size,
+            callbacks=callbacks,
+            validation_data=validation_data,
+            validation_steps=validation_steps,
+            epochs=epochs,
+            verbose=verbose,
+            workers=1,
+            use_multiprocessing=False
+        )
+        return history
+
+    def _get_train_model(self) -> Model:
+        """ Model used for training, could be just a sub part of the model"""
+        return self.model
+
+    def x_transform(self, record: TubRecord) -> XY:
+        img_arr = record.image(cached=True)
+        return img_arr
+
+    def y_transform(self, record: TubRecord) -> XY:
+        raise NotImplementedError(f'{self} not ready yet for new training '
+                                  f'pipeline')
+
+    def x_translate(self, x: XY) -> Dict[str, Union[float, np.ndarray]]:
+        return {'img_in': x}
+
+    def y_translate(self, y: XY) -> Dict[str, Union[float, np.ndarray]]:
+        raise NotImplementedError(f'{self} not ready yet for new training '
+                                  f'pipeline')
+
+    def output_types(self):
+        """ Used in tf.data, assume all types are doubles"""
+        shapes = self.output_shapes()
+        types = tuple({k: tf.float64 for k in d} for d in shapes)
+        return types
+
+    def output_shapes(self) -> Optional[Dict[str, tf.TensorShape]]:
+        return None
+
+    def __str__(self) -> str:
+        """ For printing model initialisation """
+        return type(self).__name__


 class KerasCategorical(KerasPilot):
-    '''
-    The KerasCategorical pilot breaks the steering and throttle decisions into discreet
-    angles and then uses categorical cross entropy to train the network to activate a single
-    neuron for each steering and throttle choice. This can be interesting because we
-    get the confidence value as a distribution over all choices.
-    This uses the dk.utils.linear_bin and dk.utils.linear_unbin to transform continuous
-    real numbers into a range of discreet values for training and runtime.
-    The input and output are therefore bounded and must be chosen wisely to match the data.
-    The default ranges work for the default setup. But cars which go faster may want to
-    enable a higher throttle range. And cars with larger steering throw may want more bins.
-    '''
-    def __init__(self, input_shape=(120, 160, 3), throttle_range=0.5, roi_crop=(0, 0), *args, **kwargs):
-        super(KerasCategorical, self).__init__(*args, **kwargs)
-        self.model = default_categorical(input_shape, roi_crop)
-        self.compile()
+    """
+    The KerasCategorical pilot breaks the steering and throttle decisions
+    into discrete angles and then uses categorical cross entropy to train the
+    network to activate a single neuron for each steering and throttle
+    choice. This can be interesting because we get the confidence value as a
+    distribution over all choices. This uses the dk.utils.linear_bin and
+    dk.utils.linear_unbin to transform continuous real numbers into a range
+    of discrete values for training and runtime. The input and output are
+    therefore bounded and must be chosen wisely to match the data. The
+    default ranges work for the default setup. But cars which go faster may
+    want to enable a higher throttle range. And cars with larger steering
+    throw may want more bins.
+    """
+    def __init__(self, input_shape=(120, 160, 3), throttle_range=0.5):
+        super().__init__()
+        self.model = default_categorical(input_shape)
         self.throttle_range = throttle_range

     def compile(self):
-        self.model.compile(optimizer=self.optimizer, metrics=['acc'],
-                loss={'angle_out': 'categorical_crossentropy',
-                      'throttle_out': 'categorical_crossentropy'},
-                loss_weights={'angle_out': 0.5, 'throttle_out': 1.0})
+        self.model.compile(optimizer=self.optimizer, metrics=['accuracy'],
+                           loss={'angle_out': 'categorical_crossentropy',
+                                 'throttle_out': 'categorical_crossentropy'},
+                           loss_weights={'angle_out': 0.5, 'throttle_out': 0.5})

-    def run(self, img_arr):
+    def inference(self, img_arr, other_arr):
         if img_arr is None:
             print('no image')
             return 0.0, 0.0

         img_arr = img_arr.reshape((1,) + img_arr.shape)
-        angle_binned, throttle = self.model.predict(img_arr)
-        N = len(throttle[0])
-        throttle = dk.utils.linear_unbin(throttle, N=N, offset=0.0, R=self.throttle_range)
-        angle_unbinned = dk.utils.linear_unbin(angle_binned)
-        return angle_unbinned, throttle
-
-
-
+        angle_binned, throttle_binned = self.model.predict(img_arr)
+        N = len(throttle_binned[0])
+        throttle = dk.utils.linear_unbin(throttle_binned, N=N,
+                                         offset=0.0, R=self.throttle_range)
+        angle = dk.utils.linear_unbin(angle_binned)
+        return angle, throttle
+
+    def y_transform(self, record: TubRecord):
+        angle: float = record.underlying['user/angle']
+        throttle: float = record.underlying['user/throttle']
+        angle = linear_bin(angle, N=15, offset=1, R=2.0)
+        throttle = linear_bin(throttle, N=20, offset=0.0, R=self.throttle_range)
+        return angle, throttle
+
+    def y_translate(self, y: XY) -> Dict[str, Union[float, np.ndarray]]:
+        if isinstance(y, tuple):
+            angle, throttle = y
+            return {'angle_out': angle, 'throttle_out': throttle}
+        else:
+            raise TypeError('Expected tuple')
+
+    def output_shapes(self):
+        # need to cut off None from [None, 120, 160, 3] tensor shape
+        img_shape = self.get_input_shape()[1:]
+        shapes = ({'img_in': tf.TensorShape(img_shape)},
+                  {'angle_out': tf.TensorShape([15]),
+                   'throttle_out': tf.TensorShape([20])})
+        return shapes
+
+
 class KerasLinear(KerasPilot):
-    '''
-    The KerasLinear pilot uses one neuron to output a continous value via the
-    Keras Dense layer with linear activation. One each for steering and throttle.
-    The output is not bounded.
-    '''
-    def __init__(self, num_outputs=2, input_shape=(120, 160, 3), roi_crop=(0, 0), *args, **kwargs):
-        super(KerasLinear, self).__init__(*args, **kwargs)
-        self.model = default_n_linear(num_outputs, input_shape, roi_crop)
-        self.compile()
+    """
+    The KerasLinear pilot uses one neuron to output a continuous value via
+    the Keras Dense layer with linear activation. One each for steering and
+    throttle. The output is not bounded.
+    """
+    def __init__(self, num_outputs=2, input_shape=(120, 160, 3)):
+        super().__init__()
+        self.model = default_n_linear(num_outputs, input_shape)

     def compile(self):
-        self.model.compile(optimizer=self.optimizer,
-                loss='mse')
+        self.model.compile(optimizer=self.optimizer, loss='mse')

-    def run(self, img_arr):
+    def inference(self, img_arr, other_arr):
         img_arr = img_arr.reshape((1,) + img_arr.shape)
         outputs = self.model.predict(img_arr)
         steering = outputs[0]
         throttle = outputs[1]
         return steering[0][0], throttle[0][0]

+    def y_transform(self, record: TubRecord):
+        angle: float = record.underlying['user/angle']
+        throttle: float = record.underlying['user/throttle']
+        return angle, throttle
+
+    def y_translate(self, y: XY) -> Dict[str, Union[float, np.ndarray]]:
+        if isinstance(y, tuple):
+            angle, throttle = y
+            return {'n_outputs0': angle, 'n_outputs1': throttle}
+        else:
+            raise TypeError('Expected tuple')
+
+    def output_shapes(self):
+        # need to cut off None from [None, 120, 160, 3] tensor shape
+        img_shape = self.get_input_shape()[1:]
+        shapes = ({'img_in': tf.TensorShape(img_shape)},
+                  {'n_outputs0': tf.TensorShape([]),
+                   'n_outputs1': tf.TensorShape([])})
+        return shapes
+
+
+class KerasInferred(KerasPilot):
+    def __init__(self, num_outputs=1, input_shape=(120, 160, 3)):
+        super().__init__()
+        self.model = default_n_linear(num_outputs, input_shape)
+
+    def compile(self):
+        self.model.compile(optimizer=self.optimizer, loss='mse')
+
+    def inference(self, img_arr, other_arr):
+        img_arr = img_arr.reshape((1,) + img_arr.shape)
+        outputs = self.model.predict(img_arr)
+        steering = outputs[0]
+        return steering[0], dk.utils.throttle(steering[0])
+
+    def y_transform(self, record: TubRecord):
+        angle: float = record.underlying['user/angle']
+        return angle
+
+    def y_translate(self, y: XY) -> Dict[str, Union[float, np.ndarray]]:
+        return {'n_outputs0': y}
+
+    def output_shapes(self):
+        # need to cut off None from [None, 120, 160, 3] tensor shape
+        img_shape = self.get_input_shape()[1:]
+        shapes = ({'img_in': tf.TensorShape(img_shape)},
+                  {'n_outputs0': tf.TensorShape([])})
+        return shapes


 class KerasIMU(KerasPilot):
-    '''
+    """
     A Keras part that take an image and IMU vector as input,
     outputs steering and throttle

     Note: When training, you will need to vectorize the input from the IMU.
- Depending on the names you use for imu records, something like this will work: + Depending on the names you use for imu records, something like this will + work: X_keys = ['cam/image_array','imu_array'] y_keys = ['user/angle', 'user/throttle'] - + def rt(rec): - rec['imu_array'] = np.array([ rec['imu/acl_x'], rec['imu/acl_y'], rec['imu/acl_z'], + rec['imu_array'] = np.array([ rec['imu/acl_x'], rec['imu/acl_y'], + rec['imu/acl_z'], rec['imu/gyr_x'], rec['imu/gyr_y'], rec['imu/gyr_z'] ]) return rec kl = KerasIMU() tubgroup = TubGroup(tub_names) - train_gen, val_gen = tubgroup.get_train_val_gen(X_keys, y_keys, record_transform=rt, + train_gen, val_gen = tubgroup.get_train_val_gen(X_keys, y_keys, + record_transform=rt, batch_size=cfg.BATCH_SIZE, train_frac=cfg.TRAIN_TEST_SPLIT) - ''' - def __init__(self, model=None, num_outputs=2, num_imu_inputs=6, input_shape=(120, 160, 3), roi_crop=(0,0), *args, **kwargs): - super(KerasIMU, self).__init__(*args, **kwargs) + """ + def __init__(self, num_outputs=2, num_imu_inputs=6, input_shape=(120, 160, 3)): + super().__init__() self.num_imu_inputs = num_imu_inputs - self.model = default_imu(num_outputs = num_outputs, num_imu_inputs = num_imu_inputs, input_shape=input_shape, roi_crop=roi_crop) - self.compile() + self.model = default_imu(num_outputs=num_outputs, + num_imu_inputs=num_imu_inputs, + input_shape=input_shape) def compile(self): - self.model.compile(optimizer=self.optimizer, - loss='mse') + self.model.compile(optimizer=self.optimizer, loss='mse') - def run(self, img_arr, accel_x, accel_y, accel_z, gyr_x, gyr_y, gyr_z): - #TODO: would be nice to take a vector input array. + def inference(self, img_arr, other_arr): img_arr = img_arr.reshape((1,) + img_arr.shape) - imu_arr = np.array([accel_x, accel_y, accel_z, gyr_x, gyr_y, gyr_z]).reshape(1,self.num_imu_inputs) + imu_arr = np.array(other_arr).reshape(1, self.num_imu_inputs) outputs = self.model.predict([img_arr, imu_arr]) steering = outputs[0] throttle = outputs[1] return steering[0][0], throttle[0][0] + def y_transform(self, record: TubRecord): + angle: float = record.underlying['user/angle'] + throttle: float = record.underlying['user/throttle'] + return {'n_out0': angle, 'n_out1': throttle} + class KerasBehavioral(KerasPilot): - ''' + """ A Keras part that take an image and Behavior vector as input, outputs steering and throttle - ''' - def __init__(self, model=None, num_outputs=2, num_behavior_inputs=2, input_shape=(120, 160, 3), *args, **kwargs): - super(KerasBehavioral, self).__init__(*args, **kwargs) - self.model = default_bhv(num_outputs = num_outputs, num_bvh_inputs = num_behavior_inputs, input_shape=input_shape) - self.compile() + """ + def __init__(self, num_behavior_inputs=2, input_shape=(120, 160, 3)): + super(KerasBehavioral, self).__init__() + self.model = default_bhv(num_bvh_inputs=num_behavior_inputs, + input_shape=input_shape) def compile(self): - self.model.compile(optimizer=self.optimizer, - loss='mse') + self.model.compile(optimizer=self.optimizer, loss='mse') - def run(self, img_arr, state_array): + def inference(self, img_arr, state_array): img_arr = img_arr.reshape((1,) + img_arr.shape) - bhv_arr = np.array(state_array).reshape(1,len(state_array)) + bhv_arr = np.array(state_array).reshape(1, len(state_array)) angle_binned, throttle = self.model.predict([img_arr, bhv_arr]) - #in order to support older models with linear throttle, - #we will test for shape of throttle to see if it's the newer - #binned version. 
+ # In order to support older models with linear throttle,we will test for + # shape of throttle to see if it's the newer binned version. N = len(throttle[0]) if N > 0: @@ -244,129 +389,121 @@ def run(self, img_arr, state_array): angle_unbinned = dk.utils.linear_unbin(angle_binned) return angle_unbinned, throttle + def y_transform(self, record: TubRecord): + angle: float = record.underlying['user/angle'] + throttle: float = record.underlying['user/throttle'] + angle = linear_bin(angle, N=15, offset=1, R=2.0) + throttle = linear_bin(throttle, N=20, offset=0.0, R=0.5) + return {'angle_out': angle, 'throttle_out': throttle} + class KerasLocalizer(KerasPilot): - ''' + """ A Keras part that take an image as input, outputs steering and throttle, and localisation category - ''' - def __init__(self, model=None, num_locations=8, input_shape=(120, 160, 3), *args, **kwargs): - super(KerasLocalizer, self).__init__(*args, **kwargs) - self.model = default_loc(num_locations=num_locations, input_shape=input_shape) - self.compile() + """ + def __init__(self, num_locations=8, input_shape=(120, 160, 3)): + super().__init__() + self.model = default_loc(num_locations=num_locations, + input_shape=input_shape) def compile(self): self.model.compile(optimizer=self.optimizer, metrics=['acc'], - loss='mse') + loss='mse') - def run(self, img_arr): + def inference(self, img_arr, other_arr): img_arr = img_arr.reshape((1,) + img_arr.shape) angle, throttle, track_loc = self.model.predict([img_arr]) loc = np.argmax(track_loc[0]) - return angle, throttle, loc -def adjust_input_shape(input_shape, roi_crop): - height = input_shape[0] - new_height = height - roi_crop[0] - roi_crop[1] - return (new_height, input_shape[1], input_shape[2]) - - -def default_categorical(input_shape=(120, 160, 3), roi_crop=(0, 0)): - - opt = keras.optimizers.Adam() - drop = 0.2 - - #we now expect that cropping done elsewhere. 
we will adjust our expeected image size here: - input_shape = adjust_input_shape(input_shape, roi_crop) - - img_in = Input(shape=input_shape, name='img_in') # First layer, input layer, Shape comes from camera.py resolution, RGB - x = img_in - x = Convolution2D(24, (5,5), strides=(2,2), activation='relu', name="conv2d_1")(x) # 24 features, 5 pixel x 5 pixel kernel (convolution, feauture) window, 2wx2h stride, relu activation - x = Dropout(drop)(x) # Randomly drop out (turn off) 10% of the neurons (Prevent overfitting) - x = Convolution2D(32, (5,5), strides=(2,2), activation='relu', name="conv2d_2")(x) # 32 features, 5px5p kernel window, 2wx2h stride, relu activatiion - x = Dropout(drop)(x) # Randomly drop out (turn off) 10% of the neurons (Prevent overfitting) - if input_shape[0] > 32 : - x = Convolution2D(64, (5,5), strides=(2,2), activation='relu', name="conv2d_3")(x) # 64 features, 5px5p kernal window, 2wx2h stride, relu - else: - x = Convolution2D(64, (3,3), strides=(1,1), activation='relu', name="conv2d_3")(x) # 64 features, 5px5p kernal window, 2wx2h stride, relu - if input_shape[0] > 64 : - x = Convolution2D(64, (3,3), strides=(2,2), activation='relu', name="conv2d_4")(x) # 64 features, 3px3p kernal window, 2wx2h stride, relu - elif input_shape[0] > 32 : - x = Convolution2D(64, (3,3), strides=(1,1), activation='relu', name="conv2d_4")(x) # 64 features, 3px3p kernal window, 2wx2h stride, relu - x = Dropout(drop)(x) # Randomly drop out (turn off) 10% of the neurons (Prevent overfitting) - x = Convolution2D(64, (3,3), strides=(1,1), activation='relu', name="conv2d_5")(x) # 64 features, 3px3p kernal window, 1wx1h stride, relu - x = Dropout(drop)(x) # Randomly drop out (turn off) 10% of the neurons (Prevent overfitting) - # Possibly add MaxPooling (will make it less sensitive to position in image). Camera angle fixed, so may not to be needed - - x = Flatten(name='flattened')(x) # Flatten to 1D (Fully connected) - x = Dense(100, activation='relu', name="fc_1")(x) # Classify the data into 100 features, make all negatives 0 - x = Dropout(drop)(x) # Randomly drop out (turn off) 10% of the neurons (Prevent overfitting) - x = Dense(50, activation='relu', name="fc_2")(x) # Classify the data into 50 features, make all negatives 0 - x = Dropout(drop)(x) # Randomly drop out 10% of the neurons (Prevent overfitting) - #categorical output of the angle - angle_out = Dense(15, activation='softmax', name='angle_out')(x) # Connect every input with every output and output 15 hidden units. Use Softmax to give percentage. 15 categories and find best one based off percentage 0.0-1.0 - - #continous output of throttle - throttle_out = Dense(20, activation='softmax', name='throttle_out')(x) # Reduce to 1 number, Positive number only - - model = Model(inputs=[img_in], outputs=[angle_out, throttle_out]) - return model - - - -def default_n_linear(num_outputs, input_shape=(120, 160, 3), roi_crop=(0, 0)): - drop = 0.1 - #we now expect that cropping done elsewhere. 
we will adjust our expeected image size here: - input_shape = adjust_input_shape(input_shape, roi_crop) - - img_in = Input(shape=input_shape, name='img_in') +def conv2d(filters, kernel, strides, layer_num, activation='relu'): + """ + Helper function to create a standard valid-padded convolutional layer + with square kernel and strides and unified naming convention + + :param filters: channel dimension of the layer + :param kernel: creates (kernel, kernel) kernel matrix dimension + :param strides: creates (strides, strides) stride + :param layer_num: used in labelling the layer + :param activation: activation, defaults to relu + :return: tf.keras Convolution2D layer + """ + return Convolution2D(filters=filters, + kernel_size=(kernel, kernel), + strides=(strides, strides), + activation=activation, + name='conv2d_' + str(layer_num)) + + +def core_cnn_layers(img_in, drop, l4_stride=1): + """ + Returns the core CNN layers that are shared among the different models, + like linear, imu, behavioural + + :param img_in: input layer of network + :param drop: dropout rate + :param l4_stride: 4-th layer stride, default 1 + :return: stack of CNN layers + """ x = img_in - x = Convolution2D(24, (5,5), strides=(2,2), activation='relu', name="conv2d_1")(x) + x = conv2d(24, 5, 2, 1)(x) x = Dropout(drop)(x) - x = Convolution2D(32, (5,5), strides=(2,2), activation='relu', name="conv2d_2")(x) + x = conv2d(32, 5, 2, 2)(x) x = Dropout(drop)(x) - x = Convolution2D(64, (5,5), strides=(2,2), activation='relu', name="conv2d_3")(x) + x = conv2d(64, 5, 2, 3)(x) x = Dropout(drop)(x) - x = Convolution2D(64, (3,3), strides=(1,1), activation='relu', name="conv2d_4")(x) + x = conv2d(64, 3, l4_stride, 4)(x) x = Dropout(drop)(x) - x = Convolution2D(64, (3,3), strides=(1,1), activation='relu', name="conv2d_5")(x) + x = conv2d(64, 3, 1, 5)(x) x = Dropout(drop)(x) - x = Flatten(name='flattened')(x) - x = Dense(100, activation='relu')(x) + return x + + +def default_n_linear(num_outputs, input_shape=(120, 160, 3)): + drop = 0.2 + img_in = Input(shape=input_shape, name='img_in') + x = core_cnn_layers(img_in, drop) + x = Dense(100, activation='relu', name='dense_1')(x) x = Dropout(drop)(x) - x = Dense(50, activation='relu')(x) + x = Dense(50, activation='relu', name='dense_2')(x) x = Dropout(drop)(x) outputs = [] - for i in range(num_outputs): - outputs.append(Dense(1, activation='linear', name='n_outputs' + str(i))(x)) - + outputs.append( + Dense(1, activation='linear', name='n_outputs' + str(i))(x)) + model = Model(inputs=[img_in], outputs=outputs) - return model +def default_categorical(input_shape=(120, 160, 3)): + drop = 0.2 + img_in = Input(shape=input_shape, name='img_in') + x = core_cnn_layers(img_in, drop, l4_stride=2) + x = Dense(100, activation='relu', name="dense_1")(x) + x = Dropout(drop)(x) + x = Dense(50, activation='relu', name="dense_2")(x) + x = Dropout(drop)(x) + # Categorical output of the angle into 15 bins + angle_out = Dense(15, activation='softmax', name='angle_out')(x) + # categorical output of throttle into 20 bins + throttle_out = Dense(20, activation='softmax', name='throttle_out')(x) -def default_imu(num_outputs, num_imu_inputs, input_shape, roi_crop=(0, 0)): + model = Model(inputs=[img_in], outputs=[angle_out, throttle_out]) + return model - #we now expect that cropping done elsewhere. 
we will adjust our expeected image size here: - input_shape = adjust_input_shape(input_shape, roi_crop) +def default_imu(num_outputs, num_imu_inputs, input_shape): + drop = 0.2 img_in = Input(shape=input_shape, name='img_in') imu_in = Input(shape=(num_imu_inputs,), name="imu_in") - - x = img_in - x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x) - x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x) - x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x) - x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x) - x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x) - x = Flatten(name='flattened')(x) + + x = core_cnn_layers(img_in, drop) x = Dense(100, activation='relu')(x) x = Dropout(.1)(x) @@ -381,32 +518,20 @@ def default_imu(num_outputs, num_imu_inputs, input_shape, roi_crop=(0, 0)): z = Dense(50, activation='relu')(z) z = Dropout(.1)(z) - outputs = [] - + outputs = [] for i in range(num_outputs): outputs.append(Dense(1, activation='linear', name='out_' + str(i))(z)) model = Model(inputs=[img_in, imu_in], outputs=outputs) - return model -def default_bhv(num_outputs, num_bvh_inputs, input_shape): - ''' - Notes: this model depends on concatenate which failed on keras < 2.0.8 - ''' - +def default_bhv(num_bvh_inputs, input_shape): + drop = 0.2 img_in = Input(shape=input_shape, name='img_in') bvh_in = Input(shape=(num_bvh_inputs,), name="behavior_in") - - x = img_in - #x = Cropping2D(cropping=((60,0), (0,0)))(x) #trim 60 pixels off top - x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x) - x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x) - x = Convolution2D(64, (5,5), strides=(2,2), activation='relu')(x) - x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x) - x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x) - x = Flatten(name='flattened')(x) + + x = core_cnn_layers(img_in, drop) x = Dense(100, activation='relu')(x) x = Dropout(.1)(x) @@ -421,77 +546,53 @@ def default_bhv(num_outputs, num_bvh_inputs, input_shape): z = Dense(50, activation='relu')(z) z = Dropout(.1)(z) - #categorical output of the angle - angle_out = Dense(15, activation='softmax', name='angle_out')(z) # Connect every input with every output and output 15 hidden units. Use Softmax to give percentage. 
15 categories and find best one based off percentage 0.0-1.0 - - #continous output of throttle - throttle_out = Dense(20, activation='softmax', name='throttle_out')(z) # Reduce to 1 number, Positive number only + # Categorical output of the angle into 15 bins + angle_out = Dense(15, activation='softmax', name='angle_out')(z) + # Categorical output of throttle into 20 bins + throttle_out = Dense(20, activation='softmax', name='throttle_out')(z) model = Model(inputs=[img_in, bvh_in], outputs=[angle_out, throttle_out]) - return model def default_loc(num_locations, input_shape): drop = 0.2 - img_in = Input(shape=input_shape, name='img_in') - - x = img_in - x = Convolution2D(24, (5,5), strides=(2,2), activation='relu', name="conv2d_1")(x) - x = Dropout(drop)(x) - x = Convolution2D(32, (5,5), strides=(2,2), activation='relu', name="conv2d_2")(x) - x = Dropout(drop)(x) - x = Convolution2D(64, (5,5), strides=(2,2), activation='relu', name="conv2d_3")(x) - x = Dropout(drop)(x) - x = Convolution2D(64, (3,3), strides=(2,2), activation='relu', name="conv2d_4")(x) - x = Dropout(drop)(x) - x = Convolution2D(64, (3,3), strides=(1,1), activation='relu', name="conv2d_5")(x) - x = Dropout(drop)(x) - x = Flatten(name='flattened')(x) + + x = core_cnn_layers(img_in, drop) x = Dense(100, activation='relu')(x) x = Dropout(drop)(x) z = Dense(50, activation='relu')(x) z = Dropout(drop)(z) - - - #linear output of the angle + + # linear output of the angle angle_out = Dense(1, activation='linear', name='angle')(z) - - #linear output of throttle + # linear output of throttle throttle_out = Dense(1, activation='linear', name='throttle')(z) - - #categorical output of location + # categorical output of location loc_out = Dense(num_locations, activation='softmax', name='loc')(z) model = Model(inputs=[img_in], outputs=[angle_out, throttle_out, loc_out]) - return model class KerasRNN_LSTM(KerasPilot): - def __init__(self, image_w =160, image_h=120, image_d=3, seq_length=3, roi_crop=(0,0), num_outputs=2, *args, **kwargs): - super(KerasRNN_LSTM, self).__init__(*args, **kwargs) - input_shape = (image_h, image_w, image_d) + def __init__(self, input_shape=(120, 160, 3), seq_length=3, num_outputs=2): + super().__init__() + self.input_shape = input_shape self.model = rnn_lstm(seq_length=seq_length, - num_outputs=num_outputs, - input_shape=input_shape, - roi_crop=roi_crop) + num_outputs=num_outputs, + input_shape=input_shape) self.seq_length = seq_length - self.image_d = image_d - self.image_w = image_w - self.image_h = image_h self.img_seq = [] - self.compile() self.optimizer = "rmsprop" def compile(self): - self.model.compile(optimizer=self.optimizer, - loss='mse') + self.model.compile(optimizer=self.optimizer, loss='mse') - def run(self, img_arr): - if img_arr.shape[2] == 3 and self.image_d == 1: + def inference(self, img_arr, other_arr): + if img_arr.shape[2] == 3 and self.input_shape[2] == 1: img_arr = dk.utils.rgb2gray(img_arr) while len(self.img_seq) < self.seq_length: @@ -500,30 +601,30 @@ def run(self, img_arr): self.img_seq = self.img_seq[1:] self.img_seq.append(img_arr) - img_arr = np.array(self.img_seq).reshape(1, self.seq_length, self.image_h, self.image_w, self.image_d ) + img_arr = np.array(self.img_seq).reshape((1, self.seq_length, + *self.input_shape)) outputs = self.model.predict([img_arr]) steering = outputs[0][0] throttle = outputs[0][1] return steering, throttle - -def rnn_lstm(seq_length=3, num_outputs=2, input_shape=(120,160,3), roi_crop=(0, 0)): - - #we now expect that cropping done elsewhere. 
we will adjust our expeected image size here: - input_shape = adjust_input_shape(input_shape, roi_crop) +def rnn_lstm(seq_length=3, num_outputs=2, input_shape=(120, 160, 3)): + # add sequence length dimensions as keras time-distributed expects shape + # of (num_samples, seq_length, input_shape) img_seq_shape = (seq_length,) + input_shape - img_in = Input(batch_shape = img_seq_shape, name='img_in') + img_in = Input(batch_shape=img_seq_shape, name='img_in') drop_out = 0.3 x = Sequential() - x.add(TD(Convolution2D(24, (5,5), strides=(2,2), activation='relu'), input_shape=img_seq_shape)) + x.add(TD(Convolution2D(24, (5,5), strides=(2,2), activation='relu'), + input_shape=img_seq_shape)) x.add(TD(Dropout(drop_out))) - x.add(TD(Convolution2D(32, (5,5), strides=(2,2), activation='relu'))) + x.add(TD(Convolution2D(32, (5, 5), strides=(2, 2), activation='relu'))) x.add(TD(Dropout(drop_out))) - x.add(TD(Convolution2D(32, (3,3), strides=(2,2), activation='relu'))) + x.add(TD(Convolution2D(32, (3, 3), strides=(2, 2), activation='relu'))) x.add(TD(Dropout(drop_out))) - x.add(TD(Convolution2D(32, (3,3), strides=(1,1), activation='relu'))) + x.add(TD(Convolution2D(32, (3, 3), strides=(1, 1), activation='relu'))) x.add(TD(Dropout(drop_out))) x.add(TD(MaxPooling2D(pool_size=(2, 2)))) x.add(TD(Flatten(name='flattened'))) @@ -539,35 +640,26 @@ def rnn_lstm(seq_length=3, num_outputs=2, input_shape=(120,160,3), roi_crop=(0, x.add(Dense(64, activation='relu')) x.add(Dense(10, activation='relu')) x.add(Dense(num_outputs, activation='linear', name='model_outputs')) - return x class Keras3D_CNN(KerasPilot): - def __init__(self, image_w =160, image_h=120, image_d=3, seq_length=20, num_outputs=2, roi_crop=(0, 0), *args, **kwargs): - super(Keras3D_CNN, self).__init__(*args, **kwargs) - - #we now expect that cropping done elsewhere. 
we will adjust our expeected image size here: - input_shape = adjust_input_shape((image_h, image_w, image_d), roi_crop) - image_h = input_shape[0] - image_w = input_shape[1] - image_d = input_shape[2] - - - self.model = build_3d_cnn(w=image_w, h=image_h, d=image_d, s=seq_length, num_outputs=num_outputs) + def __init__(self, input_shape=(120, 160, 3), seq_length=20, num_outputs=2): + super().__init__() + self.input_shape = input_shape + self.model = build_3d_cnn(input_shape, s=seq_length, + num_outputs=num_outputs) self.seq_length = seq_length - self.image_d = image_d - self.image_w = image_w - self.image_h = image_h self.img_seq = [] - self.compile() def compile(self): - self.model.compile(loss='mean_squared_error', optimizer=self.optimizer, metrics=['accuracy']) + self.model.compile(loss='mean_squared_error', + optimizer=self.optimizer, + metrics=['accuracy']) - def run(self, img_arr): + def inference(self, img_arr, other_arr): - if img_arr.shape[2] == 3 and self.image_d == 1: + if img_arr.shape[2] == 3 and self.input_shape[2] == 1: img_arr = dk.utils.rgb2gray(img_arr) while len(self.img_seq) < self.seq_length: @@ -576,62 +668,65 @@ def run(self, img_arr): self.img_seq = self.img_seq[1:] self.img_seq.append(img_arr) - img_arr = np.array(self.img_seq).reshape(1, self.seq_length, self.image_h, self.image_w, self.image_d ) + img_arr = np.array(self.img_seq).reshape((1, self.seq_length, + *self.input_shape)) outputs = self.model.predict([img_arr]) steering = outputs[0][0] throttle = outputs[0][1] return steering, throttle -def build_3d_cnn(w, h, d, s, num_outputs): - #Credit: https://github.com/jessecha/DNRacing/blob/master/3D_CNN_Model/model.py - ''' - w : width - h : height - d : depth - s : n_stacked - ''' - input_shape=(s, h, w, d) +def build_3d_cnn(input_shape, s, num_outputs): + """ + Credit: https://github.com/jessecha/DNRacing/blob/master/3D_CNN_Model/model.py + :param input_shape: image input shape + :param s: sequence length + :param num_outputs: output dimension + :return: + """ + input_shape = (s, ) + input_shape model = Sequential() - #First layer - #model.add(Cropping3D(cropping=((0,0), (50,10), (0,0)), input_shape=input_shape) ) #trim pixels off top - + # Second layer model.add(Conv3D( - filters=16, kernel_size=(3,3,3), strides=(1,3,3), + filters=16, kernel_size=(3, 3, 3), strides=(1, 3, 3), data_format='channels_last', padding='same', input_shape=input_shape) ) model.add(Activation('relu')) model.add(MaxPooling3D( - pool_size=(1,2,2), strides=(1,2,2), padding='valid', data_format=None) + pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid', + data_format=None) ) # Third layer model.add(Conv3D( - filters=32, kernel_size=(3,3,3), strides=(1,1,1), + filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1), data_format='channels_last', padding='same') ) model.add(Activation('relu')) model.add(MaxPooling3D( - pool_size=(1, 2, 2), strides=(1,2,2), padding='valid', data_format=None) + pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid', + data_format=None) ) # Fourth layer model.add(Conv3D( - filters=64, kernel_size=(3,3,3), strides=(1,1,1), + filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1), data_format='channels_last', padding='same') ) model.add(Activation('relu')) model.add(MaxPooling3D( - pool_size=(1,2,2), strides=(1,2,2), padding='valid', data_format=None) + pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid', + data_format=None) ) # Fifth layer model.add(Conv3D( - filters=128, kernel_size=(3,3,3), strides=(1,1,1), + filters=128, kernel_size=(3, 3, 3), strides=(1, 
1, 1), data_format='channels_last', padding='same') ) model.add(Activation('relu')) model.add(MaxPooling3D( - pool_size=(1,2,2), strides=(1,2,2), padding='valid', data_format=None) + pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid', + data_format=None) ) # Fully connected layer model.add(Flatten()) @@ -647,24 +742,21 @@ def build_3d_cnn(w, h, d, s, num_outputs): model.add(Dropout(0.5)) model.add(Dense(num_outputs)) - #model.add(Activation('tanh')) - return model + class KerasLatent(KerasPilot): - def __init__(self, num_outputs=2, input_shape=(120, 160, 3), *args, **kwargs): - super().__init__(*args, **kwargs) + def __init__(self, num_outputs=2, input_shape=(120, 160, 3)): + super().__init__() self.model = default_latent(num_outputs, input_shape) - self.compile() def compile(self): - self.model.compile(optimizer=self.optimizer, loss={ - "img_out" : "mse", "n_outputs0" : "mse", "n_outputs1" : "mse" - }, loss_weights={ - "img_out" : 100.0, "n_outputs0" : 2.0, "n_outputs1" : 1.0 - }) + loss = {"img_out": "mse", "n_outputs0": "mse", "n_outputs1": "mse"} + weights = {"img_out": 100.0, "n_outputs0": 2.0, "n_outputs1": 1.0} + self.model.compile(optimizer=self.optimizer, + loss=loss, loss_weights=weights) - def run(self, img_arr): + def inference(self, img_arr, other_arr): img_arr = img_arr.reshape((1,) + img_arr.shape) outputs = self.model.predict(img_arr) steering = outputs[1] @@ -673,9 +765,10 @@ def run(self, img_arr): def default_latent(num_outputs, input_shape): - + # TODO: this auto-encoder should run the standard cnn in encoding and + # have corresponding decoder. Also outputs should be reversed with + # images at end. drop = 0.2 - img_in = Input(shape=input_shape, name='img_in') x = img_in x = Convolution2D(24, (5,5), strides=(2,2), activation='relu', name="conv2d_1")(x) @@ -710,10 +803,8 @@ def default_latent(num_outputs, input_shape): x = Dropout(drop)(x) outputs = [y] - for i in range(num_outputs): outputs.append(Dense(1, activation='linear', name='n_outputs' + str(i))(x)) model = Model(inputs=[img_in], outputs=outputs) - return model diff --git a/donkeycar/parts/leopard_imaging.py b/donkeycar/parts/leopard_imaging.py new file mode 100644 index 000000000..9a7e7d521 --- /dev/null +++ b/donkeycar/parts/leopard_imaging.py @@ -0,0 +1,57 @@ +import cv2 +from donkeycar.parts.camera import BaseCamera +from donkeycar.parts.fast_stretch import fast_stretch +import time + + +class LICamera(BaseCamera): + ''' + The Leopard Imaging Camera with Fast-Stretch built in. + ''' + def __init__(self, width=224, height=224, capture_width=1280, capture_height=720, fps=60): + super(LICamera, self).__init__() + self.width = width + self.height = height + self.capture_width = capture_width + self.capture_height = capture_height + self.fps = fps + self.camera_id = LICamera.camera_id(self.capture_width, self.capture_height, self.width, self.height, self.fps) + self.frame = None + print('Connecting to Leopard Imaging Camera') + self.capture = cv2.VideoCapture(self.camera_id) + time.sleep(2) + if self.capture.isOpened(): + print('Leopard Imaging Camera Connected.') + self.on = True + else: + self.on = False + print('Unable to connect. Are you sure you are using the right camera parameters ?') + + def read_frame(self): + success, frame = self.capture.read() + if success: + # returns an RGB frame. 
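+            # fast_stretch equalises contrast by stretching the V channel in
+            # HSV space, then returns the result converted to RGB.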
+ frame = fast_stretch(frame) + self.frame = frame + + def run(self): + self.read_frame() + return self.frame + + def update(self): + # keep looping infinitely until the thread is stopped + # if the thread indicator variable is set, stop the thread + while self.on: + self.read_frame() + + def shutdown(self): + # indicate that the thread should be stopped + self.on = False + print('Stopping Leopard Imaging Camera') + self.capture.release() + time.sleep(.5) + + @classmethod + def camera_id(cls, capture_width, capture_height, width, height, fps): + return 'nvarguscamerasrc ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink' % ( + capture_width, capture_height, fps, width, height) \ No newline at end of file diff --git a/donkeycar/parts/pytorch/ResNet18.py b/donkeycar/parts/pytorch/ResNet18.py new file mode 100644 index 000000000..ac32b86ee --- /dev/null +++ b/donkeycar/parts/pytorch/ResNet18.py @@ -0,0 +1,111 @@ +import pytorch_lightning as pl +import torchvision.models as models +import torch.nn as nn +import torch.nn.functional as F +import torch +import numpy as np +import torch.optim as optim +from donkeycar.parts.pytorch.torch_data import get_default_transform + + +def load_resnet18(num_classes=2): + # Load the pre-trained model (on ImageNet) + model = models.resnet18(pretrained=True) + + # Don't allow model feature extraction layers to be modified + for layer in model.parameters(): + layer.requires_grad = False + + # Change the classifier layer + model.fc = nn.Linear(512, 2) + + for param in model.fc.parameters(): + param.requires_grad = True + + return model + + +class ResNet18(pl.LightningModule): + def __init__(self, input_shape=(128, 3, 224, 224), output_size=(2,)): + super().__init__() + + # Used by PyTorch Lightning to print an example model summary + self.example_input_array = torch.rand(input_shape) + + # Metrics + self.train_acc = pl.metrics.Accuracy() + self.valid_acc = pl.metrics.Accuracy() + self.train_precision = pl.metrics.Precision() + self.valid_precision = pl.metrics.Precision() + + self.model = load_resnet18(num_classes=output_size[0]) + + self.inference_transform = get_default_transform(for_inference=True) + + # Keep track of the loss history. 
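`load_resnet18()` above is the standard torchvision transfer-learning recipe: freeze the ImageNet backbone and train only a fresh two-unit head for (steering, throttle). A minimal sketch of the same pattern; reading `model.fc.in_features` instead of hard-coding 512 is one way to keep the head in sync with the backbone:

```python
import torch.nn as nn
import torchvision.models as models

model = models.resnet18(pretrained=True)
for p in model.parameters():
    p.requires_grad = False                    # freeze the feature extractor
model.fc = nn.Linear(model.fc.in_features, 2)  # new trainable head

trainable = [n for n, p in model.named_parameters() if p.requires_grad]
assert trainable == ['fc.weight', 'fc.bias']
```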
This is useful for writing tests + self.loss_history = [] + + def forward(self, x): + # Forward defines the prediction/inference action + return self.model(x) + + def training_step(self, batch, batch_idx): + x, y = batch + logits = self.model(x) + + loss = F.l1_loss(logits, y) + self.loss_history.append(loss) + self.log('train_loss', loss) + + # Log Metrics + self.train_acc(logits, y) + self.log('train_acc', self.train_acc, on_step=False, on_epoch=True) + + self.train_precision(logits, y) + self.log('train_precision', self.train_precision, + on_step=False, on_epoch=True) + + return loss + + def validation_step(self, batch, batch_idx): + x, y = batch + logits = self.forward(x) + loss = F.l1_loss(logits, y) + + # Log Metrics + self.log('val_loss', loss) + + self.valid_acc(logits, y) + self.log('valid_acc', self.valid_acc, on_step=False, on_epoch=True) + + self.valid_precision(logits, y) + self.log('valid_precision', self.valid_precision, + on_step=False, on_epoch=True) + + def configure_optimizers(self): + optimizer = optim.Adam( + self.model.parameters(), lr=0.0001, weight_decay=0.0005) + return optimizer + + def run(self, img_arr: np.ndarray, other_arr: np.ndarray = None): + """ + Donkeycar parts interface to run the part in the loop. + + :param img_arr: uint8 [0,255] numpy array with image data + :param other_arr: numpy array of additional data to be used in the + pilot, like IMU array for the IMU model or a + state vector in the Behavioural model + :return: tuple of (angle, throttle) + """ + from PIL import Image + pil_image = Image.fromarray(img_arr) + tensor_image = self.inference_transform(pil_image) + tensor_image = tensor_image.unsqueeze(0) + + # Result is (1, 2) + result = self.forward(tensor_image) + + # Resize to (2,) + result = result.reshape(-1) + print("ResNet18 result: {}".format(result)) + return result diff --git a/donkeycar/parts/pytorch/torch_data.py b/donkeycar/parts/pytorch/torch_data.py new file mode 100644 index 000000000..1c60606b4 --- /dev/null +++ b/donkeycar/parts/pytorch/torch_data.py @@ -0,0 +1,151 @@ +# PyTorch +import torch +from torch.utils.data import IterableDataset, DataLoader +from donkeycar.utils import train_test_split +from donkeycar.parts.tub_v2 import Tub +from torchvision import transforms +from typing import List, Any +from donkeycar.pipeline.types import TubRecord, TubDataset +from donkeycar.pipeline.sequence import TubSequence +import pytorch_lightning as pl + + +def get_default_transform(for_video=False, for_inference=False): + """ + Creates a default transform to work with torchvision models + + Video transform: + All pre-trained models expect input images normalized in the same way, + i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W), + where H and W are expected to be 112, and T is a number of video frames + in a clip. The images have to be loaded in to a range of [0, 1] and + then normalized using mean = [0.43216, 0.394666, 0.37645] and + std = [0.22803, 0.22145, 0.216989]. + """ + + mean = [0.485, 0.456, 0.406] + std = [0.229, 0.224, 0.225] + input_size = (224, 224) + + if for_video: + mean = [0.43216, 0.394666, 0.37645] + std = [0.22803, 0.22145, 0.216989] + input_size = (112, 112) + + transform = transforms.Compose([ + transforms.Resize(input_size), + transforms.ToTensor(), + transforms.Normalize(mean=mean, std=std) + ]) + + return transform + + +class TorchTubDataset(IterableDataset): + ''' + Loads the dataset, and creates a train/test split. 
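For reference, this is how a raw camera frame flows through the inference transform that `get_default_transform()` builds and `ResNet18.run()` applies, assuming a 120x160 RGB frame:

```python
import numpy as np
from PIL import Image
from torchvision import transforms

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),                 # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

img_arr = np.zeros((120, 160, 3), dtype=np.uint8)
tensor = transform(Image.fromarray(img_arr)).unsqueeze(0)  # add batch dim
assert tuple(tensor.shape) == (1, 3, 224, 224)
```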
+ ''' + + def __init__(self, config, records: List[TubRecord], transform=None): + """Create a PyTorch Tub Dataset + + Args: + config (object): the configuration information + records (List[TubRecord]): a list of tub records + transform (function, optional): a transform to apply to the data + """ + self.config = config + + # Handle the transforms + if transform: + self.transform = transform + else: + self.transform = get_default_transform() + + self.sequence = TubSequence(records) + self.pipeline = self._create_pipeline() + + def _create_pipeline(self): + """ This can be overridden if more complicated pipelines are + required """ + + def y_transform(record: TubRecord): + angle: float = record.underlying['user/angle'] + throttle: float = record.underlying['user/throttle'] + return torch.tensor([angle, throttle]) + + def x_transform(record: TubRecord): + # Loads the result of Image.open() + img_arr = record.image(cached=True, as_nparray=False) + return self.transform(img_arr) + + # Build pipeline using the transformations + pipeline = self.sequence.build_pipeline(x_transform=x_transform, + y_transform=y_transform) + + return pipeline + + def __iter__(self): + return iter(self.pipeline) + + +class TorchTubDataModule(pl.LightningDataModule): + + def __init__(self, config: Any, tub_paths: List[str], transform=None): + """Create a PyTorch Lightning Data Module to contain all data loading logic + + Args: + config (object): the configuration information + tub_paths (List[str]): a list of paths to the tubs to use (minimum size of 1). + Each tub path corresponds to another training run. + transform (function, optional): a transform to apply to the data + """ + super().__init__() + + self.config = config + self.tub_paths = tub_paths + + # Handle the transforms + if transform: + self.transform = transform + else: + self.transform = get_default_transform() + + self.tubs: List[Tub] = [Tub(tub_path, read_only=True) + for tub_path in self.tub_paths] + self.records: List[TubRecord] = [] + + def setup(self, stage=None): + """Load all the tub data and set up the datasets. + + Args: + stage ([string], optional): setup expects a string arg stage. + It is used to separate setup logic for trainer.fit + and trainer.test. Defaults to None. + """ + # Loop through all the different tubs and load all the records for each of them + for tub in self.tubs: + for underlying in tub: + record = TubRecord(self.config, tub.base_path, + underlying=underlying) + self.records.append(record) + + train_records, val_records = train_test_split( + self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT)) + + assert len(val_records) > 0, "Not enough validation data. 
Add more data"

+        self.train_dataset = TorchTubDataset(
+            self.config, train_records, transform=self.transform)
+        self.val_dataset = TorchTubDataset(
+            self.config, val_records, transform=self.transform)
+
+    def train_dataloader(self):
+        # num_workers is set to 0 to avoid errors on Macs and Windows
+        # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
+        return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)
+
+    def val_dataloader(self):
+        # num_workers is set to 0 to avoid errors on Macs and Windows
+        # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
+        return DataLoader(self.val_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)
diff --git a/donkeycar/parts/pytorch/torch_train.py b/donkeycar/parts/pytorch/torch_train.py
new file mode 100644
index 000000000..3d8a8e812
--- /dev/null
+++ b/donkeycar/parts/pytorch/torch_train.py
@@ -0,0 +1,61 @@
+import os
+from pathlib import Path
+import torch
+import pytorch_lightning as pl
+from donkeycar.parts.pytorch.torch_data import TorchTubDataModule
+from donkeycar.parts.pytorch.torch_utils import get_model_by_type
+
+
+def train(cfg, tub_paths, model_output_path, model_type, checkpoint_path=None):
+    """
+    Train the model
+    """
+    model_name, model_ext = os.path.splitext(model_output_path)
+    is_torch_model = model_ext == '.ckpt'
+    if not is_torch_model:
+        # abort early rather than training a model we cannot save
+        print("Unrecognized model file extension for model_output_path: "
+              "'{}'. Please use the '.ckpt' extension."
+              .format(model_output_path))
+        return None
+
+    if not model_type:
+        model_type = cfg.DEFAULT_MODEL_TYPE
+
+    tubs = tub_paths.split(',')
+    tub_paths = [os.path.expanduser(tub) for tub in tubs]
+    output_path = os.path.expanduser(model_output_path)
+    output_dir = Path(model_output_path).parent
+
+    model = get_model_by_type(model_type, cfg, checkpoint_path=checkpoint_path)
+
+    if torch.cuda.is_available():
+        print('Using CUDA')
+        gpus = -1
+    else:
+        print('Not using CUDA')
+        gpus = 0
+
+    logger = None
+    if cfg.VERBOSE_TRAIN:
+        print("Tensorboard logging started. Run `tensorboard --logdir ./tb_logs` in a new terminal")
+        from pytorch_lightning.loggers import TensorBoardLogger
+
+        # Create Tensorboard logger
+        logger = TensorBoardLogger('tb_logs', name=model_name)
+
+    weights_summary = 'full' if cfg.PRINT_MODEL_SUMMARY else 'top'
+    trainer = pl.Trainer(gpus=gpus, logger=logger, progress_bar_refresh_rate=30,
+                         max_epochs=cfg.MAX_EPOCHS, default_root_dir=output_dir,
+                         weights_summary=weights_summary)
+
+    data_module = TorchTubDataModule(cfg, tub_paths)
+    trainer.fit(model, data_module)
+
+    checkpoint_model_path = f'{os.path.splitext(output_path)[0]}.ckpt'
+    trainer.save_checkpoint(checkpoint_model_path)
+    print("Saved final model to {}".format(checkpoint_model_path))
+
+    return model.loss_history
diff --git a/donkeycar/parts/pytorch/torch_utils.py b/donkeycar/parts/pytorch/torch_utils.py
new file mode 100644
index 000000000..b4b904a75
--- /dev/null
+++ b/donkeycar/parts/pytorch/torch_utils.py
@@ -0,0 +1,31 @@
+import os
+
+
+def get_model_by_type(model_type, cfg, checkpoint_path=None):
+    '''
+    Given the string model_type and the configuration settings in cfg,
+    create a Torch model and return it.
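A hypothetical invocation of this training entry point; the tub and model paths are placeholders, and the `cfg` attributes it reads (`DEFAULT_MODEL_TYPE`, `MAX_EPOCHS`, `VERBOSE_TRAIN`, ...) come from the config templates elsewhere in this PR:

```python
import donkeycar as dk
from donkeycar.parts.pytorch.torch_train import train

cfg = dk.load_config(config_path='~/mycar/config.py')  # placeholder path
loss_history = train(cfg,
                     tub_paths='~/mycar/data',
                     model_output_path='~/mycar/models/pilot.ckpt',
                     model_type='resnet18')
print('last training loss:', loss_history[-1])
```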
+ ''' + if model_type is None: + model_type = cfg.DEFAULT_MODEL_TYPE + print("\"get_model_by_type\" model Type is: {}".format(model_type)) + + input_shape = (cfg.BATCH_SIZE, cfg.IMAGE_DEPTH, cfg.IMAGE_H, cfg.IMAGE_W) + + if model_type == "resnet18": + from donkeycar.parts.pytorch.ResNet18 import ResNet18 + # ResNet18 will always use the following input size + # regardless of what the user specifies. This is necessary since + # the model is pre-trained on ImageNet + input_shape = (cfg.BATCH_SIZE, 3, 224, 224) + model = ResNet18(input_shape=input_shape) + else: + raise Exception("Unknown model type {:}, supported types are " + "resnet18" + .format(model_type)) + + if checkpoint_path: + print("Loading model from checkpoint {}".format(checkpoint_path)) + model.load_from_checkpoint(checkpoint_path) + + return model diff --git a/donkeycar/parts/telemetry.py b/donkeycar/parts/telemetry.py new file mode 100644 index 000000000..a7a392fea --- /dev/null +++ b/donkeycar/parts/telemetry.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +""" +Telemetry class distributing real-time metrics to external server + +author: @miro (Meir Tseitlin) 2020 + +Note: +""" +import os +import queue +import time +import json +from logging import StreamHandler +from paho.mqtt.client import Client as MQTTClient + + +class MqttTelemetry(StreamHandler): + """ + Telemetry class collects telemetry from different parts of the system and periodically sends updated to the server. + Telemetry reports are timestamped and stored in memory until it is pushed to the server + """ + + def __init__(self, cfg, default_inputs=None, default_types=None): + + StreamHandler.__init__(self) + + self.PUBLISH_PERIOD = cfg.TELEMETRY_PUBLISH_PERIOD + self._last_publish = time.time() + self._telem_q = queue.Queue() + self._default_inputs = default_inputs or [] + self._default_types = default_types or [] + self._total_updates = 0 + self._donkey_name = os.environ.get('DONKEY_NAME', cfg.TELEMETRY_DONKEY_NAME) + self._mqtt_broker = os.environ.get('DONKEY_MQTT_BROKER', cfg.TELEMETRY_MQTT_BROKER_HOST) # 'iot.eclipse.org' + self._topic = cfg.TELEMETRY_MQTT_TOPIC_TEMPLATE % self._donkey_name + self._use_json_format = cfg.TELEMETRY_MQTT_JSON_ENABLE + self._mqtt_client = MQTTClient() + self._mqtt_client.connect(self._mqtt_broker, cfg.TELEMETRY_MQTT_BROKER_PORT) + self._mqtt_client.loop_start() + self._on = True + print(f"Telemetry MQTT server connected (publishing: {', '.join(self._default_inputs)}") + + @staticmethod + def filter_supported_metrics(inputs, types): + supported_inputs = [] + supported_types = [] + for ind in range(0, len(inputs or [])): + if types[ind] in ['float', 'str', 'int']: + supported_inputs.append(inputs[ind]) + supported_types.append(types[ind]) + return supported_inputs, supported_types + + def report(self, metrics): + """ + Basic reporting - gets arbitrary dictionary with values + """ + curr_time = int(time.time()) + + # Store sample with time rounded to second + try: + self._telem_q.put((curr_time, metrics), block=False) + except queue.Full: + pass + + return curr_time + + def emit(self, record): + """ + FUTURE: Added to support Logging interface (to allow to use Python logging module to log directly to telemetry) + """ + # msg = self.format(record) + self.report(record) + + @property + def qsize(self): + return self._telem_q.qsize() + + def publish(self): + + # Create packet + packet = {} + while not self._telem_q.empty(): + next_item = self._telem_q.get() + packet.setdefault(next_item[0], {}).update(next_item[1]) + + if not packet: + 
return + + if self._use_json_format: + packet = [{'ts': k, 'values': v} for k, v in packet.items()] + payload = json.dumps(packet) + + self._mqtt_client.publish(self._topic, payload) + # print(f'Total updates - {self._total_updates} (payload size={len(payload)})') + else: + # Publish only the last timestamp + last_sample = packet[list(packet)[-1]] + for k, v in last_sample.items(): + self._mqtt_client.publish('{}/{}'.format(self._topic, k), v) + # print(f'Total updates - {self._total_updates} (values ={len(last_sample)})') + + self._total_updates += 1 + return + + def run(self, *args): + """ + API function needed to use as a Donkey part. Accepts values, + pairs them with their inputs keys and saves them to disk. + """ + assert len(self._default_inputs) == len(args) + + # Add to queue + record = dict(zip(self._default_inputs, args)) + self.report(record) + + # Periodic publish + curr_time = time.time() + if curr_time - self._last_publish > self.PUBLISH_PERIOD and self.qsize > 0: + + self.publish() + self._last_publish = curr_time + return self.qsize + + def run_threaded(self, *args): + + assert len(self._default_inputs) == len(args) + + # Add to queue + record = dict(zip(self._default_inputs, args)) + self.report(record) + return self.qsize + + def update(self): + while self._on: + self.publish() + time.sleep(self.PUBLISH_PERIOD) + + def shutdown(self): + # indicate that the thread should be stopped + self._on = False + print('Stopping MQTT Telemetry') + time.sleep(.2) \ No newline at end of file diff --git a/donkeycar/parts/tensorrt.py b/donkeycar/parts/tensorrt.py index d46bfcada..739852eaf 100644 --- a/donkeycar/parts/tensorrt.py +++ b/donkeycar/parts/tensorrt.py @@ -1,5 +1,6 @@ from collections import namedtuple from donkeycar.parts.keras import KerasPilot +from donkeycar.utils import throttle as calculate_throttle import json import numpy as np import pycuda.driver as cuda @@ -10,12 +11,13 @@ HostDeviceMemory = namedtuple('HostDeviceMemory', 'host_memory device_memory') + class TensorRTLinear(KerasPilot): ''' Uses TensorRT to do the inference. 
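A wiring sketch for the MqttTelemetry part above, assuming the `TELEMETRY_*` settings it reads are present in `cfg`; the `tel/queue_size` output name is illustrative only. `run_threaded()` queues timestamped samples while the threaded `update()` loop publishes them every `TELEMETRY_PUBLISH_PERIOD` seconds:

```python
import donkeycar as dk
from donkeycar.parts.telemetry import MqttTelemetry

cfg = dk.load_config(config_path='~/mycar/config.py')  # placeholder path
tel = MqttTelemetry(cfg, default_inputs=['angle', 'throttle'],
                    default_types=['float', 'float'])

V = dk.vehicle.Vehicle()
V.add(tel, inputs=['angle', 'throttle'], outputs=['tel/queue_size'],
      threaded=True)
```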
''' - def __init__(self, cfg, *args, **kwargs): - super(TensorRTLinear, self).__init__(*args, **kwargs) + def __init__(self, cfg): + super().__init__() self.logger = trt.Logger(trt.Logger.WARNING) self.cfg = cfg self.engine = None @@ -29,15 +31,16 @@ def compile(self): def load(self, model_path): uff_model = Path(model_path) - metadata_path = Path('%s/%s.metadata' % (uff_model.parent.as_posix(), uff_model.stem)) - with open(metadata_path.as_posix(), 'r') as metadata, trt.Builder(self.logger) as builder, builder.create_network() as network, trt.UffParser() as parser: - - # Without this max_workspace_size setting, I was getting: - # Building CUDA Engine - # [TensorRT] ERROR: Internal error: could not find any implementation for node 2-layer MLP, try increasing the workspace size with IBuilder::setMaxWorkspaceSize() - # [TensorRT] ERROR: ../builder/tacticOptimizer.cpp (1230) - OutOfMemory Error in computeCosts: 0 - builder.max_workspace_size = 1 << 20 #common.GiB(1) + metadata_path = Path('%s/%s.metadata' + % (uff_model.parent.as_posix(), uff_model.stem)) + with open(metadata_path.as_posix(), 'r') as metadata, \ + trt.Builder(self.logger) as builder, \ + builder.create_network() as network, \ + trt.UffParser() as parser: + + builder.max_workspace_size = 1 << 20 builder.max_batch_size = 1 + builder.fp16_mode = True metadata = json.loads(metadata.read()) # Configure inputs and outputs @@ -45,7 +48,9 @@ def load(self, model_path): input_names = metadata['input_names'] output_names = metadata['output_names'] for name in input_names: - parser.register_input(name, (self.cfg.TARGET_D, self.cfg.TARGET_H, self.cfg.TARGET_W)) + parser.register_input(name, (self.cfg.TARGET_D, + self.cfg.TARGET_H, + self.cfg.TARGET_W)) for name in output_names: parser.register_output(name) @@ -56,10 +61,11 @@ def load(self, model_path): self.engine = builder.build_cuda_engine(network) # Allocate buffers print('Allocating Buffers') - self.inputs, self.outputs, self.bindings, self.stream = TensorRTLinear.allocate_buffers(self.engine) + self.inputs, self.outputs, self.bindings, self.stream \ + = TensorRTLinear.allocate_buffers(self.engine) print('Ready') - def run(self, image): + def inference(self, image, other_arr=None): # Channel first image format image = image.transpose((2,0,1)) # Flatten it to a 1D array. 
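`TensorRTLinear.load()` above expects a `.metadata` JSON file next to the `.uff` model, naming the graph's inputs and outputs. A sketch of writing one by hand; the tensor names here are hypothetical and must match the exported graph:

```python
import json

metadata = {'input_names': ['img_in'], 'output_names': ['outputs']}
with open('mypilot.metadata', 'w') as f:
    json.dump(metadata, f)
# load('mypilot.uff') then registers each input with shape
# (cfg.TARGET_D, cfg.TARGET_H, cfg.TARGET_W) and each output by name.
```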
@@ -68,8 +74,17 @@ def run(self, image): image_input = self.inputs[0] np.copyto(image_input.host_memory, image) with self.engine.create_execution_context() as context: - [throttle, steering] = TensorRTLinear.infer(context=context, bindings=self.bindings, inputs=self.inputs, outputs=self.outputs, stream=self.stream) - return steering[0], throttle[0] + inference_output = TensorRTLinear.infer(context=context, + bindings=self.bindings, + inputs=self.inputs, + outputs=self.outputs, + stream=self.stream) + if len(inference_output) == 2: + [throttle, steering] = inference_output + return steering[0], throttle[0] + else: + [steering] = inference_output + return steering[0], calculate_throttle(steering[0]) @classmethod def allocate_buffers(cls, engine): @@ -78,7 +93,8 @@ def allocate_buffers(cls, engine): bindings = [] stream = cuda.Stream() for binding in engine: - size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size + size = trt.volume(engine.get_binding_shape(binding)) \ + * engine.max_batch_size dtype = trt.nptype(engine.get_binding_dtype(binding)) # Allocate host and device buffers host_memory = cuda.pagelocked_empty(size, dtype) @@ -94,11 +110,14 @@ def allocate_buffers(cls, engine): @classmethod def infer(cls, context, bindings, inputs, outputs, stream, batch_size=1): # Transfer input data to the GPU. - [cuda.memcpy_htod_async(inp.device_memory, inp.host_memory, stream) for inp in inputs] + [cuda.memcpy_htod_async(inp.device_memory, inp.host_memory, stream) + for inp in inputs] # Run inference. - context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle) + context.execute_async(batch_size=batch_size, bindings=bindings, + stream_handle=stream.handle) # Transfer predictions back from the GPU. - [cuda.memcpy_dtoh_async(out.host_memory, out.device_memory, stream) for out in outputs] + [cuda.memcpy_dtoh_async(out.host_memory, out.device_memory, stream) + for out in outputs] # Synchronize the stream stream.synchronize() # Return only the host outputs. diff --git a/donkeycar/parts/tflite.py b/donkeycar/parts/tflite.py index 258581364..052fe29f7 100755 --- a/donkeycar/parts/tflite.py +++ b/donkeycar/parts/tflite.py @@ -1,16 +1,17 @@ +import os import tensorflow as tf +from donkeycar.parts.keras import KerasPilot + + def keras_model_to_tflite(in_filename, out_filename, data_gen=None): - verStr = tf.__version__ - if verStr.find('1.1') == 0: # found MAJOR.MINOR match for version 1.1x.x - converter = tf.lite.TFLiteConverter.from_keras_model_file(in_filename) - if verStr.find('2.') == 0: # found MAJOR.MINOR match for version 2.x.x - new_model= tf.keras.models.load_model(in_filename) #filepath="keras_model.h5") - converter = tf.lite.TFLiteConverter.from_keras_model(new_model) + print('Convert model {:} to TFLite {:}'.format(in_filename, out_filename)) + new_model = tf.keras.models.load_model(in_filename) + converter = tf.lite.TFLiteConverter.from_keras_model(new_model) if data_gen is not None: - #when we have a data_gen that is the trigger to use it to - #create integer weights and calibrate them. Warning: this model will - #no longer run with the standard tflite engine. That uses only float. + # when we have a data_gen that is the trigger to use it to create + # integer weights and calibrate them. Warning: this model will no + # longer run with the standard tflite engine. That uses only float. 
converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.representative_dataset = data_gen try: @@ -23,28 +24,25 @@ def keras_model_to_tflite(in_filename, out_filename, data_gen=None): pass converter.inference_input_type = tf.uint8 converter.inference_output_type = tf.uint8 - print("----- using data generator to create int optimized weights for Coral TPU -----") + print("using data generator to create int optimized weights for Coral TPU") tflite_model = converter.convert() open(out_filename, "wb").write(tflite_model) -def keras_session_to_tflite(model, out_filename): - inputs = model.inputs - outputs = model.outputs - with tf.keras.backend.get_session() as sess: - converter = tf.lite.TFLiteConverter.from_session(sess, inputs, outputs) - tflite_model = converter.convert() - open(out_filename, "wb").write(tflite_model) - -class TFLitePilot(object): - ''' - Base class for TFlite models that will provide steering and throttle to guide a car. - ''' +class TFLitePilot(KerasPilot): + """ + This class wraps around the TensorFlow Lite interpreter. + """ def __init__(self): - self.model = None - + super().__init__() + self.interpreter = None + self.input_shape = None + self.input_details = None + self.output_details = None def load(self, model_path): + assert os.path.splitext(model_path)[1] == '.tflite', \ + 'TFlitePilot should load only .tflite files' # Load TFLite model and allocate tensors. self.interpreter = tf.lite.Interpreter(model_path=model_path) self.interpreter.allocate_tensors() @@ -55,15 +53,12 @@ def load(self, model_path): # Get Input shape self.input_shape = self.input_details[0]['shape'] - - def run(self, image): - input_data = image.reshape(self.input_shape).astype('float32') - + def inference(self, img_arr, other_arr): + input_data = img_arr.reshape(self.input_shape) self.interpreter.set_tensor(self.input_details[0]['index'], input_data) self.interpreter.invoke() - steering = 0.0 throttle = 0.0 outputs = [] @@ -71,10 +66,13 @@ def run(self, image): output_data = self.interpreter.get_tensor(tensor['index']) outputs.append(output_data[0][0]) + steering = outputs[0] if len(outputs) > 1: - steering = outputs[0] throttle = outputs[1] return steering, throttle + def get_input_shape(self): + assert self.input_shape is not None, "Need to load model first" + return self.input_shape diff --git a/donkeycar/parts/tub_v2.py b/donkeycar/parts/tub_v2.py new file mode 100644 index 000000000..e34f91d16 --- /dev/null +++ b/donkeycar/parts/tub_v2.py @@ -0,0 +1,125 @@ +import atexit +import os +import time + +import numpy as np +from PIL import Image + +from donkeycar.parts.datastore_v2 import Manifest, ManifestIterator + + +class Tub(object): + """ + A datastore to store sensor data in a key, value format. \n + Accepts str, int, float, image_array, image, and array data types. + """ + + def __init__(self, base_path, inputs=[], types=[], metadata=[], + max_catalog_len=1000, read_only=False): + self.base_path = base_path + self.images_base_path = os.path.join(self.base_path, Tub.images()) + self.inputs = inputs + self.types = types + self.metadata = metadata + self.manifest = Manifest(base_path, inputs=inputs, types=types, + metadata=metadata, max_len=max_catalog_len, + read_only=read_only) + self.input_types = dict(zip(self.inputs, self.types)) + # Create images folder if necessary + if not os.path.exists(self.images_base_path): + os.makedirs(self.images_base_path, exist_ok=True) + + def write_record(self, record=None): + """ + Can handle various data types including images. 
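Hypothetical usage of the reworked TFLitePilot; the model path is a placeholder. `get_input_shape()` exposes the `(batch, H, W, D)` shape the interpreter was built with, so a frame can be shaped correctly before calling `inference()`:

```python
import numpy as np
from donkeycar.parts.tflite import TFLitePilot

pilot = TFLitePilot()
pilot.load('models/pilot.tflite')    # the .tflite extension is asserted
shape = pilot.get_input_shape()      # e.g. [1, 120, 160, 3]
img = np.zeros(shape[1:], dtype=np.float32)
steering, throttle = pilot.inference(img, None)
```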
+ """ + contents = dict() + for key, value in record.items(): + if value is None: + continue + elif key not in self.input_types: + continue + else: + input_type = self.input_types[key] + if input_type == 'float': + # Handle np.float() types gracefully + contents[key] = float(value) + elif input_type == 'str': + contents[key] = value + elif input_type == 'int': + contents[key] = int(value) + elif input_type == 'boolean': + contents[key] = bool(value) + elif input_type == 'list' or input_type == 'vector': + contents[key] = list(value) + elif input_type == 'image_array': + # Handle image array + image = Image.fromarray(np.uint8(value)) + name = Tub._image_file_name(self.manifest.current_index, key) + image_path = os.path.join(self.images_base_path, name) + image.save(image_path) + contents[key] = name + + # Private properties + contents['_timestamp_ms'] = int(round(time.time() * 1000)) + contents['_index'] = self.manifest.current_index + + self.manifest.write_record(contents) + + def delete_record(self, record_index): + self.manifest.delete_record(record_index) + + def delete_last_n_records(self, n): + last_index = self.manifest.current_index + first_index = last_index - n + for index in range(first_index, last_index): + if index < 0: + continue + else: + self.manifest.delete_record(index) + + def close(self): + self.manifest.close() + + def __iter__(self): + return ManifestIterator(self.manifest) + + def __len__(self): + return self.manifest.__len__() + + @classmethod + def images(cls): + return 'images' + + @classmethod + def _image_file_name(cls, index, key, extension='.jpg'): + key_prefix = key.replace('/', '_') + name = '_'.join([str(index), key_prefix, extension]) + # Return relative paths to maintain portability + return name + + +class TubWriter(object): + """ + A Donkey part, which can write records to the datastore. + """ + def __init__(self, base_path, inputs=[], types=[], metadata=[], + max_catalog_len=1000): + self.tub = Tub(base_path, inputs, types, metadata, max_catalog_len) + def shutdown_hook(): + self.close() + + # Register hook + atexit.register(shutdown_hook) + + def run(self, *args): + assert len(self.tub.inputs) == len(args) + record = dict(zip(self.tub.inputs, args)) + self.tub.write_record(record) + return self.tub.manifest.current_index + + def __iter__(self): + return self.tub.__iter__() + + def close(self): + self.tub.manifest.close() diff --git a/donkeycar/parts/web_controller/web.py b/donkeycar/parts/web_controller/web.py index 0194f38e5..19fddb54f 100644 --- a/donkeycar/parts/web_controller/web.py +++ b/donkeycar/parts/web_controller/web.py @@ -2,11 +2,8 @@ # -*- coding: utf-8 -*- """ Created on Sat Jun 24 20:10:44 2017 - @author: wroscoe - remotes.py - The client and web server needed to control a car remotely. """ @@ -359,4 +356,3 @@ def shutdown(self): pass - diff --git a/donkeycar/pipeline/__init__.py b/donkeycar/pipeline/__init__.py new file mode 100644 index 000000000..9509025d7 --- /dev/null +++ b/donkeycar/pipeline/__init__.py @@ -0,0 +1 @@ +# The new donkey car training pipeline. \ No newline at end of file diff --git a/donkeycar/pipeline/augmentations.py b/donkeycar/pipeline/augmentations.py new file mode 100644 index 000000000..f0b934037 --- /dev/null +++ b/donkeycar/pipeline/augmentations.py @@ -0,0 +1,100 @@ +import cv2 +import numpy as np +import imgaug.augmenters as iaa +from donkeycar.config import Config + + +class Augmentations(object): + """ + Some ready to use image augumentations. 
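A hypothetical round trip through the v2 datastore defined above; paths and input names are placeholders. `TubWriter.run()` returns the running record index, and iterating a read-only `Tub` yields the underlying record dictionaries:

```python
import numpy as np
from donkeycar.parts.tub_v2 import Tub, TubWriter

writer = TubWriter('./data', inputs=['cam/image_array', 'user/angle'],
                   types=['image_array', 'float'])
for i in range(10):
    writer.run(np.zeros((120, 160, 3), dtype=np.uint8), 0.5)
writer.close()

tub = Tub('./data', read_only=True)
print(len(tub))                  # number of records in the manifest
for record in tub:
    print(record['user/angle'])  # image keys hold the saved file name
```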
+ """ + + @classmethod + def crop(cls, left, right, top, bottom, keep_size=False): + """ + The image augumentation sequence. + Crops based on a region of interest among other things. + left, right, top & bottom are the number of pixels to crop. + """ + augmentation = iaa.Crop(px=(top, right, bottom, left), + keep_size=keep_size) + return augmentation + + @classmethod + def trapezoidal_mask(cls, lower_left, lower_right, upper_left, upper_right, + min_y, max_y): + """ + Uses a binary mask to generate a trapezoidal region of interest. + Especially useful in filtering out uninteresting features from an + input image. + """ + def _transform_images(images, random_state, parents, hooks): + # Transform a batch of images + transformed = [] + mask = None + for image in images: + if mask is None: + mask = np.zeros(image.shape, dtype=np.int32) + # # # # # # # # # # # # # + # ul ur min_y + # + # + # + # ll lr max_y + points = [ + [upper_left, min_y], + [upper_right, min_y], + [lower_right, max_y], + [lower_left, max_y] + ] + cv2.fillConvexPoly(mask, np.array(points, dtype=np.int32), + [255, 255, 255]) + mask = np.asarray(mask, dtype='bool') + + masked = np.multiply(image, mask) + transformed.append(masked) + + return transformed + + def _transform_keypoints(keypoints_on_images, random_state, parents, hooks): + # No-op + return keypoints_on_images + + augmentation = iaa.Lambda(func_images=_transform_images, + func_keypoints=_transform_keypoints) + return augmentation + + +class ImageAugmentation: + def __init__(self, cfg): + aug_list = getattr(cfg, 'AUGMENTATIONS', []) + augmentations = [ImageAugmentation.create(a, cfg) for a in aug_list] + self.augmentations = iaa.Sequential(augmentations) + + @classmethod + def create(cls, aug_type: str, config: Config) -> iaa.meta.Augmenter: + if aug_type == 'CROP': + return Augmentations.crop(left=config.ROI_CROP_TOP, + right=config.ROI_CROP_TOP, + bottom=config.ROI_CROP_BOTTOM, + top=config.ROI_CROP_TOP) + elif aug_type == 'TRAPEZE': + return Augmentations.trapezoidal_mask( + lower_left=config.ROI_TRAPEZE_LL, + lower_right=config.ROI_TRAPEZE_LR, + upper_left=config.ROI_TRAPEZE_UL, + upper_right=config.ROI_TRAPEZE_UR, + min_y=config.ROI_TRAPEZE_MIN_Y, + max_y=config.ROI_TRAPEZE_MAX_Y) + + elif aug_type == 'MULTIPLY': + interval = getattr(config, 'AUG_MULTIPLY_RANGE', (0.5, 1.5)) + return iaa.Multiply(interval) + + elif aug_type == 'BLUR': + interval = getattr(config, 'AUG_BLUR_RANGE', (0.0, 3.0)) + return iaa.GaussianBlur(sigma=interval) + + def augment(self, img_arr): + aug_img_arr = self.augmentations.augment_image(img_arr) + return aug_img_arr diff --git a/donkeycar/pipeline/sequence.py b/donkeycar/pipeline/sequence.py new file mode 100644 index 000000000..7d0ef1863 --- /dev/null +++ b/donkeycar/pipeline/sequence.py @@ -0,0 +1,155 @@ + +from typing import (Any, Callable, Generic, Iterable, Iterator, List, Sized, Tuple, TypeVar) + +from donkeycar.pipeline.types import TubRecord + +# Note: Be careful when re-using `TypeVar`s. +# If you are not-consistent mypy gets easily confused. 
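A sketch of driving ImageAugmentation purely from config, using a minimal stand-in config object that carries the `AUGMENTATIONS` list and the `AUG_*` ranges it reads via `getattr()`:

```python
import numpy as np
from donkeycar.pipeline.augmentations import ImageAugmentation

class Cfg:
    AUGMENTATIONS = ['MULTIPLY', 'BLUR']
    AUG_MULTIPLY_RANGE = (0.5, 1.5)
    AUG_BLUR_RANGE = (0.0, 3.0)

aug = ImageAugmentation(Cfg())
img = np.zeros((120, 160, 3), dtype=np.uint8)
out = aug.augment(img)   # applies the whole iaa.Sequential in one call
```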
+ +R = TypeVar('R', covariant=True) +X = TypeVar('X', covariant=True) +Y = TypeVar('Y', covariant=True) +XOut = TypeVar('XOut', covariant=True) +YOut = TypeVar('YOut', covariant=True) + + +class SizedIterator(Generic[X], Iterator[X], Sized): + def __init__(self) -> None: + # This is a protocol type without explicitly using a `Protocol` + # Using `Protocol` requires Python 3.7 + pass + + +class TubSeqIterator(SizedIterator[TubRecord]): + def __init__(self, records: List[TubRecord]) -> None: + self.records = records or list() + self.current_index = 0 + + def __len__(self): + return len(self.records) + + def __iter__(self) -> SizedIterator[TubRecord]: + return TubSeqIterator(self.records) + + def __next__(self): + if self.current_index >= len(self.records): + raise StopIteration('No more records') + + record = self.records[self.current_index] + self.current_index += 1 + return record + + next = __next__ + + +class TfmIterator(Generic[R, XOut, YOut], SizedIterator[Tuple[XOut, YOut]]): + def __init__(self, + iterable: Iterable[R], + x_transform: Callable[[R], XOut], + y_transform: Callable[[R], YOut]) -> None: + + self.iterable = iterable + self.x_transform = x_transform + self.y_transform = y_transform + self.iterator = BaseTfmIterator_( + iterable=self.iterable, x_transform=self.x_transform, y_transform=self.y_transform) + + def __len__(self): + return len(self.iterator) + + def __iter__(self) -> SizedIterator[Tuple[XOut, YOut]]: + return BaseTfmIterator_( + iterable=self.iterable, x_transform=self.x_transform, y_transform=self.y_transform) + + def __next__(self): + return next(self.iterator) + + +class TfmTupleIterator(Generic[X, Y, XOut, YOut], SizedIterator[Tuple[XOut, YOut]]): + def __init__(self, + iterable: Iterable[Tuple[X, Y]], + x_transform: Callable[[X], XOut], + y_transform: Callable[[Y], YOut]) -> None: + + self.iterable = iterable + self.x_transform = x_transform + self.y_transform = y_transform + self.iterator = BaseTfmIterator_( + iterable=self.iterable, x_transform=self.x_transform, y_transform=self.y_transform) + + def __len__(self): + return len(self.iterator) + + def __iter__(self) -> SizedIterator[Tuple[XOut, YOut]]: + return BaseTfmIterator_( + iterable=self.iterable, x_transform=self.x_transform, y_transform=self.y_transform) + + def __next__(self): + return next(self.iterator) + + +class BaseTfmIterator_(Generic[XOut, YOut], SizedIterator[Tuple[XOut, YOut]]): + ''' + A basic transforming iterator. + Do no use this class directly. 
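The iterator classes above, together with `TubSequence.build_pipeline()` defined just below, give a lazily evaluated, re-iterable x/y pipeline. A sketch with stand-in transforms:

```python
from donkeycar.pipeline.sequence import TubSequence

records = []   # fill with TubRecord instances, e.g. from a TubDataset
pipeline = TubSequence(records).build_pipeline(
    x_transform=lambda record: record.image(cached=True),
    y_transform=lambda record: (record.underlying['user/angle'],
                                record.underlying['user/throttle']))
for x, y in pipeline:   # each __iter__ starts a fresh pass over records
    pass
```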
+ ''' + + def __init__(self, + # Losing a bit of type safety here for a common implementation + iterable: Iterable[Any], + x_transform: Callable[[R], XOut], + y_transform: Callable[[R], YOut]) -> None: + + self.iterable = iterable + self.x_transform = x_transform + self.y_transform = y_transform + self.iterator = iter(self.iterable) + + def __len__(self): + return len(self.iterator) + + def __iter__(self) -> SizedIterator[Tuple[XOut, YOut]]: + return BaseTfmIterator_(self.iterable, self.x_transform, self.y_transform) + + def __next__(self): + record = next(self.iterator) + if (isinstance(record, tuple) and len(record) == 2): + x, y = record + return self.x_transform(x), self.y_transform(y) + else: + return self.x_transform(record), self.y_transform(record) + + +class TubSequence(Iterable[TubRecord]): + def __init__(self, records: List[TubRecord]) -> None: + self.records = records + + def __iter__(self) -> SizedIterator[TubRecord]: + return TubSeqIterator(self.records) + + def __len__(self): + return len(self.records) + + def build_pipeline(self, + x_transform: Callable[[TubRecord], X], + y_transform: Callable[[TubRecord], Y]) \ + -> TfmIterator: + return TfmIterator(self, x_transform=x_transform, y_transform=y_transform) + + @classmethod + def map_pipeline( + cls, + x_transform: Callable[[X], XOut], + y_transform: Callable[[Y], YOut], + pipeline: SizedIterator[Tuple[X, Y]]) -> SizedIterator[Tuple[XOut, YOut]]: + return TfmTupleIterator(pipeline, x_transform=x_transform, y_transform=y_transform) + + @classmethod + def map_pipeline_factory( + cls, + x_transform: Callable[[X], XOut], + y_transform: Callable[[Y], YOut], + factory: Callable[[], SizedIterator[Tuple[X, Y]]]) -> SizedIterator[Tuple[XOut, YOut]]: + + pipeline = factory() + return cls.map_pipeline(pipeline=pipeline, x_transform=x_transform, y_transform=y_transform) diff --git a/donkeycar/pipeline/training.py b/donkeycar/pipeline/training.py new file mode 100644 index 000000000..8b3a27d87 --- /dev/null +++ b/donkeycar/pipeline/training.py @@ -0,0 +1,132 @@ +import math +import os +from typing import List, Dict, Union + +from donkeycar.config import Config +from donkeycar.parts.keras import KerasPilot +from donkeycar.parts.tflite import keras_model_to_tflite +from donkeycar.pipeline.sequence import TubRecord, TubSequence, TfmIterator +from donkeycar.pipeline.types import TubDataset +from donkeycar.pipeline.augmentations import ImageAugmentation +from donkeycar.utils import get_model_by_type, normalize_image +import tensorflow as tf +import numpy as np + + +class BatchSequence(object): + """ + The idea is to have a shallow sequence with types that can hydrate + themselves to np.ndarray initially and later into the types required by + tf.data (i.e. dictionaries or np.ndarrays). + """ + def __init__(self, + model: KerasPilot, + config: Config, + records: List[TubRecord], + is_train: bool) -> None: + self.model = model + self.config = config + self.sequence = TubSequence(records) + self.batch_size = self.config.BATCH_SIZE + self.is_train = is_train + self.augmentation = ImageAugmentation(config) + self.pipeline = self._create_pipeline() + + def __len__(self) -> int: + return math.ceil(len(self.pipeline) / self.batch_size) + + def _create_pipeline(self) -> TfmIterator: + """ This can be overridden if more complicated pipelines are + required """ + # 1. 
Initialise TubRecord -> x, y transformations + def get_x(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]: + """ Extracting x from record for training""" + # this transforms the record into x for training the model to x,y + x0 = self.model.x_transform(record) + # for multiple input tensors the return value here is a tuple + # where the image is in first slot otherwise x0 is the image + x1 = x0[0] if isinstance(x0, tuple) else x0 + # apply augmentation to training data only + x2 = self.augmentation.augment(x1) if self.is_train else x1 + # normalise image, assume other input data comes already normalised + x3 = normalize_image(x2) + # fill normalised image back into tuple if necessary + x4 = (x3, ) + x0[1:] if isinstance(x0, tuple) else x3 + # convert tuple to dictionary which is understood by tf.data + x5 = self.model.x_translate(x4) + return x5 + + def get_y(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]: + """ Extracting y from record for training """ + y0 = self.model.y_transform(record) + y1 = self.model.y_translate(y0) + return y1 + + # 2. Build pipeline using the transformations + pipeline = self.sequence.build_pipeline(x_transform=get_x, + y_transform=get_y) + return pipeline + + def create_tf_data(self) -> tf.data.Dataset: + """ Assembles the tf data pipeline """ + dataset = tf.data.Dataset.from_generator( + generator=lambda: self.pipeline, + output_types=self.model.output_types(), + output_shapes=self.model.output_shapes()) + return dataset.repeat().batch(self.batch_size) + + +def train(cfg: Config, tub_paths: str, model: str, model_type: str) \ + -> tf.keras.callbacks.History: + """ + Train the model + """ + model_name, model_ext = os.path.splitext(model) + is_tflite = model_ext == '.tflite' + if is_tflite: + model = f'{model_name}.h5' + + if not model_type: + model_type = cfg.DEFAULT_MODEL_TYPE + + tubs = tub_paths.split(',') + all_tub_paths = [os.path.expanduser(tub) for tub in tubs] + output_path = os.path.expanduser(model) + train_type = 'linear' if 'linear' in model_type else model_type + + kl = get_model_by_type(train_type, cfg) + if cfg.PRINT_MODEL_SUMMARY: + print(kl.model.summary()) + + dataset = TubDataset(cfg, all_tub_paths) + training_records, validation_records = dataset.train_test_split() + print('Records # Training %s' % len(training_records)) + print('Records # Validation %s' % len(validation_records)) + + training_pipe = BatchSequence(kl, cfg, training_records, is_train=True) + validation_pipe = BatchSequence(kl, cfg, validation_records, is_train=False) + + dataset_train = training_pipe.create_tf_data() + dataset_validate = validation_pipe.create_tf_data() + train_size = len(training_pipe) + val_size = len(validation_pipe) + + assert val_size > 0, "Not enough validation data, decrease the batch " \ + "size or add more data." 
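A sketch of the tf.data assembly that `create_tf_data()` above performs, with a stand-in generator in place of the record pipeline; the `img_in`/`n_outputs*` names are assumptions following the output naming used by the Keras models in this PR:

```python
import tensorflow as tf

def pipeline():
    for _ in range(8):
        yield ({'img_in': tf.zeros((120, 160, 3))},
               {'n_outputs0': 0.0, 'n_outputs1': 0.0})

dataset = tf.data.Dataset.from_generator(
    generator=pipeline,
    output_types=({'img_in': tf.float32},
                  {'n_outputs0': tf.float32, 'n_outputs1': tf.float32}))
dataset = dataset.repeat().batch(16)
```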
+ + history = kl.train(model_path=output_path, + train_data=dataset_train, + train_steps=train_size, + batch_size=cfg.BATCH_SIZE, + validation_data=dataset_validate, + validation_steps=val_size, + epochs=cfg.MAX_EPOCHS, + verbose=cfg.VERBOSE_TRAIN, + min_delta=cfg.MIN_DELTA, + patience=cfg.EARLY_STOP_PATIENCE) + + if is_tflite: + tf_lite_model_path = f'{os.path.splitext(output_path)[0]}.tflite' + keras_model_to_tflite(output_path, tf_lite_model_path) + + return history diff --git a/donkeycar/pipeline/types.py b/donkeycar/pipeline/types.py new file mode 100644 index 000000000..a9dca3cea --- /dev/null +++ b/donkeycar/pipeline/types.py @@ -0,0 +1,91 @@ +import os +from typing import Any, List, Optional, TypeVar, Tuple + +import numpy as np +from donkeycar.config import Config +from donkeycar.parts.tub_v2 import Tub +from donkeycar.utils import load_image, load_pil_image, normalize_image, train_test_split +from typing_extensions import TypedDict + +X = TypeVar('X', covariant=True) + +TubRecordDict = TypedDict( + 'TubRecordDict', + { + 'cam/image_array': str, + 'user/angle': float, + 'user/throttle': float, + 'user/mode': str, + 'imu/acl_x': Optional[float], + 'imu/acl_y': Optional[float], + 'imu/acl_z': Optional[float], + 'imu/gyr_x': Optional[float], + 'imu/gyr_y': Optional[float], + 'imu/gyr_z': Optional[float], + } +) + + +class TubRecord(object): + def __init__(self, config: Config, base_path: str, + underlying: TubRecordDict) -> None: + self.config = config + self.base_path = base_path + self.underlying = underlying + self._image: Optional[Any] = None + + def image(self, cached=True, as_nparray=True) -> np.ndarray: + """Loads the image for you + + Args: + cached (bool, optional): whether to cache the image. Defaults to True. + as_nparray (bool, optional): whether to convert the image to a np array of uint8. + Defaults to True. If false, returns result of Image.open() + + Returns: + np.ndarray: [description] + """ + if self._image is None: + image_path = self.underlying['cam/image_array'] + full_path = os.path.join(self.base_path, 'images', image_path) + + if as_nparray: + _image = load_image(full_path, cfg=self.config) + else: + # If you just want the raw Image + _image = load_pil_image(full_path, cfg=self.config) + + if cached: + self._image = _image + else: + _image = self._image + return _image + def __repr__(self) -> str: + return repr(self.underlying) + + +class TubDataset(object): + ''' + Loads the dataset, and creates a train/test split. + ''' + + def __init__(self, config: Config, tub_paths: List[str], + shuffle: bool = True) -> None: + self.config = config + self.tub_paths = tub_paths + self.shuffle = shuffle + self.tubs: List[Tub] = [Tub(tub_path, read_only=True) + for tub_path in self.tub_paths] + self.records: List[TubRecord] = list() + + def train_test_split(self) -> Tuple[List[TubRecord], List[TubRecord]]: + print(f'Loading tubs from paths {self.tub_paths}') + self.records.clear() + for tub in self.tubs: + for underlying in tub: + record = TubRecord(self.config, tub.base_path, + underlying=underlying) + self.records.append(record) + + return train_test_split(self.records, shuffle=self.shuffle, + test_size=(1. 
- self.config.TRAIN_TEST_SPLIT))
diff --git a/donkeycar/templates/basic.py b/donkeycar/templates/basic.py
new file mode 100755
index 000000000..0ae575331
--- /dev/null
+++ b/donkeycar/templates/basic.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python3
+"""
+Script to drive a donkey car.
+
+Usage:
+    manage.py drive [--model=<model>] [--type=(linear|categorical|tflite_linear)]
+
+Options:
+    -h --help          Show this screen.
+"""
+
+from docopt import docopt
+
+import donkeycar as dk
+from donkeycar.parts.tub_v2 import TubWriter
+from donkeycar.parts.datastore import TubHandler
+from donkeycar.parts.controller import LocalWebController
+from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
+
+
+class DriveMode:
+    """ Helper class to dispatch between AI and user driving"""
+
+    def __init__(self, cfg):
+        self.cfg = cfg
+
+    def run(self, mode, user_angle, user_throttle, pilot_angle, pilot_throttle):
+        if mode == 'user':
+            return user_angle, user_throttle
+        elif mode == 'local_angle':
+            return pilot_angle if pilot_angle else 0.0, user_throttle
+        else:
+            return pilot_angle if pilot_angle else 0.0, \
+                   pilot_throttle * self.cfg.AI_THROTTLE_MULT if \
+                   pilot_throttle else 0.0
+
+
+class PilotCondition:
+    """ Helper class to determine who is in charge of driving"""
+    def run(self, mode):
+        return mode != 'user'
+
+
+def drive(cfg, model_path=None, model_type=None):
+    """
+    Construct a minimal robotic vehicle from many parts. Here, we use a
+    single camera, web or joystick controller, autopilot and tubwriter.
+
+    Each part runs as a job in the Vehicle loop, calling either its run or
+    run_threaded method depending on the constructor flag `threaded`. All
+    parts are updated one after another at the framerate given in
+    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely
+    manner. Parts may have named outputs and inputs. The framework handles
+    passing named outputs to parts requesting the same named input.
+ """ + + car = dk.vehicle.Vehicle() + # add camera + inputs = [] + if cfg.DONKEY_GYM: + from donkeycar.parts.dgym import DonkeyGymEnv + cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME, conf=cfg.GYM_CONF, delay=cfg.SIM_ARTIFICIAL_LATENCY) + inputs = ['angle', 'throttle', 'brake'] + elif cfg.CAMERA_TYPE == "PICAM": + from donkeycar.parts.camera import PiCamera + cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, + image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, + vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP) + elif cfg.CAMERA_TYPE == "WEBCAM": + from donkeycar.parts.camera import Webcam + cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, + image_d=cfg.IMAGE_DEPTH) + elif cfg.CAMERA_TYPE == "CVCAM": + from donkeycar.parts.cv import CvCam + cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, + image_d=cfg.IMAGE_DEPTH) + elif cfg.CAMERA_TYPE == "CSIC": + from donkeycar.parts.camera import CSICamera + cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, + image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, + gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM) + elif cfg.CAMERA_TYPE == "V4L": + from donkeycar.parts.camera import V4LCamera + cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, + image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE) + elif cfg.CAMERA_TYPE == "MOCK": + from donkeycar.parts.camera import MockCamera + cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, + image_d=cfg.IMAGE_DEPTH) + elif cfg.CAMERA_TYPE == "IMAGE_LIST": + from donkeycar.parts.camera import ImageListCamera + cam = ImageListCamera(path_mask=cfg.PATH_MASK) + else: + raise (Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE)) + + car.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=True) + + # add controller + if cfg.USE_JOYSTICK_AS_DEFAULT: + from donkeycar.parts.controller import get_js_controller + ctr = get_js_controller(cfg) + if cfg.USE_NETWORKED_JS: + from donkeycar.parts.controller import JoyStickSub + netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP) + car.add(netwkJs, threaded=True) + ctr.js = netwkJs + else: + ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, + mode=cfg.WEB_INIT_MODE) + + car.add(ctr, + inputs=['cam/image_array'], + outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'], + threaded=True) + + # pilot condition to determine if user or ai are driving + car.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot']) + + # adding the auto-pilot + if model_type is None: + model_type = cfg.DEFAULT_MODEL_TYPE + if model_path: + kl = dk.utils.get_model_by_type(model_type, cfg) + kl.load(model_path=model_path) + inputs = ['cam/image_array'] + outputs = ['pilot/angle', 'pilot/throttle'] + car.add(kl, inputs=inputs, outputs=outputs, run_condition='run_pilot') + + # Choose what inputs should change the car. 
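The DriveMode dispatch above reduces to three cases, which double as a quick sanity check (assuming the donkeycar package is importable):

```python
from donkeycar.templates.basic import DriveMode

class Cfg:
    AI_THROTTLE_MULT = 0.8

dm = DriveMode(Cfg())
assert dm.run('user', 0.1, 0.3, 0.9, 1.0) == (0.1, 0.3)         # human drives
assert dm.run('local_angle', 0.1, 0.3, 0.9, 1.0) == (0.9, 0.3)  # AI steers only
assert dm.run('local', 0.1, 0.3, 0.9, 1.0) == (0.9, 0.8)        # AI drives, throttle scaled
```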
+ car.add(DriveMode(cfg=cfg), + inputs=['user/mode', 'user/angle', 'user/throttle', + 'pilot/angle', 'pilot/throttle'], + outputs=['angle', 'throttle']) + + # Drive train setup + if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK": + pass + else: + steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, + busnum=cfg.PCA9685_I2C_BUSNUM) + steering = PWMSteering(controller=steering_controller, + left_pulse=cfg.STEERING_LEFT_PWM, + right_pulse=cfg.STEERING_RIGHT_PWM) + + throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, + busnum=cfg.PCA9685_I2C_BUSNUM) + throttle = PWMThrottle(controller=throttle_controller, + max_pulse=cfg.THROTTLE_FORWARD_PWM, + zero_pulse=cfg.THROTTLE_STOPPED_PWM, + min_pulse=cfg.THROTTLE_REVERSE_PWM) + + car.add(steering, inputs=['angle']) + car.add(throttle, inputs=['throttle']) + + # add tub to save data + inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode'] + types = ['image_array', 'float', 'float', 'str'] + # do we want to store new records into own dir or append to existing + tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \ + cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH + tub_writer = TubWriter(base_path=tub_path, inputs=inputs, types=types) + car.add(tub_writer, inputs=inputs, outputs=["tub/num_records"], + run_condition='recording') + # start the car + car.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS) + + +if __name__ == '__main__': + args = docopt(__doc__) + cfg = dk.load_config() + drive(cfg, model_path=args['--model'], model_type=args['--type']) diff --git a/donkeycar/templates/basic_web.py b/donkeycar/templates/basic_web.py deleted file mode 100755 index dbda4fe0e..000000000 --- a/donkeycar/templates/basic_web.py +++ /dev/null @@ -1,229 +0,0 @@ -#!/usr/bin/env python3 -""" -Scripts to drive a donkey 2 car - -Usage: - manage.py (drive) [--model=] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer|latent)] - manage.py (train) [--tub=] [--file= ...] (--model=) [--transfer=] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer)] [--continuous] [--aug] - - -Options: - -h --help Show this screen. - -f --file= A text file containing paths to tub files, one per line. Option may be used more than once. -""" -import os -import time - -from docopt import docopt -import numpy as np - -import donkeycar as dk -from donkeycar.parts.datastore import TubHandler -from donkeycar.parts.controller import LocalWebController -from donkeycar.parts.camera import PiCamera -from donkeycar.utils import * - - -def drive(cfg, model_path=None, model_type=None): - ''' - Construct a working robotic vehicle from many parts. - Each part runs as a job in the Vehicle loop, calling either - it's run or run_threaded method depending on the constructor flag `threaded`. - All parts are updated one after another at the framerate given in - cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner. - Parts may have named outputs and inputs. The framework handles passing named outputs - to parts requesting the same named input. 
- ''' - - if model_type is None: - model_type = cfg.DEFAULT_MODEL_TYPE - - #Initialize car - V = dk.vehicle.Vehicle() - - cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH) - V.add(cam, outputs=['cam/image_array'], threaded=True) - - - V.add(LocalWebController(port=cfg.WEB_CONTROL_PORT), - inputs=['cam/image_array'], - outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'], - threaded=True) - - - #See if we should even run the pilot module. - #This is only needed because the part run_condition only accepts boolean - class PilotCondition: - def run(self, mode): - if mode == 'user': - return False - else: - return True - - V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot']) - - #Sombrero - if cfg.HAVE_SOMBRERO: - from donkeycar.parts.sombrero import Sombrero - s = Sombrero() - - class ImgPrecondition(): - ''' - precondition camera image for inference - ''' - def __init__(self, cfg): - self.cfg = cfg - - def run(self, img_arr): - return normalize_and_crop(img_arr, self.cfg) - - V.add(ImgPrecondition(cfg), - inputs=['cam/image_array'], - outputs=['cam/normalized/cropped'], - run_condition='run_pilot') - - inputs=['cam/normalized/cropped'] - - def load_model(kl, model_path): - start = time.time() - try: - print('loading model', model_path) - kl.load(model_path, compile=False) - print('finished loading in %s sec.' % (str(time.time() - start)) ) - except Exception as e: - print(e) - print('ERR>> problems loading model', model_path) - - def load_weights(kl, weights_path): - start = time.time() - try: - print('loading model weights', weights_path) - kl.model.load_weights(weights_path) - print('finished loading in %s sec.' % (str(time.time() - start)) ) - except Exception as e: - print(e) - print('ERR>> problems loading weights', weights_path) - - def load_model_json(kl, json_fnm): - start = time.time() - print('loading model json', json_fnm) - from tensorflow.python import keras - try: - with open(json_fnm, 'r') as handle: - contents = handle.read() - kl.model = keras.models.model_from_json(contents) - print('finished loading json in %s sec.' % (str(time.time() - start)) ) - except Exception as e: - print(e) - print("ERR>> problems loading model json", json_fnm) - - if model_path: - #When we have a model, first create an appropriate Keras part - kl = dk.utils.get_model_by_type(model_type, cfg) - - if '.h5' in model_path: - #when we have a .h5 extension - #load everything from the model file - load_model(kl, model_path) - - elif '.json' in model_path: - #when we have a .json extension - #load the model from there and look for a matching - #.wts file with just weights - load_model_json(kl, model_path) - weights_path = model_path.replace('.json', '.weights') - load_weights(kl, weights_path) - - else: - print("ERR>> Unknown extension type on model file!!") - return - - outputs=['pilot/angle', 'pilot/throttle'] - - V.add(kl, inputs=inputs, - outputs=outputs, - run_condition='run_pilot') - - #Choose what inputs should change the car. 
- class DriveMode: - def run(self, mode, - user_angle, user_throttle, - pilot_angle, pilot_throttle): - if mode == 'user': - return user_angle, user_throttle - - elif mode == 'local_angle': - return pilot_angle if pilot_angle else 0.0, user_throttle - - else: - return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0 - - V.add(DriveMode(), - inputs=['user/mode', 'user/angle', 'user/throttle', - 'pilot/angle', 'pilot/throttle'], - outputs=['angle', 'throttle']) - - #Drive train setup - - from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle - - steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM) - steering = PWMSteering(controller=steering_controller, - left_pulse=cfg.STEERING_LEFT_PWM, - right_pulse=cfg.STEERING_RIGHT_PWM) - - throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM) - throttle = PWMThrottle(controller=throttle_controller, - max_pulse=cfg.THROTTLE_FORWARD_PWM, - zero_pulse=cfg.THROTTLE_STOPPED_PWM, - min_pulse=cfg.THROTTLE_REVERSE_PWM) - - V.add(steering, inputs=['angle']) - V.add(throttle, inputs=['throttle']) - - #add tub to save data - - inputs=['cam/image_array', - 'user/angle', 'user/throttle', - 'user/mode'] - - types=['image_array', - 'float', 'float', - 'str'] - - th = TubHandler(path=cfg.DATA_PATH) - tub = th.new_tub_writer(inputs=inputs, types=types) - V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording') - - print(f"You can now go to :{cfg.WEB_CONTROL_PORT} to drive your car.") - - #run the vehicle for 20 seconds - V.start(rate_hz=cfg.DRIVE_LOOP_HZ, - max_loop_count=cfg.MAX_LOOPS) - - -if __name__ == '__main__': - args = docopt(__doc__) - cfg = dk.load_config() - - if args['drive']: - model_type = args['--type'] - drive(cfg, model_path = args['--model'], model_type=model_type) - - if args['train']: - from train import multi_train, preprocessFileList - - tub = args['--tub'] - model = args['--model'] - transfer = args['--transfer'] - model_type = args['--type'] - continuous = args['--continuous'] - aug = args['--aug'] - - dirs = preprocessFileList( args['--file'] ) - if tub is not None: - tub_paths = [os.path.expanduser(n) for n in tub.split(',')] - dirs.extend( tub_paths ) - - multi_train(cfg, dirs, model, transfer, model_type, continuous, aug) - diff --git a/donkeycar/templates/cfg_basic.py b/donkeycar/templates/cfg_basic.py new file mode 100755 index 000000000..791ab4df1 --- /dev/null +++ b/donkeycar/templates/cfg_basic.py @@ -0,0 +1,124 @@ +""" +CAR CONFIG + +This file is read by your car application's manage.py script to change the car +performance. + +EXMAPLE +----------- +import dk +cfg = dk.load_config(config_path='~/mycar/config.py') +print(cfg.CAMERA_RESOLUTION) + +""" + + +import os + +#PATHS +CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__)) +DATA_PATH = os.path.join(CAR_PATH, 'data') +MODELS_PATH = os.path.join(CAR_PATH, 'models') + +#VEHICLE +DRIVE_LOOP_HZ = 20 +MAX_LOOPS = 100000 + +#CAMERA +CAMERA_TYPE = "PICAM" # (PICAM|WEBCAM|CVCAM|CSIC|V4L|D435|MOCK|IMAGE_LIST) +IMAGE_W = 160 +IMAGE_H = 120 +IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono +CAMERA_FRAMERATE = DRIVE_LOOP_HZ +CAMERA_VFLIP = False +CAMERA_HFLIP = False + +#9865, over rides only if needed, ie. TX2.. 
+PCA9685_I2C_ADDR = 0x40
+PCA9685_I2C_BUSNUM = None
+
+#STEERING
+STEERING_CHANNEL = 1
+STEERING_LEFT_PWM = 460
+STEERING_RIGHT_PWM = 290
+
+#THROTTLE
+THROTTLE_CHANNEL = 0
+THROTTLE_FORWARD_PWM = 500
+THROTTLE_STOPPED_PWM = 370
+THROTTLE_REVERSE_PWM = 220
+
+#TRAINING
+DEFAULT_MODEL_TYPE = 'linear' #(linear|categorical|rnn|imu|behavior|3d|localizer|latent)
+BATCH_SIZE = 128
+TRAIN_TEST_SPLIT = 0.8
+MAX_EPOCHS = 100
+SHOW_PLOT = True
+VERBOSE_TRAIN = True
+USE_EARLY_STOP = True
+EARLY_STOP_PATIENCE = 5
+MIN_DELTA = .0005
+PRINT_MODEL_SUMMARY = True #print layers and weights to stdout
+OPTIMIZER = None #adam, sgd, rmsprop, etc.. None accepts default
+LEARNING_RATE = 0.001 #only used when OPTIMIZER specified
+LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified
+CACHE_IMAGES = True #keep images in memory. will speed up successive epochs, but will crash if there is not enough memory.
+PRUNE_CNN = False
+PRUNE_PERCENT_TARGET = 75 # The desired percentage of pruning.
+PRUNE_PERCENT_PER_ITERATION = 20 # Percentage of pruning that is performed per iteration.
+PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 # The max amount of validation loss that is permitted during pruning.
+PRUNE_EVAL_PERCENT_OF_DATASET = .05 # percent of dataset used to perform evaluation of model.
+
+#model transfer options
+FREEZE_LAYERS = False
+NUM_LAST_LAYERS_TO_TRAIN = 7
+
+#For the categorical model, this limits the upper bound of the learned throttle.
+#It's very IMPORTANT that this value is matched between the training PC config.py and the robot's config.py,
+#and ideally wouldn't change once set.
+MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.5
+
+#RNN or 3D
+SEQUENCE_LENGTH = 3
+
+#SOMBRERO
+HAVE_SOMBRERO = False
+
+#RECORD OPTIONS
+RECORD_DURING_AI = False
+AUTO_CREATE_NEW_TUB = False #create a new tub (tub_YY_MM_DD) directory when recording, or append records to the data directory directly
+
+#JOYSTICK
+USE_JOYSTICK_AS_DEFAULT = False #when True, manage.py will not require the --js option to use the joystick
+JOYSTICK_MAX_THROTTLE = 0.5 #this scalar is multiplied with the -1 to 1 throttle value to limit the maximum throttle. This can help if you drop the controller or just don't need the full speed available.
+JOYSTICK_STEERING_SCALE = 1.0 #some people want steering that is less sensitive. This scalar is multiplied with the steering -1 to 1. It can be negative to reverse dir.
+AUTO_RECORD_ON_THROTTLE = True #if true, we will record whenever throttle is not zero. if false, you must manually toggle recording with some other trigger. Usually circle button on joystick.
+CONTROLLER_TYPE='ps3' #(ps3|ps4|xbox|nimbus|wiiu|F710|rc3|MM1|custom) custom will run the my_joystick.py controller written by the `donkey createjs` command
+USE_NETWORKED_JS = False #should we listen for remote joystick control over the network?
+NETWORK_JS_SERVER_IP = "192.168.0.1" #when listening for network joystick control, which ip is serving this information
+JOYSTICK_DEADZONE = 0.0 # when non zero, this is the smallest throttle before recording is triggered.
+JOYSTICK_THROTTLE_DIR = -1.0 # use -1.0 to flip forward/backward, use 1.0 to use joystick's natural forward/backward
+USE_FPV = False # send camera data to FPV webserver
+JOYSTICK_DEVICE_FILE = "/dev/input/js0" # this is the unix device file used to access the joystick.
+
+#WEB CONTROL
+WEB_CONTROL_PORT = int(os.getenv("WEB_CONTROL_PORT", 8887)) # which port to listen on when making a web controller
+WEB_INIT_MODE = "user" # which control mode to start in. one of user|local_angle|local. Setting local will start in ai mode.
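To make the steering and throttle PWM values above concrete: a steering value in [-1, 1] is linearly interpolated onto the configured pulse range. A minimal sketch of that mapping (illustrative only; the real logic lives in donkeycar.parts.actuator.PWMSteering):

```python
def angle_to_pulse(angle, left_pulse=460, right_pulse=290):
    # Clamp to the valid steering range, then interpolate linearly:
    # -1 -> STEERING_LEFT_PWM, +1 -> STEERING_RIGHT_PWM, 0 -> midpoint.
    angle = max(-1.0, min(1.0, angle))
    return int(left_pulse + (angle + 1.0) * (right_pulse - left_pulse) / 2.0)
```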
+
+
+#DonkeyGym
+#Only on Ubuntu linux, you can use the simulator as a virtual donkey and
+#issue the same python manage.py drive command as usual, but have it control a virtual car.
+#This enables that, and sets the path to the simulator and the environment.
+#You will want to download the simulator binary from: https://github.com/tawnkramer/donkey_gym/releases/download/v18.9/DonkeySimLinux.zip
+#then extract that and modify DONKEY_SIM_PATH.
+DONKEY_GYM = False
+DONKEY_SIM_PATH = "path to sim" #"/home/tkramer/projects/sdsandbox/sdsim/build/DonkeySimLinux/donkey_sim.x86_64" when racing on virtual-race-league use "remote", or use "remote" when you want to start the sim manually first.
+DONKEY_GYM_ENV_NAME = "donkey-mountain-track-v0" # ("donkey-generated-track-v0"|"donkey-generated-roads-v0"|"donkey-warehouse-v0"|"donkey-avc-sparkfun-v0")
+GYM_CONF = { "body_style" : "donkey", "body_rgb" : (128, 128, 128), "car_name" : "car", "font_size" : 100} # body style(donkey|bare|car01) body rgb 0-255
+GYM_CONF["racer_name"] = "Your Name"
+GYM_CONF["country"] = "Place"
+GYM_CONF["bio"] = "I race robots."
+
+SIM_HOST = "127.0.0.1" # when racing on virtual-race-league use host "trainmydonkey.com"
+SIM_ARTIFICIAL_LATENCY = 0 # this is the millisecond latency in controls. Can be useful in emulating the delay when using a remote server. Values of 100 to 400 are probably reasonable.
diff --git a/donkeycar/templates/cfg_basic_web.py b/donkeycar/templates/cfg_basic_web.py
deleted file mode 100755
index f0816eebc..000000000
--- a/donkeycar/templates/cfg_basic_web.py
+++ /dev/null
@@ -1,89 +0,0 @@
-"""
-CAR CONFIG
-
-This file is read by your car application's manage.py script to change the car
-performance.
-
-EXMAPLE
------------
-import dk
-cfg = dk.load_config(config_path='~/mycar/config.py')
-print(cfg.CAMERA_RESOLUTION)
-
-"""
-
-
-import os
-
-#PATHS
-CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
-DATA_PATH = os.path.join(CAR_PATH, 'data')
-MODELS_PATH = os.path.join(CAR_PATH, 'models')
-
-#VEHICLE
-DRIVE_LOOP_HZ = 20
-MAX_LOOPS = 100000
-
-#CAMERA
-IMAGE_W = 160
-IMAGE_H = 120
-IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono
-
-#9865, over rides only if needed, ie. TX2..
-PCA9685_I2C_ADDR = 0x40
-PCA9685_I2C_BUSNUM = None
-
-#STEERING
-STEERING_CHANNEL = 1
-STEERING_LEFT_PWM = 460
-STEERING_RIGHT_PWM = 290
-
-#THROTTLE
-THROTTLE_CHANNEL = 0
-THROTTLE_FORWARD_PWM = 500
-THROTTLE_STOPPED_PWM = 370
-THROTTLE_REVERSE_PWM = 220
-
-#TRAINING
-DEFAULT_MODEL_TYPE = 'linear' #(linear|categorical|rnn|imu|behavior|3d|localizer|latent)
-BATCH_SIZE = 128
-TRAIN_TEST_SPLIT = 0.8
-MAX_EPOCHS = 100
-SHOW_PLOT = True
-VERBOSE_TRAIN = True
-USE_EARLY_STOP = True
-EARLY_STOP_PATIENCE = 5
-MIN_DELTA = .0005
-PRINT_MODEL_SUMMARY = True #print layers and weights to stdout
-OPTIMIZER = None #adam, sgd, rmsprop, etc.. None accepts default
-LEARNING_RATE = 0.001 #only used when OPTIMIZER specified
-LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified
-CACHE_IMAGES = True #keep images in memory. will speed succesive epochs, but crater if not enough mem.
-PRUNE_CNN = False
-PRUNE_PERCENT_TARGET = 75 # The desired percentage of pruning.
-PRUNE_PERCENT_PER_ITERATION = 20 # Percenge of pruning that is perform per iteration.
-PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 # The max amout of validation loss that is permitted during pruning.
-PRUNE_EVAL_PERCENT_OF_DATASET = .05 # percent of dataset used to perform evaluation of model.
- -# Region of interst cropping -# only supported in Categorical and Linear models. -ROI_CROP_TOP = 0 -ROI_CROP_BOTTOM = 0 - -#model transfer options -FREEZE_LAYERS = False -NUM_LAST_LAYERS_TO_TRAIN = 7 - -#For the categorical model, this limits the upper bound of the learned throttle -#it's very IMPORTANT that this value is matched from the training PC config.py and the robot.py -#and ideally wouldn't change once set. -MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.5 - -#RNN or 3D -SEQUENCE_LENGTH = 3 - -#SOMBRERO -HAVE_SOMBRERO = False - -#RECORD OPTIONS -RECORD_DURING_AI = False diff --git a/donkeycar/templates/cfg_complete.py b/donkeycar/templates/cfg_complete.py index 5fab09d45..f6e7d40a2 100644 --- a/donkeycar/templates/cfg_complete.py +++ b/donkeycar/templates/cfg_complete.py @@ -91,15 +91,20 @@ #TRAINING +# The default AI framework to use. Choose from (tensorflow|pytorch) +DEFAULT_AI_FRAMEWORK='tensorflow' + #The DEFAULT_MODEL_TYPE will choose which model will be created at training time. This chooses #between different neural network designs. You can override this setting by passing the command #line parameter --type to the python manage.py train and drive commands. -DEFAULT_MODEL_TYPE = 'linear' #(linear|categorical|rnn|imu|behavior|3d|localizer|latent) +# tensorflow models: (linear|categorical|tflite_linear|tensorrt_linear) +# pytorch models: (resnet18) +DEFAULT_MODEL_TYPE = 'linear' BATCH_SIZE = 128 #how many records to use when doing one pass of gradient decent. Use a smaller number if your gpu is running out of memory. TRAIN_TEST_SPLIT = 0.8 #what percent of records to use for training. the remaining used for validation. MAX_EPOCHS = 100 #how many times to visit all records of your data SHOW_PLOT = True #would you like to see a pop up display of final loss? -VERBOSE_TRAIN = True #would you like to see a progress bar with text during training? +VERBOSE_TRAIN = True #would you like to see a progress bar with text during training? USE_EARLY_STOP = True #would you like to stop the training if we see it's not improving fit? EARLY_STOP_PATIENCE = 5 #how many epochs to wait before no improvement MIN_DELTA = .0005 #early stop will want this much loss change before calling it improved. @@ -116,20 +121,13 @@ PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 # The max amout of validation loss that is permitted during pruning. PRUNE_EVAL_PERCENT_OF_DATASET = .05 # percent of dataset used to perform evaluation of model. -#Pi login information -#When using the continuous train option, these credentials will -#be used to copy the final model to your vehicle. If not using this option, no need to set these. -PI_USERNAME = "pi" # username on pi -PI_PASSWD = "raspberry" # password is optional. Only used from Windows machine. Ubuntu and mac users should copy their public keys to the pi. `ssh-copy-id username@hostname` -PI_HOSTNAME = "raspberrypi.local" # the network hostname or ip address -PI_DONKEY_ROOT = "/home/pi/mycar" # the location of the mycar dir on the pi. this will be used to help locate the final model destination. - # Region of interst cropping # only supported in Categorical and Linear models. # If these crops values are too large, they will cause the stride values to become negative and the model with not be valid. 
ROI_CROP_TOP = 0 #the number of rows of pixels to ignore on the top of the image ROI_CROP_BOTTOM = 0 #the number of rows of pixels to ignore on the bottom of the image + #Model transfer options #When copying weights during a model transfer operation, should we freeze a certain number of layers #to the incoming weights and not allow them to change during training? @@ -141,15 +139,15 @@ WEB_INIT_MODE = "user" # which control mode to start in. one of user|local_angle|local. Setting local will start in ai mode. #JOYSTICK -USE_JOYSTICK_AS_DEFAULT = False #when starting the manage.py, when True, will not require a --js option to use the joystick +USE_JOYSTICK_AS_DEFAULT = True #when starting the manage.py, when True, will not require a --js option to use the joystick JOYSTICK_MAX_THROTTLE = 0.5 #this scalar is multiplied with the -1 to 1 throttle value to limit the maximum throttle. This can help if you drop the controller or just don't need the full speed available. JOYSTICK_STEERING_SCALE = 1.0 #some people want a steering that is less sensitve. This scalar is multiplied with the steering -1 to 1. It can be negative to reverse dir. AUTO_RECORD_ON_THROTTLE = True #if true, we will record whenever throttle is not zero. if false, you must manually toggle recording with some other trigger. Usually circle button on joystick. -CONTROLLER_TYPE='ps3' #(ps3|ps4|xbox|nimbus|wiiu|F710|rc3|MM1|custom) custom will run the my_joystick.py controller written by the `donkey createjs` command +CONTROLLER_TYPE = 'xbox' #(ps3|ps4|xbox|nimbus|wiiu|F710|rc3|MM1|custom) custom will run the my_joystick.py controller written by the `donkey createjs` command USE_NETWORKED_JS = False #should we listen for remote joystick control over the network? -NETWORK_JS_SERVER_IP = "192.168.0.1"#when listening for network joystick control, which ip is serving this information -JOYSTICK_DEADZONE = 0.0 # when non zero, this is the smallest throttle before recording triggered. -JOYSTICK_THROTTLE_DIR = -1.0 # use -1.0 to flip forward/backward, use 1.0 to use joystick's natural forward/backward +NETWORK_JS_SERVER_IP = None #when listening for network joystick control, which ip is serving this information +JOYSTICK_DEADZONE = 0.01 # when non zero, this is the smallest throttle before recording triggered. +JOYSTICK_THROTTLE_DIR = 1.0 # use -1.0 to flip forward/backward, use 1.0 to use joystick's natural forward/backward USE_FPV = False # send camera data to FPV webserver JOYSTICK_DEVICE_FILE = "/dev/input/js0" # this is the unix file use to access the joystick. @@ -185,8 +183,18 @@ # eg.'/dev/tty.usbmodemXXXXXX' and replace the port accordingly MM1_SERIAL_PORT = '/dev/ttyS0' # Serial Port for reading and sending MM1 data. +#TELEMETRY +TELEMETRY_DONKEY_NAME = 'my_robot1234' +TELEMETRY_PUBLISH_PERIOD = 1 +HAVE_MQTT_TELEMETRY = False +TELEMETRY_MQTT_TOPIC_TEMPLATE = 'donkey/%s/telemetry' +TELEMETRY_MQTT_JSON_ENABLE = True +TELEMETRY_MQTT_BROKER_HOST = 'broker.emqx.io' +TELEMETRY_MQTT_BROKER_PORT = 1883 + #RECORD OPTIONS RECORD_DURING_AI = False #normally we do not record during ai mode. Set this to true to get image and steering records for your Ai. Be careful not to use them to train. 
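The TELEMETRY_* settings above are consumed by donkeycar.parts.telemetry.MqttTelemetry. As a rough sketch of what a single publish with these defaults could look like (assuming paho-mqtt 1.x; the payload keys are illustrative, not the part's exact schema):

```python
import json
import paho.mqtt.client as mqtt

client = mqtt.Client()                  # paho-mqtt 1.x constructor
client.connect('broker.emqx.io', 1883)  # TELEMETRY_MQTT_BROKER_HOST / _PORT
# TELEMETRY_MQTT_TOPIC_TEMPLATE filled with TELEMETRY_DONKEY_NAME
topic = 'donkey/%s/telemetry' % 'my_robot1234'
client.publish(topic, json.dumps({'user/angle': 0.0, 'user/throttle': 0.2}))
```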
+AUTO_CREATE_NEW_TUB = False #create a new tub (tub_YY_MM_DD) directory when recording, or append records to the data directory directly

 #LED
 HAVE_RGB_LED = False #do you have an RGB LED like https://www.amazon.com/dp/B07BNRZWNF
@@ -290,4 +298,4 @@
 # Stop Sign Detector
 STOP_SIGN_DETECTOR = False
 STOP_SIGN_MIN_SCORE = 0.2
-STOP_SIGN_SHOW_BOUNDING_BOX = True
\ No newline at end of file
+STOP_SIGN_SHOW_BOUNDING_BOX = True
diff --git a/donkeycar/templates/cfg_cv_control.py b/donkeycar/templates/cfg_cv_control.py
index 58e41d8cb..c22fb9455 100755
--- a/donkeycar/templates/cfg_cv_control.py
+++ b/donkeycar/templates/cfg_cv_control.py
@@ -29,12 +29,6 @@
 IMAGE_H = 120
 IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono

-# Region of interst cropping
-# only supported in Categorical and Linear models.
-# If these crops values are too large, they will cause the stride values to become negative and the model with not be valid.
-ROI_CROP_TOP = 0 #the number of rows of pixels to ignore on the top of the image
-ROI_CROP_BOTTOM = 0 #the number of rows of pixels to ignore on the bottom of the image
-
 #9865, over rides only if needed, ie. TX2..
 PCA9685_I2C_ADDR = 0x40
 PCA9685_I2C_BUSNUM = None
diff --git a/donkeycar/templates/cfg_simulator.py b/donkeycar/templates/cfg_simulator.py
new file mode 100644
index 000000000..d206745fb
--- /dev/null
+++ b/donkeycar/templates/cfg_simulator.py
@@ -0,0 +1,280 @@
+"""
+CAR CONFIG
+
+This file is read by your car application's manage.py script to change the car
+performance.
+
+EXAMPLE
+-----------
+import dk
+cfg = dk.load_config(config_path='~/mycar/config.py')
+print(cfg.CAMERA_RESOLUTION)
+
+"""
+
+
+import os
+
+#PATHS
+CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
+DATA_PATH = os.path.join(CAR_PATH, 'data')
+MODELS_PATH = os.path.join(CAR_PATH, 'models')
+
+#VEHICLE
+DRIVE_LOOP_HZ = 20 # the vehicle loop will pause if faster than this speed.
+MAX_LOOPS = None # the vehicle loop can abort after this many iterations, when given a positive integer.
+
+#CAMERA
+CAMERA_TYPE = "MOCK" # (PICAM|WEBCAM|CVCAM|CSIC|V4L|D435|MOCK|IMAGE_LIST)
+IMAGE_W = 160
+IMAGE_H = 120
+IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono
+CAMERA_FRAMERATE = DRIVE_LOOP_HZ
+CAMERA_VFLIP = False
+CAMERA_HFLIP = False
+# For CSIC camera - If the camera is mounted in a rotated position, changing the below parameter will correct the output frame orientation
+CSIC_CAM_GSTREAMER_FLIP_PARM = 0 # (0 => none , 4 => Flip horizontally, 6 => Flip vertically)
+
+# For IMAGE_LIST camera
+# PATH_MASK = "~/mycar/data/tub_1_20-03-12/*.jpg"
+
+#9685, overrides only if needed, i.e. TX2..
+PCA9685_I2C_ADDR = 0x40 #I2C address, use i2cdetect to validate this number
+PCA9685_I2C_BUSNUM = None #None will auto detect, which is fine on the pi. But other platforms should specify the bus num.
+
+#SSD1306_128_32
+USE_SSD1306_128_32 = False # Enable the SSD_1306 OLED Display
+SSD1306_128_32_I2C_BUSNUM = 1 # I2C bus number
+
+#DRIVETRAIN
+#These options specify which chassis and motor setup you are using. Most are using SERVO_ESC.
+#DC_STEER_THROTTLE uses HBridge pwm to control one steering dc motor, and one drive wheel motor
+#DC_TWO_WHEEL uses HBridge pwm to control two drive motors, one on the left, and one on the right.
+#SERVO_HBRIDGE_PWM uses ServoBlaster to output pwm control from the PiZero directly to control steering, and HBridge for a drive motor.
+#PIGPIO_PWM uses the Raspberry Pi's internal PWM
+DRIVE_TRAIN_TYPE = "MOCK" # SERVO_ESC|DC_STEER_THROTTLE|DC_TWO_WHEEL|SERVO_HBRIDGE_PWM|PIGPIO_PWM|MM1|MOCK
+
+#STEERING
+STEERING_CHANNEL = 1 #channel on the 9685 pwm board 0-15
+STEERING_LEFT_PWM = 460 #pwm value for full left steering
+STEERING_RIGHT_PWM = 290 #pwm value for full right steering
+
+#STEERING FOR PIGPIO_PWM
+STEERING_PWM_PIN = 13 #Pin numbering according to Broadcom numbers
+STEERING_PWM_FREQ = 50 #Frequency for PWM
+STEERING_PWM_INVERTED = False #If PWM needs to be inverted
+
+#THROTTLE
+THROTTLE_CHANNEL = 0 #channel on the 9685 pwm board 0-15
+THROTTLE_FORWARD_PWM = 500 #pwm value for max forward throttle
+THROTTLE_STOPPED_PWM = 370 #pwm value for no movement
+THROTTLE_REVERSE_PWM = 220 #pwm value for max reverse throttle
+
+#THROTTLE FOR PIGPIO_PWM
+THROTTLE_PWM_PIN = 18 #Pin numbering according to Broadcom numbers
+THROTTLE_PWM_FREQ = 50 #Frequency for PWM
+THROTTLE_PWM_INVERTED = False #If PWM needs to be inverted
+
+#DC_STEER_THROTTLE with one motor as steering, one as drive
+#these GPIO pinouts are only used for the DRIVE_TRAIN_TYPE=DC_STEER_THROTTLE
+HBRIDGE_PIN_LEFT = 18
+HBRIDGE_PIN_RIGHT = 16
+HBRIDGE_PIN_FWD = 15
+HBRIDGE_PIN_BWD = 13
+
+#DC_TWO_WHEEL - with two wheels as drive, left and right.
+#these GPIO pinouts are only used for the DRIVE_TRAIN_TYPE=DC_TWO_WHEEL
+HBRIDGE_PIN_LEFT_FWD = 18
+HBRIDGE_PIN_LEFT_BWD = 16
+HBRIDGE_PIN_RIGHT_FWD = 15
+HBRIDGE_PIN_RIGHT_BWD = 13
+
+
+#TRAINING
+#The DEFAULT_MODEL_TYPE will choose which model will be created at training time. This chooses
+#between different neural network designs. You can override this setting by passing the command
+#line parameter --type to the python manage.py train and drive commands.
+DEFAULT_MODEL_TYPE = 'linear' #(linear|categorical|tflite_linear|tensorrt_linear)
+BATCH_SIZE = 128 #how many records to use when doing one pass of gradient descent. Use a smaller number if your gpu is running out of memory.
+TRAIN_TEST_SPLIT = 0.8 #what percent of records to use for training. the remaining are used for validation.
+MAX_EPOCHS = 100 #how many times to visit all records of your data
+SHOW_PLOT = True #would you like to see a pop up display of final loss?
+VERBOSE_TRAIN = True #would you like to see a progress bar with text during training?
+USE_EARLY_STOP = True #would you like to stop the training if we see it's not improving fit?
+EARLY_STOP_PATIENCE = 5 #how many epochs with no improvement to wait before stopping
+MIN_DELTA = .0005 #early stop will want this much loss change before calling it improved.
+PRINT_MODEL_SUMMARY = True #print layers and weights to stdout
+OPTIMIZER = None #adam, sgd, rmsprop, etc.. None accepts default
+LEARNING_RATE = 0.001 #only used when OPTIMIZER specified
+LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified
+SEND_BEST_MODEL_TO_PI = False #change to true to automatically send best model during training
+CACHE_IMAGES = True #keep images in memory. will speed up successive epochs, but will crash if there is not enough memory.
+
+PRUNE_CNN = False #This will remove weights from your model. The primary goal is to increase performance.
+PRUNE_PERCENT_TARGET = 75 # The desired percentage of pruning.
+PRUNE_PERCENT_PER_ITERATION = 20 # Percentage of pruning that is performed per iteration.
+PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 # The max amount of validation loss that is permitted during pruning.
+PRUNE_EVAL_PERCENT_OF_DATASET = .05 # percent of dataset used to perform evaluation of model.
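For orientation, USE_EARLY_STOP, EARLY_STOP_PATIENCE and MIN_DELTA map onto the standard Keras early-stopping callback, roughly as follows (a sketch assuming tf.keras; the training code wires this up itself):

```python
from tensorflow.keras.callbacks import EarlyStopping

# Stop training once val_loss has not improved by at least MIN_DELTA
# for EARLY_STOP_PATIENCE consecutive epochs.
early_stop = EarlyStopping(monitor='val_loss',
                           min_delta=0.0005,  # MIN_DELTA
                           patience=5,        # EARLY_STOP_PATIENCE
                           verbose=1)
# Then: model.fit(..., callbacks=[early_stop]) when USE_EARLY_STOP is True.
```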
+
+#Model transfer options
+#When copying weights during a model transfer operation, should we freeze a certain number of layers
+#to the incoming weights and not allow them to change during training?
+FREEZE_LAYERS = False #default False will allow all layers to be modified by training
+NUM_LAST_LAYERS_TO_TRAIN = 7 #when freezing layers, how many layers from the last should be allowed to train?
+
+#WEB CONTROL
+WEB_CONTROL_PORT = 8887 # which port to listen on when making a web controller
+WEB_INIT_MODE = "user" # which control mode to start in. one of user|local_angle|local. Setting local will start in ai mode.
+
+#JOYSTICK
+USE_JOYSTICK_AS_DEFAULT = False #when True, manage.py will not require the --js option to use the joystick
+JOYSTICK_MAX_THROTTLE = 0.5 #this scalar is multiplied with the -1 to 1 throttle value to limit the maximum throttle. This can help if you drop the controller or just don't need the full speed available.
+JOYSTICK_STEERING_SCALE = 1.0 #some people want steering that is less sensitive. This scalar is multiplied with the steering -1 to 1. It can be negative to reverse dir.
+AUTO_RECORD_ON_THROTTLE = True #if true, we will record whenever throttle is not zero. if false, you must manually toggle recording with some other trigger. Usually circle button on joystick.
+CONTROLLER_TYPE = 'xbox' #(ps3|ps4|xbox|nimbus|wiiu|F710|rc3|MM1|custom) custom will run the my_joystick.py controller written by the `donkey createjs` command
+USE_NETWORKED_JS = False #should we listen for remote joystick control over the network?
+NETWORK_JS_SERVER_IP = None #when listening for network joystick control, which ip is serving this information
+JOYSTICK_DEADZONE = 0.01 # when non zero, this is the smallest throttle before recording is triggered.
+JOYSTICK_THROTTLE_DIR = 1.0 # use -1.0 to flip forward/backward, use 1.0 to use joystick's natural forward/backward
+USE_FPV = False # send camera data to FPV webserver
+JOYSTICK_DEVICE_FILE = "/dev/input/js0" # this is the unix device file used to access the joystick.
+
+#For the categorical model, this limits the upper bound of the learned throttle.
+#It's very IMPORTANT that this value is matched between the training PC config.py and the robot's config.py,
+#and ideally wouldn't change once set.
+MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.5
+
+#RNN or 3D
+SEQUENCE_LENGTH = 3 #some models use a number of images over time. This controls how many.
+
+#IMU
+HAVE_IMU = False #when true, this adds an Mpu6050 part and records the data. Can be used with a
+IMU_SENSOR = 'mpu6050' # (mpu6050|mpu9250)
+IMU_DLP_CONFIG = 0 # Digital Lowpass Filter setting (0:250Hz, 1:184Hz, 2:92Hz, 3:41Hz, 4:20Hz, 5:10Hz, 6:5Hz)
+
+#SOMBRERO
+HAVE_SOMBRERO = False #set to true when using the sombrero hat from the Donkeycar store. This will enable pwm on the hat.
+
+#ROBOHAT MM1
+HAVE_ROBOHAT = False # set to true when using the Robo HAT MM1 from Robotics Masters. This will change to RC Control.
+MM1_STEERING_MID = 1500 # Adjust this value if your car cannot run in a straight line
+MM1_MAX_FORWARD = 2000 # Max throttle to go forward. The bigger the faster
+MM1_STOPPED_PWM = 1500
+MM1_MAX_REVERSE = 1000 # Max throttle to go reverse. The smaller the faster
+MM1_SHOW_STEERING_VALUE = False
+# Serial port
+# -- Default Pi: '/dev/ttyS0'
+# -- Jetson Nano: '/dev/ttyTHS1'
+# -- Google coral: '/dev/ttymxc0'
+# -- Windows: 'COM3', Arduino: '/dev/ttyACM0'
+# -- MacOS/Linux: please use 'ls /dev/tty.*' to find the correct serial port for mm1
+#  eg.'/dev/tty.usbmodemXXXXXX' and replace the port accordingly
+MM1_SERIAL_PORT = '/dev/ttyS0' # Serial Port for reading and sending MM1 data.
+
+#RECORD OPTIONS
+RECORD_DURING_AI = False #normally we do not record during ai mode. Set this to true to get image and steering records for your Ai. Be careful not to use them to train.
+AUTO_CREATE_NEW_TUB = False #create a new tub (tub_YY_MM_DD) directory when recording, or append records to the data directory directly
+
+#LED
+HAVE_RGB_LED = False #do you have an RGB LED like https://www.amazon.com/dp/B07BNRZWNF
+LED_INVERT = False #COMMON ANODE? Some RGB LED use common anode. like https://www.amazon.com/Xia-Fly-Tri-Color-Emitting-Diffused/dp/B07MYJQP8B
+
+#LED board pin number for pwm outputs
+#These are physical pinouts. See: https://www.raspberrypi-spy.co.uk/2012/06/simple-guide-to-the-rpi-gpio-header-and-pins/
+LED_PIN_R = 12
+LED_PIN_G = 10
+LED_PIN_B = 16
+
+#LED status color, 0-100
+LED_R = 0
+LED_G = 0
+LED_B = 1
+
+#LED Color for record count indicator
+REC_COUNT_ALERT = 1000 #how many records before blinking alert
+REC_COUNT_ALERT_CYC = 15 #how many cycles of 1/20 of a second to blink per REC_COUNT_ALERT records
+REC_COUNT_ALERT_BLINK_RATE = 0.4 #how fast to blink the led in seconds on/off
+
+#first number is record count, second tuple is color ( r, g, b) (0-100)
+#when record count exceeds that number, the color will be used
+RECORD_ALERT_COLOR_ARR = [ (0, (1, 1, 1)),
+                           (3000, (5, 5, 5)),
+                           (5000, (5, 2, 0)),
+                           (10000, (0, 5, 0)),
+                           (15000, (0, 5, 5)),
+                           (20000, (0, 0, 5)), ]
+
+
+#LED status color, 0-100, for model reloaded alert
+MODEL_RELOADED_LED_R = 100
+MODEL_RELOADED_LED_G = 0
+MODEL_RELOADED_LED_B = 0
+
+
+#BEHAVIORS
+#When training the Behavioral Neural Network model, make a list of the behaviors,
+#set TRAIN_BEHAVIORS = True, and use the BEHAVIOR_LED_COLORS to give each behavior a color
+TRAIN_BEHAVIORS = False
+BEHAVIOR_LIST = ['Left_Lane', "Right_Lane"]
+BEHAVIOR_LED_COLORS = [ (0, 10, 0), (10, 0, 0) ] #RGB tuples 0-100 per channel
+
+#Localizer
+#The localizer is a neural network that can learn to predict its location on the track.
+#This is an experimental feature that needs more development. But it can currently be used
+#to predict the segment of the course, where the course is divided into NUM_LOCATIONS segments.
+TRAIN_LOCALIZER = False
+NUM_LOCATIONS = 10
+BUTTON_PRESS_NEW_TUB = False #when enabled, makes it easier to divide our data into one tub per track length if we make a new tub on each X button press.
+
+#DonkeyGym
+#Only on Ubuntu linux, you can use the simulator as a virtual donkey and
+#issue the same python manage.py drive command as usual, but have it control a virtual car.
+#This enables that, and sets the path to the simulator and the environment.
+#You will want to download the simulator binary from: https://github.com/tawnkramer/donkey_gym/releases/download/v18.9/DonkeySimLinux.zip
+#then extract that and modify DONKEY_SIM_PATH.
+DONKEY_GYM = True
+DONKEY_SIM_PATH = "path to sim" #"/home/tkramer/projects/sdsandbox/sdsim/build/DonkeySimLinux/donkey_sim.x86_64" when racing on virtual-race-league use "remote", or use "remote" when you want to start the sim manually first.
+DONKEY_GYM_ENV_NAME = "donkey-generated-track-v0" # ("donkey-generated-track-v0"|"donkey-generated-roads-v0"|"donkey-warehouse-v0"|"donkey-avc-sparkfun-v0")
+GYM_CONF = { "img_h" : IMAGE_H, "img_w" : IMAGE_W, "body_style" : "donkey", "body_rgb" : (128, 128, 128), "car_name" : "car", "font_size" : 100 } # body style(donkey|bare|car01) body rgb 0-255
+GYM_CONF["racer_name"] = "Your Name"
+GYM_CONF["country"] = "Place"
+GYM_CONF["bio"] = "I race robots."
+
+SIM_HOST = "127.0.0.1" # when racing on virtual-race-league use host "trainmydonkey.com"
+SIM_ARTIFICIAL_LATENCY = 0 # this is the millisecond latency in controls. Can be useful in emulating the delay when using a remote server. Values of 100 to 400 are probably reasonable.
+
+#publish camera over network
+#This is used to create a tcp service to publish the camera feed
+PUB_CAMERA_IMAGES = False
+
+#When racing, to give the ai a boost, configure these values.
+AI_LAUNCH_DURATION = 0.0 # the ai will output throttle for this many seconds
+AI_LAUNCH_THROTTLE = 0.0 # the ai will output this throttle value
+AI_LAUNCH_ENABLE_BUTTON = 'R2' # this keypress will enable this boost. It must be enabled before each use to prevent accidental trigger.
+AI_LAUNCH_KEEP_ENABLED = False # when False (default) you will need to hit the AI_LAUNCH_ENABLE_BUTTON for each use. This is safest. When True, it is active on each trip into "local" ai mode.
+
+#Scale the output of the throttle of the ai pilot for all model types.
+AI_THROTTLE_MULT = 1.0 # this multiplier will scale every throttle value for all output from NN models
+
+#Path following
+PATH_FILENAME = "donkey_path.pkl" # the path will be saved to this filename
+PATH_SCALE = 5.0 # the path display will be scaled by this factor in the web page
+PATH_OFFSET = (0, 0) # 255, 255 is the center of the map. This offset controls where the origin is displayed.
+PATH_MIN_DIST = 0.3 # after travelling this distance (m), save a path point
+PID_P = -10.0 # proportional mult for PID path follower
+PID_I = 0.000 # integral mult for PID path follower
+PID_D = -0.2 # differential mult for PID path follower
+PID_THROTTLE = 0.2 # constant throttle value during path following
+SAVE_PATH_BTN = "cross" # joystick button to save path
+RESET_ORIGIN_BTN = "triangle" # joystick button to press to move car back to origin
+
+# Intel Realsense D435 and D435i depth sensing camera
+REALSENSE_D435_RGB = True # True to capture RGB image
+REALSENSE_D435_DEPTH = True # True to capture depth as image array
+REALSENSE_D435_IMU = False # True to capture IMU data (D435i only)
+REALSENSE_D435_ID = None # serial number of camera or None if you only have one camera (it will autodetect)
+
+# Stop Sign Detector
+STOP_SIGN_DETECTOR = False
+STOP_SIGN_MIN_SCORE = 0.2
+STOP_SIGN_SHOW_BOUNDING_BOX = True
diff --git a/donkeycar/templates/cfg_square.py b/donkeycar/templates/cfg_square.py
index f0816eebc..f9256fb2a 100755
--- a/donkeycar/templates/cfg_square.py
+++ b/donkeycar/templates/cfg_square.py
@@ -65,11 +65,6 @@
 PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 # The max amout of validation loss that is permitted during pruning.
 PRUNE_EVAL_PERCENT_OF_DATASET = .05 # percent of dataset used to perform evaluation of model.

-# Region of interst cropping
-# only supported in Categorical and Linear models.
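The PID_* gains above combine in the usual proportional/integral/derivative way on the cross-track error. A schematic control step (a sketch, not the shipped path-follower part):

```python
class PathFollowPID:
    """Schematic PID for path following: run() takes the cross-track
    error and returns a steering command. Gains default to the config above."""
    def __init__(self, p=-10.0, i=0.000, d=-0.2):
        self.p, self.i, self.d = p, i, d
        self.integral = 0.0
        self.prev_err = 0.0

    def run(self, err):
        # Accumulate the integral term and difference the error for the
        # derivative term, then sum the three weighted contributions.
        self.integral += err
        derivative = err - self.prev_err
        self.prev_err = err
        return self.p * err + self.i * self.integral + self.d * derivative
```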
-ROI_CROP_TOP = 0 -ROI_CROP_BOTTOM = 0 - #model transfer options FREEZE_LAYERS = False NUM_LAST_LAYERS_TO_TRAIN = 7 diff --git a/donkeycar/templates/complete.py b/donkeycar/templates/complete.py index ce912e0ac..7b18da1bb 100644 --- a/donkeycar/templates/complete.py +++ b/donkeycar/templates/complete.py @@ -3,9 +3,8 @@ Scripts to drive a donkey 2 car Usage: - manage.py (drive) [--model=] [--js] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer|latent)] [--camera=(single|stereo)] [--meta= ...] [--myconfig=] - manage.py (train) [--tub=] [--file= ...] (--model=) [--transfer=] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer)] [--continuous] [--aug] [--myconfig=] - + manage.py (drive) [--model=] [--js] [--type=(linear|categorical)] [--camera=(single|stereo)] [--meta= ...] [--myconfig=] + manage.py (train) [--tubs=tubs] (--model=) [--type=(linear|inferred|tensorrt_linear|tflite_linear)] Options: -h --help Show this screen. @@ -23,17 +22,17 @@ import donkeycar as dk -#import parts -from donkeycar.parts.transform import Lambda, TriggeredCallback, DelayedTrigger +from donkeycar.parts.transform import TriggeredCallback, DelayedTrigger +from donkeycar.parts.tub_v2 import TubWriter from donkeycar.parts.datastore import TubHandler -from donkeycar.parts.controller import LocalWebController, \ - JoystickController, WebFpv +from donkeycar.parts.controller import LocalWebController, JoystickController, WebFpv from donkeycar.parts.throttle_filter import ThrottleFilter from donkeycar.parts.behavior import BehaviorPart from donkeycar.parts.file_watcher import FileWatcher from donkeycar.parts.launch import AiLaunch from donkeycar.utils import * + def drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type='single', meta=[]): ''' Construct a working robotic vehicle from many parts. @@ -130,6 +129,9 @@ def drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type elif cfg.CAMERA_TYPE == "IMAGE_LIST": from donkeycar.parts.camera import ImageListCamera cam = ImageListCamera(path_mask=cfg.PATH_MASK) + elif cfg.CAMERA_TYPE == "LEOPARD": + from donkeycar.parts.leopard_imaging import LICamera + cam = LICamera(width=cfg.IMAGE_W, height=cfg.IMAGE_H, fps=cfg.CAMERA_FRAMERATE) else: raise(Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE)) @@ -298,26 +300,6 @@ def show_record_acount_status(): V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True) - class ImgPreProcess(): - ''' - preprocess camera image for inference. - normalize and crop if needed. - ''' - def __init__(self, cfg): - self.cfg = cfg - - def run(self, img_arr): - return normalize_and_crop(img_arr, self.cfg) - - if "coral" in model_type: - inf_input = 'cam/image_array' - else: - inf_input = 'cam/normalized/cropped' - V.add(ImgPreProcess(cfg), - inputs=['cam/image_array'], - outputs=[inf_input], - run_condition='run_pilot') - # Use the FPV preview, which will show the cropped image output, or the full frame. if cfg.USE_FPV: V.add(WebFpv(), inputs=['cam/image_array'], threaded=True) @@ -331,16 +313,16 @@ def run(self, img_arr): except: pass - inputs = [inf_input, "behavior/one_hot_state_array"] + inputs = ['cam/image_array', "behavior/one_hot_state_array"] #IMU elif model_type == "imu": assert(cfg.HAVE_IMU) #Run the pilot if the mode is not user. 
-        inputs=[inf_input,
+        inputs=['cam/image_array',
                 'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
                 'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']

     else:
-        inputs=[inf_input]
+        inputs=['cam/image_array']

     def load_model(kl, model_path):
         start = time.time()
@@ -419,8 +401,8 @@ def reload_weights(filename):
             outputs.append("pilot/loc")

         V.add(kl, inputs=inputs,
-            outputs=outputs,
-            run_condition='run_pilot')
+              outputs=outputs,
+              run_condition='run_pilot')

     if cfg.STOP_SIGN_DETECTOR:
         from donkeycar.parts.object_detector.stop_sign_detector import StopSignDetector
@@ -598,9 +580,18 @@ def run(self, mode, recording):
         inputs += ['pilot/angle', 'pilot/throttle']
         types += ['float', 'float']

-    th = TubHandler(path=cfg.DATA_PATH)
-    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
-    V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
+    # do we want to store new records into their own dir or append to the existing one
+    tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \
+        cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH
+    tub_writer = TubWriter(tub_path, inputs=inputs, types=types, metadata=meta)
+    V.add(tub_writer, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
+
+    # Telemetry (we add the same metrics added to the TubHandler)
+    if cfg.HAVE_MQTT_TELEMETRY:
+        from donkeycar.parts.telemetry import MqttTelemetry
+        published_inputs, published_types = MqttTelemetry.filter_supported_metrics(inputs, types)
+        tel = MqttTelemetry(cfg, default_inputs=published_inputs, default_types=published_types)
+        V.add(tel, inputs=published_inputs, outputs=["tub/queue_size"], threaded=False)

     if cfg.PUB_CAMERA_IMAGES:
         from donkeycar.parts.network import TCPServeValue
@@ -616,23 +607,11 @@ def run(self, mode, recording):
         print("You can now go to :%d to drive your car." % cfg.WEB_CONTROL_PORT)
     elif isinstance(ctr, JoystickController):
         print("You can now move your joystick to drive your car.")
-        #tell the controller about the tub
-        ctr.set_tub(tub)
-
-        if cfg.BUTTON_PRESS_NEW_TUB:
-
-            def new_tub_dir():
-                V.parts.pop()
-                tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
-                V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
-                ctr.set_tub(tub)
-
-            ctr.set_button_down_trigger('cross', new_tub_dir)
+        ctr.set_tub(tub_writer.tub)

         ctr.print_controls()

     #run the vehicle for 20 seconds
-    V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
-            max_loop_count=cfg.MAX_LOOPS)
+    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)


 if __name__ == '__main__':
@@ -642,29 +621,9 @@ def new_tub_dir():
     if args['drive']:
         model_type = args['--type']
         camera_type = args['--camera']
-        drive(cfg, model_path=args['--model'], use_joystick=args['--js'], model_type=model_type, camera_type=camera_type, meta=args['--meta'])
-
-    if args['train']:
-        from train import multi_train, preprocessFileList
-
-        tub = args['--tub']
-        model = args['--model']
-        transfer = args['--transfer']
-        model_type = args['--type']
-        continuous = args['--continuous']
-        aug = args['--aug']
-        dirs = preprocessFileList( args['--file'] )
-
-        if tub is not None:
-            tub_paths = [os.path.expanduser(n) for n in tub.split(',')]
-            dirs.extend( tub_paths )
-
-        if model_type is None:
-            model_type = cfg.DEFAULT_MODEL_TYPE
-            print("using default model type of", model_type)
-
-        multi_train(cfg, dirs, model, transfer, model_type, continuous, aug)
+        drive(cfg, model_path=args['--model'], use_joystick=args['--js'],
+              model_type=model_type, camera_type=camera_type,
+              meta=args['--meta'])
+    elif args['train']:
+        print('Use python train.py instead.\n')
diff --git a/donkeycar/templates/simulator.py b/donkeycar/templates/simulator.py
new file mode 100644
index 000000000..9effcf8c1
--- /dev/null
+++ b/donkeycar/templates/simulator.py
@@ -0,0 +1,437 @@
+#!/usr/bin/env python3
+"""
+Scripts to drive a donkey 4 car
+
+Usage:
+    manage.py (drive) [--model=] [--js] [--type=(linear|categorical)] [--camera=(single|stereo)] [--meta= ...] [--myconfig=]
+    manage.py (train) [--tubs=tubs] (--model=) [--type=(linear|inferred|tensorrt_linear|tflite_linear)]
+
+Options:
+    -h --help               Show this screen.
+    --js                    Use physical joystick.
+    -f --file=              A text file containing paths to tub files, one per line. Option may be used more than once.
+    --meta=                 Key/Value strings describing a piece of meta data about this drive. Option may be used more than once.
+    --myconfig=filename     Specify myconfig file to use.
+                            [default: myconfig.py]
+"""
+import os
+import time
+
+from docopt import docopt
+import numpy as np
+
+import donkeycar as dk
+
+from donkeycar.parts.transform import TriggeredCallback, DelayedTrigger
+from donkeycar.parts.tub_v2 import TubWriter
+from donkeycar.parts.datastore import TubHandler
+from donkeycar.parts.controller import LocalWebController, JoystickController, WebFpv
+from donkeycar.parts.throttle_filter import ThrottleFilter
+from donkeycar.parts.behavior import BehaviorPart
+from donkeycar.parts.file_watcher import FileWatcher
+from donkeycar.parts.launch import AiLaunch
+from donkeycar.utils import *
+
+
+def drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type='single', meta=[]):
+    '''
+    Construct a working robotic vehicle from many parts.
+    Each part runs as a job in the Vehicle loop, calling either
+    its run or run_threaded method depending on the constructor flag `threaded`.
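As a gloss on the part contract described in this docstring: each part exposes run() (or run_threaded()), the loop reads the named inputs from shared memory, calls the part, and stores the return values under the named outputs. A toy sketch (hypothetical part and channel names, not template code):

```python
import donkeycar as dk

class Doubler:
    # The loop calls run() with the current values of `inputs` and stores
    # the return value under `outputs`.
    def run(self, x):
        return 2 * x if x is not None else None

V = dk.vehicle.Vehicle()
# 'some/value' and 'some/doubled' are made-up channel names.
V.add(Doubler(), inputs=['some/value'], outputs=['some/doubled'])
```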
+ All parts are updated one after another at the framerate given in + cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner. + Parts may have named outputs and inputs. The framework handles passing named outputs + to parts requesting the same named input. + ''' + + if cfg.DONKEY_GYM: + #the simulator will use cuda and then we usually run out of resources + #if we also try to use cuda. so disable for donkey_gym. + os.environ["CUDA_VISIBLE_DEVICES"]="-1" + + if model_type is None: + if cfg.TRAIN_LOCALIZER: + model_type = "localizer" + elif cfg.TRAIN_BEHAVIORS: + model_type = "behavior" + else: + model_type = cfg.DEFAULT_MODEL_TYPE + + #Initialize car + V = dk.vehicle.Vehicle() + + from donkeycar.parts.dgym import DonkeyGymEnv + + inputs = [] + + cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME, conf=cfg.GYM_CONF, delay=cfg.SIM_ARTIFICIAL_LATENCY) + threaded = True + inputs = ['angle', 'throttle', 'brake'] + + V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded) + + if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT: + #modify max_throttle closer to 1.0 to have more power + #modify steering_scale lower than 1.0 to have less responsive steering + if cfg.CONTROLLER_TYPE == "MM1": + from donkeycar.parts.robohat import RoboHATController + ctr = RoboHATController(cfg) + elif "custom" == cfg.CONTROLLER_TYPE: + # + # custom controller created with `donkey createjs` command + # + from my_joystick import MyJoystickController + ctr = MyJoystickController( + throttle_dir=cfg.JOYSTICK_THROTTLE_DIR, + throttle_scale=cfg.JOYSTICK_MAX_THROTTLE, + steering_scale=cfg.JOYSTICK_STEERING_SCALE, + auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE) + ctr.set_deadzone(cfg.JOYSTICK_DEADZONE) + else: + from donkeycar.parts.controller import get_js_controller + + ctr = get_js_controller(cfg) + + if cfg.USE_NETWORKED_JS: + from donkeycar.parts.controller import JoyStickSub + netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP) + V.add(netwkJs, threaded=True) + ctr.js = netwkJs + + V.add(ctr, + inputs=['cam/image_array'], + outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'], + threaded=True) + + else: + #This web controller will create a web server that is capable + #of managing steering, throttle, and modes, and more. + ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, mode=cfg.WEB_INIT_MODE) + + V.add(ctr, + inputs=['cam/image_array', 'tub/num_records'], + outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'], + threaded=True) + + #this throttle filter will allow one tap back for esc reverse + th_filter = ThrottleFilter() + V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle']) + + #See if we should even run the pilot module. + #This is only needed because the part run_condition only accepts boolean + class PilotCondition: + def run(self, mode): + if mode == 'user': + return False + else: + return True + + V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot']) + + class LedConditionLogic: + def __init__(self, cfg): + self.cfg = cfg + + def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc): + #returns a blink rate. 0 for off. -1 for on. positive for rate. 
+ + if track_loc is not None: + led.set_rgb(*self.cfg.LOC_COLORS[track_loc]) + return -1 + + if model_file_changed: + led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B) + return 0.1 + else: + led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B) + + if recording_alert: + led.set_rgb(*recording_alert) + return self.cfg.REC_COUNT_ALERT_BLINK_RATE + else: + led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B) + + if behavior_state is not None and model_type == 'behavior': + r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state] + led.set_rgb(r, g, b) + return -1 #solid on + + if recording: + return -1 #solid on + elif mode == 'user': + return 1 + elif mode == 'local_angle': + return 0.5 + elif mode == 'local': + return 0.1 + return 0 + + if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM: + from donkeycar.parts.led_status import RGB_LED + led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT) + led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B) + + V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"], + outputs=['led/blink_rate']) + + V.add(led, inputs=['led/blink_rate']) + + def get_record_alert_color(num_records): + col = (0, 0, 0) + for count, color in cfg.RECORD_ALERT_COLOR_ARR: + if num_records >= count: + col = color + return col + + class RecordTracker: + def __init__(self): + self.last_num_rec_print = 0 + self.dur_alert = 0 + self.force_alert = 0 + + def run(self, num_records): + if num_records is None: + return 0 + + if self.last_num_rec_print != num_records or self.force_alert: + self.last_num_rec_print = num_records + + if num_records % 10 == 0: + print("recorded", num_records, "records") + + if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert: + self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC + self.force_alert = 0 + + if self.dur_alert > 0: + self.dur_alert -= 1 + + if self.dur_alert != 0: + return get_record_alert_color(num_records) + + return 0 + + rec_tracker_part = RecordTracker() + V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert']) + + if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController): + #then we are not using the circle button. hijack that to force a record count indication + def show_record_acount_status(): + rec_tracker_part.last_num_rec_print = 0 + rec_tracker_part.force_alert = 1 + ctr.set_button_down_trigger('circle', show_record_acount_status) + + # Use the FPV preview, which will show the cropped image output, or the full frame. + if cfg.USE_FPV: + V.add(WebFpv(), inputs=['cam/image_array'], threaded=True) + + #Behavioral state + if cfg.TRAIN_BEHAVIORS: + bh = BehaviorPart(cfg.BEHAVIOR_LIST) + V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]) + try: + ctr.set_button_down_trigger('L1', bh.increment_state) + except: + pass + + inputs = ['cam/image_array', "behavior/one_hot_state_array"] + else: + inputs=['cam/image_array'] + + def load_model(kl, model_path): + start = time.time() + print('loading model', model_path) + kl.load(model_path) + print('finished loading in %s sec.' % (str(time.time() - start)) ) + + def load_weights(kl, weights_path): + start = time.time() + try: + print('loading model weights', weights_path) + kl.model.load_weights(weights_path) + print('finished loading in %s sec.' 
% (str(time.time() - start)) ) + except Exception as e: + print(e) + print('ERR>> problems loading weights', weights_path) + + def load_model_json(kl, json_fnm): + start = time.time() + print('loading model json', json_fnm) + from tensorflow.python import keras + try: + with open(json_fnm, 'r') as handle: + contents = handle.read() + kl.model = keras.models.model_from_json(contents) + print('finished loading json in %s sec.' % (str(time.time() - start)) ) + except Exception as e: + print(e) + print("ERR>> problems loading model json", json_fnm) + + if model_path: + #When we have a model, first create an appropriate Keras part + kl = dk.utils.get_model_by_type(model_type, cfg) + + model_reload_cb = None + + if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path: + #when we have a .h5 extension + #load everything from the model file + load_model(kl, model_path) + + def reload_model(filename): + load_model(kl, filename) + + model_reload_cb = reload_model + + elif '.json' in model_path: + #when we have a .json extension + #load the model from there and look for a matching + #.wts file with just weights + load_model_json(kl, model_path) + weights_path = model_path.replace('.json', '.weights') + load_weights(kl, weights_path) + + def reload_weights(filename): + weights_path = filename.replace('.json', '.weights') + load_weights(kl, weights_path) + + model_reload_cb = reload_weights + + else: + print("ERR>> Unknown extension type on model file!!") + return + + #this part will signal visual LED, if connected + V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified']) + + #these parts will reload the model file, but only when ai is running so we don't interrupt user driving + V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running") + V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running") + V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running") + + outputs=['pilot/angle', 'pilot/throttle'] + + if cfg.TRAIN_LOCALIZER: + outputs.append("pilot/loc") + + V.add(kl, inputs=inputs, + outputs=outputs, + run_condition='run_pilot') + + if cfg.STOP_SIGN_DETECTOR: + from donkeycar.parts.object_detector.stop_sign_detector import StopSignDetector + V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE, cfg.STOP_SIGN_SHOW_BOUNDING_BOX), inputs=['cam/image_array', 'pilot/throttle'], outputs=['pilot/throttle', 'cam/image_array']) + + #Choose what inputs should change the car. + class DriveMode: + def run(self, mode, + user_angle, user_throttle, user_brake, + pilot_angle, pilot_throttle, pilot_brake): + if mode == 'user': + return user_angle, user_throttle, user_brake + + elif mode == 'local_angle': + return pilot_angle if pilot_angle else 0.0, user_throttle, user_brake + + else: + return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0, pilot_brake if pilot_brake else 0.0 + + V.add(DriveMode(), + inputs=['user/mode', 'user/angle', 'user/throttle', 'user/brake', + 'pilot/angle', 'pilot/throttle', 'pilot/brake'], + outputs=['angle', 'throttle', 'brake']) + + + #to give the car a boost when starting ai mode in a race. 
+ aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED) + + V.add(aiLauncher, + inputs=['user/mode', 'throttle'], + outputs=['throttle']) + + if isinstance(ctr, JoystickController): + ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch) + + + class AiRunCondition: + ''' + A bool part to let us know when ai is running. + ''' + def run(self, mode): + if mode == "user": + return False + return True + + V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running']) + + #Ai Recording + class AiRecordingCondition: + ''' + return True when ai mode, otherwize respect user mode recording flag + ''' + def run(self, mode, recording): + if mode == 'user': + return recording + return True + + if cfg.RECORD_DURING_AI: + V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording']) + + #add tub to save data + + inputs=['cam/image_array', + 'user/angle', 'user/throttle', + 'user/mode'] + + types=['image_array', + 'float', 'float', + 'str'] + + if cfg.TRAIN_BEHAVIORS: + inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"] + types += ['int', 'str', 'vector'] + + if cfg.RECORD_DURING_AI: + inputs += ['pilot/angle', 'pilot/throttle'] + types += ['float', 'float'] + + # do we want to store new records into own dir or append to existing + tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \ + cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH + tub_writer = TubWriter(tub_path, inputs=inputs, types=types, metadata=meta) + V.add(tub_writer, inputs=inputs, outputs=["tub/num_records"], run_condition='recording') + + if cfg.PUB_CAMERA_IMAGES: + from donkeycar.parts.network import TCPServeValue + from donkeycar.parts.image import ImgArrToJpg + pub = TCPServeValue("camera") + V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin']) + V.add(pub, inputs=['jpg/bin']) + + if type(ctr) is LocalWebController: + if cfg.DONKEY_GYM: + print("You can now go to http://localhost:%d to drive your car." % cfg.WEB_CONTROL_PORT) + else: + print("You can now go to :%d to drive your car." % cfg.WEB_CONTROL_PORT) + elif isinstance(ctr, JoystickController): + print("You can now move your joystick to drive your car.") + ctr.set_tub(tub_writer.tub) + ctr.print_controls() + + #run the vehicle for 20 seconds + V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS) + + +if __name__ == '__main__': + args = docopt(__doc__) + cfg = dk.load_config(myconfig=args['--myconfig']) + + if args['drive']: + model_type = args['--type'] + camera_type = args['--camera'] + drive(cfg, model_path=args['--model'], use_joystick=args['--js'], + model_type=model_type, camera_type=camera_type, + meta=args['--meta']) + elif args['train']: + print('Use python train.py instead.\n') + diff --git a/donkeycar/templates/train.py b/donkeycar/templates/train.py old mode 100755 new mode 100644 index 29a3b673b..729d99a8d --- a/donkeycar/templates/train.py +++ b/donkeycar/templates/train.py @@ -1,1043 +1,28 @@ #!/usr/bin/env python3 """ Scripts to train a keras model using tensorflow. -Uses the data written by the donkey v2.2 tub writer, -but faster training with proper sampling of distribution over tubs. -Has settings for continuous training that will look for new files as it trains. -Modify on_best_model if you wish continuous training to update your pi as it builds. -You can drop this in your ~/mycar dir. 
-Basic usage should feel familiar: python train.py --model models/mypilot - +Basic usage should feel familiar: train.py --tubs data/ --model models/mypilot.h5 Usage: - train.py [--tub=] [--file= ...] (--model=) [--transfer=] [--type=(linear|latent|categorical|rnn|imu|behavior|3d|look_ahead|tensorrt_linear|tflite_linear|coral_tflite_linear)] [--figure_format=] [--continuous] [--aug] + train.py [--tubs=tubs] (--model=) [--type=(linear|inferred|tensorrt_linear|tflite_linear)] Options: -h --help Show this screen. - -f --file= A text file containing paths to tub files, one per line. Option may be used more than once. - --figure_format=png The file format of the generated figure (see https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html), e.g. 'png', 'pdf', 'svg', ... """ -import os -import glob -import random -import json -import time -import zlib -from os.path import basename, join, splitext, dirname -import pickle -import datetime -from tensorflow.python import keras from docopt import docopt -import numpy as np -from PIL import Image - import donkeycar as dk -from donkeycar.parts.datastore import Tub -from donkeycar.parts.keras import KerasLinear, KerasIMU,\ - KerasCategorical, KerasBehavioral, Keras3D_CNN,\ - KerasRNN_LSTM, KerasLatent, KerasLocalizer -from donkeycar.parts.augment import augment_image -from donkeycar.utils import * - -figure_format = 'png' - - -''' -matplotlib can be a pain to setup on a Mac. So handle the case where it is absent. When present, -use it to generate a plot of training results. -''' -try: - import matplotlib.pyplot as plt - do_plot = True -except: - do_plot = False - print("matplotlib not installed") - - -''' -Tub management -''' - - -def make_key(sample): - tub_path = sample['tub_path'] - index = sample['index'] - return tub_path + str(index) - - -def make_next_key(sample, index_offset): - tub_path = sample['tub_path'] - index = sample['index'] + index_offset - return tub_path + str(index) - - -def collate_records(records, gen_records, opts): - ''' - open all the .json records from records list passed in, - read their contents, - add them to a list of gen_records, passed in. 
- use the opts dict to specify config choices - ''' - - new_records = {} - - for record_path in records: - - basepath = os.path.dirname(record_path) - index = get_record_index(record_path) - sample = { 'tub_path' : basepath, "index" : index } - - key = make_key(sample) - - if key in gen_records: - continue - - try: - with open(record_path, 'r') as fp: - json_data = json.load(fp) - except: - continue - - image_filename = json_data["cam/image_array"] - image_path = os.path.join(basepath, image_filename) - - sample['record_path'] = record_path - sample["image_path"] = image_path - sample["json_data"] = json_data - - angle = float(json_data['user/angle']) - throttle = float(json_data["user/throttle"]) - - if opts['categorical']: - angle = dk.utils.linear_bin(angle) - throttle = dk.utils.linear_bin(throttle, N=20, offset=0, R=opts['cfg'].MODEL_CATEGORICAL_MAX_THROTTLE_RANGE) - - sample['angle'] = angle - sample['throttle'] = throttle - - try: - accl_x = float(json_data['imu/acl_x']) - accl_y = float(json_data['imu/acl_y']) - accl_z = float(json_data['imu/acl_z']) - - gyro_x = float(json_data['imu/gyr_x']) - gyro_y = float(json_data['imu/gyr_y']) - gyro_z = float(json_data['imu/gyr_z']) - - sample['imu_array'] = np.array([accl_x, accl_y, accl_z, gyro_x, gyro_y, gyro_z]) - except: - pass - - try: - behavior_arr = np.array(json_data['behavior/one_hot_state_array']) - sample["behavior_arr"] = behavior_arr - except: - pass - - try: - location_arr = np.array(json_data['location/one_hot_state_array']) - sample["location"] = location_arr - except: - pass - - - sample['img_data'] = None - - # Initialise 'train' to False - sample['train'] = False - - # We need to maintain the correct train - validate ratio across the dataset, even if continous training - # so don't add this sample to the main records list (gen_records) yet. 
- new_records[key] = sample - - # new_records now contains all our NEW samples - # - set a random selection to be the training samples based on the ratio in CFG file - shufKeys = list(new_records.keys()) - random.shuffle(shufKeys) - trainCount = 0 - # Ratio of samples to use as training data, the remaining are used for evaluation - targetTrainCount = int(opts['cfg'].TRAIN_TEST_SPLIT * len(shufKeys)) - for key in shufKeys: - new_records[key]['train'] = True - trainCount += 1 - if trainCount >= targetTrainCount: - break - # Finally add all the new records to the existing list - gen_records.update(new_records) - -def save_json_and_weights(model, filename): - ''' - given a keras model and a .h5 filename, save the model file - in the json format and the weights file in the h5 format - ''' - if not '.h5' == filename[-3:]: - raise Exception("Model filename should end with .h5") - - arch = model.to_json() - json_fnm = filename[:-2] + "json" - weights_fnm = filename[:-2] + "weights" - - with open(json_fnm, "w") as outfile: - parsed = json.loads(arch) - arch_pretty = json.dumps(parsed, indent=4, sort_keys=True) - outfile.write(arch_pretty) - - model.save_weights(weights_fnm) - return json_fnm, weights_fnm - - -class MyCPCallback(keras.callbacks.ModelCheckpoint): - ''' - custom callback to interact with best val loss during continuous training - ''' - - def __init__(self, send_model_cb=None, cfg=None, *args, **kwargs): - super().__init__(*args, **kwargs) - self.reset_best_end_of_epoch = False - self.send_model_cb = send_model_cb - self.last_modified_time = None - self.cfg = cfg - - def reset_best(self): - self.reset_best_end_of_epoch = True - - def on_epoch_end(self, epoch, logs=None): - super().on_epoch_end(epoch, logs) - - if self.send_model_cb: - ''' - check whether the file changed and send to the pi - ''' - filepath = self.filepath.format(epoch=epoch, **logs) - if os.path.exists(filepath): - last_modified_time = os.path.getmtime(filepath) - if self.last_modified_time is None or self.last_modified_time < last_modified_time: - self.last_modified_time = last_modified_time - self.send_model_cb(self.cfg, self.model, filepath) - - ''' - when reset best is set, we want to make sure to run an entire epoch - before setting our new best on the new total records - ''' - if self.reset_best_end_of_epoch: - self.reset_best_end_of_epoch = False - self.best = np.Inf - - -def on_best_model(cfg, model, model_filename): - - on_windows = os.name == 'nt' - - # If we wish, send the best model to the pi. - # On mac or linux we have scp: - if not on_windows: - print('sending model to the pi') - - command = 'scp %s %s@%s:~/%s/models/;' % (model_filename, cfg.PI_USERNAME, cfg.PI_HOSTNAME, cfg.PI_DONKEY_ROOT) - - print("sending", command) - res = os.system(command) - print(res) - - else: #yes, we are on windows machine - - #On windoz no scp. In order to use this you must first setup - #an ftp daemon on the pi. ie. 
sudo apt-get install vsftpd - #and then make sure you enable write permissions in the conf - try: - import paramiko - except: - raise Exception("first install paramiko: pip install paramiko") - - host = cfg.PI_HOSTNAME - username = cfg.PI_USERNAME - password = cfg.PI_PASSWD - server = host - files = [] - - localpath = model_filename - remotepath = '/home/%s/%s/%s' %(username, cfg.PI_DONKEY_ROOT, model_filename.replace('\\', '/')) - files.append((localpath, remotepath)) - - print("sending", files) - - try: - ssh = paramiko.SSHClient() - ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - ssh.load_host_keys(os.path.expanduser(os.path.join("~", ".ssh", "known_hosts"))) - ssh.connect(server, username=username, password=password) - sftp = ssh.open_sftp() - - for localpath, remotepath in files: - sftp.put(localpath, remotepath) - - sftp.close() - ssh.close() - print("send succeded") - except: - print("send failed") - - -def train(cfg, tub_names, model_name, transfer_model, model_type, continuous, aug): - ''' - use the specified data in tub_names to train an artifical neural network - saves the output trained model as model_name - ''' - verbose = cfg.VERBOSE_TRAIN - - if model_type is None: - model_type = cfg.DEFAULT_MODEL_TYPE - - if "tflite" in model_type: - #even though we are passed the .tflite output file, we train with an intermediate .h5 - #output and then convert to final .tflite at the end. - assert(".tflite" in model_name) - #we only support the linear model type right now for tflite - assert("linear" in model_type) - model_name = model_name.replace(".tflite", ".h5") - elif "tensorrt" in model_type: - #even though we are passed the .uff output file, we train with an intermediate .h5 - #output and then convert to final .uff at the end. - assert(".uff" in model_name) - #we only support the linear model type right now for tensorrt - assert("linear" in model_type) - model_name = model_name.replace(".uff", ".h5") - - if model_name and not '.h5' == model_name[-3:]: - raise Exception("Model filename should end with .h5") - - if continuous: - print("continuous training") - - gen_records = {} - opts = { 'cfg' : cfg} - - if "linear" in model_type: - train_type = "linear" - else: - train_type = model_type - - kl = get_model_by_type(train_type, cfg=cfg) - - opts['categorical'] = type(kl) in [KerasCategorical, KerasBehavioral] - - print('training with model type', type(kl)) - - if transfer_model: - print('loading weights from model', transfer_model) - kl.load(transfer_model) - - #when transfering models, should we freeze all but the last N layers? - if cfg.FREEZE_LAYERS: - num_to_freeze = len(kl.model.layers) - cfg.NUM_LAST_LAYERS_TO_TRAIN - print('freezing %d layers' % num_to_freeze) - for i in range(num_to_freeze): - kl.model.layers[i].trainable = False - - if cfg.OPTIMIZER: - kl.set_optimizer(cfg.OPTIMIZER, cfg.LEARNING_RATE, cfg.LEARNING_RATE_DECAY) - - kl.compile() - - if cfg.PRINT_MODEL_SUMMARY: - print(kl.model.summary()) - - opts['keras_pilot'] = kl - opts['continuous'] = continuous - opts['model_type'] = model_type - - extract_data_from_pickles(cfg, tub_names) - - records = gather_records(cfg, tub_names, opts, verbose=True) - print('collating %d records ...' % (len(records))) - collate_records(records, gen_records, opts) - - def generator(save_best, opts, data, batch_size, isTrainSet=True, min_records_to_train=1000): - - num_records = len(data) - - while True: - - if isTrainSet and opts['continuous']: - ''' - When continuous training, we look for new records after each epoch. 
- This will add new records to the train and validation set. - ''' - records = gather_records(cfg, tub_names, opts) - if len(records) > num_records: - collate_records(records, gen_records, opts) - new_num_rec = len(data) - if new_num_rec > num_records: - print('picked up', new_num_rec - num_records, 'new records!') - num_records = new_num_rec - save_best.reset_best() - if num_records < min_records_to_train: - print("not enough records to train. need %d, have %d. waiting..." % (min_records_to_train, num_records)) - time.sleep(10) - continue - - batch_data = [] - - keys = list(data.keys()) - - random.shuffle(keys) - - kl = opts['keras_pilot'] - - if type(kl.model.output) is list: - model_out_shape = (2, 1) - else: - model_out_shape = kl.model.output.shape - - if type(kl.model.input) is list: - model_in_shape = (2, 1) - else: - model_in_shape = kl.model.input.shape - - has_imu = type(kl) is KerasIMU - has_bvh = type(kl) is KerasBehavioral - img_out = type(kl) is KerasLatent - loc_out = type(kl) is KerasLocalizer - - if img_out: - import cv2 - - for key in keys: - - if not key in data: - continue - - _record = data[key] - - if _record['train'] != isTrainSet: - continue - - if continuous: - #in continuous mode we need to handle files getting deleted - filename = _record['image_path'] - if not os.path.exists(filename): - data.pop(key, None) - continue - - batch_data.append(_record) - - if len(batch_data) == batch_size: - inputs_img = [] - inputs_imu = [] - inputs_bvh = [] - angles = [] - throttles = [] - out_img = [] - out_loc = [] - out = [] - - for record in batch_data: - #get image data if we don't already have it - if record['img_data'] is None: - filename = record['image_path'] - - img_arr = load_scaled_image_arr(filename, cfg) - - if img_arr is None: - break - - if aug: - img_arr = augment_image(img_arr) - - if cfg.CACHE_IMAGES: - record['img_data'] = img_arr - else: - img_arr = record['img_data'] - - if img_out: - rz_img_arr = cv2.resize(img_arr, (127, 127)) / 255.0 - out_img.append(rz_img_arr[:,:,0].reshape((127, 127, 1))) - - if loc_out: - out_loc.append(record['location']) - - if has_imu: - inputs_imu.append(record['imu_array']) - - if has_bvh: - inputs_bvh.append(record['behavior_arr']) - - inputs_img.append(img_arr) - angles.append(record['angle']) - throttles.append(record['throttle']) - out.append([record['angle'], record['throttle']]) - - if img_arr is None: - continue - - img_arr = np.array(inputs_img).reshape(batch_size,\ - cfg.TARGET_H, cfg.TARGET_W, cfg.TARGET_D) - - if has_imu: - X = [img_arr, np.array(inputs_imu)] - elif has_bvh: - X = [img_arr, np.array(inputs_bvh)] - else: - X = [img_arr] - - if img_out: - y = [out_img, np.array(angles), np.array(throttles)] - elif out_loc: - y = [ np.array(angles), np.array(throttles), np.array(out_loc)] - elif model_out_shape[1] == 2: - y = [np.array([out]).reshape(batch_size, 2) ] - else: - y = [np.array(angles), np.array(throttles)] - - yield X, y - - batch_data = [] - - model_path = os.path.expanduser(model_name) - - # checkpoint to save model after each epoch and send best to the pi. 
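The batch assembly above hands Keras X as a list of input arrays (image plus an IMU or behavior vector) and y as a list of output arrays. A minimal two-input, two-output functional model (an illustration with arbitrary layer sizes written against tf.keras, not donkeycar's KerasIMU architecture) shows how those lists line up positionally:

import tensorflow as tf
from tensorflow.keras import layers

img_in = tf.keras.Input(shape=(120, 160, 3), name='img_in')
imu_in = tf.keras.Input(shape=(6,), name='imu_in')
x = layers.Conv2D(8, 3, activation='relu')(img_in)
x = layers.Flatten()(x)
x = layers.concatenate([x, imu_in])
angle = layers.Dense(1, name='angle')(x)
throttle = layers.Dense(1, name='throttle')(x)
model = tf.keras.Model(inputs=[img_in, imu_in], outputs=[angle, throttle])
# fit() then consumes (X, y) pairs where X = [img_batch, imu_batch] and
# y = [angle_batch, throttle_batch], matching the generator's yield above.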
- send_model_cb = on_best_model if cfg.SEND_BEST_MODEL_TO_PI else None - save_best = MyCPCallback(send_model_cb=send_model_cb, - filepath=model_path, - monitor='val_loss', - verbose=verbose, - save_best_only=True, - mode='min', - cfg=cfg) - - train_gen = generator(save_best, opts, gen_records, cfg.BATCH_SIZE, True) - val_gen = generator(save_best, opts, gen_records, cfg.BATCH_SIZE, False) - - total_records = len(gen_records) - - num_train = 0 - num_val = 0 - - for key, _record in gen_records.items(): - if _record['train'] == True: - num_train += 1 - else: - num_val += 1 - - print("train: %d, val: %d" % (num_train, num_val)) - print('total records: %d' %(total_records)) - - if not continuous: - steps_per_epoch = num_train // cfg.BATCH_SIZE - else: - steps_per_epoch = 100 - - val_steps = num_val // cfg.BATCH_SIZE - print('steps_per_epoch', steps_per_epoch) - - cfg.model_type = model_type - - go_train(kl, cfg, train_gen, val_gen, gen_records, model_name, steps_per_epoch, val_steps, continuous, verbose, save_best) - - - -def go_train(kl, cfg, train_gen, val_gen, gen_records, model_name, steps_per_epoch, val_steps, continuous, verbose, save_best=None): - - start = time.time() +from donkeycar.pipeline.training import train - model_path = os.path.expanduser(model_name) - send_model_cb = on_best_model if cfg.SEND_BEST_MODEL_TO_PI else None - # checkpoint to save model after each epoch and send best to the pi. - if save_best is None: - save_best = MyCPCallback(send_model_cb=send_model_cb, - filepath=model_path, - monitor='val_loss', - verbose=verbose, - save_best_only=True, - mode='min', - cfg=cfg) - #stop training if the validation error stops improving. - early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', - min_delta=cfg.MIN_DELTA, - patience=cfg.EARLY_STOP_PATIENCE, - verbose=verbose, - mode='auto') - - if steps_per_epoch < 2: - raise Exception("Too little data to train. Please record more records.") - - if continuous: - epochs = 100000 - else: - epochs = cfg.MAX_EPOCHS - - workers_count = 1 - use_multiprocessing = False - - callbacks_list = [save_best] - - if cfg.USE_EARLY_STOP and not continuous: - callbacks_list.append(early_stop) - - history = kl.model.fit_generator( - train_gen, - steps_per_epoch=steps_per_epoch, - epochs=epochs, - verbose=cfg.VERBOSE_TRAIN, - validation_data=val_gen, - callbacks=callbacks_list, - validation_steps=val_steps, - workers=workers_count, - use_multiprocessing=use_multiprocessing) - - full_model_val_loss = min(history.history['val_loss']) - max_val_loss = full_model_val_loss + cfg.PRUNE_VAL_LOSS_DEGRADATION_LIMIT - - duration_train = time.time() - start - print("Training completed in %s." % str(datetime.timedelta(seconds=round(duration_train))) ) - - print("\n\n----------- Best Eval Loss :%f ---------" % save_best.best) - - if cfg.SHOW_PLOT: - try: - if do_plot: - plt.figure(1) - - # Only do accuracy if we have that data (e.g. 
categorical outputs) - if 'angle_out_acc' in history.history: - plt.subplot(121) - - # summarize history for loss - plt.plot(history.history['loss']) - plt.plot(history.history['val_loss']) - plt.title('model loss') - plt.ylabel('loss') - plt.xlabel('epoch') - plt.legend(['train', 'validate'], loc='upper right') - - # summarize history for acc - if 'angle_out_acc' in history.history: - plt.subplot(122) - plt.plot(history.history['angle_out_acc']) - plt.plot(history.history['val_angle_out_acc']) - plt.title('model angle accuracy') - plt.ylabel('acc') - plt.xlabel('epoch') - #plt.legend(['train', 'validate'], loc='upper left') - - plt.savefig(model_path + '_loss_acc_%f.%s' % (save_best.best, figure_format)) - plt.show() - else: - print("not saving loss graph because matplotlib not set up.") - except Exception as ex: - print("problems with loss graph: {}".format( ex ) ) - - #Save tflite, optionally in the int quant format for Coral TPU - if "tflite" in cfg.model_type: - print("\n\n--------- Saving TFLite Model ---------") - tflite_fnm = model_path.replace(".h5", ".tflite") - assert(".tflite" in tflite_fnm) - - prepare_for_coral = "coral" in cfg.model_type - - if prepare_for_coral: - #compile a list of records to calibrate the quantization - data_list = [] - max_items = 1000 - for key, _record in gen_records.items(): - data_list.append(_record) - if len(data_list) == max_items: - break - - stride = 1 - num_calibration_steps = len(data_list) // stride - - #a generator function to help train the quantizer with the expected range of data from inputs - def representative_dataset_gen(): - start = 0 - end = stride - for _ in range(num_calibration_steps): - batch_data = data_list[start:end] - inputs = [] - - for record in batch_data: - filename = record['image_path'] - img_arr = load_scaled_image_arr(filename, cfg) - inputs.append(img_arr) - - start += stride - end += stride - - # Get sample input data as a numpy array in a method of your choosing. 
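The yield below hands the quantizer one calibration batch at a time. For reference, a full-integer conversion that consumes such a generator looks roughly like this in the TensorFlow 2.x API (a sketch of what a helper like keras_model_to_tflite may do for the Coral case, not the donkeycar implementation itself):

import tensorflow as tf

def convert_full_int8(keras_model, representative_dataset_gen):
    # Quantize weights and activations to int8, using the representative
    # dataset to calibrate activation ranges.
    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset_gen
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    return converter.convert()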
- yield [ np.array(inputs, dtype=np.float32).reshape(stride, cfg.TARGET_H, cfg.TARGET_W, cfg.TARGET_D) ] - else: - representative_dataset_gen = None - - from donkeycar.parts.tflite import keras_model_to_tflite - keras_model_to_tflite(model_path, tflite_fnm, representative_dataset_gen) - print("Saved TFLite model:", tflite_fnm) - if prepare_for_coral: - print("compile for Coral w: edgetpu_compiler", tflite_fnm) - os.system("edgetpu_compiler " + tflite_fnm) - - #Save tensorrt - if "tensorrt" in cfg.model_type: - print("\n\n--------- Saving TensorRT Model ---------") - # TODO RAHUL - # flatten model_path - # convert to uff - # print("Saved TensorRT model:", uff_filename) - - -def sequence_train(cfg, tub_names, model_name, transfer_model, model_type, continuous, aug): - ''' - use the specified data in tub_names to train an artifical neural network - saves the output trained model as model_name - trains models which take sequence of images - ''' - assert(not continuous) - - print("sequence of images training") - - kl = dk.utils.get_model_by_type(model_type=model_type, cfg=cfg) - - if cfg.PRINT_MODEL_SUMMARY: - print(kl.model.summary()) - - tubs = gather_tubs(cfg, tub_names) - - verbose = cfg.VERBOSE_TRAIN - - records = [] - - for tub in tubs: - record_paths = glob.glob(os.path.join(tub.path, 'record_*.json')) - print("Tub:", tub.path, "has", len(record_paths), 'records') - - record_paths.sort(key=get_record_index) - records += record_paths - - - print('collating records') - gen_records = {} - - for record_path in records: - - with open(record_path, 'r') as fp: - json_data = json.load(fp) - - basepath = os.path.dirname(record_path) - image_filename = json_data["cam/image_array"] - image_path = os.path.join(basepath, image_filename) - sample = { 'record_path' : record_path, "image_path" : image_path, "json_data" : json_data } - - sample["tub_path"] = basepath - sample["index"] = get_image_index(image_filename) - - angle = float(json_data['user/angle']) - throttle = float(json_data["user/throttle"]) - - sample['target_output'] = np.array([angle, throttle]) - sample['angle'] = angle - sample['throttle'] = throttle - - - sample['img_data'] = None - - key = make_key(sample) - - gen_records[key] = sample - - - - print('collating sequences') - - sequences = [] - - target_len = cfg.SEQUENCE_LENGTH - look_ahead = False - - if model_type == "look_ahead": - target_len = cfg.SEQUENCE_LENGTH * 2 - look_ahead = True - - for k, sample in gen_records.items(): - - seq = [] - - for i in range(target_len): - key = make_next_key(sample, i) - if key in gen_records: - seq.append(gen_records[key]) - else: - continue - - if len(seq) != target_len: - continue - - sequences.append(seq) - - print("collated", len(sequences), "sequences of length", target_len) - - #shuffle and split the data - train_data, val_data = train_test_split(sequences, test_size=(1 - cfg.TRAIN_TEST_SPLIT)) - - - def generator(data, opt, batch_size=cfg.BATCH_SIZE): - num_records = len(data) - - while True: - #shuffle again for good measure - random.shuffle(data) - - for offset in range(0, num_records, batch_size): - batch_data = data[offset:offset+batch_size] - - if len(batch_data) != batch_size: - break - - b_inputs_img = [] - b_vec_in = [] - b_labels = [] - b_vec_out = [] - - for seq in batch_data: - inputs_img = [] - vec_in = [] - labels = [] - vec_out = [] - num_images_target = len(seq) - iTargetOutput = -1 - if opt['look_ahead']: - num_images_target = cfg.SEQUENCE_LENGTH - iTargetOutput = cfg.SEQUENCE_LENGTH - 1 - - for iRec, record in 
enumerate(seq): - #get image data if we don't already have it - if len(inputs_img) < num_images_target: - if record['img_data'] is None: - img_arr = load_scaled_image_arr(record['image_path'], cfg) - if img_arr is None: - break - if aug: - img_arr = augment_image(img_arr) - - if cfg.CACHE_IMAGES: - record['img_data'] = img_arr - else: - img_arr = record['img_data'] - - inputs_img.append(img_arr) - - if iRec >= iTargetOutput: - vec_out.append(record['angle']) - vec_out.append(record['throttle']) - else: - vec_in.append(0.0) #record['angle']) - vec_in.append(0.0) #record['throttle']) - - label_vec = seq[iTargetOutput]['target_output'] - - if look_ahead: - label_vec = np.array(vec_out) - - labels.append(label_vec) - - b_inputs_img.append(inputs_img) - b_vec_in.append(vec_in) - - b_labels.append(labels) - - - if look_ahead: - X = [np.array(b_inputs_img).reshape(batch_size,\ - cfg.TARGET_H, cfg.TARGET_W, cfg.SEQUENCE_LENGTH)] - X.append(np.array(b_vec_in)) - y = np.array(b_labels).reshape(batch_size, (cfg.SEQUENCE_LENGTH + 1) * 2) - else: - X = [np.array(b_inputs_img).reshape(batch_size,\ - cfg.SEQUENCE_LENGTH, cfg.TARGET_H, cfg.TARGET_W, cfg.TARGET_D)] - y = np.array(b_labels).reshape(batch_size, 2) - - yield X, y - - opt = { 'look_ahead' : look_ahead, 'cfg' : cfg } - - train_gen = generator(train_data, opt) - val_gen = generator(val_data, opt) - - model_path = os.path.expanduser(model_name) - - total_records = len(sequences) - total_train = len(train_data) - total_val = len(val_data) - - print('train: %d, validation: %d' %(total_train, total_val)) - steps_per_epoch = total_train // cfg.BATCH_SIZE - val_steps = total_val // cfg.BATCH_SIZE - print('steps_per_epoch', steps_per_epoch) - - if steps_per_epoch < 2: - raise Exception("Too little data to train. Please record more records.") - - cfg.model_type = model_type - - go_train(kl, cfg, train_gen, val_gen, gen_records, model_name, steps_per_epoch, val_steps, continuous, verbose) - - ''' - kl.train(train_gen, - val_gen, - saved_model_path=model_path, - steps=steps_per_epoch, - train_split=cfg.TRAIN_TEST_SPLIT, - use_early_stop = cfg.USE_EARLY_STOP) - ''' - - -def multi_train(cfg, tub, model, transfer, model_type, continuous, aug): - ''' - choose the right regime for the given model type - ''' - train_fn = train - if model_type in ("rnn",'3d','look_ahead'): - train_fn = sequence_train - - train_fn(cfg, tub, model, transfer, model_type, continuous, aug) - - -def prune(model, validation_generator, val_steps, cfg): - percent_pruning = float(cfg.PRUNE_PERCENT_PER_ITERATION) - total_channels = get_total_channels(model) - n_channels_delete = int(math.floor(percent_pruning / 100 * total_channels)) - - apoz_df = get_model_apoz(model, validation_generator) - - model = prune_model(model, apoz_df, n_channels_delete) - - name = '{}/model_pruned_{}_percent.h5'.format(cfg.MODELS_PATH, percent_pruning) - - model.save(name) - - return model, n_channels_delete - - -def extract_data_from_pickles(cfg, tubs): - """ - Extracts record_{id}.json and image from a pickle with the same id if exists in the tub. - Then writes extracted json/jpg along side the source pickle that tub. - This assumes the format {id}.pickle in the tub directory. - :param cfg: config with data location configuration. Generally the global config object. - :param tubs: The list of tubs involved in training. - :return: implicit None. 
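For reference, the pickle layout this function assumes is a zlib-compressed pickled dict with the record stored under a 'val' key; a hypothetical writer for that format (illustrative only, not donkeycar code) would be:

import os
import pickle
import zlib

def write_pickle_record(tub_path, record_id, record):
    # Inverse of the extraction below: write '{id}.pickle' containing a
    # zlib-compressed pickle with the record dict under 'val'.
    blob = zlib.compress(pickle.dumps({'val': record}))
    with open(os.path.join(tub_path, '{}.pickle'.format(record_id)), 'wb') as f:
        f.write(blob)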
- """ - t_paths = gather_tub_paths(cfg, tubs) - for tub_path in t_paths: - file_paths = glob.glob(join(tub_path, '*.pickle')) - print('found {} pickles writing json records and images in tub {}'.format(len(file_paths), tub_path)) - for file_path in file_paths: - # print('loading data from {}'.format(file_paths)) - with open(file_path, 'rb') as f: - p = zlib.decompress(f.read()) - data = pickle.loads(p) - - base_path = dirname(file_path) - filename = splitext(basename(file_path))[0] - image_path = join(base_path, filename + '.jpg') - img = Image.fromarray(np.uint8(data['val']['cam/image_array'])) - img.save(image_path) - - data['val']['cam/image_array'] = filename + '.jpg' - - with open(join(base_path, 'record_{}.json'.format(filename)), 'w') as f: - json.dump(data['val'], f) - - -def prune_model(model, apoz_df, n_channels_delete): - from kerassurgeon import Surgeon - import pandas as pd - - # Identify 5% of channels with the highest APoZ in model - sorted_apoz_df = apoz_df.sort_values('apoz', ascending=False) - high_apoz_index = sorted_apoz_df.iloc[0:n_channels_delete, :] - - # Create the Surgeon and add a 'delete_channels' job for each layer - # whose channels are to be deleted. - surgeon = Surgeon(model, copy=True) - for name in high_apoz_index.index.unique().values: - channels = list(pd.Series(high_apoz_index.loc[name, 'index'], - dtype=np.int64).values) - surgeon.add_job('delete_channels', model.get_layer(name), - channels=channels) - # Delete channels - return surgeon.operate() - - -def get_total_channels(model): - start = None - end = None - channels = 0 - for layer in model.layers[start:end]: - if layer.__class__.__name__ == 'Conv2D': - channels += layer.filters - return channels - - -def get_model_apoz(model, generator): - from kerassurgeon.identify import get_apoz - import pandas as pd - - # Get APoZ - start = None - end = None - apoz = [] - for layer in model.layers[start:end]: - if layer.__class__.__name__ == 'Conv2D': - print(layer.name) - apoz.extend([(layer.name, i, value) for (i, value) - in enumerate(get_apoz(model, layer, generator))]) - - layer_name, index, apoz_value = zip(*apoz) - apoz_df = pd.DataFrame({'layer': layer_name, 'index': index, - 'apoz': apoz_value}) - apoz_df = apoz_df.set_index('layer') - return apoz_df - - -def removeComments( dir_list ): - for i in reversed(range(len(dir_list))): - if dir_list[i].startswith("#"): - del dir_list[i] - elif len(dir_list[i]) == 0: - del dir_list[i] - -def preprocessFileList( filelist ): - dirs = [] - if filelist is not None: - for afile in filelist: - with open(afile, "r") as f: - tmp_dirs = f.read().split('\n') - dirs.extend(tmp_dirs) - - removeComments( dirs ) - return dirs - -if __name__ == "__main__": +def main(): args = docopt(__doc__) cfg = dk.load_config() - tub = args['--tub'] + tubs = args['--tubs'] model = args['--model'] - transfer = args['--transfer'] model_type = args['--type'] + train(cfg, tubs, model, model_type) - if model_type is None: - model_type = cfg.DEFAULT_MODEL_TYPE - print("using default model type of", model_type) - - if args['--figure_format']: - figure_format = args['--figure_format'] - continuous = args['--continuous'] - aug = args['--aug'] - - dirs = preprocessFileList( args['--file'] ) - if tub is not None: - tub_paths = [os.path.expanduser(n) for n in tub.split(',')] - dirs.extend( tub_paths ) - multi_train(cfg, dirs, model, transfer, model_type, continuous, aug) +if __name__ == "__main__": + main() diff --git a/donkeycar/tests/setup.py b/donkeycar/tests/setup.py index 67ef629d7..19d850b80 
100644 --- a/donkeycar/tests/setup.py +++ b/donkeycar/tests/setup.py @@ -1,38 +1,44 @@ import os import platform import pytest -from donkeycar.parts.datastore import Tub +from donkeycar.parts.tub_v2 import Tub from donkeycar.parts.simulation import SquareBoxCamera, MovingSquareTelemetry from donkeycar.management.base import CreateCar -import numpy as np + def on_pi(): if 'arm' in platform.machine(): return True return False + temp_tub_path = None + @pytest.fixture def tub_path(tmpdir): tub_path = tmpdir.mkdir('tubs').join('tub') return str(tub_path) + @pytest.fixture def tub(tub_path): t = create_sample_tub(tub_path, records=128) return t + @pytest.fixture def tubs(tmpdir, tubs=5): tubs_dir = tmpdir.mkdir('tubs') - tub_paths = [ str(tubs_dir.join('tub_{}'.format(i))) for i in range(tubs) ] - tubs = [ create_sample_tub(tub_path, records=5) for tub_path in tub_paths ] - return (str(tubs_dir), tub_paths, tubs) - + tub_paths = [str(tubs_dir.join('tub_{}'.format(i))) for i in range(tubs)] + tubs = [create_sample_tub(tub_path, records=5) for tub_path in tub_paths] + return str(tubs_dir), tub_paths, tubs + + def create_sample_tub(path, records=128): - inputs=['cam/image_array', 'user/angle', 'user/throttle', 'location/one_hot_state_array'] - types=['image_array', 'float', 'float', 'vector'] + inputs = ['cam/image_array', 'user/angle', 'user/throttle', + 'location/one_hot_state_array'] + types = ['image_array', 'float', 'float', 'vector'] t = Tub(path, inputs=inputs, types=types) cam = SquareBoxCamera() tel = MovingSquareTelemetry() @@ -40,9 +46,11 @@ def create_sample_tub(path, records=128): for _ in range(records): x, y = tel.run() img_arr = cam.run(x, y) - loc = [0 for i in range(num_loc)] + loc = [0.0] * num_loc loc[1] = 1.0 - t.put_record({'cam/image_array': img_arr, 'user/angle': x, 'user/throttle':y , 'location/one_hot_state_array':loc}) + t.write_record( + {'cam/image_array': img_arr, 'user/angle': x, 'user/throttle': y, + 'location/one_hot_state_array': loc}) global temp_tub_path temp_tub_path = t @@ -50,19 +58,22 @@ def create_sample_tub(path, records=128): return t + def d2_path(temp_path): path = os.path.join(temp_path, 'd2') return str(path) -def default_template(d2_path): + +def default_template(car_dir): c = CreateCar() - c.create_car(d2_path, overwrite=True) - return d2_path + c.create_car(car_dir, template='complete', overwrite=True) + return car_dir + -def custom_template(d2_path, template): +def custom_template(car_dir, template): c = CreateCar() - c.create_car(d2_path, template=template, overwrite=True) - return d2_path + c.create_car(car_dir, template=template, overwrite=True) + return car_dir def create_sample_record(): @@ -70,4 +81,4 @@ def create_sample_record(): tel = MovingSquareTelemetry() x, y = tel.run() img_arr = cam.run(x, y) - return {'cam/image_array': img_arr, 'angle': x, 'throttle':y} \ No newline at end of file + return {'cam/image_array': img_arr, 'angle': x, 'throttle': y} diff --git a/donkeycar/tests/test_catalog_v2.py b/donkeycar/tests/test_catalog_v2.py new file mode 100644 index 000000000..29d1c7097 --- /dev/null +++ b/donkeycar/tests/test_catalog_v2.py @@ -0,0 +1,44 @@ +import os +import shutil +import tempfile +import time +import unittest +from pathlib import Path + +from donkeycar.parts.datastore_v2 import Catalog, CatalogMetadata, Seekable + + +class TestCatalog(unittest.TestCase): + + def setUp(self): + self._path = tempfile.mkdtemp() + self._catalog_path = os.path.join(self._path, 'test.catalog') + + def test_basic_catalog_operations(self): + 
catalog = Catalog(self._catalog_path) + for i in range(0, 10): + catalog.write_record(self._newRecord()) + + self.assertEqual(os.path.exists(catalog.path.as_posix()), True) + self.assertEqual(os.path.exists(catalog.manifest.manifest_path.as_posix()), True) + + catalog_2 = Catalog(self._catalog_path) + catalog_2.seekable.seek_line_start(1) + line = catalog_2.seekable.readline() + count = 0 + while line is not None and len(line) > 0: + print('Contents %s' % (line)) + count += 1 + line = catalog_2.seekable.readline() + + self.assertEqual(count, 10) + + def tearDown(self): + shutil.rmtree(self._path) + + def _newRecord(self): + record = {'at' : time.time()} + return record + +if __name__ == '__main__': + unittest.main() diff --git a/donkeycar/tests/test_datastore_v2.py b/donkeycar/tests/test_datastore_v2.py new file mode 100644 index 000000000..1edad5ac6 --- /dev/null +++ b/donkeycar/tests/test_datastore_v2.py @@ -0,0 +1,72 @@ +import os +import shutil +import tempfile +import time +import unittest +from pathlib import Path + +from donkeycar.parts.datastore_v2 import Manifest + + +class TestDatastore(unittest.TestCase): + + def setUp(self): + self._path = tempfile.mkdtemp() + + def test_basic_datastore_operations(self): + # 2 records per catalog entry in the manifest + manifest = Manifest(self._path, max_len=2) + count = 10 + for i in range(count): + manifest.write_record(self._newRecord()) + + read_records = 0 + for entry in manifest: + print('Entry %s' % (entry)) + read_records += 1 + + self.assertEqual(count, read_records) + + def test_deletion(self): + manifest = Manifest(self._path, max_len=2) + count = 10 + deleted = 5 + for i in range(count): + manifest.write_record(self._newRecord()) + + for i in range(deleted): + manifest.delete_record(i) + + read_records = 0 + for entry in manifest: + print('Entry %s' % (entry)) + read_records += 1 + + self.assertEqual((count - deleted), read_records) + + + def test_memory_mapped_read(self): + manifest = Manifest(self._path, max_len=2) + for i in range(10): + manifest.write_record(self._newRecord()) + manifest.close() + + manifest_2 = Manifest(self._path, read_only=True) + read_records = 0 + for _ in manifest_2: + read_records += 1 + + manifest_2.close() + + self.assertEqual(10, read_records) + + + def tearDown(self): + shutil.rmtree(self._path) + + def _newRecord(self): + record = {'at' : time.time()} + return record + +if __name__ == '__main__': + unittest.main() diff --git a/donkeycar/tests/test_keras.py b/donkeycar/tests/test_keras.py index c29b22563..35294ca1d 100644 --- a/donkeycar/tests/test_keras.py +++ b/donkeycar/tests/test_keras.py @@ -1,16 +1,9 @@ # -*- coding: utf-8 -*- -import pytest + from donkeycar.parts.keras import * from donkeycar.utils import * import numpy as np -@pytest.fixture -def pilot(): - kp = KerasPilot() - return kp - -def test_pilot_types(pilot): - assert 1 == 1 def test_categorical(): km = KerasCategorical() @@ -18,19 +11,6 @@ def test_categorical(): img = get_test_img(km.model) km.run(img) -def test_categorical_med(): - input_shape=(64, 64, 1) - km = KerasCategorical(input_shape=input_shape) - assert km.model is not None - img = get_test_img(km.model) - km.run(img) - -def test_categorical_tiny(): - input_shape=(32, 32, 1) - km = KerasCategorical(input_shape=input_shape) - assert km.model is not None - img = get_test_img(km.model) - km.run(img) def test_linear(): km = KerasLinear() @@ -38,27 +18,35 @@ def test_linear(): img = get_test_img(km.model) km.run(img) + def test_imu(): km = KerasIMU() assert km.model is not 
None img = get_test_img(km.model) imu = np.random.rand(6).tolist() - km.run(img, *imu) + km.run(img, imu) + def test_rnn(): km = KerasRNN_LSTM() assert km.model is not None img = get_test_img(km.model) km.run(img) - + + def test_3dconv(): km = Keras3D_CNN() assert km.model is not None img = get_test_img(km.model) km.run(img) + def test_localizer(): km = KerasLocalizer() assert km.model is not None img = get_test_img(km.model) - km.run(img) \ No newline at end of file + km.run(img) + + + + diff --git a/donkeycar/tests/test_management.py b/donkeycar/tests/test_management.py deleted file mode 100755 index 52ad2a831..000000000 --- a/donkeycar/tests/test_management.py +++ /dev/null @@ -1,9 +0,0 @@ - -from donkeycar.management import base -from tempfile import tempdir - -def get_test_tub_path(): - tempdir() - -def test_tubcheck(): - tc = base.TubCheck() \ No newline at end of file diff --git a/donkeycar/tests/test_pipeline.py b/donkeycar/tests/test_pipeline.py new file mode 100644 index 000000000..708197c2b --- /dev/null +++ b/donkeycar/tests/test_pipeline.py @@ -0,0 +1,170 @@ +import time +import unittest +from typing import List + +import numpy as np + +from donkeycar.config import Config +from donkeycar.parts.keras import KerasLinear +from donkeycar.pipeline.sequence import SizedIterator, TubSequence +from donkeycar.pipeline.training import BatchSequence +from donkeycar.pipeline.types import TubRecord, TubRecordDict + + +def random_records(size: int = 100) -> List[TubRecord]: + return [random_record() for _ in range(size)] + + +def random_record() -> TubRecord: + now = int(time.time()) + underlying: TubRecordDict = { + 'cam/image_array': f'/path/to/{now}.txt', + 'user/angle': np.random.uniform(0, 1.), + 'user/throttle': np.random.uniform(0, 1.), + 'user/mode': 'driving', + 'imu/acl_x': None, + 'imu/acl_y': None, + 'imu/acl_z': None, + 'imu/gyr_x': None, + 'imu/gyr_y': None, + 'imu/gyr_z': None + } + return TubRecord(config=Config(), base_path='/base', underlying=underlying) + + +size = 10 + + +class TestPipeline(unittest.TestCase): + + def setUp(self): + records = random_records(size=size) + self.sequence = TubSequence(records=records) + + def test_basic_iteration(self): + self.assertEqual(len(self.sequence), size) + count = 0 + for record in self.sequence: + print(f'Record {record}') + count += 1 + + self.assertEqual(count, size) + + def test_basic_map_operations(self): + transformed = TubSequence.build_pipeline( + self.sequence, + x_transform=lambda record: record.underlying['user/angle'], + y_transform=lambda record: record.underlying['user/throttle']) + + transformed_2 = TubSequence.build_pipeline( + self.sequence, + x_transform=lambda record: record.underlying['user/angle'] * 2, + y_transform=lambda record: record.underlying['user/throttle'] * 2) + + self.assertEqual(len(transformed), size) + self.assertEqual(len(transformed_2), size) + + transformed_list = list(transformed) + transformed_list_2 = list(transformed_2) + index = np.random.randint(0, 9) + + x1, y1 = transformed_list[index] + x2, y2 = transformed_list_2[index] + + self.assertAlmostEqual(x1 * 2, x2) + self.assertAlmostEqual(y1 * 2, y2) + + def test_more_map_operations(self): + transformed = TubSequence.build_pipeline( + self.sequence, + x_transform=lambda record: record.underlying['user/angle'], + y_transform=lambda record: record.underlying['user/throttle']) + + transformed_2 = TubSequence.build_pipeline( + self.sequence, + x_transform=lambda record: record.underlying['user/angle'] * 2, + y_transform=lambda record: 
record.underlying['user/throttle'] * 2)
+
+        transformed_3 = TubSequence.map_pipeline(
+            x_transform=lambda x: x,
+            y_transform=lambda y: y,
+            pipeline=transformed_2
+        )
+
+        self.assertEqual(len(transformed), size)
+        self.assertEqual(len(transformed_2), size)
+        self.assertEqual(len(transformed_3), size)
+
+        transformed_list = list(transformed)
+        transformed_list_2 = list(transformed_3)
+        index = np.random.randint(0, 9)
+
+        x1, y1 = transformed_list[index]
+        x2, y2 = transformed_list_2[index]
+
+        self.assertAlmostEqual(x1 * 2, x2)
+        self.assertAlmostEqual(y1 * 2, y2)
+
+    def test_map_factory(self):
+        transformed = TubSequence.build_pipeline(
+            self.sequence,
+            x_transform=lambda record: record.underlying['user/angle'],
+            y_transform=lambda record: record.underlying['user/throttle'])
+
+        transformed_2 = TubSequence.build_pipeline(
+            self.sequence,
+            x_transform=lambda record: record.underlying['user/angle'] * 2,
+            y_transform=lambda record: record.underlying['user/throttle'] * 2)
+
+        transformed_3 = TubSequence.map_pipeline_factory(
+            x_transform=lambda x: x,
+            y_transform=lambda y: y,
+            factory=lambda: transformed_2
+        )
+
+        self.assertEqual(len(transformed), size)
+        self.assertEqual(len(transformed_2), size)
+        self.assertEqual(len(transformed_3), size)
+
+        transformed_list = list(transformed)
+        transformed_list_2 = list(transformed_3)
+        index = np.random.randint(0, 9)
+
+        x1, y1 = transformed_list[index]
+        x2, y2 = transformed_list_2[index]
+
+        self.assertAlmostEqual(x1 * 2, x2)
+        self.assertAlmostEqual(y1 * 2, y2)
+
+    def test_iterator_consistency(self):
+        extract = TubSequence.build_pipeline(
+            self.sequence,
+            x_transform=lambda record: record.underlying['user/angle'],
+            y_transform=lambda record: record.underlying['user/throttle'])
+        # iterate twice through half the data
+        r1 = list()
+        r2 = list()
+        for r in r1, r2:
+            iterator = iter(extract)
+            for i in range(size // 2):
+                r.append(next(iterator))
+
+        self.assertEqual(r1, r2)
+        # now transform and iterate through pipeline twice to see iterator
+        # doesn't exhaust
+        transformed = TubSequence.map_pipeline(
+            x_transform=lambda x: 2 * x,
+            y_transform=lambda y: 3 * y,
+            pipeline=extract)
+        l1 = list(transformed)
+        l2 = list(transformed)
+        self.assertEqual(l1, l2)
+        for e, t in zip(extract, transformed):
+            ex, ey = e
+            tx, ty = t
+            self.assertAlmostEqual(2 * ex, tx)
+            self.assertAlmostEqual(3 * ey, ty)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/donkeycar/tests/test_seekable_v2.py b/donkeycar/tests/test_seekable_v2.py
new file mode 100644
index 000000000..9bc59f9c3
--- /dev/null
+++ b/donkeycar/tests/test_seekable_v2.py
@@ -0,0 +1,79 @@
+import os
+import tempfile
+import unittest
+
+from donkeycar.parts.datastore_v2 import Seekable
+
+
+class TestSeekable(unittest.TestCase):
+
+    def setUp(self):
+        self._file, self._path = tempfile.mkstemp()
+
+    def test_offset_tracking(self):
+        appendable = Seekable(self._path)
+        with appendable:
+            appendable.writeline('Line 1')
+            appendable.writeline('Line 2')
+            self.assertEqual(len(appendable.line_lengths), 2)
+            appendable.seek_line_start(1)
+            self.assertEqual(appendable.readline(), 'Line 1')
+            appendable.seek_line_start(2)
+            self.assertEqual(appendable.readline(), 'Line 2')
+            appendable.seek_end_of_file()
+            appendable.truncate_until_end(1)
+            appendable.writeline('Line 2 Revised')
+            appendable.seek_line_start(2)
+            self.assertEqual(appendable.readline(), 'Line 2 Revised')
+
+    def test_read_from_and_update(self):
+        appendable = Seekable(self._path)
+        with appendable:
+
appendable.writeline('Line 1') + appendable.writeline('Line 2') + appendable.writeline('Line 3') + # Test idempotent read + current_offset = appendable.file.tell() + lines = appendable.read_from(2) + self.assertEqual(len(lines), 2) + self.assertEqual(lines[0], 'Line 2') + self.assertEqual(lines[1], 'Line 3') + self.assertEqual(appendable.file.tell(), current_offset) + # Test update + appendable.update_line(1, 'Line 1 is longer') + lines = appendable.read_from(1) + self.assertEqual(len(lines), 3) + self.assertEqual(lines[0], 'Line 1 is longer') + self.assertEqual(lines[1], 'Line 2') + self.assertEqual(lines[2], 'Line 3') + + def test_read_contents(self): + appendable = Seekable(self._path) + with appendable: + appendable.writeline('Line 1') + appendable.writeline('Line 2') + self.assertEqual(len(appendable.line_lengths), 2) + appendable.file.seek(0) + appendable._read_contents() + self.assertEqual(len(appendable.line_lengths), 2) + + def test_restore_from_index(self): + appendable = Seekable(self._path) + with appendable: + appendable.writeline('Line 1') + appendable.writeline('Line 2') + self.assertEqual(len(appendable.line_lengths), 2) + + appendable = Seekable(self._path, line_lengths=appendable.line_lengths) + with appendable: + self.assertEqual(len(appendable.line_lengths), 2) + appendable.seek_line_start(1) + self.assertEqual(appendable.readline(), 'Line 1') + self.assertEqual(appendable.readline(), 'Line 2') + + def tearDown(self): + os.remove(self._path) + + +if __name__ == '__main__': + unittest.main() diff --git a/donkeycar/tests/test_telemetry.py b/donkeycar/tests/test_telemetry.py new file mode 100644 index 000000000..b522f9580 --- /dev/null +++ b/donkeycar/tests/test_telemetry.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +import time +from unittest import mock +from paho.mqtt.client import Client +import donkeycar.templates.cfg_complete as cfg +from donkeycar.parts.telemetry import MqttTelemetry +import pytest + + +def test_mqtt_telemetry(): + + # Create receiver + sub = Client(clean_session=True) + + # def on_message(client, userdata, message): + # data = message.payload + # print(message) + + on_message_mock = mock.Mock() + sub.on_message = on_message_mock + sub.connect(cfg.TELEMETRY_MQTT_BROKER_HOST) + sub.loop_start() + name = "donkey/%s/telemetry" % cfg.TELEMETRY_DONKEY_NAME + sub.subscribe(name) + + t = MqttTelemetry(cfg, default_inputs=['angle'], default_types=['float']) + t.publish() + + timestamp = t.report({'speed': 16, 'voltage': 12}) + t.run(33.3) + assert t.qsize == 2 + + time.sleep(1.5) + + t.publish() + assert t.qsize == 0 + + time.sleep(0.5) + + res = str.encode('[{"ts": %s, "values": {"speed": 16, "voltage": 12, "angle": 33.3}}]' % timestamp) + assert on_message_mock.call_args_list[0][0][2].payload == res diff --git a/donkeycar/tests/test_template.py b/donkeycar/tests/test_template.py index 663b74389..3cdb75331 100755 --- a/donkeycar/tests/test_template.py +++ b/donkeycar/tests/test_template.py @@ -1,27 +1,24 @@ # -*- coding: utf-8 -*- -import tempfile + from tempfile import gettempdir -import unittest -from donkeycar.parts.datastore import TubWriter, Tub -from donkeycar.parts.datastore import TubHandler from donkeycar.templates import complete import donkeycar as dk import os -import pytest +from .setup import default_template, d2_path, custom_template -#fixtures -from .setup import tub, tub_path, on_pi, default_template, d2_path, custom_template def test_config(): path = default_template(d2_path(gettempdir())) cfg = 
dk.load_config(os.path.join(path, 'config.py')) - assert(cfg != None) + assert (cfg is not None) + def test_drive(): path = default_template(d2_path(gettempdir())) myconfig = open(os.path.join(path, 'myconfig.py'), "wt") myconfig.write("CAMERA_TYPE = 'MOCK'\n") + myconfig.write("USE_SSD1306_128_32 = False \n") myconfig.write("DRIVE_TRAIN_TYPE = 'None'") myconfig.close() cfg = dk.load_config(os.path.join(path, 'config.py')) @@ -30,10 +27,10 @@ def test_drive(): def test_custom_templates(): - template_names = ["complete", "basic_web", "square"] + template_names = ["complete", "basic", "square"] for template in template_names: path = custom_template(d2_path(gettempdir()), template=template) cfg = dk.load_config(os.path.join(path, 'config.py')) - assert(cfg != None) + assert (cfg is not None) mcfg = dk.load_config(os.path.join(path, 'myconfig.py')) - assert(mcfg != None) + assert (mcfg is not None) diff --git a/donkeycar/tests/test_torch.py b/donkeycar/tests/test_torch.py new file mode 100644 index 000000000..9b311476e --- /dev/null +++ b/donkeycar/tests/test_torch.py @@ -0,0 +1,123 @@ +import pytest +import tarfile +import os +import numpy as np +from collections import defaultdict, namedtuple + +import torch +import pytorch_lightning as pl +from donkeycar.parts.pytorch.torch_train import train +from donkeycar.parts.pytorch.torch_data import TorchTubDataModule +from donkeycar.parts.pytorch.torch_utils import get_model_by_type + +from donkeycar.config import Config + +Data = namedtuple('Data', ['type', 'name', 'convergence', 'pretrained']) + + +@pytest.fixture +def config() -> Config: + """ Config for the test with relevant parameters""" + cfg = Config() + cfg.BATCH_SIZE = 2 + cfg.TRAIN_TEST_SPLIT = 0.8 + cfg.IMAGE_H = 120 + cfg.IMAGE_W = 160 + cfg.IMAGE_DEPTH = 3 + cfg.PRINT_MODEL_SUMMARY = True + cfg.EARLY_STOP_PATIENCE = 1000 + cfg.MAX_EPOCHS = 20 + cfg.MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.8 + cfg.VERBOSE_TRAIN = True + cfg.MIN_DELTA = 0.0005 + return cfg + + +@pytest.fixture(scope='session') +def car_dir(tmpdir_factory): + """ Creating car dir with sub dirs and extracting tub """ + dir = tmpdir_factory.mktemp('mycar') + os.mkdir(os.path.join(dir, 'models')) + # extract tub.tar.gz into temp car_dir/tub + this_dir = os.path.dirname(os.path.abspath(__file__)) + with tarfile.open(os.path.join(this_dir, 'tub', 'tub.tar.gz')) as file: + file.extractall(dir) + return dir + + +# define the test data +d1 = Data(type='resnet18', name='resnet18a', convergence=0.8, pretrained=None) +test_data = [d1] + + +@pytest.mark.skipif("TRAVIS" in os.environ, + reason='Suppress training test in CI') +@pytest.mark.parametrize('data', test_data) +def test_train(config: Config, car_dir: str, data: Data) -> None: + """ + Testing convergence of the linear model + + :param config: donkey config + :param car_dir: car directory (this is a temp dir) + :param data: test case data + :return: None + """ + def pilot_path(name): + pilot_name = f'pilot_{name}.ckpt' + return os.path.join(car_dir, 'models', pilot_name) + + tub_dir = os.path.join(car_dir, 'tub') + loss = train(config, tub_dir, pilot_path( + data.name), data.type, checkpoint_path=None) + + # check loss is converging + assert loss[-1] < loss[0] * data.convergence + + +@pytest.mark.parametrize('model_type', ['resnet18']) +def test_training_pipeline(config: Config, model_type: str, car_dir: str) \ + -> None: + """ + Testing consistency of the model interfaces and data used in training + pipeline. 
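The shape assertions further down rely on the data module resizing frames to 3x224x224, the input layout that ImageNet-pretrained backbones such as resnet18 expect, regardless of the configured IMAGE_H/IMAGE_W. A typical torchvision transform chain of that kind (an assumption about what TorchTubDataModule does internally, not a copy of it) looks like:

from torchvision import transforms

# Resize camera frames to the 3x224x224 layout ImageNet models expect,
# then normalize with the standard ImageNet channel statistics.
imagenet_transform = transforms.Compose([
    transforms.ToTensor(),                  # HWC uint8 -> CHW float in [0, 1]
    transforms.Resize((224, 224)),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])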
+
+    :param config: donkey config
+    :param model_type: test specification of model type
+    :param car_dir: car directory (the tub lives in car_dir/tub)
+    :return: None
+    """
+    model = get_model_by_type(
+        model_type, config, checkpoint_path=None)
+
+    tub_dir = os.path.join(car_dir, 'tub')
+    data_module = TorchTubDataModule(config, [tub_dir])
+
+    if torch.cuda.is_available():
+        print('Using CUDA')
+        gpus = -1
+    else:
+        print('Not using CUDA')
+        gpus = 0
+
+    # Overfit the data
+    trainer = pl.Trainer(gpus=gpus, overfit_batches=2,
+                         progress_bar_refresh_rate=30, max_epochs=30)
+    trainer.fit(model, data_module)
+    final_loss = model.loss_history[-1]
+    assert final_loss < 0.30, "final_loss of {} is too high. History: {}".format(
+        final_loss, model.loss_history)
+
+    # Check the batch data makes sense
+    for batch in data_module.train_dataloader():
+        x, y = batch
+        # In order to use a model pre-trained on ImageNet, the image
+        # will be re-sized to 3x224x224 regardless of what the user chooses.
+        assert(x.shape == (config.BATCH_SIZE, 3, 224, 224))
+        assert(y.shape == (config.BATCH_SIZE, 2))
+        break
+
+
+    # Check inference
+    val_x, val_y = next(iter(data_module.val_dataloader()))
+    output = model(val_x)
+    assert(output.shape == (config.BATCH_SIZE, 2))
diff --git a/donkeycar/tests/test_train.py b/donkeycar/tests/test_train.py
old mode 100755
new mode 100644
index b36bc7b11..69ee4f263
--- a/donkeycar/tests/test_train.py
+++ b/donkeycar/tests/test_train.py
@@ -1,225 +1,128 @@
-# -*- coding: utf-8 -*-
 #
 import pytest
+import tarfile
 import os
+import numpy as np
+from collections import defaultdict, namedtuple
+
+from donkeycar.pipeline.training import train, BatchSequence
+from donkeycar.config import Config
+from donkeycar.pipeline.types import TubDataset
+from donkeycar.utils import get_model_by_type, normalize_image
+
+Data = namedtuple('Data', ['type', 'name', 'convergence', 'pretrained'])
+
+
+@pytest.fixture
+def config() -> Config:
+    """ Config for the test with relevant parameters"""
+    cfg = Config()
+    cfg.BATCH_SIZE = 64
+    cfg.TRAIN_TEST_SPLIT = 0.8
+    cfg.IMAGE_H = 120
+    cfg.IMAGE_W = 160
+    cfg.IMAGE_DEPTH = 3
+    cfg.PRINT_MODEL_SUMMARY = True
+    cfg.EARLY_STOP_PATIENCE = 1000
+    cfg.MAX_EPOCHS = 20
+    cfg.MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.8
+    cfg.VERBOSE_TRAIN = True
+    cfg.MIN_DELTA = 0.0005
+    return cfg
+
+
+@pytest.fixture(scope='session')
+def car_dir(tmpdir_factory):
+    """ Creating car dir with sub dirs and extracting tub """
+    dir = tmpdir_factory.mktemp('mycar')
+    os.mkdir(os.path.join(dir, 'models'))
+    # extract tub.tar.gz into temp car_dir/tub
+    this_dir = os.path.dirname(os.path.abspath(__file__))
+    with tarfile.open(os.path.join(this_dir, 'tub', 'tub.tar.gz')) as file:
+        file.extractall(dir)
+    return dir
+
+
+# define the test data
+d1 = Data(type='linear', name='lin1', convergence=0.6, pretrained=None)
+d2 = Data(type='categorical', name='cat1', convergence=0.85, pretrained=None)
+d3 = Data(type='inferred', name='inf1', convergence=0.6, pretrained=None)
+d4 = Data(type='latent', name='lat1', convergence=0.5, pretrained=None)
+d5 = Data(type='latent', name='lat2', convergence=0.5, pretrained='lat1')
+test_data = [d1, d2, d3]
+
+
+@pytest.mark.skipif("TRAVIS" in os.environ,
+                    reason='Suppress training test in CI')
+@pytest.mark.parametrize('data', test_data)
+def test_train(config: Config, car_dir: str, data: Data) -> None:
+    """
+    Testing convergence of the linear and categorical models
+
+    :param config: donkey config
+    :param car_dir: car directory (this is a temp dir)
+    :param data: test case
data + :return: None + """ + def pilot_path(name): + pilot_name = f'pilot_{name}.h5' + return os.path.join(car_dir, 'models', pilot_name) + + if data.pretrained: + config.LATENT_TRAINED = pilot_path(data.pretrained) + tub_dir = os.path.join(car_dir, 'tub') + history = train(config, tub_dir, pilot_path(data.name), data.type) + loss = history.history['loss'] + # check loss is converging + assert loss[-1] < loss[0] * data.convergence + + +@pytest.mark.parametrize('model_type', ['linear', 'categorical', 'inferred']) +def test_training_pipeline(config: Config, model_type: str, car_dir: str) \ + -> None: + """ + Testing consistency of the model interfaces and data used in training + pipeline. + + :param config: donkey config + :param model_type: test specification of model type + :param tub_dir: tub directory (car_dir/tub) + :return: None + """ + kl = get_model_by_type(model_type, config) + tub_dir = os.path.join(car_dir, 'tub') + # don't shuffle so we can identify data for testing + dataset = TubDataset(config, [tub_dir], shuffle=False) + training_records, validation_records = dataset.train_test_split() + seq = BatchSequence(kl, config, training_records, True) + data_train = seq.create_tf_data() + num_whole_batches = len(training_records) // config.BATCH_SIZE + # this takes all batches into one list + tf_batch = list(data_train.take(num_whole_batches).as_numpy_iterator()) + it = iter(training_records) + for xy_batch in tf_batch: + # extract x and y values from records, asymmetric in x and y b/c x + # requires image manipulations + batch_records = [next(it) for _ in range(config.BATCH_SIZE)] + records_x = [kl.x_translate(normalize_image(kl.x_transform(r))) for + r in batch_records] + records_y = [kl.y_translate(kl.y_transform(r)) for r in + batch_records] + # from here all checks are symmetrical between x and y + for batch, o_type, records \ + in zip(xy_batch, kl.output_types(), (records_x, records_y)): + # check batch dictionary have expected keys + assert batch.keys() == o_type.keys(), \ + 'batch keys need to match models output types' + # convert record values into arrays of batch size + values = defaultdict(list) + for r in records: + for k, v in r.items(): + values[k].append(v) + # now convert arrays of floats or numpy arrays into numpy arrays + np_dict = dict() + for k, v in values.items(): + np_dict[k] = np.array(v) + # compare record values with values from tf.data + for k, v in batch.items(): + assert np.isclose(v, np_dict[k]).all() -from donkeycar.templates.train import multi_train -from donkeycar.parts.datastore import Tub -from donkeycar.parts.simulation import SquareBoxCamera, MovingSquareTelemetry - -from donkeycar.templates.train import gather_records, collate_records - -#fixtures -from .setup import tub, tub_path, on_pi -from .setup import create_sample_tub - -def cfg_defaults(cfg): - cfg.MAX_EPOCHS = 1 - cfg.BATCH_SIZE = 10 - cfg.SHOW_PLOT = False - cfg.VERBOSE_TRAIN = False - cfg.OPTIMIZER = "adam" - cfg.TARGET_H = cfg.IMAGE_H - cfg.ROI_CROP_TOP - cfg.ROI_CROP_BOTTOM - cfg.TARGET_W = cfg.IMAGE_W - cfg.TARGET_D = cfg.IMAGE_DEPTH - cfg.NUM_LOCATIONS = 10 - - -@pytest.mark.skipif(on_pi() == True, reason='Too slow on RPi') -def test_train_cat(tub, tub_path): - t = Tub(tub_path) - assert t is not None - - import donkeycar.templates.cfg_complete as cfg - tempfolder = tub_path[:-3] - model_path = os.path.join(tempfolder, 'test.h5') - cfg_defaults(cfg) - - tub = tub_path - model = model_path - transfer = None - model_type = "categorical" - continuous = False - aug = False - 
multi_train(cfg, tub, model, transfer, model_type, continuous, aug)
-
-
-@pytest.mark.skipif(on_pi() == True, reason='Too slow on RPi')
-def test_train_linear(tub, tub_path):
-    t = Tub(tub_path)
-    assert t is not None
-
-    import donkeycar.templates.cfg_complete as cfg
-    tempfolder = tub_path[:-3]
-    model_path = os.path.join(tempfolder, 'test.h5')
-    cfg_defaults(cfg)
-
-    tub = tub_path
-    model = model_path
-    transfer = None
-    model_type = "linear"
-    continuous = False
-    aug = False
-    multi_train(cfg, tub, model, transfer, model_type, continuous, aug)
-
-
-@pytest.mark.skipif(on_pi() == True, reason='Too slow on RPi')
-def test_train_localizer(tub, tub_path):
-    t = Tub(tub_path)
-    assert t is not None
-
-    import donkeycar.templates.cfg_complete as cfg
-    tempfolder = tub_path[:-3]
-    model_path = os.path.join(tempfolder, 'test.h5')
-    cfg_defaults(cfg)
-
-    tub = tub_path
-    model = model_path
-    transfer = None
-    model_type = "localizer"
-    continuous = False
-    aug = False
-    multi_train(cfg, tub, model, transfer, model_type, continuous, aug)
-
-'''
-
-latent test requires opencv right now. and fails on travis ci.
-re-enable when we figure out a recipe for opencv on travis.
-
-@pytest.mark.skipif(on_pi() == True, reason='Too slow on RPi')
-def test_train_latent(tub, tub_path):
-    t = Tub(tub_path)
-    assert t is not None
-
-    import donkeycar.templates.cfg_complete as cfg
-    tempfolder = tub_path[:-3]
-    model_path = os.path.join(tempfolder, 'test.h5')
-    cfg.MAX_EPOCHS = 1
-    cfg.BATCH_SIZE = 10
-    cfg.SHOW_PLOT = False
-    cfg.VERBOSE_TRAIN = False
-    cfg.OPTIMIZER = "adam"
-
-    tub = tub_path
-    model = model_path
-    transfer = None
-    model_type = "latent"
-    continuous = False
-    aug = True
-    multi_train(cfg, tub, model, transfer, model_type, continuous, aug)
-'''
-
-@pytest.mark.skipif(on_pi() == True, reason='Too slow on RPi')
-def test_train_seq(tub, tub_path):
-    t = Tub(tub_path)
-    assert t is not None
-
-    import donkeycar.templates.cfg_complete as cfg
-    tempfolder = tub_path[:-3]
-    model_path = os.path.join(tempfolder, 'test.h5')
-    cfg_defaults(cfg)
-
-    tub = tub_path
-    model = model_path
-    transfer = None
-    model_type = "rnn"
-    continuous = False
-    aug = True
-    multi_train(cfg, tub, model, transfer, model_type, continuous, aug)
-
-# Helper function to calculate the Train-Test split (ratio) of a collated dataset
-def calculate_TrainTestSplit(gen_records):
-    train_recs = 0
-    test_recs = 0
-    for key in gen_records:
-        if gen_records[key]['train']:
-            train_recs += 1
-        else:
-            test_recs += 1
-
-    total_recs = len(gen_records)
-    print("Total Records: {}".format(total_recs))
-    print("Train Records: {}".format(train_recs))
-    print("Test Records: {}".format(test_recs))
-    assert total_recs == train_recs + test_recs
-    ratio = train_recs / total_recs
-    print("Calculated Train-Test Split: {}".format(ratio))
-    return ratio
-
-@pytest.mark.skipif(on_pi() == True, reason='Too slow on RPi')
-def test_train_TrainTestSplit_simple(tub_path):
-    # Check whether the Train-Test splitting is working correctly on a dataset.
-    initial_records = 100
-
-    # Setup the test data
-    t = create_sample_tub(tub_path, records=initial_records)
-    assert t is not None
-
-    # Import the configuration
-    import donkeycar.templates.cfg_complete as cfg
-
-    # Initial Setup
-    opts = {'categorical' : False}
-    opts['cfg'] = cfg
-
-    orig_TRAIN_TEST_SPLIT = cfg.TRAIN_TEST_SPLIT
-    records = gather_records(cfg, tub_path, opts, verbose=True)
-    assert len(records) == initial_records
-
-    # Attempt a 50:50 split
-    gen_records = {}
-    cfg.TRAIN_TEST_SPLIT = 0.5
-    print()
-    print("Testing a {} Test-Train split...".format(opts['cfg'].TRAIN_TEST_SPLIT))
-    print()
-    collate_records(records, gen_records, opts)
-    ratio = calculate_TrainTestSplit(gen_records)
-    assert ratio == cfg.TRAIN_TEST_SPLIT
-
-    # Attempt a split based on config file (reset of the record set)
-    gen_records = {}
-    cfg.TRAIN_TEST_SPLIT = orig_TRAIN_TEST_SPLIT
-    print()
-    print("Testing a {} Test-Train split...".format(opts['cfg'].TRAIN_TEST_SPLIT))
-    print()
-    collate_records(records, gen_records, opts)
-    ratio = calculate_TrainTestSplit(gen_records)
-    assert ratio == cfg.TRAIN_TEST_SPLIT
-
-@pytest.mark.skipif(on_pi() == True, reason='Too slow on RPi')
-def test_train_TrainTestSplit_continuous(tub_path):
-    # Check whether the Train-Test splitting is working correctly when a dataset is extended.
-    initial_records = 100
-
-    # Setup the test data
-    t = create_sample_tub(tub_path, records=initial_records)
-    assert t is not None
-
-    # Import the configuration
-    import donkeycar.templates.cfg_complete as cfg
-
-    # Initial Setup
-    gen_records = {}
-    opts = {'categorical' : False}
-    opts['cfg'] = cfg
-
-    # Perform the initial split
-    print()
-    print("Initial split of {} records to {} Test-Train split...".format(initial_records, opts['cfg'].TRAIN_TEST_SPLIT))
-    print()
-    records = gather_records(cfg, tub_path, opts, verbose=True)
-    assert len(records) == initial_records
-    collate_records(records, gen_records, opts)
-    ratio = calculate_TrainTestSplit(gen_records)
-    assert ratio == cfg.TRAIN_TEST_SPLIT
-
-    # Add some more records and recheck the ratio (only the NEW records should be added)
-    additional_records = 200
-    print()
-    print("Added an extra {} records, aiming for overall {} Test-Train split...".format(additional_records, opts['cfg'].TRAIN_TEST_SPLIT))
-    print()
-    create_sample_tub(tub_path, records=additional_records)
-    records = gather_records(cfg, tub_path, opts, verbose=True)
-    assert len(records) == (initial_records + additional_records)
-    collate_records(records, gen_records, opts)
-    ratio = calculate_TrainTestSplit(gen_records)
-    assert ratio == cfg.TRAIN_TEST_SPLIT
diff --git a/donkeycar/tests/test_tub.py b/donkeycar/tests/test_tub.py
deleted file mode 100644
index 69cb3b948..000000000
--- a/donkeycar/tests/test_tub.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# -*- coding: utf-8 -*-
-import os
-import tempfile
-import unittest
-
-from donkeycar.parts.datastore import TubHandler
-from donkeycar.parts.datastore import TubWriter, Tub
-from donkeycar.utils import arr_to_img, img_to_arr
-from PIL import ImageChops
-
-# fixtures
-from .setup import tub, tub_path
-
-
-def test_tub_load(tub, tub_path):
-    """Tub loads from existing tub path."""
-    t = Tub(tub_path)
-    assert t is not None
-
-
-def test_tub_update_df(tub):
-    """ Tub updates its dataframe """
-    tub.update_df()
-    assert len(tub.df) == 128
-
-
-def test_tub_add_record(tub):
-    """Tub can save a record and then retrieve it."""
-    import numpy as np
-    img_arr = np.zeros((120, 160))
-    x = 123
-    y = 90
-    rec_in = {'cam/image_array': img_arr, 'user/angle': x, 'user/throttle': y}
-    rec_index = tub.put_record(rec_in)
-    rec_out = tub.get_record(rec_index)
-    # Ignore the milliseconds key, which is added when the record is written
-    if 'milliseconds' in rec_out:
-        rec_out.pop('milliseconds')
-    assert rec_in.keys() == rec_out.keys()
-
-
-def test_tub_write_numpy(tub):
-    """Tub can save a record that contains a numpy float, and retrieve it as a
-    python float."""
-    import numpy as np
-    x = 123
-    z = np.float32(11.1)
-    rec_in = {'user/angle': x, 'user/throttle':z}
-    rec_index = tub.put_record(rec_in)
-    rec_out = tub.get_record(rec_index)
-    assert type(rec_out['user/throttle']) == float
-
-
-def test_tub_exclude(tub):
-    """ Make sure the Tub will exclude records in the exclude set """
-    ri = lambda fnm: int(os.path.basename(fnm).split('_')[1].split('.')[0])
-
-    before = tub.gather_records()
-    # Make sure we gathered records correctly
-    assert len(before) == tub.get_num_records()
-    tub.exclude.add(1)
-    after = tub.gather_records()
-    # Make sure we excluded the correct number of records
-    assert len(after) == (tub.get_num_records() - 1)
-    before = set([ri(f) for f in before])
-    after = set([ri(f) for f in after])
-    diff = before - after
-    assert len(diff) == 1
-    # Make sure we exclude the correct index
-    assert 1 in diff
-
-
-def test_tub_augment(tub):
-    """Tub with augmented images which only differ slightly."""
-    import numpy as np
-    index = tub.get_index(shuffled=False)
-    img_arr_before = [tub.get_record(ix)['cam/image_array'] for ix in index]
-    tub.augment_images()
-    img_arr_after = [tub.get_record(ix)['cam/image_array'] for ix in index]
-    total_change = 0
-    for img_arr_b, img_arr_a in zip(img_arr_before, img_arr_after):
-        assert img_arr_a.shape == img_arr_b.shape, 'image size broken'
-        img_a = arr_to_img(img_arr_a)
-        img_b = arr_to_img(img_arr_b)
-        diff = ImageChops.difference(img_a, img_b)
-        diff_arr = img_to_arr(diff)
-        # normalise this number
-        num_pixel_channel = np.prod(diff_arr.shape)
-        avg_change = diff_arr.sum() / num_pixel_channel
-        # check that the augmented image is different if not totally black
-        if img_arr_b.max() > 0:
-            assert avg_change != 0, "Augmentation didn't do anything"
-        # change per chanel can be maximally 255 in 8-bit
-        total_change += avg_change
-
-    # An average change of 1 (per 255) would be already too big on the moving
-    # square images. Empirically we see changes in the order of 0.1
-    assert total_change / len(img_arr_before) < 1.0, \
-        'The augmented pictures differ too much.'
-
-
-class TestTubWriter(unittest.TestCase):
-    def setUp(self):
-        self.tempfolder = tempfile.TemporaryDirectory().name
-        self.path = os.path.join(self.tempfolder, 'new')
-        self.inputs = ['name', 'age', 'pic']
-        self.types = ['str', 'float', 'str']
-
-    def test_tub_create(self):
-        tub = TubWriter(self.path, inputs=self.inputs, types=self.types)
-
-    def test_tub_path(self):
-        tub = TubWriter(self.path, inputs=self.inputs, types=self.types)
-        tub.run('will', 323, 'asdfasdf')
-
-    def test_make_paths_absolute(self):
-        tub = Tub(self.path, inputs=['file_path'], types=['image'])
-        rel_file_name = 'test.jpg'
-        record_dict = {'file_path': rel_file_name}
-        abs_record_dict = tub.make_record_paths_absolute(record_dict)
-
-        assert abs_record_dict['file_path'] == os.path.join(self.path, rel_file_name)
-
-    def test_tub_meta(self):
-        meta = ["location:Here", "task:sometask"]
-        tub = Tub(self.path, inputs=['file_path'], types=['image'], user_meta=meta)
-        t2 = Tub(self.path)
-        assert "location" in tub.meta
-        assert "location" in t2.meta
-        assert "sometask" == t2.meta["task"]
-
-    def test_tub_like_driver(self):
-        """ The manage.py/donkey2.py drive command creates a tub using TubHandler,
-        so test that way.
-        """
-        os.makedirs(self.tempfolder)
-        meta = ["location:Here2", "task:sometask2"]
-        th = TubHandler(self.tempfolder)
-        tub = th.new_tub_writer(inputs=self.inputs, types=self.types, user_meta=meta)
-        t2 = Tub(tub.path)
-        assert tub.meta == t2.meta
-        assert tub.meta['location'] == "Here2"
-        assert t2.meta['inputs'] == self.inputs
-        assert t2.meta['location'] == "Here2"
diff --git a/donkeycar/tests/test_tub_v2.py b/donkeycar/tests/test_tub_v2.py
new file mode 100644
index 000000000..e693633d3
--- /dev/null
+++ b/donkeycar/tests/test_tub_v2.py
@@ -0,0 +1,42 @@
+import shutil
+import tempfile
+import unittest
+
+from donkeycar.parts.tub_v2 import Tub
+
+
+class TestTub(unittest.TestCase):
+
+    def setUp(self):
+        self._path = tempfile.mkdtemp()
+        inputs = ['input']
+        types = ['int']
+        self.tub = Tub(self._path, inputs, types)
+
+    def test_basic_tub_operations(self):
+        entries = list(self.tub)
+        self.assertEqual(len(entries), 0)
+        write_count = 10
+        delete_indexes = [0, 8]
+
+        records = [{'input': i} for i in range(write_count)]
+        for record in records:
+            self.tub.write_record(record)
+
+        for index in delete_indexes:
+            self.tub.delete_record(index)
+
+        count = 0
+        for record in self.tub:
+            print('Record %s' % (record))
+            count += 1
+
+        self.assertEqual(count, (write_count - len(delete_indexes)))
+        self.assertEqual(len(self.tub), (write_count - len(delete_indexes)))
+
+    def tearDown(self):
+        shutil.rmtree(self._path)
+
+
+if __name__ == '__main__':
+    unittest.main()
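The test above doubles as the reference for the v2 datastore API. As a minimal standalone sketch (the temporary path and record field below are illustrative, not part of the patch):

```python
# Minimal sketch of the tub v2 API exercised by the test above.
import tempfile

from donkeycar.parts.tub_v2 import Tub

path = tempfile.mkdtemp()
tub = Tub(path, inputs=['user/angle'], types=['float'])

for angle in (0.0, 0.5, 1.0):
    tub.write_record({'user/angle': angle})
tub.delete_record(0)    # soft-delete by index

print(len(tub))         # 2: deleted records no longer count
for record in tub:      # iteration also skips deleted records
    print(record)
```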
diff --git a/donkeycar/tests/test_web_controller.py b/donkeycar/tests/test_web_controller.py
index b07b4e9f4..d3b95e9bd 100755
--- a/donkeycar/tests/test_web_controller.py
+++ b/donkeycar/tests/test_web_controller.py
@@ -30,4 +30,3 @@ def test_web_control_user_defined_port():
     assert server.port == 12345
 
-
diff --git a/donkeycar/tests/tub/tub.tar.gz b/donkeycar/tests/tub/tub.tar.gz
new file mode 100644
index 000000000..c00dd3320
Binary files /dev/null and b/donkeycar/tests/tub/tub.tar.gz differ
diff --git a/donkeycar/utils.py b/donkeycar/utils.py
index f41dcd97e..edc3f97e8 100644
--- a/donkeycar/utils.py
+++ b/donkeycar/utils.py
@@ -16,15 +16,16 @@ import random
 import time
 import signal
-
+from typing import List, Any, Tuple
 from PIL import Image
 import numpy as np
+
 
 '''
 IMAGES
 '''
-one_byte_scale = 1.0 / 255.0
+ONE_BYTE_SCALE = 1.0 / 255.0
 
 
 def scale(im, size=128):
@@ -71,8 +72,8 @@ def arr_to_img(arr):
 
 def img_to_arr(img):
     '''
-    accepts: numpy array with shape (Height, Width, Channels)
-    returns: binary stream (used to save to database)
+    accepts: PIL image
+    returns: a numpy uint8 image
     '''
     return np.array(img)
 
@@ -94,75 +95,96 @@ def binary_to_img(binary):
 
 def norm_img(img):
-    return (img - img.mean() / np.std(img)) * one_byte_scale
-
-
-def create_video(img_dir_path, output_video_path):
-    import envoy
-    # Setup path to the images with telemetry.
-    full_path = os.path.join(img_dir_path, 'frame_*.png')
-
-    # Run ffmpeg.
-    command = ("""ffmpeg
-               -framerate 30/1
-               -pattern_type glob -i '%s'
-               -c:v libx264
-               -r 15
-               -pix_fmt yuv420p
-               -y
-               %s""" % (full_path, output_video_path))
-    response = envoy.run(command)
+    # note the parentheses: subtract the mean first, then divide by std
+    return (img - img.mean()) / np.std(img) * ONE_BYTE_SCALE
 
 
 def rgb2gray(rgb):
-    '''
-    take a numpy rgb image return a new single channel image converted to greyscale
-    '''
-    return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
+    """
+    Convert normalized numpy image array with shape (w, h, 3) into greyscale
+    image of shape (w, h)
+    :param rgb:    normalized [0,1] float32 numpy image array or [0,255]
+                   uint8 numpy image array with shape(w,h,3)
+    :return:       normalized [0,1] float32 numpy image array shape(w,h) or
+                   [0,255] uint8 numpy array in grey scale
+    """
+    # this will translate a uint8 array into a float64 one
+    grey = np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
+    # transform back if the input is a uint8 array
+    if rgb.dtype.type is np.uint8:
+        grey = grey.round().astype(np.uint8)
+    return grey
 
 
 def img_crop(img_arr, top, bottom):
-
-    if bottom is 0:
+    if bottom == 0:
         end = img_arr.shape[0]
     else:
         end = -bottom
     return img_arr[top:end, ...]
 
 
-def normalize_and_crop(img_arr, cfg):
-    img_arr = img_arr.astype(np.float32) * one_byte_scale
-    if cfg.ROI_CROP_TOP or cfg.ROI_CROP_BOTTOM:
-        img_arr = img_crop(img_arr, cfg.ROI_CROP_TOP, cfg.ROI_CROP_BOTTOM)
-        if len(img_arr.shape) == 2:
-            img_arrH = img_arr.shape[0]
-            img_arrW = img_arr.shape[1]
-            img_arr = img_arr.reshape(img_arrH, img_arrW, 1)
-    return img_arr
+def normalize_image(img_arr_uint):
+    """
+    Convert uint8 numpy image array into [0,1] float image array
+    :param img_arr_uint:    [0,255] uint8 numpy image array
+    :return:                [0,1] float64 numpy image array
+    """
+    return img_arr_uint.astype(np.float64) * ONE_BYTE_SCALE
 
 
-def load_scaled_image_arr(filename, cfg):
-    '''
-    load an image from the filename, and use the cfg to resize if needed
-    also apply cropping and normalize
-    '''
-    import donkeycar as dk
+def denormalize_image(img_arr_float):
+    """
+    :param img_arr_float:    [0,1] float numpy image array
+    :return:                 [0,255] uint8 numpy image array
+    """
+    return (img_arr_float * 255.0).astype(np.uint8)
+
+
+def load_pil_image(filename, cfg):
+    """Loads an image from a file path as a PIL image. Also handles resizing.
+
+    Args:
+        filename (string): path to the image file
+        cfg (object): donkey configuration object
+
+    Returns: a PIL image.
+    """
     try:
         img = Image.open(filename)
         if img.height != cfg.IMAGE_H or img.width != cfg.IMAGE_W:
             img = img.resize((cfg.IMAGE_W, cfg.IMAGE_H))
-        img_arr = np.array(img)
-        img_arr = normalize_and_crop(img_arr, cfg)
-        croppedImgH = img_arr.shape[0]
-        croppedImgW = img_arr.shape[1]
-        if img_arr.shape[2] == 3 and cfg.IMAGE_DEPTH == 1:
-            img_arr = dk.utils.rgb2gray(img_arr).reshape(croppedImgH, croppedImgW, 1)
+
+        if cfg.IMAGE_DEPTH == 1:
+            img = img.convert('L')
+
+        return img
+
     except Exception as e:
         print(e)
         print('failed to load image:', filename)
-        img_arr = None
-    return img_arr
+        return None
+
+
+def load_image(filename, cfg):
+    """
+    :param string filename:    path to image file
+    :param cfg:                donkey config
+    :return np.ndarray:        numpy uint8 image array
+    """
+    img = load_pil_image(filename, cfg)
+
+    if img is None:
+        return None
+
+    img_arr = np.asarray(img)
+
+    # If the PIL image is greyscale, the np array will have shape (H, W)
+    # Need to add a depth channel by expanding to (H, W, 1)
+    if img.mode == 'L':
+        h, w = img_arr.shape[:2]
+        img_arr = img_arr.reshape(h, w, 1)
+
+    return img_arr
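`normalize_image` and `denormalize_image` are inverses up to uint8 truncation; a quick sketch, assuming only numpy and the two helpers above (shapes and values are arbitrary):

```python
# Round trip through the new helpers.
import numpy as np

from donkeycar.utils import denormalize_image, normalize_image

img_uint8 = np.random.randint(0, 256, size=(120, 160, 3), dtype=np.uint8)
img_float = normalize_image(img_uint8)    # float array scaled into [0, 1]
img_back = denormalize_image(img_float)   # back to uint8 in [0, 255]

assert img_back.dtype == np.uint8
# astype() truncates, so allow an off-by-one from float rounding
assert np.abs(img_back.astype(int) - img_uint8.astype(int)).max() <= 1
```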
+ """ try: img = Image.open(filename) if img.height != cfg.IMAGE_H or img.width != cfg.IMAGE_W: img = img.resize((cfg.IMAGE_W, cfg.IMAGE_H)) - img_arr = np.array(img) - img_arr = normalize_and_crop(img_arr, cfg) - croppedImgH = img_arr.shape[0] - croppedImgW = img_arr.shape[1] - if img_arr.shape[2] == 3 and cfg.IMAGE_DEPTH == 1: - img_arr = dk.utils.rgb2gray(img_arr).reshape(croppedImgH, croppedImgW, 1) + + if cfg.IMAGE_DEPTH == 1: + img = img.convert('L') + + return img + except Exception as e: print(e) print('failed to load image:', filename) - img_arr = None - return img_arr + return None + + +def load_image(filename, cfg): + """ + :param string filename: path to image file + :param cfg: donkey config + :return np.ndarray: numpy uint8 image array + """ + img = load_pil_image(filename, cfg) + + if not img: + return None + img_arr = np.asarray(img) + + # If the PIL image is greyscale, the np array will have shape (H, W) + # Need to add a depth channel by expanding to (H, W, 1) + if img.mode == 'L': + h, w = img_arr.shape[:2] + img_arr = img_arr.reshape(h, w, 1) + + return img_arr ''' FILES @@ -222,7 +244,7 @@ def linear_bin(a, N=15, offset=1, R=2.0): offset one hot bin by offset, commonly R/2 ''' a = a + offset - b = round(a / (R/(N-offset))) + b = round(a / (R / (N - offset))) arr = np.zeros(N) b = clamp(b, 0, N - 1) arr[int(b)] = 1 @@ -236,7 +258,7 @@ def linear_unbin(arr, N=15, offset=-1, R=2.0): rescale given R range and offset ''' b = np.argmax(arr) - a = b *(R/(N + offset)) + offset + a = b * (R / (N + offset)) + offset return a @@ -299,16 +321,39 @@ def dist(x1, y1, x2, y2): NETWORKING ''' + def my_ip(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(('192.0.0.8', 1027)) return s.getsockname()[0] +''' +THROTTLE +''' + +STEERING_MIN = -1. +STEERING_MAX = 1. +# Scale throttle ~ 0.5 - 1.0 depending on the steering angle +EXP_SCALING_FACTOR = 0.5 +DAMPENING = 0.05 + + +def _steering(input_value): + input_value = clamp(input_value, STEERING_MIN, STEERING_MAX) + return ((input_value - STEERING_MIN) / (STEERING_MAX - STEERING_MIN)) + + +def throttle(input_value): + magnitude = _steering(input_value) + decay = math.exp(magnitude * EXP_SCALING_FACTOR) + dampening = DAMPENING * magnitude + return ((1 / decay) - dampening) ''' OTHER ''' + def map_frange(x, X_min, X_max, Y_min, Y_max): ''' Linear mapping between two ranges of values @@ -329,7 +374,6 @@ def merge_two_dicts(x, y): return z - def param_gen(params): ''' Accepts a dictionary of parameter options and returns @@ -357,7 +401,6 @@ def run_shell_command(cmd, cwd=None, timeout=15): return out, err, proc.pid - def kill(proc_id): os.kill(proc_id, signal.SIGINT) @@ -366,97 +409,13 @@ def eprint(*args, **kwargs): print(*args, file=sys.stderr, **kwargs) -""" -Tub management -""" - - -def expand_path_masks(paths): - ''' - take a list of paths and expand any wildcards - returns a new list of paths fully expanded - ''' - import glob - expanded_paths = [] - for path in paths: - if '*' in path or '?' 
@@ -299,16 +321,39 @@ def dist(x1, y1, x2, y2):
 NETWORKING
 '''
 
+
 def my_ip():
     s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     s.connect(('192.0.0.8', 1027))
     return s.getsockname()[0]
 
 
+'''
+THROTTLE
+'''
+
+STEERING_MIN = -1.
+STEERING_MAX = 1.
+# Scale throttle ~ 0.5 - 1.0 depending on the steering angle
+EXP_SCALING_FACTOR = 0.5
+DAMPENING = 0.05
+
+
+def _steering(input_value):
+    input_value = clamp(input_value, STEERING_MIN, STEERING_MAX)
+    return (input_value - STEERING_MIN) / (STEERING_MAX - STEERING_MIN)
+
+
+def throttle(input_value):
+    magnitude = _steering(input_value)
+    decay = math.exp(magnitude * EXP_SCALING_FACTOR)
+    dampening = DAMPENING * magnitude
+    return (1 / decay) - dampening
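The shape of the new `throttle()` curve is easiest to read from sample points. Note that `_steering()` maps the signed range [-1, 1] onto [0, 1], so the two steering extremes land on different throttle values; the figures below are computed from the constants above and rounded to two decimals:

```python
# Sample points on the new throttle curve.
from donkeycar.utils import throttle

print(round(throttle(-1.0), 2))  # 1.0  -> magnitude 0.0, no decay
print(round(throttle(0.0), 2))   # 0.75 -> magnitude 0.5
print(round(throttle(1.0), 2))   # 0.56 -> magnitude 1.0, full decay + dampening
```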
 
 
 '''
 OTHER
 '''
 
+
 def map_frange(x, X_min, X_max, Y_min, Y_max):
     '''
     Linear mapping between two ranges of values
@@ -329,7 +374,6 @@ def merge_two_dicts(x, y):
     return z
 
 
-
 def param_gen(params):
     '''
     Accepts a dictionary of parameter options and returns
@@ -357,7 +401,6 @@ def run_shell_command(cmd, cwd=None, timeout=15):
     return out, err, proc.pid
 
 
-
 def kill(proc_id):
     os.kill(proc_id, signal.SIGINT)
 
@@ -366,97 +409,13 @@ def eprint(*args, **kwargs):
     print(*args, file=sys.stderr, **kwargs)
 
 
-"""
-Tub management
-"""
-
-
-def expand_path_masks(paths):
-    '''
-    take a list of paths and expand any wildcards
-    returns a new list of paths fully expanded
-    '''
-    import glob
-    expanded_paths = []
-    for path in paths:
-        if '*' in path or '?' in path:
-            mask_paths = glob.glob(path)
-            expanded_paths += mask_paths
-        else:
-            expanded_paths.append(path)
-
-    return expanded_paths
-
-
-def gather_tub_paths(cfg, tub_names=None):
-    '''
-    takes as input the configuration, and the comma seperated list of tub paths
-    returns a list of Tub paths
-    '''
-    if tub_names:
-        if type(tub_names) == list:
-            tub_paths = [os.path.expanduser(n) for n in tub_names]
-        else:
-            tub_paths = [os.path.expanduser(n) for n in tub_names.split(',')]
-        return expand_path_masks(tub_paths)
-    else:
-        paths = [os.path.join(cfg.DATA_PATH, n) for n in os.listdir(cfg.DATA_PATH)]
-        dir_paths = []
-        for p in paths:
-            if os.path.isdir(p):
-                dir_paths.append(p)
-        return dir_paths
-
-
-def gather_tubs(cfg, tub_names):
-    '''
-    takes as input the configuration, and the comma seperated list of tub paths
-    returns a list of Tub objects initialized to each path
-    '''
-    from donkeycar.parts.datastore import Tub
-
-    tub_paths = gather_tub_paths(cfg, tub_names)
-    tubs = [Tub(p) for p in tub_paths]
-
-    return tubs
-
-"""
-Training helpers
-"""
-
-def get_image_index(fnm):
-    sl = os.path.basename(fnm).split('_')
-    return int(sl[0])
-
-
-def get_record_index(fnm):
-    sl = os.path.basename(fnm).split('_')
-    return int(sl[1].split('.')[0])
-
-
-def gather_records(cfg, tub_names, opts=None, verbose=False):
-
-    tubs = gather_tubs(cfg, tub_names)
-
-    records = []
-
-    for tub in tubs:
-        if verbose:
-            print(tub.path)
-        record_paths = tub.gather_records()
-        records += record_paths
-
-    return records
-
-
-def get_model_by_type(model_type, cfg):
+def get_model_by_type(model_type: str, cfg: 'Config') -> 'KerasPilot':
     '''
     given the string model_type and the configuration settings in cfg
     create a Keras model and return it.
     '''
-    from donkeycar.parts.keras import KerasRNN_LSTM, KerasBehavioral, \
-        KerasCategorical, KerasIMU, KerasLinear, Keras3D_CNN, \
-        KerasLocalizer, KerasLatent
+    from donkeycar.parts.keras import KerasPilot, KerasCategorical, \
+        KerasLinear, KerasInferred
     from donkeycar.parts.tflite import TFLitePilot
 
     if model_type is None:
@@ -464,48 +423,38 @@
     print("\"get_model_by_type\" model Type is: {}".format(model_type))
 
     input_shape = (cfg.IMAGE_H, cfg.IMAGE_W, cfg.IMAGE_DEPTH)
-    roi_crop = (cfg.ROI_CROP_TOP, cfg.ROI_CROP_BOTTOM)
-
-    if model_type == "tflite_linear":
+    kl: KerasPilot
+    if model_type == "linear":
+        kl = KerasLinear(input_shape=input_shape)
+    elif model_type == "categorical":
+        kl = KerasCategorical(input_shape=input_shape,
+                              throttle_range=cfg.MODEL_CATEGORICAL_MAX_THROTTLE_RANGE)
+    elif model_type == 'inferred':
+        kl = KerasInferred(input_shape=input_shape)
+    elif model_type == "tflite_linear":
         kl = TFLitePilot()
-    elif model_type == "localizer" or cfg.TRAIN_LOCALIZER:
-        kl = KerasLocalizer(num_locations=cfg.NUM_LOCATIONS, input_shape=input_shape)
-    elif model_type == "behavior" or cfg.TRAIN_BEHAVIORS:
-        kl = KerasBehavioral(num_outputs=2, num_behavior_inputs=len(cfg.BEHAVIOR_LIST), input_shape=input_shape)
-    elif model_type == "imu":
-        kl = KerasIMU(num_outputs=2, num_imu_inputs=6, input_shape=input_shape, roi_crop=roi_crop)
-    elif model_type == "linear":
-        kl = KerasLinear(input_shape=input_shape, roi_crop=roi_crop)
     elif model_type == "tensorrt_linear":
-        # Aggressively lazy load this. This module imports pycuda.autoinit which causes a lot of unexpected things
-        # to happen when using TF-GPU for training.
+        # Aggressively lazy load this. This module imports pycuda.autoinit
+        # which causes a lot of unexpected things to happen when using TF-GPU
+        # for training.
        from donkeycar.parts.tensorrt import TensorRTLinear
         kl = TensorRTLinear(cfg=cfg)
-    elif model_type == "coral_tflite_linear":
-        from donkeycar.parts.coral import CoralLinearPilot
-        kl = CoralLinearPilot()
-    elif model_type == "3d":
-        kl = Keras3D_CNN(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, seq_length=cfg.SEQUENCE_LENGTH, roi_crop=roi_crop)
-    elif model_type == "rnn":
-        kl = KerasRNN_LSTM(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, seq_length=cfg.SEQUENCE_LENGTH, roi_crop=roi_crop)
-    elif model_type == "categorical":
-        kl = KerasCategorical(input_shape=input_shape, throttle_range=cfg.MODEL_CATEGORICAL_MAX_THROTTLE_RANGE, roi_crop=roi_crop)
-    elif model_type == "latent":
-        kl = KerasLatent(input_shape=input_shape)
-    elif model_type == "fastai":
-        from donkeycar.parts.fastai import FastAiPilot
-        kl = FastAiPilot()
     else:
-        raise Exception("unknown model type: %s" % model_type)
+        raise Exception("Unknown model type {:}, supported types are "
+                        "linear, categorical, inferred, tflite_linear, "
+                        "tensorrt_linear"
+                        .format(model_type))
 
     return kl
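A hedged usage sketch for the slimmed-down factory; `config.py` and the model path are placeholders for your own car directory, not part of the patch:

```python
# Build a pilot from a config and optionally load trained weights.
import donkeycar as dk
from donkeycar.utils import get_model_by_type

cfg = dk.load_config('config.py')
kl = get_model_by_type('linear', cfg)   # raises on unsupported types
kl.load('models/mypilot.h5')            # load previously trained weights
```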
 
 
 def get_test_img(model):
-    '''
-    query the input to see what it likes
-    make an image capable of using with that test model
-    '''
+    """
+    query the input to see what it likes, and make an image capable of being
+    used with that test model
+    :param model:    input keras model
+    :return np.ndarray(np.uint8):    numpy random img array
+    """
     assert(len(model.inputs) > 0)
     try:
         count, h, w, ch = model.inputs[0].get_shape()
@@ -514,32 +463,35 @@
         count, seq_len, h, w, ch = model.inputs[0].get_shape()
 
     # generate random array in the right shape
-    img = np.random.rand(int(h), int(w), int(ch))
-
-    return img
+    img = np.random.randint(0, 255, size=(h, w, ch))
+    return img.astype(np.uint8)
 
 
-def train_test_split(data_list, shuffle=True, test_size=0.2):
+def train_test_split(data_list: List[Any],
+                     shuffle: bool = True,
+                     test_size: float = 0.2) -> Tuple[List[Any], List[Any]]:
     '''
-    take a list, split it into two sets while selecting a random element
-    in order to shuffle the results. use the test_size to choose the split
-    percent. shuffle is always True, left there to be backwards compatible
+    take a list and split it into two sets; use the test_size to choose the
+    split percent. with shuffle=True elements are drawn at random, otherwise
+    the list is split in order.
     '''
-    assert shuffle
-    train_data = []
+    target_train_size = int(len(data_list) * (1. - test_size))
 
-    target_train_size = len(data_list) * (1. - test_size)
+    if shuffle:
+        train_data = []
+        i_sample = 0
+        while i_sample < target_train_size and len(data_list) > 1:
+            i_choice = random.randint(0, len(data_list) - 1)
+            train_data.append(data_list.pop(i_choice))
+            i_sample += 1
 
-    i_sample = 0
+        # remainder of the original list is the validation set
+        val_data = data_list
 
-    while i_sample < target_train_size and len(data_list) > 1:
-        i_choice = random.randint(0, len(data_list) - 1)
-        train_data.append(data_list.pop(i_choice))
-        i_sample += 1
-
-    # remainder of the original list is the validation set
-    val_data = data_list
+    else:
+        train_data = data_list[:target_train_size]
+        val_data = data_list[target_train_size:]
 
     return train_data, val_data
 
@@ -564,4 +516,4 @@ def on_frame(self):
         e = time.time()
         print('fps', 100.0 / (e - self.t))
         self.t = time.time()
-        self.iter = 0
\ No newline at end of file
+        self.iter = 0
diff --git a/donkeycar/vehicle.py b/donkeycar/vehicle.py
index 0c7eeef59..ab504500b 100644
--- a/donkeycar/vehicle.py
+++ b/donkeycar/vehicle.py
@@ -81,7 +81,7 @@ def add(self, part, inputs=[], outputs=[],
             Channel names to save to memory.
         threaded : boolean
             If a part should be run in a separate thread.
-        run_condition : boolean
+        run_condition : str
             If a part should be run or not
         """
         assert type(inputs) is list, "inputs is not a list: %r" % inputs
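A short usage sketch of the reworked splitter. Note that with `shuffle=True` the input list is consumed by `pop()`, so pass a copy if the caller still needs the original:

```python
# The splitter mutates its argument when shuffle=True, so pass a copy.
from donkeycar.utils import train_test_split

records = list(range(10))
train, val = train_test_split(records.copy(), shuffle=True, test_size=0.2)
assert len(train) == 8 and len(val) == 2
```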
diff --git a/install/envs/mac.yml b/install/envs/mac.yml
index 9f01121a1..ea41bc99a 100644
--- a/install/envs/mac.yml
+++ b/install/envs/mac.yml
@@ -1,19 +1,39 @@
 name: donkey
-dependencies:
-- h5py==2.8.0
-- pillow
-- tensorflow==1.13.1
-- opencv
-- matplotlib
-- tornado
-- docopt
-- pandas
-- pylint
-- pytest
-- pip
+channels:
+  - defaults
+  - conda-forge
+  - pytorch
 
-- pip:
+dependencies:
+  - python=3.7
+  - h5py
+  - pillow
+  - opencv
+  - matplotlib
+  - tornado
+  - docopt
+  - pandas
+  - pylint
+  - pytest
+  - pytest-cov
+  - codecov
+  - pip
+  - imgaug
+  - progress
   - moviepy
   - paho-mqtt
-  - PrettyTable
\ No newline at end of file
+  - PrettyTable
+  - pyfiglet
+  - mypy
+  - pytorch=1.7.1
+  - torchvision
+  - torchaudio
+  - pytorch-lightning
+  - numpy
+  - pip:
+    - tensorflow==2.2.0
+    - git+https://github.com/autorope/keras-vis.git
+    - simple-pid
+    - opencv-python-headless
+
diff --git a/install/envs/rpi.yml b/install/envs/rpi.yml
deleted file mode 100644
index 6f094d45a..000000000
--- a/install/envs/rpi.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-name: donkey
-dependencies:
-- certifi=14.05.14=py34_0
-- freetype=2.5.2=2
-- h5py=2.5.0=np19py34_3
-- hdf5=1.8.15.1=1
-- jpeg=8d=0
-- libgfortran=1.0=0
-- libpng=1.6.17=0
-- libtiff=4.0.2=1
-- numpy=1.9.2=py34_1
-- openblas=0.2.14=1
-- openssl=1.0.1k=1
-- pillow=2.9.0=py34_0
-- pip=7.1.2=py34_0
-- python=3.4.3=1
-- python-dateutil=2.4.2=py34_0
-- pytz=2015.4=py34_0
-- requests=2.7.0=py34_0
-- scipy=0.16.0=np19py34_1
-- setuptools=18.1=py34_0
-- six=1.9.0=py34_0
-- sqlite=3.8.4.1=1
-- tornado=4.2.1=py34_0
-- wheel=0.24.0=py34_0
-- xz=5.0.5=0
-- zlib=1.2.8=0
-- pip:
-  - adafruit-gpio==1.0.3
-  - adafruit-pca9685==1.0.1
-  - adafruit-pureio==0.2.1
-  - cheroot==5.8.3
-  - docopt==0.6.2
-  - envoy==0.0.3
-  - picamera==1.13
-  - portend==2.1.2
-  - protobuf==3.3.0
-  - pyyaml==3.12
-  - spidev==3.2
-  - tempora==1.8
-  - theano==0.9.0
-  - werkzeug==0.12.2
-  - tensorflow==1.7.0
diff --git a/install/envs/ubuntu.yml b/install/envs/ubuntu.yml
index d535f9bc2..1b818f7b0 100644
--- a/install/envs/ubuntu.yml
+++ b/install/envs/ubuntu.yml
@@ -1,19 +1,38 @@
 name: donkey
-dependencies:
-- h5py==2.10.0
-- pillow
-- tensorflow==1.13.1
-- opencv
-- matplotlib
-- tornado
-- docopt
-- pandas
-- pylint
-- pytest
-- pip
+channels:
+  - defaults
+  - conda-forge
+  - pytorch
 
-- pip:
+dependencies:
+  - python=3.7
+  - h5py
+  - pillow
+  - opencv
+  - matplotlib
+  - tornado
+  - docopt
+  - pandas
+  - pylint
+  - pytest
+  - pytest-cov
+  - codecov
+  - pip
+  - imgaug
+  - progress
   - moviepy
   - paho-mqtt
-  - PrettyTable
\ No newline at end of file
+  - PrettyTable
+  - pyfiglet
+  - mypy
+  - pytorch=1.7.1
+  - torchvision
+  - torchaudio
+  - pytorch-lightning
+  - numpy
+  - tensorflow==2.2.0
+  - pip:
+    - git+https://github.com/autorope/keras-vis.git
+    - simple-pid
+    - opencv-python-headless
\ No newline at end of file
diff --git a/install/envs/windows.yml b/install/envs/windows.yml
deleted file mode 100755
index d535f9bc2..000000000
--- a/install/envs/windows.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: donkey
-dependencies:
-
-- h5py==2.10.0
-- pillow
-- tensorflow==1.13.1
-- opencv
-- matplotlib
-- tornado
-- docopt
-- pandas
-- pylint
-- pytest
-- pip
-
-- pip:
-  - moviepy
-  - paho-mqtt
-  - PrettyTable
\ No newline at end of file
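Because the environment files now pin python, pytorch and tensorflow together, an existing `donkey` env from an older release is probably best rebuilt rather than updated in place, e.g. `conda env remove -n donkey` followed by a fresh create from the appropriate yml file; this is a suggested workflow, not part of the patch.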
diff --git a/install/nano/install-jp44.sh b/install/nano/install-jp44.sh
new file mode 100644
index 000000000..1cb8f3cec
--- /dev/null
+++ b/install/nano/install-jp44.sh
@@ -0,0 +1,60 @@
+#!/bin/bash -e
+
+password='jetson'
+# Get the full dir name of this script
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+# Keep updating the existing sudo time stamp
+sudo -v
+while true; do sudo -n true; sleep 120; kill -0 "$$" || exit; done 2>/dev/null &
+
+########################################
+# Install DonkeyCar
+########################################
+sudo apt-get update -y
+sudo apt-get upgrade -y
+sudo apt-get install -y libhdf5-serial-dev hdf5-tools libhdf5-dev zlib1g-dev zip libjpeg8-dev liblapack-dev libblas-dev gfortran
+sudo apt-get install -y python3-dev python3-pip
+sudo apt-get install -y libxslt1-dev libxml2-dev libffi-dev libcurl4-openssl-dev libssl-dev libpng-dev libopenblas-dev
+sudo apt-get install -y git
+sudo apt-get install -y openmpi-doc openmpi-bin libopenmpi-dev libopenblas-dev
+
+# Install Tensorflow as system package
+sudo -H pip3 install -r requirements.txt
+#sudo -H pip3 install --pre --extra-index-url https://developer.download.nvidia.com/compute/redist/jp/v44 'tensorflow>2'
+sudo -H pip3 install --pre --extra-index-url https://developer.download.nvidia.com/compute/redist/jp/v44 tensorflow==2.2.0+nv20.6
+
+########################################
+# Install PyTorch v1.7 - torchvision v0.8.1
+# pytorch 1.6.0-rc2
+# https://forums.developer.nvidia.com/t/pytorch-for-jetson-nano-version-1-5-0-now-available/72048/392
+wget https://nvidia.box.com/shared/static/wa34qwrwtk9njtyarwt5nvo6imenfy26.whl -O torch-1.7.0-cp36-cp36m-linux_aarch64.whl
+sudo -H pip3 install ./torch-1.7.0-cp36-cp36m-linux_aarch64.whl
+
+# Install PyTorch Vision
+sudo apt-get install libjpeg-dev zlib1g-dev libpython3-dev libavcodec-dev libavformat-dev libswscale-dev
+mkdir -p ~/projects; cd ~/projects
+git clone --branch v0.8.1 https://github.com/pytorch/vision torchvision
+cd torchvision
+export BUILD_VERSION=0.8.1
+sudo python3 setup.py install
+
+
+# Create virtual environment
+pip3 install virtualenv
+python3 -m virtualenv -p python3 ~/.virtualenv/donkeycar --system-site-packages
+echo "source ~/.virtualenv/donkeycar/bin/activate" >> ~/.bashrc
+# "source ~/.virtualenv/donkeycar/bin/activate" in the shell script
+. ~/.virtualenv/donkeycar/bin/activate
+
+
+# Install DonkeyCar as user package
+cd ~/projects
+git clone https://github.com/autorope/donkeycar
+cd donkeycar
+git checkout dev
+pip install -e .[nano]
+
+# https://github.com/keras-team/keras-tuner/issues/317
+echo "export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1" >> ~/.bashrc
+#export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1
\ No newline at end of file
diff --git a/install/nano/requirements.txt b/install/nano/requirements.txt
new file mode 100644
index 000000000..daa2dc778
--- /dev/null
+++ b/install/nano/requirements.txt
@@ -0,0 +1,28 @@
+pip
+testresources
+setuptools
+futures
+protobuf
+pybind11
+cython
+numpy
+future
+mock
+h5py==2.10.0
+keras_preprocessing
+keras_applications
+gast
+grpcio
+absl-py
+py-cpuinfo
+psutil
+portpicker
+six
+requests
+astor
+termcolor
+wrapt
+google-pasta
+scipy
+pandas
+gdown
\ No newline at end of file
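Note that `install-jp44.sh` reads `requirements.txt` by a relative path, so it should be run from `install/nano/`, e.g. `cd install/nano && bash install-jp44.sh` (invocation suggested here, not part of the patch). The `password='jetson'` variable appears unused in the script as written.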
diff --git a/scripts/convert_to_tub_v2.py b/scripts/convert_to_tub_v2.py
new file mode 100755
index 000000000..b57c137fc
--- /dev/null
+++ b/scripts/convert_to_tub_v2.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+'''
+Usage:
+    convert_to_tub_v2.py --tub=<tub_path> --output=<output_path>
+
+Note:
+    This script converts the old datastore format to the new datastore format.
+'''
+
+import json
+import os
+import traceback
+from pathlib import Path
+
+from docopt import docopt
+from PIL import Image
+from progress.bar import IncrementalBar
+
+import donkeycar as dk
+from donkeycar.parts.datastore import Tub as LegacyTub
+from donkeycar.parts.tub_v2 import Tub
+
+
+def convert_to_tub_v2(paths, output_path):
+    empty_record = {'__empty__': True}
+
+    if type(paths) is str:
+        paths = [paths]
+    legacy_tubs = [LegacyTub(path) for path in paths]
+    output_tub = None
+    print(f'Total number of tubs: {len(legacy_tubs)}')
+
+    for legacy_tub in legacy_tubs:
+        if not output_tub:
+            # add input and type for empty records recording
+            inputs = legacy_tub.inputs + ['__empty__']
+            types = legacy_tub.types + ['boolean']
+            output_tub = Tub(output_path, inputs, types,
+                             list(legacy_tub.meta.items()))
+
+        record_paths = legacy_tub.gather_records()
+        bar = IncrementalBar('Converting', max=len(record_paths))
+        previous_index = None
+        for record_path in record_paths:
+            try:
+                contents = Path(record_path).read_text()
+                record = json.loads(contents)
+                image_path = record['cam/image_array']
+                current_index = int(image_path.split('_')[0])
+                image_path = os.path.join(legacy_tub.path, image_path)
+                image_data = Image.open(image_path)
+                record['cam/image_array'] = image_data
+                # first record or they are continuous, just append
+                if not previous_index or current_index == previous_index + 1:
+                    output_tub.write_record(record)
+                    previous_index = current_index
+                # otherwise fill the gap with dummy records
+                else:
+                    # Skipping over previous record here because it has
+                    # already been written.
+                    previous_index += 1
+                    # Adding empty record nodes, and marking them deleted
+                    # until the next valid record.
+                    while previous_index < current_index:
+                        idx = output_tub.manifest.current_index
+                        output_tub.write_record(empty_record)
+                        output_tub.delete_record(idx)
+                        previous_index += 1
+                bar.next()
+            except Exception as exception:
+                print(f'Ignoring record path {record_path}\n', exception)
+                traceback.print_exc()
+
+
+if __name__ == '__main__':
+    args = docopt(__doc__)
+
+    input_path = args["--tub"]
+    output_path = args["--output"]
+    paths = input_path.split(',')
+    convert_to_tub_v2(paths, output_path)
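With the script marked executable (mode 100755), a conversion run looks like `./scripts/convert_to_tub_v2.py --tub=./data/tub_1 --output=./data/tub_1_v2` (paths illustrative). Per the code above, `--tub` also accepts a comma-separated list of legacy tubs, all of which are merged into the single v2 output tub.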
diff --git a/scripts/cull_tub.py b/scripts/cull_tub.py
deleted file mode 100644
index a2b699a1e..000000000
--- a/scripts/cull_tub.py
+++ /dev/null
@@ -1,81 +0,0 @@
-'''
-Usage:
-    cull_tub.py --tub=<tub_path> --count=<count>
-
-Note:
-    This script will remove records in a given tub until it reaches a desired count.
-    It will try to sample from the records to maintain a even steering distribution
-    as it selects records to remove.
-'''
-
-import os
-
-from docopt import docopt
-import json
-
-import donkeycar as dk
-from donkeycar.utils import *
-from donkeycar.parts.datastore import TubGroup
-
-def main(tub_path, count):
-    cfg = dk.load_config('config.py')
-    records_paths = gather_records(cfg, tub_path)
-
-    record_name = "user/angle"
-    num_bins = 20
-    half_bins = num_bins // 2
-    bins = {}
-    records = []
-
-    for i in range(num_bins):
-        bins[i] = []
-
-
-    if len(records_paths) <= count:
-        print("we have fewer records than target count.")
-        return
-
-    #put the record in a bin based on angle. expecting -1 to 1
-    for record_path in records_paths:
-        with open(record_path, 'r') as fp:
-            record = json.load(fp)
-        user_angle = float(record["user/angle"])
-        record["filename"] = record_path
-        iBin = round((user_angle * half_bins) + (half_bins - 1)) % num_bins
-        bins[iBin].append(record)
-        records.append(record)
-
-
-    for i in range(num_bins):
-        print("iBin:", len(bins[i]))
-
-
-    keep = []
-    iBin = 0
-    while len(keep) < count:
-        iBin += 1
-        if iBin >= num_bins:
-            iBin = 0
-        num_elem = len(bins[iBin])
-        if num_elem > 0:
-            iRec = random.randint(0, num_elem - 1)
-            rec = bins[iBin].pop(iRec)
-            keep.append(rec)
-
-    print("have", count, "records selected. removing the rest...")
-
-
-    for record in records:
-        if record in keep:
-            continue
-        img_filename = os.path.join(tub_path, record['cam/image_array'])
-        os.unlink(img_filename)
-        os.unlink(record["filename"])
-
-    print('done')
-
-if __name__ == '__main__':
-    args = docopt(__doc__)
-
-    count = int(args["--count"])
-    main(args["--tub"], count)
diff --git a/scripts/cull_tubs.py b/scripts/cull_tubs.py
deleted file mode 100644
index e2c5138df..000000000
--- a/scripts/cull_tubs.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#Cull many tubs
-import os
-import sys
-
-from cull_tub import main
-
-tubs = os.listdir(sys.argv[1])
-count = int(sys.argv[2])
-print("found", len(tubs), "tubs")
-for tub in tubs:
-    if os.path.isdir(tub):
-        print("working on", tub)
-        main(tub, count)
\ No newline at end of file
diff --git a/scripts/graph_listener.py b/scripts/graph_listener.py
old mode 100644
new mode 100755
diff --git a/scripts/preview_augumentations.py b/scripts/preview_augumentations.py
new file mode 100755
index 000000000..dfb775928
--- /dev/null
+++ b/scripts/preview_augumentations.py
@@ -0,0 +1,54 @@
+'''
+Usage:
+    preview_augumentations.py
+
+Note:
+    This script helps preview the augmentations used when the model is being trained.
+'''
+
+import time
+import cv2
+
+from donkeycar.parts.augmentations import Augmentations
+
+# Camera Parameters
+WIDTH = 640
+HEIGHT = 480
+
+# Example augmentations
+cropping = Augmentations.crop(0, 0, 100, 0, keep_size=True)
+mask = Augmentations.trapezoidal_mask(10, 630, 100, 300, 50, 480)
+
+
+def preview_augmentations():
+    print('Connecting to Camera')
+    capture = cv2.VideoCapture(0)
+    time.sleep(2)
+    if capture.isOpened():
+        print('Camera Connected.')
+    else:
+        print('Unable to connect. Are you sure you are using the right camera parameters?')
+        return
+
+    while True:
+        success, frame = capture.read()
+        if success:
+            cropped = cropping.augment_image(frame)
+            masked = mask.augment_image(frame)
+            # Convert to RGB
+            cv2.imshow('Preview', frame)
+            cv2.imshow('Cropped', cropped)
+            cv2.imshow('Trapezoidal Mask', masked)
+            prompt = cv2.waitKey(1) & 0xFF
+            if prompt == ord(' '):
+                # Store output
+                pass
+            elif prompt == ord('q'):
+                break
+
+    capture.release()
+    cv2.destroyAllWindows()
+
+
+if __name__ == "__main__":
+    preview_augmentations()
diff --git a/scripts/profile.py b/scripts/profile.py
index 59a7f4163..3f312bb18 100755
--- a/scripts/profile.py
+++ b/scripts/profile.py
@@ -11,7 +11,6 @@
 from docopt import docopt
 import donkeycar as dk
 import numpy as np
-import time
 
 from donkeycar.utils import FPSTimer
 
@@ -21,38 +20,19 @@ def profile(model_path, model_type):
     model = dk.utils.get_model_by_type(model_type, cfg)
     model.load(model_path)
 
-    count, h, w, ch = 1, cfg.TARGET_H, cfg.TARGET_W, cfg.TARGET_D
-    seq_len = 0
-
-    if "rnn" in model_type or "3d" in model_type:
-        seq_len = cfg.SEQUENCE_LENGTH
+    h, w, ch = cfg.TARGET_H, cfg.TARGET_W, cfg.TARGET_D
 
-    # generate random array in the right shape in [0,1)
-    img = np.random.rand(int(h), int(w), int(ch))
-
-    if seq_len:
-        img_arr = []
-        for i in range(seq_len):
-            img_arr.append(img)
-        img_arr = np.array(img_arr)
+    # generate random uint8 array in the right shape
+    img = np.random.randint(0, 255, size=(h, w, ch))
 
     # make a timer obj
     timer = FPSTimer()
 
     try:
         while True:
-
-            '''
-            run forward pass on model
-            '''
-            if seq_len:
-                model.run(img_arr)
-            else:
-                model.run(img)
-
-            '''
-            keep track of iterations and give feed back on iter/sec
-            '''
+            # run inference
+            model.run(img)
+            # track iterations and report iter/sec
             timer.on_frame()
 
     except KeyboardInterrupt:
diff --git a/scripts/profile_coral.py b/scripts/profile_coral.py
old mode 100644
new mode 100755
diff --git a/scripts/remote_cam_view.py b/scripts/remote_cam_view.py
old mode 100644
new mode 100755
diff --git a/scripts/remote_cam_view_tcp.py b/scripts/remote_cam_view_tcp.py
old mode 100644
new mode 100755
diff --git a/scripts/salient_vis_listener.py b/scripts/salient_vis_listener.py
old mode 100644
new mode 100755
diff --git a/scripts/tflite_convert.py b/scripts/tflite_convert.py
old mode 100644
new mode 100755
diff --git a/scripts/tflite_profile.py b/scripts/tflite_profile.py
old mode 100644
new mode 100755
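The `old mode`/`new mode` stanzas above only add the executable bit (the equivalent of `chmod +x`), so these helper scripts can now be invoked directly, e.g. `./scripts/tflite_profile.py`, instead of going through `python`.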
diff --git a/setup.py b/setup.py
index 3a784ae38..79e545862 100644
--- a/setup.py
+++ b/setup.py
@@ -1,8 +1,9 @@
-from setuptools import setup, find_packages
-
 import os
 
-#include the non python files
+from setuptools import find_packages, setup
+
+
+# include the non python files
 def package_files(directory, strip_leading):
     paths = []
     for (path, directories, filenames) in os.walk(directory):
@@ -11,9 +12,10 @@ def package_files(directory, strip_leading):
             paths.append(package_file[len(strip_leading):])
     return paths
 
-car_templates=['templates/*']
-web_controller_html = package_files('donkeycar/parts/controllers/templates', 'donkeycar/')
 
+car_templates = ['templates/*']
+web_controller_html = package_files('donkeycar/parts/controllers/templates',
+                                    'donkeycar/')
 
 extra_files = car_templates + web_controller_html
 print('extra_files', extra_files)
@@ -21,85 +23,85 @@
 with open("README.md", "r") as fh:
     long_description = fh.read()
 
-
 setup(name='donkeycar',
-      version='3.1.5',
-      long_description = long_description,
-      description='Self driving library for python.',
-      url='https://github.com/autorope/donkeycar',
-      author='Will Roscoe, Adam Conway, Tawn Kramer',
-      author_email='wroscoe@gmail.com, adam@casaconway.com, tawnkramer@gmail.com',
-      license='MIT',
-      entry_points={
-          'console_scripts': [
-              'donkey=donkeycar.management.base:execute_from_command_line',
-          ],
-      },
-      install_requires=['numpy',
-                        'pillow',
-                        'docopt',
-                        'tornado',
-                        'requests',
-                        'h5py==2.10.0',
-                        'moviepy',
-                        'pandas',
-                        'PrettyTable',
-                        'paho-mqtt',
-                        "simple_pid"
-                        ],
-
-      extras_require={
-          'pi': [
-              'picamera',
-              'Adafruit_PCA9685',
-              'Adafruit_SSD1306',
-              'RPi.GPIO',
-              'pyserial',
-          ],
-          'nano': [
-              'Adafruit_PCA9685',
-              'Adafruit_SSD1306',
-          ],
-          'pc': [
-              'matplotlib',
-          ],
-          'dev' : [
-              'pytest',
-              'pytest-cov',
-              'responses',
-          ],
-          'ci': ['codecov'],
-          'tf': ['tensorflow==1.13.1'],
-          'tf_gpu': ['tensorflow-gpu==1.13.1'],
-          'mm1': ['pyserial']
-      },
-      package_data={
-          'donkeycar': extra_files,
-      },
-
+      version='4.1.0',
+      long_description=long_description,
+      description='Self driving library for python.',
+      url='https://github.com/autorope/donkeycar',
+      author='Will Roscoe, Adam Conway, Tawn Kramer',
+      author_email='wroscoe@gmail.com, adam@casaconway.com, tawnkramer@gmail.com',
+      license='MIT',
+      entry_points={
+          'console_scripts': [
+              'donkey=donkeycar.management.base:execute_from_command_line',
+          ],
+      },
+      install_requires=[
+          'numpy',
+          'pillow',
+          'docopt',
+          'tornado',
+          'requests',
+          'h5py',
+          'PrettyTable',
+          'paho-mqtt',
+          "simple_pid",
+          'progress',
+          'typing_extensions',
+          'pyfiglet'
+      ],
+      extras_require={
+          'pi': [
+              'picamera',
+              'Adafruit_PCA9685',
+              'Adafruit_SSD1306',
+              'RPi.GPIO',
+              'pyserial',
+          ],
+          'nano': [
+              'Adafruit_PCA9685',
+              'Adafruit_SSD1306',
+              'RPi.GPIO'
+          ],
+          'pc': [
+              'matplotlib',
+              'imgaug',
+          ],
+          'dev': [
+              'pytest',
+              'pytest-cov',
+              'responses',
+              'mypy'
+          ],
+          'ci': ['codecov'],
+          'tf': ['tensorflow>=2.2.0'],
+          'torch': [
+              'torch>=1.7.1',
+              'torchvision',
+              'torchaudio'
+          ],
+          'mm1': ['pyserial']
+      },
+      package_data={
+          'donkeycar': extra_files,
+      },
       include_package_data=True,
-
       classifiers=[
           # How mature is this project? Common values are
           #   3 - Alpha
           #   4 - Beta
           #   5 - Production/Stable
-          'Development Status :: 3 - Alpha',
-
+          'Development Status :: 4 - Beta',
           # Indicate who your project is intended for
           'Intended Audience :: Developers',
           'Topic :: Scientific/Engineering :: Artificial Intelligence',
-
           # Pick your license as you wish (should match "license" above)
           'License :: OSI Approved :: MIT License',
-
           # Specify the Python versions you support here. In particular, ensure
           # that you indicate whether you support Python 2, Python 3 or both.
-          'Programming Language :: Python :: 3.6',
           'Programming Language :: Python :: 3.7',
       ],
       keywords='selfdriving cars donkeycar diyrobocars',
       packages=find_packages(exclude=(['tests', 'docs', 'site', 'env'])),
-)
+      )
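With the reworked extras, a typical PC training install pulls the optional stacks with e.g. `pip install -e .[pc,dev]`, adding `.[tf]` or `.[torch]` for the desired framework; the bracketed names match the `extras_require` keys above. (Note the `torch` extra is spelled with the PyPI package name `torch`, since `pytorch` is a conda-only name and does not resolve on PyPI.)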