Release 5.0 and installation with pip (#1153)
* Created setup.cfg and pyproject.toml for the donkeycar install.
* Remove obsolete conda yml files.
* Avoid moviepy import error and fix UI behavior of merging deleted indexes, which was missing its implementation in the datastore.
* Add Pi user and hostname to the config, as these are required by the UI.
* Add missing opencv for RPi
* Include scripts folder in PyPI package
* Make torch import local in test (the deferred-import pattern is sketched after this list)
* Fixed TubWriter test after updating session_id to be a tuple
* Froze numpy version for the Nano
* Added macos target for install on Apple ARM in setup.cfg
* Set version to the right version for the main-branch commit.
* Remove obsolete argument in Tub
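The moviepy and torch bullets above rely on the same deferred-import pattern that appears in the makemovie.py and test_torch.py diffs below: the optional dependency is imported inside the function that needs it, so importing the module itself (or installing donkeycar without the optional extra) never fails. A minimal sketch of that pattern, not part of the diff; the MovieMaker class is a hypothetical stand-in:

    import logging

    logger = logging.getLogger(__name__)


    class MovieMaker:
        """Hypothetical part that only needs moviepy when it actually runs."""

        def run(self):
            try:
                # Deferred import: only raises when this method is called
                # without moviepy installed, never at module import time.
                import moviepy.editor as mpy
            except ImportError as e:
                logger.error(f'Please install moviepy first: {e}')
                return
            # ... build the movie clip with mpy here ...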
DocGarbanzo authored Dec 20, 2023
1 parent 1b548b9 commit 0182bb8
Showing 24 changed files with 246 additions and 616 deletions.
12 changes: 4 additions & 8 deletions .github/workflows/python-package-conda.yml
@@ -10,11 +10,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
include:
- os: macos-latest
ENV_FILE: install/envs/mac.yml
- os: ubuntu-latest
ENV_FILE: install/envs/ubuntu.yml
os: ["macos-latest", "ubuntu-latest"]
fail-fast: false
defaults:
run:
@@ -28,16 +24,16 @@ jobs:
python-version: 3.9
mamba-version: "*"
activate-environment: donkey
environment-file: ${{matrix.ENV_FILE}}
auto-activate-base: false
channels: default, conda-forge, pytorch
channel-priority: true
- name: Conda info and list
run: |
echo Environment file ${{matrix.ENV_FILE}}
conda info
conda list
- name: Install donkey
run: pip install -e .[pc]
run: |
pip install -e .[pc,dev]
pip list
- name: Run tests
run: pytest
2 changes: 1 addition & 1 deletion MANIFEST.in
@@ -1,3 +1,3 @@
include donkeycar/templates/*
include VERSION
include scripts
recursive-include donkeycar/parts/web_controller/templates/ *
3 changes: 1 addition & 2 deletions donkeycar/__init__.py
@@ -2,9 +2,8 @@
import sys
from pyfiglet import Figlet
import logging
from pkg_resources import get_distribution

__version__ = get_distribution('donkeycar').version
__version__ = '5.1.dev0'

logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO').upper())

73 changes: 63 additions & 10 deletions donkeycar/management/makemovie.py
@@ -1,13 +1,12 @@
import moviepy.editor as mpy
import tempfile

from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.models import load_model
import tensorflow as tf
import cv2
from matplotlib import cm
try:
from vis.utils import utils
except:
raise Exception("Please install keras-vis: pip install git+https://github.com/autorope/keras-vis.git")


import donkeycar as dk
from donkeycar.parts.tub_v2 import Tub
@@ -17,13 +16,64 @@
DEG_TO_RAD = math.pi / 180.0


def apply_modifications(model, custom_objects=None):
"""Applies modifications to the model layers to create a new Graph. For
example, simply changing `model.layers[idx].activation = new activation`
does not change the graph. The entire graph needs to be updated with
modified inbound and outbound tensors because of change in layer building
function.
Args:
model: The `keras.models.Model` instance.
Returns:
The modified model with changes applied. Does not mutate the original
`model`.
"""
# The strategy is to save the modified model and load it back. This is
done because setting the activation in a Keras layer doesn't actually
# change the graph. We have to iterate the entire graph and change the
# layer inbound and outbound nodes with modified tensors. This is doubly
# complicated in Keras 2.x since multiple inbound and outbound nodes are
# allowed with the Graph API.
model_path = os.path.join(tempfile.gettempdir(),
next(tempfile._get_candidate_names()) + '.h5')
try:
model.save(model_path)
return load_model(model_path, custom_objects=custom_objects)
finally:
os.remove(model_path)


def normalize(array, min_value=0., max_value=1.):
"""Normalizes the numpy array to (min_value, max_value)
Args:
array: The numpy array
min_value: The min value in normalized array (Default value = 0)
max_value: The max value in normalized array (Default value = 1)
Returns:
The array normalized to range between (min_value, max_value)
"""
arr_min = np.min(array)
arr_max = np.max(array)
normalized = (array - arr_min) / (arr_max - arr_min + K.epsilon())
return (max_value - min_value) * normalized + min_value


class MakeMovie(object):

def run(self, args, parser):
'''
Load the images from a tub and create a movie from them.
'''
try:
import moviepy.editor as mpy
except ImportError as e:
logger.error(f'Please install moviepy first: {e}')
return

if args.tub is None:
print("ERR>> --tub argument missing.")
@@ -40,15 +90,18 @@ def run(self, args, parser):

if args.type is None and args.model is not None:
args.type = self.cfg.DEFAULT_MODEL_TYPE
print("Model type not provided. Using default model type from config file")
print("Model type not provided. Using default model type from "
"config file")

if args.salient:
if args.model is None:
print("ERR>> salient visualization requires a model. Pass with the --model arg.")
print("ERR>> salient visualization requires a model. Pass "
"with the --model arg.")
parser.print_help()

if args.type not in ['linear', 'categorical']:
print("Model type {} is not supported. Only linear or categorical is supported for salient visualization".format(args.type))
print(f"Model type {args.type} is not supported. Only linear "
f"or categorical is supported for salient visualization")
parser.print_help()
return

@@ -181,7 +234,7 @@ def init_salient(self, model):
for li in layer_idx:
model.layers[li].activation = activations.linear
# build salient model and optimizer
sal_model = utils.apply_modifications(model)
sal_model = apply_modifications(model)
self.sal_model = sal_model
return True

@@ -210,7 +263,7 @@ def compute_visualisation_mask(self, img):

channel_idx = 1 if K.image_data_format() == 'channels_first' else -1
grads = np.sum(grads, axis=channel_idx)
res = utils.normalize(grads)[0]
res = normalize(grads)[0]
return res

def draw_salient(self, img):
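The two module-level helpers added above, apply_modifications() and normalize(), replace the corresponding keras-vis utilities that were previously imported from vis.utils. A minimal usage sketch mirroring init_salient() and compute_visualisation_mask() in this diff (assumes the helpers are in scope, as they are in makemovie.py, and that 'pilot.h5' is a hypothetical trained Keras pilot):

    import numpy as np
    from tensorflow.python.keras import activations
    from tensorflow.python.keras.models import load_model

    model = load_model('pilot.h5')                      # hypothetical pilot file
    model.layers[-1].activation = activations.linear    # linear output for saliency
    sal_model = apply_modifications(model)              # save/reload rebuilds the graph

    grads = np.array([[0.2, -0.4], [1.0, 0.6]])         # pretend gradient map
    mask = normalize(grads)                             # rescaled into [0, 1]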
70 changes: 53 additions & 17 deletions donkeycar/parts/datastore_v2.py
@@ -1,3 +1,4 @@
import atexit
import json
import mmap
import os
@@ -7,7 +8,6 @@

logger = logging.getLogger(__name__)


NEWLINE = '\n'
NEWLINE_STRIP = '\r\n'

@@ -244,6 +244,7 @@ def __init__(self, base_path, inputs=[], types=[], metadata=[],
self.catalog_metadata = dict()
self.deleted_indexes = set()
self._updated_session = False
self._is_closed = False
has_catalogs = False

if self.manifest_path.exists():
@@ -258,10 +259,12 @@ def __init__(self, base_path, inputs=[], types=[], metadata=[],
self.manifest_metadata['created_at'] = created_at
if not self.base_path.exists():
self.base_path.mkdir(parents=True, exist_ok=True)
logger.info(f'Created a new datastore at'
logger.info(f'Creating a new datastore at'
f' {self.base_path.as_posix()}')
self.seekeable = Seekable(self.manifest_path,
read_only=self.read_only)
logger.info(f'Creating a new manifest at '
f'{self.manifest_path.as_posix()}')

if not has_catalogs:
self._write_contents()
@@ -275,7 +278,14 @@ def __init__(self, base_path, inputs=[], types=[], metadata=[],
start_index=self.current_index)
# Create a new session_id, which will be added to each record in the
# tub, when Tub.write_record() is called.
self.session_id = self.create_new_session()
self.session_id = self.create_new_session_id()

def exit_hook():
if not self._is_closed:
logger.error(f"Unexpected closing manifest {self.base_path}")
self.close()
# Automatically save config when program ends
atexit.register(exit_hook)

def write_record(self, record):
new_catalog = self.current_index > 0 \
@@ -300,8 +310,8 @@ def delete_records(self, record_indexes):
self.deleted_indexes.update(record_indexes)
self._update_catalog_metadata(update=True)
if record_indexes:
logger.info(f'Deleted records {min(record_indexes)} - '
f'{max(record_indexes)}')
logger.info(f'Deleting {len(record_indexes)} records: '
f'{min(record_indexes)} - {max(record_indexes)}')

def restore_records(self, record_indexes):
# Does not actually delete the record, but marks it as deleted.
@@ -339,8 +349,19 @@ def _read_metadata(self, metadata=[]):

def _read_contents(self):
self.seekeable.seek_line_start(1)
self.inputs = json.loads(self.seekeable.readline())
self.types = json.loads(self.seekeable.readline())
manifest_inputs = json.loads(self.seekeable.readline())
manifest_types = json.loads(self.seekeable.readline())
if not self.inputs and not self.types:
self.inputs = manifest_inputs
self.types = manifest_types
else:
assert self.inputs == manifest_inputs \
and self.types == manifest_types, \
f'Trying to create a tub with different inputs/types than ' \
f'the stored tub. This is only allowed when new tub ' \
f'specifies no inputs. New inputs: {self.inputs} vs ' \
f'stored inputs: {manifest_inputs}, new types {self.types}'\
f' vs stored types: {manifest_types}'
self.metadata = json.loads(self.seekeable.readline())
self.manifest_metadata = json.loads(self.seekeable.readline())
# Catalog metadata
@@ -370,32 +391,47 @@ def _update_catalog_metadata(self, update=True):
self.catalog_metadata = catalog_metadata
self.seekeable.writeline(json.dumps(catalog_metadata))

def create_new_session(self):
def _update_session_info(self):
""" Creates a new session id and appends it to the metadata."""
sessions = self.manifest_metadata.get('sessions', {})
last_id = -1
if sessions:
last_id = sessions['last_id']
else:
if not sessions:
sessions['all_full_ids'] = []
this_id = last_id + 1
date = time.strftime('%y-%m-%d')
this_full_id = date + '_' + str(this_id)
this_id, this_full_id = self.session_id
sessions['last_id'] = this_id
sessions['last_full_id'] = this_full_id
sessions['all_full_ids'].append(this_full_id)
self.manifest_metadata['sessions'] = sessions
return this_full_id

def create_new_session_id(self):
""" Creates a new session id and appends it to the metadata."""
sessions = self.manifest_metadata.get('sessions', {})
new_id = sessions['last_id'] + 1 if sessions else 0
new_full_id = f"{time.strftime('%y-%m-%d')}_{new_id}"
return new_id, new_full_id

def add_deleted_indexes(self, indexes):
if isinstance(indexes, int):
indexes = {indexes}
self.deleted_indexes.update(indexes)
self._update_catalog_metadata(update=True)

def close(self):
""" Closing tub closes open files for catalog, catalog manifest and
manifest.json"""
# If records were received, write updated session_id dictionary into
# the metadata, otherwise keep the session_id information unchanged
if self._updated_session:
self.seekeable.update_line(4, json.dumps(self.manifest_metadata))
logger.info(f'Saving new session {self.session_id[1]}')
self._update_session_info()
self.write_metadata()
self.current_catalog.close()
self.seekeable.close()
self._is_closed = True
logger.info(f'Closing manifest {self.base_path}')

def write_metadata(self):
self.seekeable.update_line(3, json.dumps(self.metadata))
self.seekeable.update_line(4, json.dumps(self.manifest_metadata))

def __iter__(self):
return ManifestIterator(self)
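With this refactoring, Manifest.session_id is a (numeric_id, full_id) tuple produced by create_new_session_id() at construction time; the session bookkeeping is only written back by _update_session_info() inside close() when records were actually received, and Tub.write_record() in the next diff stores the string half of the tuple. A small illustration of the id scheme, assuming the last stored id was 2 and the date is 2023-12-20:

    import time

    last_id = 2                                          # manifest_metadata['sessions']['last_id']
    new_id = last_id + 1
    new_full_id = f"{time.strftime('%y-%m-%d')}_{new_id}"
    print((new_id, new_full_id))                         # e.g. (3, '23-12-20_3')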
3 changes: 1 addition & 2 deletions donkeycar/parts/tub_v2.py
@@ -78,8 +78,7 @@ def write_record(self, record=None):
# Private properties
contents['_timestamp_ms'] = int(round(time.time() * 1000))
contents['_index'] = self.manifest.current_index
contents['_session_id'] = self.manifest.session_id

contents['_session_id'] = self.manifest.session_id[1]
self.manifest.write_record(contents)

def delete_records(self, record_indexes):
4 changes: 4 additions & 0 deletions donkeycar/templates/cfg_complete.py
@@ -760,3 +760,7 @@
# FPS counter
SHOW_FPS = False
FPS_DEBUG_INTERVAL = 10 # the interval in seconds for printing the frequency info into the shell

# PI connection
PI_USERNAME = "pi"
PI_HOSTNAME = "donkeypi.local"
22 changes: 11 additions & 11 deletions donkeycar/tests/test_torch.py
@@ -2,14 +2,7 @@
import tarfile
import os
import platform
from collections import defaultdict, namedtuple

import torch
import pytorch_lightning as pl
from donkeycar.parts.pytorch.torch_train import train
from donkeycar.parts.pytorch.torch_data import TorchTubDataModule
from donkeycar.parts.pytorch.torch_utils import get_model_by_type

from collections import namedtuple
from donkeycar.config import Config

Data = namedtuple('Data', ['type', 'name', 'convergence', 'pretrained'])
@@ -68,6 +61,8 @@ def test_train(config: Config, car_dir: str, data: Data) -> None:
:param data: test case data
:return: None
"""
from donkeycar.parts.pytorch.torch_train import train

def pilot_path(name):
pilot_name = f'pilot_{name}.ckpt'
return os.path.join(car_dir, 'models', pilot_name)
@@ -95,6 +90,11 @@ def test_training_pipeline(config: Config, model_type: str, car_dir: str) \
:param tub_dir: tub directory (car_dir/tub)
:return: None
"""
import torch
import pytorch_lightning as pl
from donkeycar.parts.pytorch.torch_data import TorchTubDataModule
from donkeycar.parts.pytorch.torch_utils import get_model_by_type

model = get_model_by_type(
model_type, config, checkpoint_path=None)

@@ -120,11 +120,11 @@ def test_training_pipeline(config: Config, model_type: str, car_dir: str) \
x, y = batch
# In order to use a model pre-trained on ImageNet, the image
# will be re-sized to 3x224x224 regardless of what the user chooses.
assert(x.shape == (config.BATCH_SIZE, 3, 224, 224))
assert(y.shape == (config.BATCH_SIZE, 2))
assert x.shape == (config.BATCH_SIZE, 3, 224, 224), "shape mismatch"
assert y.shape == (config.BATCH_SIZE, 2), "shape mismatch"
break

# Check inference
val_x, val_y = next(iter(data_module.val_dataloader()))
output = model(val_x)
assert(output.shape == (config.BATCH_SIZE, 2))
assert output.shape == (config.BATCH_SIZE, 2), "shape mismatch"
10 changes: 0 additions & 10 deletions install/README.md

This file was deleted.
