Skip to content

Commit d1e2800

Browse files
update dependency versions
1 parent cb8f449 commit d1e2800

File tree

6 files changed

+18
-18
lines changed

6 files changed

+18
-18
lines changed

daart/callbacks.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ def on_epoch_end(self, data_generator, model, trainer, **kwargs):
147147
batch[np.sum(batch, axis=1) == 0, 0] = 1
148148
# turn into a one-hot vector
149149
batch = np.argmax(batch, axis=1)
150-
pseudo_labels_data.append(batch.astype(np.int))
150+
pseudo_labels_data.append(batch.astype(int))
151151
pseudo_labels.append(pseudo_labels_data)
152152

153153
# total_new_pseudos = \
@@ -210,7 +210,7 @@ def on_epoch_end(self, data_generator, model, trainer, **kwargs):
210210
new_batch[np.sum(new_batch, axis=1) == 0, 0] = 1
211211
# turn into a one-hot vector
212212
new_batch = np.argmax(new_batch, axis=1)
213-
pseudo_labels_data.append(new_batch.astype(np.int))
213+
pseudo_labels_data.append(new_batch.astype(int))
214214
pseudo_labels.append(pseudo_labels_data)
215215

216216
# update the data generator with the new pseudo-labels

daart/data.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -398,7 +398,7 @@ def load_data(self, sequence_length: int, input_type: str) -> None:
398398
# if no path given, assume same size as markers and set all to background
399399
if 'markers' in self.data.keys():
400400
data_curr = np.zeros(
401-
(len(self.data['markers']) * sequence_length,), dtype=np.int)
401+
(len(self.data['markers']) * sequence_length,), dtype=int)
402402
else:
403403
raise FileNotFoundError(
404404
'Could not load "labels_strong" from None file without markers')
@@ -636,12 +636,12 @@ def count_class_examples(self) -> np.array:
636636

637637
assert 'labels_strong' in self.signals[0], 'Cannot count examples without hand labels'
638638

639-
totals = np.zeros(len(self.label_names), dtype=np.int)
639+
totals = np.zeros(len(self.label_names), dtype=int)
640640
for dataset in self.datasets:
641641
pad = dataset.sequence_pad
642642
for b, batch in enumerate(dataset.data['labels_strong']):
643643
# log number of examples for batch
644-
counts = np.bincount(batch[pad:-pad].astype('int'))
644+
counts = np.bincount(batch[pad:-pad].astype(int))
645645
if len(counts) == len(totals):
646646
totals += counts
647647
else:
@@ -874,7 +874,7 @@ def load_label_csv(filepath: str) -> tuple:
874874
875875
"""
876876
labels = np.genfromtxt(
877-
filepath, delimiter=',', dtype=np.int, encoding=None, skip_header=1)[:, 1:]
877+
filepath, delimiter=',', dtype=int, encoding=None, skip_header=1)[:, 1:]
878878
label_names = list(
879879
np.genfromtxt(filepath, delimiter=',', dtype=None, encoding=None, max_rows=1)[1:])
880880
return labels, label_names

daart/train.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ def update_metrics(
8181
self,
8282
dtype: str,
8383
loss_dict: dict,
84-
dataset: Union[int, np.int64, list, None] = None
84+
dataset: Union[int, int, list, None] = None
8585
) -> None:
8686
"""Update metrics for a specific dtype/dataset.
8787
@@ -123,12 +123,12 @@ def update_metrics(
123123
def create_metric_row(
124124
self,
125125
dtype: str,
126-
epoch: Union[int, np.int64],
127-
batch: Union[int, np.int64],
128-
dataset: Union[int, np.int64],
129-
trial: Union[int, np.int64, None],
130-
best_epoch: Optional[Union[int, np.int64]] = None,
131-
by_dataset: bool = False
126+
epoch: Union[int, int],
127+
batch: Union[int, int],
128+
dataset: Union[int, int],
129+
trial: Union[int, int, None],
130+
best_epoch: Optional[Union[int, int]] = None,
131+
by_dataset: bool = False,
132132
) -> dict:
133133
"""Export metrics and other data (e.g. epoch) for logging train progress.
134134

docs/source/installation.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ Then, create a conda environment:
2929

3030
.. code-block:: console
3131
32-
conda create --name daart python=3.6
32+
conda create --name daart python=3.10
3333
3434
Activate the new environment:
3535

examples/fit_models.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,7 @@
210210
"source": [
211211
"# load hand labels\n",
212212
"from numpy import genfromtxt\n",
213-
"labels = genfromtxt(hand_labels_file, delimiter=',', dtype=np.int, encoding=None)\n",
213+
"labels = genfromtxt(hand_labels_file, delimiter=',', dtype=int, encoding=None)\n",
214214
"labels = labels[1:, 1:] # get rid of headers, etc.\n",
215215
"states = np.argmax(labels, axis=1)\n",
216216
"\n",

setup.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from distutils.core import setup
22

33

4-
VERSION = '1.0.2'
4+
VERSION = '1.1.0'
55

66
# add the README.md file to the long_description
77
with open('README.md', 'r') as fh:
@@ -12,7 +12,7 @@
1212
'jupyter',
1313
'matplotlib',
1414
'numpy',
15-
'opencv-python',
15+
'opencv-python-headless',
1616
'pandas',
1717
'pytest',
1818
'pyyaml',
@@ -21,7 +21,7 @@
2121
'seaborn',
2222
'tables',
2323
'test-tube',
24-
'torch==1.8.0',
24+
'torch',
2525
'tqdm',
2626
'typeguard',
2727
]

0 commit comments

Comments
 (0)