From 780908647c0f150997a3bed11ae7e2b92945562a Mon Sep 17 00:00:00 2001
From: jrzaurin
Date: Fri, 7 Aug 2020 17:47:15 +0100
Subject: [PATCH 01/12] #18 Implementation of the Linear model that is the wide
component via an Embedding layer. This helps optimize speed and memory usage.
Adapted all the submodules accordingly
---
examples/adult_script.py | 6 +-
examples/airbnb_script.py | 5 +-
examples/airbnb_script_multiclass.py | 8 +-
pytorch_widedeep/models/_wd_dataset.py | 20 ++--
pytorch_widedeep/models/wide.py | 47 +++++---
.../preprocessing/_preprocessors.py | 106 +++++++++++-------
pytorch_widedeep/wdtypes.py | 1 -
tests/test_data_utils/test_du_wide.py | 4 +-
8 files changed, 123 insertions(+), 74 deletions(-)
diff --git a/examples/adult_script.py b/examples/adult_script.py
index 840acae4..cdcb7750 100644
--- a/examples/adult_script.py
+++ b/examples/adult_script.py
@@ -53,7 +53,7 @@
)
X_deep = prepare_deep.fit_transform(df)
- wide = Wide(wide_dim=X_wide.shape[1], pred_dim=1)
+ wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)
deepdense = DeepDense(
hidden_layers=[64, 32],
dropout=[0.2, 0.2],
@@ -63,7 +63,7 @@
)
model = WideDeep(wide=wide, deepdense=deepdense)
- wide_opt = torch.optim.Adam(model.wide.parameters())
+ wide_opt = torch.optim.Adam(model.wide.parameters(), lr=0.01)
deep_opt = RAdam(model.deepdense.parameters())
wide_sch = torch.optim.lr_scheduler.StepLR(wide_opt, step_size=3)
deep_sch = torch.optim.lr_scheduler.StepLR(deep_opt, step_size=5)
@@ -92,6 +92,6 @@
X_deep=X_deep,
target=target,
n_epochs=10,
- batch_size=256,
+ batch_size=64,
val_split=0.2,
)
diff --git a/examples/airbnb_script.py b/examples/airbnb_script.py
index 36af56d7..f62b960c 100644
--- a/examples/airbnb_script.py
+++ b/examples/airbnb_script.py
@@ -1,3 +1,4 @@
+import numpy as np
import torch
import pandas as pd
from torchvision.transforms import ToTensor, Normalize
@@ -64,7 +65,7 @@
image_processor = ImagePreprocessor(img_col=img_col, img_path=img_path)
X_images = image_processor.fit_transform(df)
- wide = Wide(wide_dim=X_wide.shape[1], pred_dim=1)
+ wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)
deepdense = DeepDense(
hidden_layers=[64, 32],
dropout=[0.2, 0.2],
@@ -85,7 +86,7 @@
wide=wide, deepdense=deepdense, deeptext=deeptext, deepimage=deepimage
)
- wide_opt = torch.optim.Adam(model.wide.parameters())
+ wide_opt = torch.optim.Adam(model.wide.parameters(), lr=0.01)
deep_opt = torch.optim.Adam(model.deepdense.parameters())
text_opt = RAdam(model.deeptext.parameters())
img_opt = RAdam(model.deepimage.parameters())
diff --git a/examples/airbnb_script_multiclass.py b/examples/airbnb_script_multiclass.py
index 838d27ce..a0cda5b0 100644
--- a/examples/airbnb_script_multiclass.py
+++ b/examples/airbnb_script_multiclass.py
@@ -39,7 +39,8 @@
embed_cols=cat_embed_cols, continuous_cols=continuous_cols
)
X_deep = prepare_deep.fit_transform(df)
- wide = Wide(wide_dim=X_wide.shape[1], pred_dim=3)
+
+ wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=3)
deepdense = DeepDense(
hidden_layers=[64, 32],
dropout=[0.2, 0.2],
@@ -48,7 +49,10 @@
continuous_cols=continuous_cols,
)
model = WideDeep(wide=wide, deepdense=deepdense, pred_dim=3)
- model.compile(method="multiclass", metrics=[Accuracy, F1Score])
+ optimizer = torch.optim.Adam(model.parameters(), lr=0.03)
+ model.compile(
+ method="multiclass", metrics=[Accuracy, F1Score], optimizers=optimizer
+ )
model.fit(
X_wide=X_wide,
diff --git a/pytorch_widedeep/models/_wd_dataset.py b/pytorch_widedeep/models/_wd_dataset.py
index fafde273..aa5dc6e4 100644
--- a/pytorch_widedeep/models/_wd_dataset.py
+++ b/pytorch_widedeep/models/_wd_dataset.py
@@ -11,12 +11,8 @@ class WideDeepDataset(Dataset):
Parameters
----------
- X_wide: np.ndarray, scipy csr sparse matrix.
- wide input.Note that if a sparse matrix is passed to the
- WideDeepDataset class, the loading process will be notably slow since
- the transformation to a dense matrix is done on an index basis 'on the
- fly'. At the moment this is the best option given the current support
- offered for sparse tensors for pytorch.
+ X_wide: np.ndarray
+ wide input
X_deep: np.ndarray
deepdense input
X_text: np.ndarray
@@ -24,13 +20,14 @@ class WideDeepDataset(Dataset):
X_img: np.ndarray
deepimage input
target: np.ndarray
- transforms: MultipleTransforms() object (which is in itself a torchvision
- Compose). See in models/_multiple_transforms.py
+ target array
+ transforms: :obj:`MultipleTransforms`
+ torchvision Compose object. See models/_multiple_transforms.py
"""
def __init__(
self,
- X_wide: Union[np.ndarray, sparse_matrix],
+ X_wide: np.ndarray,
X_deep: np.ndarray,
target: Optional[np.ndarray] = None,
X_text: Optional[np.ndarray] = None,
@@ -53,10 +50,7 @@ def __init__(
def __getitem__(self, idx: int):
# X_wide and X_deep are assumed to be *always* present
- if isinstance(self.X_wide, sparse_matrix):
- X = Bunch(wide=np.array(self.X_wide[idx].todense()).squeeze())
- else:
- X = Bunch(wide=self.X_wide[idx])
+ X = Bunch(wide=self.X_wide[idx])
X.deepdense = self.X_deep[idx]
if self.X_text is not None:
X.deeptext = self.X_text[idx]
diff --git a/pytorch_widedeep/models/wide.py b/pytorch_widedeep/models/wide.py
index 10cc7906..eaf4c0f3 100644
--- a/pytorch_widedeep/models/wide.py
+++ b/pytorch_widedeep/models/wide.py
@@ -1,16 +1,24 @@
+import math
+
+import torch
from torch import nn
from ..wdtypes import *
class Wide(nn.Module):
- r"""Simple linear layer that will receive the one-hot encoded `'wide'`
- input and connect it to the output neuron(s).
+ r"""Wide component
+
+ Linear model implemented via an Embedding layer connected to the output
+ neuron(s).
Parameters
-----------
wide_dim: int
- size of the input tensor
+ size of the Embedding layer. `wide_dim` is the summation of all the
+ individual values for all the features that go through the wide
+ component. For example, if the wide component receives 2 features with
+ 5 individual values each, `wide_dim = 10`
pred_dim: int
size of the ouput tensor containing the predictions
@@ -23,21 +31,34 @@ class Wide(nn.Module):
--------
>>> import torch
>>> from pytorch_widedeep.models import Wide
- >>> X = torch.empty(4, 4).random_(2)
- >>> wide = Wide(wide_dim=X.size(0), pred_dim=1)
+ >>> X = torch.empty(4, 4).random_(6)
+ >>> wide = Wide(wide_dim=X.unique().size(0), pred_dim=1)
>>> wide(X)
- tensor([[-0.8841],
- [-0.8633],
- [-1.2713],
- [-0.4762]], grad_fn=)
+ tensor([[-0.1138],
+ [ 0.4603],
+ [ 1.0762],
+ [ 0.8160]], grad_fn=)
"""
def __init__(self, wide_dim: int, pred_dim: int = 1):
super(Wide, self).__init__()
- self.wide_linear = nn.Linear(wide_dim, pred_dim)
+ self.wide_linear = nn.Embedding(wide_dim + 1, pred_dim, padding_idx=0)
+ # (Sum(Embedding) + bias) is equivalent to (OneHotVector + Linear)
+ self.bias = nn.Parameter(torch.zeros(pred_dim))
+ self._reset_parameters()
+
+ def _reset_parameters(self) -> None:
+ r"""initialize Embedding and bias like nn.Linear. See `original
+ implementation
+ `_.
+ """
+ nn.init.kaiming_uniform_(self.wide_linear.weight, a=math.sqrt(5))
+ fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.wide_linear.weight)
+ bound = 1 / math.sqrt(fan_in)
+ nn.init.uniform_(self.bias, -bound, bound)
def forward(self, X: Tensor) -> Tensor: # type: ignore
- r"""Forward pass. Simply connecting the one-hot encoded input with the
- ouput neuron(s) """
- out = self.wide_linear(X.float())
+ r"""Forward pass. Simply connecting the Embedding layer with the output
+ neuron(s)"""
+ out = self.wide_linear(X.long()).sum(dim=1) + self.bias
return out
diff --git a/pytorch_widedeep/preprocessing/_preprocessors.py b/pytorch_widedeep/preprocessing/_preprocessors.py
index 0c04f79a..ec08c78a 100644
--- a/pytorch_widedeep/preprocessing/_preprocessors.py
+++ b/pytorch_widedeep/preprocessing/_preprocessors.py
@@ -42,6 +42,12 @@ def fit_transform(self, df: pd.DataFrame):
class WidePreprocessor(BasePreprocessor):
r"""Preprocessor to prepare the wide input dataset
+ This Preprocessor prepares the data for the wide, linear component. This
+ linear model is implemented via an Embedding layer that is connected to
+ the output neuron. ``WidePreprocessor`` simply numerically encodes all the
+ unique values of all categorical columns ``wide_cols + crossed_cols``. See
+ the Example below.
+
Parameters
----------
wide_cols: List[str]
@@ -50,16 +56,14 @@ class WidePreprocessor(BasePreprocessor):
crossed_cols: List[Tuple[str, str]]
List of Tuples with the name of the columns that will be `'crossed'`
and then one-hot encoded. e.g. [('education', 'occupation'), ...]
- already_dummies: List[str]
- List of columns that are already dummies/one-hot encoded, and
- therefore do not need to be processed
Attributes
----------
- one_hot_enc: :obj:`OneHotEncoder`
- an instance of :class:`sklearn.preprocessing.OneHotEncoder`
wide_crossed_cols: :obj:`List`
List with the names of all columns that will be one-hot encoded
+ feature_dict: :obj:`Dict`
+ Dictionary where the keys are the result of pasting `colname + '_' +
+ column value` and the values are the corresponding mapped integer.
Examples
--------
@@ -69,67 +73,93 @@ class WidePreprocessor(BasePreprocessor):
>>> wide_cols = ['color']
>>> crossed_cols = [('color', 'size')]
>>> wide_preprocessor = WidePreprocessor(wide_cols=wide_cols, crossed_cols=crossed_cols)
- >>> wide_preprocessor.fit_transform(df)
- array([[0., 0., 1., 0., 0., 1.],
- [1., 0., 0., 1., 0., 0.],
- [0., 1., 0., 0., 1., 0.]])
+ >>> X_wide = wide_preprocessor.fit_transform(df)
+ >>> X_wide
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+ >>> wide_preprocessor.feature_dict
+ {'color_r': 1,
+ 'color_b': 2,
+ 'color_g': 3,
+ 'color_size_r-s': 4,
+ 'color_size_b-n': 5,
+ 'color_size_g-l': 6}
+ >>> wide_preprocessor.inverse_transform(X_wide)
+ color color_size
+ 0 r r-s
+ 1 b b-n
+ 2 g g-l
"""
def __init__(
- self,
- wide_cols: List[str],
- crossed_cols=None,
- already_dummies: Optional[List[str]] = None,
- sparse=False,
- handle_unknown="ignore",
+ self, wide_cols: List[str], crossed_cols=None,
):
super(WidePreprocessor, self).__init__()
self.wide_cols = wide_cols
self.crossed_cols = crossed_cols
- self.already_dummies = already_dummies
- self.one_hot_enc = OneHotEncoder(sparse=sparse, handle_unknown=handle_unknown)
def fit(self, df: pd.DataFrame) -> BasePreprocessor:
"""Fits the Preprocessor and creates required attributes
"""
df_wide = self._prepare_wide(df)
self.wide_crossed_cols = df_wide.columns.tolist()
- if self.already_dummies:
- dummy_cols = [
- c for c in self.wide_crossed_cols if c not in self.already_dummies
- ]
- self.one_hot_enc.fit(df_wide[dummy_cols])
- else:
- self.one_hot_enc.fit(df_wide[self.wide_crossed_cols])
+ vocab = self._make_global_feature_list(df_wide[self.wide_crossed_cols])
+ # leave 0 as padding index
+ self.feature_dict = {v: i + 1 for i, v in enumerate(vocab)}
return self
- def transform(self, df: pd.DataFrame) -> Union[sparse_matrix, np.ndarray]:
- """Returns the processed dataframe as a one hot encoded dense or
- sparse matrix
+ def transform(self, df: pd.DataFrame) -> np.array:
+ r"""Returns the processed dataframe
"""
try:
- self.one_hot_enc.categories_
+ self.feature_dict
except:
raise NotFittedError(
"This WidePreprocessor instance is not fitted yet. "
"Call 'fit' with appropriate arguments before using this estimator."
)
df_wide = self._prepare_wide(df)
- if self.already_dummies:
- X_oh_1 = df_wide[self.already_dummies].values
- dummy_cols = [
- c for c in self.wide_crossed_cols if c not in self.already_dummies
- ]
- X_oh_2 = self.one_hot_enc.transform(df_wide[dummy_cols])
- return np.hstack((X_oh_1, X_oh_2))
- else:
- return self.one_hot_enc.transform(df_wide[self.wide_crossed_cols])
+ encoded = np.zeros([len(df_wide), len(self.wide_crossed_cols)], dtype=np.long)
+ for col_i, col in enumerate(self.wide_crossed_cols):
+ encoded[:, col_i] = df_wide[col].apply(
+ lambda x: self.feature_dict[col + "_" + str(x)]
+ if col + "_" + str(x) in self.feature_dict
+ else 0
+ )
+ return encoded.astype("int64")
+
+ def inverse_transform(self, encoded: np.ndarray) -> pd.DataFrame:
+ r"""Takes as input the output from the ``transform`` method and it will
+ return the original values.
- def fit_transform(self, df: pd.DataFrame) -> Union[sparse_matrix, np.ndarray]:
+ Parameters
+ ----------
+ encoded: np.ndarray
+ array with the output of the ``transform`` method
+ """
+ decoded = pd.DataFrame(encoded, columns=self.wide_crossed_cols)
+ inverse_dict = {k: v for v, k in self.feature_dict.items()}
+ decoded = decoded.applymap(lambda x: inverse_dict[x])
+ for col in decoded.columns:
+ rm_str = "".join([col, "_"])
+ decoded[col] = decoded[col].apply(lambda x: x.replace(rm_str, ""))
+ return decoded
+
+ def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
"""Combines ``fit`` and ``transform``
"""
return self.fit(df).transform(df)
+ def _make_global_feature_list(self, df: pd.DataFrame) -> List:
+ vocab = []
+ for column in df.columns:
+ vocab += self._make_column_feature_list(df[column])
+ return vocab
+
+ def _make_column_feature_list(self, s: pd.Series) -> List:
+ return [s.name + "_" + str(x) for x in s.unique()]
+
def _cross_cols(self, df: pd.DataFrame):
df_cc = df.copy()
crossed_colnames = []
diff --git a/pytorch_widedeep/wdtypes.py b/pytorch_widedeep/wdtypes.py
index 232e7d83..ed46ddc3 100644
--- a/pytorch_widedeep/wdtypes.py
+++ b/pytorch_widedeep/wdtypes.py
@@ -18,7 +18,6 @@
from torch import Tensor
from torch.nn import Module
-from scipy.sparse.csr import csr_matrix as sparse_matrix
from torch.optim.optimizer import Optimizer
from torchvision.transforms import (
Pad,
diff --git a/tests/test_data_utils/test_du_wide.py b/tests/test_data_utils/test_du_wide.py
index ec250eb3..ed02d8ae 100644
--- a/tests/test_data_utils/test_du_wide.py
+++ b/tests/test_data_utils/test_du_wide.py
@@ -39,7 +39,7 @@ def create_test_dataset(input_type, with_crossed=True):
)
def test_preprocessor1(input_df, expected_shape):
wide_mtx = preprocessor1.fit_transform(input_df)
- assert wide_mtx.shape[1] == expected_shape
+ assert np.unique(wide_mtx).shape[0] == expected_shape
###############################################################################
@@ -63,4 +63,4 @@ def test_preprocessor1(input_df, expected_shape):
)
def test_prepare_wide_wo_crossed(input_df, expected_shape):
wide_mtx = preprocessor2.fit_transform(input_df)
- assert wide_mtx.shape[1] == expected_shape
+ assert np.unique(wide_mtx).shape[0] == expected_shape
From 3ef31319476ac24c86f62c665ecc086646b268f3 Mon Sep 17 00:00:00 2001
From: jrzaurin
Date: Fri, 7 Aug 2020 19:29:11 +0100
Subject: [PATCH 02/12] Changed loss value printed on the screen for
regression. From RMSE to MSE. Adjusted documentation. Also changed unit test
for callbacks to the new Linear-Embedding implementation, which implied
increasing delta for early stopping
---
README.md | 4 ++++
pytorch_widedeep/callbacks.py | 2 +-
pytorch_widedeep/models/wide_deep.py | 14 +++++++++++---
tests/test_model_functioning/test_callbacks.py | 2 +-
4 files changed, 17 insertions(+), 5 deletions(-)
diff --git a/README.md b/README.md
index 4c6f1d7e..f97b128f 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,10 @@
[![Build Status](https://travis-ci.org/jrzaurin/pytorch-widedeep.svg?branch=master)](https://travis-ci.org/jrzaurin/pytorch-widedeep)
[![Documentation Status](https://readthedocs.org/projects/pytorch-widedeep/badge/?version=latest)](https://pytorch-widedeep.readthedocs.io/en/latest/?badge=latest)
+[![Python 3.6 3.7
+3.8](https://img.shields.io/badge/python-3.6%20%203.7%203.8-blue.svg
+)](https://www.python.org/)
+[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://github.com/jrzaurin/pytorch-widedeep/graphs/commit-activity)
# pytorch-widedeep
diff --git a/pytorch_widedeep/callbacks.py b/pytorch_widedeep/callbacks.py
index 937c12dd..19b9cd86 100644
--- a/pytorch_widedeep/callbacks.py
+++ b/pytorch_widedeep/callbacks.py
@@ -115,7 +115,7 @@ class History(Callback):
r"""Callback that records events into a :obj:`History` object.
This callback runs by default within :obj:`WideDeep`. See
- :class:`pytorch_widedeep.models.wide_deep.WideDeep`. Documentation ss
+ :class:`pytorch_widedeep.models.wide_deep.WideDeep`. Documentation is
included here for completion.
"""
diff --git a/pytorch_widedeep/models/wide_deep.py b/pytorch_widedeep/models/wide_deep.py
index 0430af08..06b4e396 100644
--- a/pytorch_widedeep/models/wide_deep.py
+++ b/pytorch_widedeep/models/wide_deep.py
@@ -232,7 +232,15 @@ def compile(
Parameters
----------
method: str
- One of `regression`, `binary` or `multiclass`
+ One of `regression`, `binary` or `multiclass`. The default when
+ performing a `regression`, a `binary` classification or a
+ `multiclass` classification is the `mean squared error
+ `_
+ (MSE), `Binary Cross Entropy
+ `_
+ (BCE) and `Cross Entropy
+ `_
+ (CE) respectively.
optimizers: Union[Optimizer, Dict[str, Optimizer]], Optional, Default=AdamW
- An instance of ``pytorch``'s ``Optimizer`` object (e.g. :obj:`torch.optim.Adam()`) or
- a dictionary where there keys are the model components (i.e.
@@ -594,7 +602,7 @@ def fit(
loss=train_loss,
)
else:
- t.set_postfix(loss=np.sqrt(train_loss))
+ t.set_postfix(loss=train_loss)
if self.lr_scheduler:
self._lr_scheduler_step(step_location="on_batch_end")
self.callback_container.on_batch_end(batch=batch_idx)
@@ -626,7 +634,7 @@ def fit(
loss=val_loss,
)
else:
- v.set_postfix(loss=np.sqrt(val_loss))
+ v.set_postfix(loss=val_loss)
epoch_logs["val_loss"] = val_loss
if score is not None:
for k, v in score.items():
diff --git a/tests/test_model_functioning/test_callbacks.py b/tests/test_model_functioning/test_callbacks.py
index 9c866815..f872f426 100644
--- a/tests/test_model_functioning/test_callbacks.py
+++ b/tests/test_model_functioning/test_callbacks.py
@@ -105,7 +105,7 @@ def test_early_stop():
method="binary",
callbacks=[
EarlyStopping(
- min_delta=0.1, patience=3, restore_best_weights=True, verbose=1
+ min_delta=5., patience=3, restore_best_weights=True, verbose=1
)
],
verbose=1,
From cd105d2da35ca7d5f82a653b0bb10b13262ca12e Mon Sep 17 00:00:00 2001
From: jrzaurin
Date: Fri, 7 Aug 2020 19:29:55 +0100
Subject: [PATCH 03/12] Fixed minor style conflict
---
tests/test_model_functioning/test_callbacks.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/test_model_functioning/test_callbacks.py b/tests/test_model_functioning/test_callbacks.py
index f872f426..403efaee 100644
--- a/tests/test_model_functioning/test_callbacks.py
+++ b/tests/test_model_functioning/test_callbacks.py
@@ -105,7 +105,7 @@ def test_early_stop():
method="binary",
callbacks=[
EarlyStopping(
- min_delta=5., patience=3, restore_best_weights=True, verbose=1
+ min_delta=5.0, patience=3, restore_best_weights=True, verbose=1
)
],
verbose=1,
From 35fd1037257b1182d2aa26a25e6c9081c6074324 Mon Sep 17 00:00:00 2001
From: jrzaurin
Date: Fri, 7 Aug 2020 19:39:48 +0100
Subject: [PATCH 04/12] made it consistent with README
---
pypi_README.md | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/pypi_README.md b/pypi_README.md
index a93b7b84..1b493004 100644
--- a/pypi_README.md
+++ b/pypi_README.md
@@ -1,5 +1,9 @@
[![Build Status](https://travis-ci.org/jrzaurin/pytorch-widedeep.svg?branch=master)](https://travis-ci.org/jrzaurin/pytorch-widedeep)
[![Documentation Status](https://readthedocs.org/projects/pytorch-widedeep/badge/?version=latest)](https://pytorch-widedeep.readthedocs.io/en/latest/?badge=latest)
+[![Python 3.6 3.7
+3.8](https://img.shields.io/badge/python-3.6%20%203.7%203.8-blue.svg
+)](https://www.python.org/)
+[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://github.com/jrzaurin/pytorch-widedeep/graphs/commit-activity)
# pytorch-widedeep
From 6d9205dfdb339bc79949e50ab641c48b3f28208b Mon Sep 17 00:00:00 2001
From: jrzaurin
Date: Fri, 7 Aug 2020 21:42:38 +0100
Subject: [PATCH 05/12] All unit test adapted to new Wide implementation
---
tests/test_model_functioning/test_callbacks.py | 8 ++++----
tests/test_model_functioning/test_data_inputs.py | 4 ++--
tests/test_model_functioning/test_fit_methods.py | 4 ++--
tests/test_model_functioning/test_focal_loss.py | 4 ++--
tests/test_model_functioning/test_initializers.py | 4 ++--
tests/test_warm_up/test_warm_up_routines.py | 4 ++--
6 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/tests/test_model_functioning/test_callbacks.py b/tests/test_model_functioning/test_callbacks.py
index 403efaee..6dcb6ff7 100644
--- a/tests/test_model_functioning/test_callbacks.py
+++ b/tests/test_model_functioning/test_callbacks.py
@@ -16,7 +16,7 @@
)
# Wide array
-X_wide = np.random.choice(2, (100, 100), p=[0.8, 0.2])
+X_wide = np.random.choice(50, (100, 10))
# Deep Array
colnames = list(string.ascii_lowercase)[:10]
@@ -38,7 +38,7 @@
###############################################################################
# Test that history saves the information adequately
###############################################################################
-wide = Wide(100, 1)
+wide = Wide(np.unique(X_wide).shape[0], 1)
deepdense = DeepDense(
hidden_layers=[32, 16],
dropout=[0.5, 0.5],
@@ -92,7 +92,7 @@ def test_history_callback(optimizers, schedulers, len_loss_output, len_lr_output
# Test that EarlyStopping stops as expected
###############################################################################
def test_early_stop():
- wide = Wide(100, 1)
+ wide = Wide(np.unique(X_wide).shape[0], 1)
deepdense = DeepDense(
hidden_layers=[32, 16],
dropout=[0.5, 0.5],
@@ -122,7 +122,7 @@ def test_early_stop():
"save_best_only, max_save, n_files", [(True, 2, 2), (False, 2, 2), (False, 0, 5)]
)
def test_model_checkpoint(save_best_only, max_save, n_files):
- wide = Wide(100, 1)
+ wide = Wide(np.unique(X_wide).shape[0], 1)
deepdense = DeepDense(
hidden_layers=[32, 16],
dropout=[0.5, 0.5],
diff --git a/tests/test_model_functioning/test_data_inputs.py b/tests/test_model_functioning/test_data_inputs.py
index 7189c2a0..1819fb8b 100644
--- a/tests/test_model_functioning/test_data_inputs.py
+++ b/tests/test_model_functioning/test_data_inputs.py
@@ -14,7 +14,7 @@
)
# Wide array
-X_wide = np.random.choice(2, (100, 100), p=[0.8, 0.2])
+X_wide = np.random.choice(50, (100, 100))
# Deep Array
colnames = list(string.ascii_lowercase)[:10]
@@ -50,7 +50,7 @@
) = train_test_split(X_wide, X_deep, X_text, X_img, target)
# build model components
-wide = Wide(100, 1)
+wide = Wide(np.unique(X_wide).shape[0], 1)
deepdense = DeepDense(
hidden_layers=[32, 16],
dropout=[0.5, 0.5],
diff --git a/tests/test_model_functioning/test_fit_methods.py b/tests/test_model_functioning/test_fit_methods.py
index d41042c1..e3ec4134 100644
--- a/tests/test_model_functioning/test_fit_methods.py
+++ b/tests/test_model_functioning/test_fit_methods.py
@@ -6,7 +6,7 @@
from pytorch_widedeep.models import Wide, WideDeep, DeepDense
# Wide array
-X_wide = np.random.choice(2, (100, 100), p=[0.8, 0.2])
+X_wide = np.random.choice(50, (100, 100))
# Deep Array
colnames = list(string.ascii_lowercase)[:10]
@@ -51,7 +51,7 @@ def test_fit_methods(
pred_dim,
probs_dim,
):
- wide = Wide(100, pred_dim)
+ wide = Wide(np.unique(X_wide).shape[0], pred_dim)
deepdense = DeepDense(
hidden_layers=[32, 16],
dropout=[0.5, 0.5],
diff --git a/tests/test_model_functioning/test_focal_loss.py b/tests/test_model_functioning/test_focal_loss.py
index 2009e67a..82bb33d5 100644
--- a/tests/test_model_functioning/test_focal_loss.py
+++ b/tests/test_model_functioning/test_focal_loss.py
@@ -6,7 +6,7 @@
from pytorch_widedeep.models import Wide, WideDeep, DeepDense
# Wide array
-X_wide = np.random.choice(2, (100, 100), p=[0.8, 0.2])
+X_wide = np.random.choice(50, (100, 10))
# Deep Array
colnames = list(string.ascii_lowercase)[:10]
@@ -32,7 +32,7 @@
],
)
def test_focal_loss(X_wide, X_deep, target, method, pred_dim, probs_dim):
- wide = Wide(100, pred_dim)
+ wide = Wide(np.unique(X_wide).shape[0], pred_dim)
deepdense = DeepDense(
hidden_layers=[32, 16],
dropout=[0.5, 0.5],
diff --git a/tests/test_model_functioning/test_initializers.py b/tests/test_model_functioning/test_initializers.py
index e6129544..d97a6d79 100644
--- a/tests/test_model_functioning/test_initializers.py
+++ b/tests/test_model_functioning/test_initializers.py
@@ -19,7 +19,7 @@
)
# Wide array
-X_wide = np.random.choice(2, (100, 100), p=[0.8, 0.2])
+X_wide = np.random.choice(50, (100, 100))
# Deep Array
colnames = list(string.ascii_lowercase)[:10]
@@ -58,7 +58,7 @@
def test_initializers_1():
- wide = Wide(100, 1)
+ wide = Wide(np.unique(X_wide).shape[0], 1)
deepdense = DeepDense(
hidden_layers=[32, 16],
dropout=[0.5, 0.5],
diff --git a/tests/test_warm_up/test_warm_up_routines.py b/tests/test_warm_up/test_warm_up_routines.py
index 8fc2164e..4cd2dbdf 100644
--- a/tests/test_warm_up/test_warm_up_routines.py
+++ b/tests/test_warm_up/test_warm_up_routines.py
@@ -87,7 +87,7 @@ def loss_fn(y_pred, y_true):
target = torch.empty(100, 1).random_(0, 2)
# wide
-X_wide = torch.empty(100, 10).random_(0, 2)
+X_wide = torch.empty(100, 4).random_(1, 20)
# deep
colnames = list(string.ascii_lowercase)[:10]
@@ -107,7 +107,7 @@ def loss_fn(y_pred, y_true):
# Define the model components
# wide
-wide = Wide(10, 1)
+wide = Wide(X_wide.unique().size(0), 1)
if use_cuda:
wide.cuda()
From d2886ea20116745c63d09e32259fb44018ce1f35 Mon Sep 17 00:00:00 2001
From: jrzaurin
Date: Fri, 7 Aug 2020 23:59:32 +0100
Subject: [PATCH 06/12] Adapted notebooks to new Wide implementation
---
examples/01_Preprocessors_and_utils.ipynb | 94 ++++++--
examples/02_Model_Components.ipynb | 213 +++++++++++++++-
..._Binary_Classification_with_Defaults.ipynb | 42 ++--
...ry_Classification_Varying_Parameters.ipynb | 227 +++++++++---------
.../05_Regression_with_Images_and_Text.ipynb | 60 ++---
examples/06_WarmUp_Model_Components.ipynb | 72 +++---
pytorch_widedeep/models/_warmup.py | 2 +-
7 files changed, 482 insertions(+), 228 deletions(-)
diff --git a/examples/01_Preprocessors_and_utils.ipynb b/examples/01_Preprocessors_and_utils.ipynb
index b0fbdb48..8457bb6c 100644
--- a/examples/01_Preprocessors_and_utils.ipynb
+++ b/examples/01_Preprocessors_and_utils.ipynb
@@ -50,7 +50,9 @@
"source": [
"## 1. WidePreprocessor\n",
"\n",
- "This class simply takes a dataset and one-hot encodes it, with a few additional rings and bells. "
+ "The Wide component of the model is a linear model that in principle, could be implemented as a linear layer receiving the result of one-hot encoding categorical columns. However, this is not memory efficient. Therefore, we implement a linear layer as an Embedding layer plus a bias. I will explain in a bit more detail later. \n",
+ "\n",
+ "With that in mind, `WidePreprocessor` simply encodes the categories numerically so that they are the indexes of the lookup table that is an Embedding layer."
]
},
{
@@ -284,13 +286,13 @@
{
"data": {
"text/plain": [
- "array([[0., 1., 0., ..., 0., 0., 0.],\n",
- " [0., 0., 0., ..., 0., 0., 0.],\n",
- " [0., 0., 0., ..., 0., 0., 0.],\n",
+ "array([[ 1, 17, 23, ..., 89, 91, 316],\n",
+ " [ 2, 18, 23, ..., 89, 92, 317],\n",
+ " [ 3, 18, 24, ..., 89, 93, 318],\n",
" ...,\n",
- " [0., 0., 0., ..., 0., 0., 0.],\n",
- " [0., 0., 0., ..., 0., 0., 0.],\n",
- " [0., 0., 0., ..., 0., 0., 0.]])"
+ " [ 2, 20, 23, ..., 90, 103, 323],\n",
+ " [ 2, 17, 23, ..., 89, 103, 323],\n",
+ " [ 2, 21, 29, ..., 90, 115, 324]])"
]
},
"execution_count": 6,
@@ -306,45 +308,103 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "or sparse"
+ "Let's take from example the first entry"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([ 1, 17, 23, 32, 47, 89, 91, 316])"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
- "wide_preprocessor_sparse = WidePreprocessor(wide_cols=wide_cols, crossed_cols=crossed_cols, sparse=True)\n",
- "X_wide_sparse = wide_preprocessor_sparse.fit_transform(df)"
+ "X_wide[0]"
]
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " education | \n",
+ " relationship | \n",
+ " workclass | \n",
+ " occupation | \n",
+ " native-country | \n",
+ " gender | \n",
+ " education_occupation | \n",
+ " native-country_occupation | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 11th | \n",
+ " Own-child | \n",
+ " Private | \n",
+ " Machine-op-inspct | \n",
+ " United-States | \n",
+ " Male | \n",
+ " 11th-Machine-op-inspct | \n",
+ " United-States-Machine-op-inspct | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
"text/plain": [
- "<48842x796 sparse matrix of type ''\n",
- "\twith 390736 stored elements in Compressed Sparse Row format>"
+ " education relationship workclass occupation native-country gender \\\n",
+ "0 11th Own-child Private Machine-op-inspct United-States Male \n",
+ "\n",
+ " education_occupation native-country_occupation \n",
+ "0 11th-Machine-op-inspct United-States-Machine-op-inspct "
]
},
- "execution_count": 8,
+ "execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
- "X_wide_sparse"
+ "wide_preprocessor.inverse_transform(X_wide[:1])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "Note that while this will save memory on disk, due to the batch generation process for `WideDeep` the running time will be notably slow. See [here](https://github.com/jrzaurin/pytorch-widedeep/blob/bfbe6e5d2309857db0dcc5cf3282dfa60504aa52/pytorch_widedeep/models/_wd_dataset.py#L47) for more details."
+ "As we can see, `wide_preprocessor` numerically encodes the `wide_cols` and the `crossed_cols`, which can be recovered using the method `inverse_transform`."
]
},
{
diff --git a/examples/02_Model_Components.ipynb b/examples/02_Model_Components.ipynb
index 8e4fe64a..d5ea250d 100644
--- a/examples/02_Model_Components.ipynb
+++ b/examples/02_Model_Components.ipynb
@@ -23,7 +23,11 @@
"source": [
"### 1. Wide\n",
"\n",
- "The wide component is simply a Linear layer \"plugged\" into the output neuron(s)"
+ "The wide component is a Linear layer \"plugged\" into the output neuron(s)\n",
+ "\n",
+ "The only particularity of our implementation is that we have implemented the linear layer via an Embedding layer plus a bias. While the implementations are equivalent, the latter is faster and far more memory efficient, since we do not need to one hot encode the categorical features. \n",
+ "\n",
+ "Let's assume we have the following dataset:"
]
},
{
@@ -31,13 +35,199 @@
"execution_count": 1,
"metadata": {},
"outputs": [],
+ "source": [
+ "import torch\n",
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "\n",
+ "from torch import nn"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " color | \n",
+ " size | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " r | \n",
+ " s | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " b | \n",
+ " n | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " g | \n",
+ " l | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " color size\n",
+ "0 r s\n",
+ "1 b n\n",
+ "2 g l"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df = pd.DataFrame({'color': ['r', 'b', 'g'], 'size': ['s', 'n', 'l']})\n",
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "one hot encoded, the first observation would be"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "obs_0_oh = (np.array([1., 0., 0., 1., 0., 0.])).astype('float32')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "if we simply numerically encode (label encode or `le`) the values, starting from 1 (we will save 0 for padding, i.e. unseen values)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "obs_0_le = (np.array([0, 3])).astype('int64')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "now, let's see if the two implementations are equivalent"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# we have 6 different values. Let's assume we are performing a regression, so pred_dim = 1\n",
+ "lin = nn.Linear(6, 1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "emb = nn.Embedding(6, 1) \n",
+ "emb.weight = nn.Parameter(lin.weight.reshape_as(emb.weight))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "tensor([-0.9452], grad_fn=)"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "lin(torch.tensor(obs_0_oh))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "tensor([-0.9452], grad_fn=)"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "emb(torch.tensor(obs_0_le)).sum() + lin.bias"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "And this is precisely how the linear component `Wide` is implemented"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
"source": [
"from pytorch_widedeep.models import Wide"
]
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 92,
"metadata": {},
"outputs": [],
"source": [
@@ -46,27 +236,34 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Wide(\n",
- " (wide_linear): Linear(in_features=100, out_features=1, bias=True)\n",
+ " (wide_linear): Embedding(11, 1, padding_idx=0)\n",
")"
]
},
- "execution_count": 2,
+ "execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
- "wide = Wide(100, 1)\n",
+ "wide = Wide(wide_dim=10, pred_dim=1)\n",
"wide"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that even though the input dim is 10, the Embedding layer has 11 weights. This is because we save 0 for padding, which is used for unseen values during the encoding process"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -78,12 +275,10 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 96,
"metadata": {},
"outputs": [],
"source": [
- "import torch\n",
- "\n",
"from pytorch_widedeep.models import DeepDense"
]
},
diff --git a/examples/03_Binary_Classification_with_Defaults.ipynb b/examples/03_Binary_Classification_with_Defaults.ipynb
index 1d97d8fa..c645333c 100644
--- a/examples/03_Binary_Classification_with_Defaults.ipynb
+++ b/examples/03_Binary_Classification_with_Defaults.ipynb
@@ -419,14 +419,14 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "[[0. 1. 0. ... 0. 0. 0.]\n",
- " [0. 0. 0. ... 0. 0. 0.]\n",
- " [0. 0. 0. ... 0. 0. 0.]\n",
+ "[[ 1 17 23 ... 89 91 316]\n",
+ " [ 2 18 23 ... 89 92 317]\n",
+ " [ 3 18 24 ... 89 93 318]\n",
" ...\n",
- " [0. 0. 0. ... 0. 0. 0.]\n",
- " [0. 0. 0. ... 0. 0. 0.]\n",
- " [0. 0. 0. ... 0. 0. 0.]]\n",
- "(48842, 796)\n"
+ " [ 2 20 23 ... 90 103 323]\n",
+ " [ 2 17 23 ... 89 103 323]\n",
+ " [ 2 21 29 ... 90 115 324]]\n",
+ "(48842, 8)\n"
]
}
],
@@ -479,7 +479,7 @@
"metadata": {},
"outputs": [],
"source": [
- "wide = Wide(wide_dim=X_wide.shape[1], pred_dim=1)\n",
+ "wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)\n",
"deepdense = DeepDense(hidden_layers=[64,32], \n",
" deep_column_idx=preprocess_deep.deep_column_idx,\n",
" embed_input=preprocess_deep.embeddings_input,\n",
@@ -497,7 +497,7 @@
"text/plain": [
"WideDeep(\n",
" (wide): Wide(\n",
- " (wide_linear): Linear(in_features=796, out_features=1, bias=True)\n",
+ " (wide_linear): Embedding(797, 1, padding_idx=0)\n",
" )\n",
" (deepdense): Sequential(\n",
" (0): DeepDense(\n",
@@ -577,7 +577,7 @@
"output_type": "stream",
"text": [
"\r",
- " 0%| | 0/153 [00:00, ?it/s]"
+ " 0%| | 0/611 [00:00, ?it/s]"
]
},
{
@@ -591,21 +591,21 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 153/153 [00:01<00:00, 102.41it/s, loss=0.585, metrics={'acc': 0.7512, 'prec': 0.1818}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 98.78it/s, loss=0.513, metrics={'acc': 0.754, 'prec': 0.2429}] \n",
- "epoch 2: 100%|██████████| 153/153 [00:01<00:00, 117.30it/s, loss=0.481, metrics={'acc': 0.782, 'prec': 0.8287}] \n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 106.49it/s, loss=0.454, metrics={'acc': 0.7866, 'prec': 0.8245}]\n",
- "epoch 3: 100%|██████████| 153/153 [00:01<00:00, 124.78it/s, loss=0.44, metrics={'acc': 0.8055, 'prec': 0.781}] \n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 115.36it/s, loss=0.425, metrics={'acc': 0.8077, 'prec': 0.7818}]\n",
- "epoch 4: 100%|██████████| 153/153 [00:01<00:00, 125.01it/s, loss=0.418, metrics={'acc': 0.814, 'prec': 0.7661}] \n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 114.92it/s, loss=0.408, metrics={'acc': 0.8149, 'prec': 0.7671}]\n",
- "epoch 5: 100%|██████████| 153/153 [00:01<00:00, 116.57it/s, loss=0.404, metrics={'acc': 0.819, 'prec': 0.7527}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 108.89it/s, loss=0.397, metrics={'acc': 0.8203, 'prec': 0.7547}]\n"
+ "epoch 1: 100%|██████████| 611/611 [00:04<00:00, 128.34it/s, loss=0.655, metrics={'acc': 0.6487, 'prec': 0.2352}]\n",
+ "valid: 100%|██████████| 153/153 [00:00<00:00, 173.04it/s, loss=0.517, metrics={'acc': 0.6659, 'prec': 0.2524}]\n",
+ "epoch 2: 100%|██████████| 611/611 [00:04<00:00, 133.99it/s, loss=0.466, metrics={'acc': 0.7725, 'prec': 0.5456}]\n",
+ "valid: 100%|██████████| 153/153 [00:00<00:00, 171.03it/s, loss=0.433, metrics={'acc': 0.7765, 'prec': 0.5598}]\n",
+ "epoch 3: 100%|██████████| 611/611 [00:04<00:00, 132.06it/s, loss=0.413, metrics={'acc': 0.803, 'prec': 0.6451}] \n",
+ "valid: 100%|██████████| 153/153 [00:00<00:00, 172.22it/s, loss=0.4, metrics={'acc': 0.8045, 'prec': 0.648}] \n",
+ "epoch 4: 100%|██████████| 611/611 [00:04<00:00, 131.57it/s, loss=0.39, metrics={'acc': 0.8181, 'prec': 0.6836}] \n",
+ "valid: 100%|██████████| 153/153 [00:00<00:00, 169.62it/s, loss=0.384, metrics={'acc': 0.8195, 'prec': 0.6841}]\n",
+ "epoch 5: 100%|██████████| 611/611 [00:04<00:00, 130.85it/s, loss=0.378, metrics={'acc': 0.8247, 'prec': 0.6941}]\n",
+ "valid: 100%|██████████| 153/153 [00:00<00:00, 171.85it/s, loss=0.376, metrics={'acc': 0.8254, 'prec': 0.6946}]\n"
]
}
],
"source": [
- "model.fit(X_wide=X_wide, X_deep=X_deep, target=target, n_epochs=5, batch_size=256, val_split=0.2)"
+ "model.fit(X_wide=X_wide, X_deep=X_deep, target=target, n_epochs=5, batch_size=64, val_split=0.2)"
]
},
{
diff --git a/examples/04_Binary_Classification_Varying_Parameters.ipynb b/examples/04_Binary_Classification_Varying_Parameters.ipynb
index 3333cb9d..e4aa227e 100644
--- a/examples/04_Binary_Classification_Varying_Parameters.ipynb
+++ b/examples/04_Binary_Classification_Varying_Parameters.ipynb
@@ -419,14 +419,14 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "[[0. 1. 0. ... 0. 0. 0.]\n",
- " [0. 0. 0. ... 0. 0. 0.]\n",
- " [0. 0. 0. ... 0. 0. 0.]\n",
+ "[[ 1 17 23 ... 89 91 316]\n",
+ " [ 2 18 23 ... 89 92 317]\n",
+ " [ 3 18 24 ... 89 93 318]\n",
" ...\n",
- " [0. 0. 0. ... 0. 0. 0.]\n",
- " [0. 0. 0. ... 0. 0. 0.]\n",
- " [0. 0. 0. ... 0. 0. 0.]]\n",
- "(48842, 796)\n"
+ " [ 2 20 23 ... 90 103 323]\n",
+ " [ 2 17 23 ... 89 103 323]\n",
+ " [ 2 21 29 ... 90 115 324]]\n",
+ "(48842, 8)\n"
]
}
],
@@ -488,7 +488,7 @@
"metadata": {},
"outputs": [],
"source": [
- "wide = Wide(wide_dim=X_wide.shape[1], pred_dim=1)\n",
+ "wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)\n",
"# We can add dropout and batchnorm to the dense layers\n",
"deepdense = DeepDense(hidden_layers=[64,32], dropout=[0.5, 0.5], batchnorm=True,\n",
" deep_column_idx=preprocess_deep.deep_column_idx,\n",
@@ -507,7 +507,7 @@
"text/plain": [
"WideDeep(\n",
" (wide): Wide(\n",
- " (wide_linear): Linear(in_features=796, out_features=1, bias=True)\n",
+ " (wide_linear): Embedding(797, 1, padding_idx=0)\n",
" )\n",
" (deepdense): Sequential(\n",
" (0): DeepDense(\n",
@@ -575,13 +575,13 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"# Optimizers\n",
- "wide_opt = torch.optim.Adam(model.wide.parameters())\n",
- "deep_opt = RAdam(model.deepdense.parameters())\n",
+ "wide_opt = torch.optim.Adam(model.wide.parameters(), lr=0.03)\n",
+ "deep_opt = RAdam(model.deepdense.parameters(), lr=0.01)\n",
"# LR Schedulers\n",
"wide_sch = torch.optim.lr_scheduler.StepLR(wide_opt, step_size=3)\n",
"deep_sch = torch.optim.lr_scheduler.StepLR(deep_opt, step_size=5)"
@@ -596,7 +596,7 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
@@ -611,7 +611,7 @@
},
{
"cell_type": "code",
- "execution_count": 13,
+ "execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
@@ -623,7 +623,7 @@
},
{
"cell_type": "code",
- "execution_count": 14,
+ "execution_count": 18,
"metadata": {},
"outputs": [
{
@@ -645,26 +645,26 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 153/153 [00:01<00:00, 96.80it/s, loss=0.582, metrics={'acc': 0.7447, 'rec': 0.0374}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 117.19it/s, loss=0.512, metrics={'acc': 0.7488, 'rec': 0.0347}]\n",
- "epoch 2: 100%|██████████| 153/153 [00:01<00:00, 112.47it/s, loss=0.481, metrics={'acc': 0.7819, 'rec': 0.1127}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 119.48it/s, loss=0.454, metrics={'acc': 0.7866, 'rec': 0.139}]\n",
- "epoch 3: 100%|██████████| 153/153 [00:01<00:00, 99.29it/s, loss=0.44, metrics={'acc': 0.8091, 'rec': 0.2838}] \n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 88.13it/s, loss=0.425, metrics={'acc': 0.8108, 'rec': 0.2925}]\n",
- "epoch 4: 100%|██████████| 153/153 [00:01<00:00, 103.34it/s, loss=0.426, metrics={'acc': 0.8131, 'rec': 0.3124}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 101.78it/s, loss=0.423, metrics={'acc': 0.814, 'rec': 0.3156}]\n",
- "epoch 5: 100%|██████████| 153/153 [00:01<00:00, 100.77it/s, loss=0.423, metrics={'acc': 0.8132, 'rec': 0.3134}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 103.36it/s, loss=0.421, metrics={'acc': 0.814, 'rec': 0.3165}]\n",
- "epoch 6: 100%|██████████| 153/153 [00:01<00:00, 100.09it/s, loss=0.421, metrics={'acc': 0.8134, 'rec': 0.3147}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 111.64it/s, loss=0.418, metrics={'acc': 0.8141, 'rec': 0.3178}]\n",
- "epoch 7: 100%|██████████| 153/153 [00:01<00:00, 103.15it/s, loss=0.42, metrics={'acc': 0.8133, 'rec': 0.3148}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 100.57it/s, loss=0.418, metrics={'acc': 0.8141, 'rec': 0.3179}]\n",
- "epoch 8: 100%|██████████| 153/153 [00:01<00:00, 98.05it/s, loss=0.42, metrics={'acc': 0.8133, 'rec': 0.3148}] \n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 105.68it/s, loss=0.418, metrics={'acc': 0.8141, 'rec': 0.3179}]\n",
- "epoch 9: 100%|██████████| 153/153 [00:01<00:00, 101.05it/s, loss=0.419, metrics={'acc': 0.8133, 'rec': 0.3149}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 99.49it/s, loss=0.418, metrics={'acc': 0.8141, 'rec': 0.3181}]\n",
- "epoch 10: 100%|██████████| 153/153 [00:01<00:00, 97.72it/s, loss=0.419, metrics={'acc': 0.8133, 'rec': 0.3149}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 102.56it/s, loss=0.418, metrics={'acc': 0.8141, 'rec': 0.3181}]\n"
+ "epoch 1: 100%|██████████| 153/153 [00:02<00:00, 72.33it/s, loss=0.503, metrics={'acc': 0.7885, 'rec': 0.4864}]\n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 127.72it/s, loss=0.386, metrics={'acc': 0.7962, 'rec': 0.4998}]\n",
+ "epoch 2: 100%|██████████| 153/153 [00:02<00:00, 71.76it/s, loss=0.374, metrics={'acc': 0.8268, 'rec': 0.5242}]\n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 126.72it/s, loss=0.372, metrics={'acc': 0.8277, 'rec': 0.5281}]\n",
+ "epoch 3: 100%|██████████| 153/153 [00:02<00:00, 73.21it/s, loss=0.367, metrics={'acc': 0.8298, 'rec': 0.5242}]\n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 126.68it/s, loss=0.37, metrics={'acc': 0.8303, 'rec': 0.5279}]\n",
+ "epoch 4: 100%|██████████| 153/153 [00:02<00:00, 71.37it/s, loss=0.36, metrics={'acc': 0.8319, 'rec': 0.5372}] \n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 128.64it/s, loss=0.369, metrics={'acc': 0.8324, 'rec': 0.5412}]\n",
+ "epoch 5: 100%|██████████| 153/153 [00:02<00:00, 71.53it/s, loss=0.359, metrics={'acc': 0.8322, 'rec': 0.5378}]\n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 119.31it/s, loss=0.369, metrics={'acc': 0.8325, 'rec': 0.5412}]\n",
+ "epoch 6: 100%|██████████| 153/153 [00:02<00:00, 71.37it/s, loss=0.359, metrics={'acc': 0.8322, 'rec': 0.5361}]\n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 125.99it/s, loss=0.369, metrics={'acc': 0.8326, 'rec': 0.5398}]\n",
+ "epoch 7: 100%|██████████| 153/153 [00:02<00:00, 70.20it/s, loss=0.358, metrics={'acc': 0.8329, 'rec': 0.5396}]\n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 124.88it/s, loss=0.369, metrics={'acc': 0.8331, 'rec': 0.5416}]\n",
+ "epoch 8: 100%|██████████| 153/153 [00:02<00:00, 70.75it/s, loss=0.358, metrics={'acc': 0.833, 'rec': 0.5374}] \n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 125.81it/s, loss=0.369, metrics={'acc': 0.8331, 'rec': 0.5397}]\n",
+ "epoch 9: 100%|██████████| 153/153 [00:02<00:00, 70.40it/s, loss=0.358, metrics={'acc': 0.833, 'rec': 0.5368}] \n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 125.07it/s, loss=0.369, metrics={'acc': 0.8331, 'rec': 0.5391}]\n",
+ "epoch 10: 100%|██████████| 153/153 [00:02<00:00, 70.20it/s, loss=0.358, metrics={'acc': 0.8329, 'rec': 0.537}] \n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 124.43it/s, loss=0.369, metrics={'acc': 0.8331, 'rec': 0.5392}]\n"
]
}
],
@@ -674,7 +674,7 @@
},
{
"cell_type": "code",
- "execution_count": 15,
+ "execution_count": 19,
"metadata": {},
"outputs": [
{
@@ -799,7 +799,7 @@
" 'zero_grad']"
]
},
- "execution_count": 15,
+ "execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
@@ -817,7 +817,7 @@
},
{
"cell_type": "code",
- "execution_count": 16,
+ "execution_count": 20,
"metadata": {},
"outputs": [
{
@@ -826,7 +826,7 @@
"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]"
]
},
- "execution_count": 16,
+ "execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
@@ -837,14 +837,14 @@
},
{
"cell_type": "code",
- "execution_count": 17,
+ "execution_count": 21,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "{'train_loss': [0.582023054166557, 0.48075080015300925, 0.44022563099861145, 0.42563695144030006, 0.42342905612552867, 0.42120904256315794, 0.41995110737732033, 0.419722778734818, 0.4194869099099652, 0.41935000135228523], 'train_acc': [0.7446574360811814, 0.7818954265093543, 0.8090753205538351, 0.8130934404831981, 0.8132214060860441, 0.8133749648094593, 0.8132981854477517, 0.8132981854477517, 0.8132981854477517, 0.8132725923271824], 'train_rec': [0.037437159568071365, 0.11273933202028275, 0.28377366065979004, 0.31243982911109924, 0.3134025037288666, 0.3146860599517822, 0.31479302048683167, 0.31479302048683167, 0.3148999810218811, 0.3148999810218811], 'val_loss': [0.5119115939507117, 0.4539328179298303, 0.42495738925077975, 0.4227801507864243, 0.42057751004512495, 0.41838682691256207, 0.4181652260132325, 0.41793563885566515, 0.4176993484680469, 0.4176750809718401], 'val_acc': [0.7488432087138119, 0.7865771262438066, 0.8107571352524466, 0.8139715818353057, 0.8139920560173621, 0.8141353752917571, 0.8140739527455878, 0.8140739527455878, 0.8140739527455878, 0.8140534785635314], 'val_rec': [0.034739453345537186, 0.13904337584972382, 0.29246172308921814, 0.3155643045902252, 0.3165055215358734, 0.3177889883518219, 0.317874550819397, 0.317874550819397, 0.3181312680244446, 0.3181312680244446]}\n"
+ "{'train_loss': [0.5026861273385341, 0.37383826573689777, 0.36658557158669614, 0.3601557047538508, 0.3594148938172783, 0.35907501001763187, 0.358282413942362, 0.35823015644659406, 0.35819698957835927, 0.3581014702133104], 'train_acc': [0.788549637857344, 0.8267857599877153, 0.8297545619737414, 0.8318787909809843, 0.8321859084278146, 0.832237094668953, 0.832902515803752, 0.8329792951654595, 0.8329537020448904, 0.8329281089243211], 'train_rec': [0.48636218905448914, 0.5242272019386292, 0.5242272019386292, 0.5371697545051575, 0.5378115177154541, 0.5361000895500183, 0.5396299362182617, 0.5373836755752563, 0.5368488430976868, 0.5369558334350586], 'val_loss': [0.38589231249613637, 0.371902360365941, 0.36999432627971357, 0.36935041348139447, 0.3691598016482133, 0.36905216712218064, 0.36900061674607104, 0.36898223635477895, 0.36896658937136334, 0.36896434120642835], 'val_acc': [0.79624094017444, 0.8277302321772245, 0.8302895049342779, 0.832357397321977, 0.8325211907784285, 0.8325826133245977, 0.833073993693952, 0.8331354162401212, 0.8330944678760084, 0.833073993693952], 'val_rec': [0.4997860789299011, 0.5281081795692444, 0.5279369950294495, 0.5411996245384216, 0.5411996245384216, 0.5398305654525757, 0.5416274666786194, 0.5396594405174255, 0.5391460657119751, 0.5392315983772278]}\n"
]
}
],
@@ -854,14 +854,14 @@
},
{
"cell_type": "code",
- "execution_count": 18,
+ "execution_count": 22,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "{'lr_wide_0': [0.001, 0.001, 0.001, 0.0001, 0.0001, 0.0001, 1e-05, 1e-05, 1e-05, 1.0000000000000002e-06], 'lr_deepdense_0': [0.001, 0.001, 0.001, 0.001, 0.001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001]}\n"
+ "{'lr_wide_0': [0.03, 0.03, 0.03, 0.003, 0.003, 0.003, 0.00030000000000000003, 0.00030000000000000003, 0.00030000000000000003, 3.0000000000000004e-05], 'lr_deepdense_0': [0.01, 0.01, 0.01, 0.01, 0.01, 0.001, 0.001, 0.001, 0.001, 0.001]}\n"
]
}
],
@@ -880,84 +880,83 @@
},
{
"cell_type": "code",
- "execution_count": 19,
+ "execution_count": 23,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "{'11th': array([-0.02114219, -0.3634936 , 0.03710679, -0.07243915, -0.28715202,\n",
- " -0.29929525, 0.11913099, -0.01372065, -0.06960961, 0.11129184,\n",
- " -0.11541647, 0.02515038, -0.32817808, 0.19789433, -0.6190677 ,\n",
- " 0.13042031], dtype=float32),\n",
- " 'HS-grad': array([ 2.00377326e-04, -2.61859149e-01, -2.51907468e-01, -1.70783494e-02,\n",
- " -1.04680985e-01, -1.51709780e-01, 1.73194274e-01, -1.53597221e-01,\n",
- " -2.76275307e-01, -3.37639779e-01, 5.94966952e-03, 2.58735180e-01,\n",
- " -1.08496705e-02, -8.25304538e-02, -2.43277356e-01, 4.01295513e-01],\n",
- " dtype=float32),\n",
- " 'Assoc-acdm': array([ 0.0126759 , 0.15168795, -0.03856753, -0.27679357, -0.47500238,\n",
- " 0.45382416, -0.545228 , 0.1339748 , 0.02405205, 0.02809528,\n",
- " -0.41063702, -0.06350306, -0.08130409, -0.13869216, 0.4932242 ,\n",
- " 0.17304394], dtype=float32),\n",
- " 'Some-college': array([-0.1610479 , -0.25853214, -0.236602 , 0.13044621, 0.03830301,\n",
- " -0.1743144 , -0.28899103, -0.11883932, 0.08455969, -0.08742228,\n",
- " -0.46097067, -0.11231954, 0.37493324, -0.2029054 , -0.07289007,\n",
- " -0.03197158], dtype=float32),\n",
- " '10th': array([-0.07396724, 0.06467492, -0.08107238, -0.03854853, -0.06056274,\n",
- " 0.17571206, 0.1095883 , 0.12067619, -0.40733424, 0.32879853,\n",
- " -0.17957865, 0.6560938 , -0.10061017, 0.23316202, 0.3059522 ,\n",
- " -0.14240988], dtype=float32),\n",
- " 'Prof-school': array([-0.04007137, -0.2707076 , 0.23113215, -0.41783914, 0.25955105,\n",
- " -0.30054352, 0.32043606, -0.20860812, -0.15136348, -0.36359408,\n",
- " 0.49961898, -0.13973509, 0.51584864, 0.47093126, -0.0276325 ,\n",
- " -0.20662539], dtype=float32),\n",
- " '7th-8th': array([-0.07773081, 0.09345848, 0.072533 , -0.24359678, 0.14904591,\n",
- " 0.18480958, 0.01799594, 0.12402041, -0.35343906, 0.23270686,\n",
- " 0.10102016, -0.0258682 , 0.40796915, -0.05507657, 0.4019308 ,\n",
- " 0.05231443], dtype=float32),\n",
- " 'Bachelors': array([-0.12334875, 0.2271091 , -0.22389385, 0.5577601 , -0.05163969,\n",
- " -0.37246484, -0.02689779, 0.18202123, 0.59914356, -0.07744938,\n",
- " 0.5633556 , -0.18728566, -0.43923494, 0.2014725 , 0.00761633,\n",
- " -0.0447193 ], dtype=float32),\n",
- " 'Masters': array([-0.16333768, -0.16029981, -0.01482454, -0.04896322, -0.0047817 ,\n",
- " 0.09887701, -0.15091099, 0.22599514, -0.17000915, 0.16678709,\n",
- " -0.3679181 , -0.18114986, -0.16266271, -0.27970657, -0.1254899 ,\n",
- " 0.31768733], dtype=float32),\n",
- " 'Doctorate': array([ 0.01903534, -0.02743328, 0.16066255, -0.11599138, -0.00787276,\n",
- " 0.145728 , 0.24741152, -0.09514342, -0.23147094, -0.1098811 ,\n",
- " -0.12666361, 0.19410084, -0.05531591, -0.37460938, 0.42867297,\n",
- " 0.01255902], dtype=float32),\n",
- " '5th-6th': array([-0.1874508 , -0.01520642, -0.23055367, -0.10444976, 0.1880218 ,\n",
- " 0.06044631, -0.17084908, 0.28993553, 0.19094709, 0.01088051,\n",
- " -0.05885294, 0.26692954, -0.10718243, -0.07673435, -0.00814716,\n",
- " 0.46550933], dtype=float32),\n",
- " 'Assoc-voc': array([-0.1354011 , -0.44471708, 0.18469264, -0.02088883, 0.0346331 ,\n",
- " 0.07825129, 0.22990814, -0.38387823, 0.01530089, -0.30289283,\n",
- " -0.18230931, 0.19571105, -0.03887892, 0.01946613, 0.16479516,\n",
- " 0.41735104], dtype=float32),\n",
- " '9th': array([-0.29879665, 0.04421502, -0.11862607, 0.1772717 , -0.12706555,\n",
- " 0.04192697, -0.28609815, 0.3248482 , -0.04987352, -0.39138898,\n",
- " -0.057826 , 0.4970304 , -0.1326947 , 0.22000486, -0.00846681,\n",
- " 0.2706219 ], dtype=float32),\n",
- " '12th': array([-0.14942442, -0.22520241, 0.04879642, -0.16480213, -0.00521241,\n",
- " -0.07897403, 0.07396449, -0.29127416, -0.26175758, -0.5076894 ,\n",
- " -0.06036085, 0.3846129 , -0.6074103 , 0.27427655, -0.15219459,\n",
- " 0.24506666], dtype=float32),\n",
- " '1st-4th': array([ 0.02215668, 0.21796826, -0.35868096, 0.03803689, 0.02591529,\n",
- " 0.3914331 , -0.58327377, 0.3261264 , 0.36127493, -0.25838605,\n",
- " -0.05334533, -0.04685102, 0.17751735, 0.08530575, 0.13134745,\n",
- " 0.44403064], dtype=float32),\n",
- " 'Preschool': array([-0.33479828, 0.19172014, 0.26898265, 0.04768471, 0.01425556,\n",
- " 0.02984914, -0.02165659, 0.09084602, -0.26122406, 0.06567731,\n",
- " 0.0431284 , 0.3698193 , 0.6405797 , -0.00345286, 0.10917825,\n",
- " -0.07227341], dtype=float32),\n",
- " 'unseen': array([ 0.34454626, 0.08338903, 0.00250609, -0.27078775, 0.12649588,\n",
- " 0.35320354, -0.02497412, 0.2975028 , 0.21158105, 0.04682659,\n",
- " 0.03411686, -0.02839612, 0.16605824, 0.15381509, -0.00892953,\n",
- " -0.820573 ], dtype=float32)}"
+ "{'11th': array([ 0.33238176, 0.02123132, 0.42671534, -0.16836806, 0.04070434,\n",
+ " 0.21476945, -0.05866506, 0.09599391, 0.21264766, -0.08261641,\n",
+ " -0.4364204 , 0.5176953 , -0.17785792, 0.1990719 , 0.05055304,\n",
+ " -0.05390744], dtype=float32),\n",
+ " 'HS-grad': array([ 0.1851779 , -0.0601109 , -0.04134565, -0.17099169, 0.01647249,\n",
+ " 0.1691518 , -0.03775224, -0.01711482, -0.13714994, -0.02202759,\n",
+ " -0.2350222 , 0.20368417, 0.06420711, 0.08465873, 0.11443923,\n",
+ " -0.28585908], dtype=float32),\n",
+ " 'Assoc-acdm': array([-0.2891686 , -0.25329128, -0.03977084, 0.34204823, 0.4393897 ,\n",
+ " 0.24583909, -0.08771466, 0.3398704 , 0.06197336, -0.09200054,\n",
+ " 0.13266966, -0.27940965, -0.10639463, 0.16516595, 0.20191231,\n",
+ " -0.11804624], dtype=float32),\n",
+ " 'Some-college': array([ 0.17284533, -0.34509236, -0.22175975, -0.11192639, 0.14154772,\n",
+ " 0.04188053, 0.14860624, 0.28312132, 0.06071718, -0.10315312,\n",
+ " -0.05902205, -0.03197744, 0.20363455, 0.04027565, 0.43063605,\n",
+ " 0.21163562], dtype=float32),\n",
+ " '10th': array([ 0.13888928, 0.28386956, 0.18166119, 0.02652328, 0.11637231,\n",
+ " 0.24056876, -0.06386037, 0.05930374, 0.04393852, 0.17677549,\n",
+ " 0.27980283, -0.01221516, 0.12281907, 0.04273703, 0.22282158,\n",
+ " -0.25718638], dtype=float32),\n",
+ " 'Prof-school': array([ 0.26996085, 0.06557842, 0.0957497 , 0.06524102, 0.05351401,\n",
+ " 0.34774455, -0.39007127, -0.35276353, -0.19460988, 0.06306136,\n",
+ " -0.03555794, 0.02946662, 0.47177076, 0.21887466, 0.34440616,\n",
+ " 0.17761633], dtype=float32),\n",
+ " '7th-8th': array([-0.14013144, -0.20337081, 0.6704599 , -0.10210201, 0.1633953 ,\n",
+ " -0.03677108, -0.04664218, -0.13967332, -0.02610652, -0.15920916,\n",
+ " -0.18137608, -0.01846946, 0.35807863, 0.0148629 , 0.2857368 ,\n",
+ " 0.28930005], dtype=float32),\n",
+ " 'Bachelors': array([-0.38666266, 0.17745058, -0.6287257 , 0.22080924, 0.25037012,\n",
+ " -0.10224682, 0.5612052 , -0.24709803, 0.03214271, -0.22835065,\n",
+ " -0.14132145, 0.3010941 , -0.23835489, 0.08622 , -0.04518703,\n",
+ " 0.31074366], dtype=float32),\n",
+ " 'Masters': array([-0.41403466, -0.33947882, 0.14072244, -0.22146806, -0.18230349,\n",
+ " -0.1195543 , -0.84759206, 0.25256675, 0.14532281, -0.01060636,\n",
+ " -0.03578382, -0.07117725, 0.10634375, -0.11669173, 0.17765476,\n",
+ " -0.03559739], dtype=float32),\n",
+ " 'Doctorate': array([ 0.00375404, -0.02784416, -0.28326795, 0.22763273, 0.03977633,\n",
+ " 0.2893272 , 0.25680798, 0.36434892, -0.65951985, -0.23679003,\n",
+ " -0.11408209, -0.23283346, -0.27024168, 0.0655888 , -0.28381783,\n",
+ " -0.01525949], dtype=float32),\n",
+ " '5th-6th': array([ 0.00683184, 0.23564084, -0.132059 , -0.3406017 , -0.06710123,\n",
+ " -0.09649926, 0.50411046, -0.12363172, -0.0353502 , -0.53238744,\n",
+ " -0.05181202, -0.05146485, -0.23931046, -0.26453286, 0.08420272,\n",
+ " 0.0235041 ], dtype=float32),\n",
+ " 'Assoc-voc': array([ 0.01930698, -0.2455314 , 0.2246628 , 0.16216752, -0.4528598 ,\n",
+ " -0.6121017 , 0.15893641, 0.01993939, -0.3148845 , 0.03837916,\n",
+ " 0.0767131 , -0.36453167, 0.19929656, 0.28016493, 0.29385152,\n",
+ " -0.47822088], dtype=float32),\n",
+ " '9th': array([-0.03110321, 0.69687057, -0.33127317, 0.06741869, 0.08373164,\n",
+ " 0.25090563, 0.07099659, 0.21758935, -0.07414749, -0.19316533,\n",
+ " 0.21613942, 0.28149685, -0.41364396, -0.0439614 , -0.02726781,\n",
+ " -0.04664526], dtype=float32),\n",
+ " '12th': array([ 0.46782094, 0.1987633 , 0.11554655, -0.23237073, -0.35828865,\n",
+ " -0.08366812, 0.0086338 , 0.46672872, -0.24939838, 0.22630745,\n",
+ " -0.16754937, -0.4713689 , -0.08152255, 0.02004629, 0.1118032 ,\n",
+ " 0.20979449], dtype=float32),\n",
+ " '1st-4th': array([-0.16926417, 0.11347993, 0.02692448, -0.10284851, 0.25171363,\n",
+ " -0.04539176, -0.24491136, 0.3281045 , -0.08861455, 0.18578447,\n",
+ " 0.23892452, -0.00729677, 0.16713212, 0.2949316 , -0.00725389,\n",
+ " -0.20607162], dtype=float32),\n",
+ " 'Preschool': array([-0.30532706, 0.25465214, -0.5603218 , -0.16249408, -0.32321507,\n",
+ " 0.11698078, 0.01557691, -0.3124683 , -0.25044286, 0.08334377,\n",
+ " 0.2094927 , 0.03301949, -0.01236501, -0.24443303, -0.03395106,\n",
+ " -0.01797807], dtype=float32),\n",
+ " 'unseen': array([-0.17771505, 0.3246768 , -0.29062387, 0.12164559, 0.34164497,\n",
+ " -0.5451506 , 0.22189835, 0.21224639, 0.4933099 , -0.03533744,\n",
+ " -0.12335563, 0.12472781, 0.1412489 , 0.17336178, 0.4160364 ,\n",
+ " -0.32417113], dtype=float32)}"
]
},
- "execution_count": 19,
+ "execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
diff --git a/examples/05_Regression_with_Images_and_Text.ipynb b/examples/05_Regression_with_Images_and_Text.ipynb
index b5831e53..1b1319be 100644
--- a/examples/05_Regression_with_Images_and_Text.ipynb
+++ b/examples/05_Regression_with_Images_and_Text.ipynb
@@ -1058,7 +1058,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- " 4%|▍ | 42/1001 [00:00<00:02, 411.81it/s]"
+ " 4%|▍ | 43/1001 [00:00<00:02, 424.34it/s]"
]
},
{
@@ -1072,7 +1072,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 1001/1001 [00:02<00:00, 402.73it/s]\n"
+ "100%|██████████| 1001/1001 [00:02<00:00, 400.65it/s]\n"
]
},
{
@@ -1097,12 +1097,12 @@
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"# Linear model\n",
- "wide = Wide(wide_dim=X_wide.shape[1], pred_dim=1)\n",
+ "wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)\n",
"# DeepDense: 2 Dense layers\n",
"deepdense = DeepDense(hidden_layers=[128,64], dropout=[0.5, 0.5], \n",
" deep_column_idx=deep_preprocessor.deep_column_idx,\n",
@@ -1125,7 +1125,7 @@
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
@@ -1141,7 +1141,7 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
@@ -1150,7 +1150,7 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 13,
"metadata": {},
"outputs": [
{
@@ -1172,8 +1172,8 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 25/25 [01:13<00:00, 2.93s/it, loss=135]\n",
- "valid: 100%|██████████| 7/7 [00:14<00:00, 2.10s/it, loss=124] \n"
+ "epoch 1: 100%|██████████| 25/25 [01:08<00:00, 2.74s/it, loss=1.73e+4]\n",
+ "valid: 100%|██████████| 7/7 [00:14<00:00, 2.01s/it, loss=1.45e+4]\n"
]
}
],
@@ -1193,11 +1193,11 @@
},
{
"cell_type": "code",
- "execution_count": 13,
+ "execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
- "wide = Wide(wide_dim=X_wide.shape[1], pred_dim=1)\n",
+ "wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)\n",
"deepdense = DeepDense(hidden_layers=[128,64], dropout=[0.5, 0.5], \n",
" deep_column_idx=deep_preprocessor.deep_column_idx,\n",
" embed_input=deep_preprocessor.embeddings_input,\n",
@@ -1217,7 +1217,7 @@
},
{
"cell_type": "code",
- "execution_count": 14,
+ "execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
@@ -1233,7 +1233,7 @@
},
{
"cell_type": "code",
- "execution_count": 15,
+ "execution_count": 16,
"metadata": {},
"outputs": [
{
@@ -1241,7 +1241,7 @@
"text/plain": [
"WideDeep(\n",
" (wide): Wide(\n",
- " (wide_linear): Linear(in_features=356, out_features=1, bias=True)\n",
+ " (wide_linear): Embedding(357, 1, padding_idx=0)\n",
" )\n",
" (deepdense): DeepDense(\n",
" (embed_layers): ModuleDict(\n",
@@ -1386,7 +1386,7 @@
")"
]
},
- "execution_count": 15,
+ "execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
@@ -1406,7 +1406,7 @@
},
{
"cell_type": "code",
- "execution_count": 16,
+ "execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
@@ -1420,11 +1420,11 @@
},
{
"cell_type": "code",
- "execution_count": 17,
+ "execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
- "wide_opt = torch.optim.Adam(model.wide.parameters())\n",
+ "wide_opt = torch.optim.Adam(model.wide.parameters(), lr=0.03)\n",
"deep_opt = torch.optim.Adam(deep_params)\n",
"text_opt = RAdam(model.deeptext.parameters())\n",
"img_opt = RAdam(model.deepimage.parameters())\n",
@@ -1433,7 +1433,7 @@
},
{
"cell_type": "code",
- "execution_count": 18,
+ "execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
@@ -1446,7 +1446,7 @@
},
{
"cell_type": "code",
- "execution_count": 19,
+ "execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
@@ -1472,7 +1472,7 @@
},
{
"cell_type": "code",
- "execution_count": 20,
+ "execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
@@ -1482,7 +1482,7 @@
},
{
"cell_type": "code",
- "execution_count": 21,
+ "execution_count": 22,
"metadata": {},
"outputs": [
{
@@ -1490,7 +1490,7 @@
"text/plain": [
"WideDeep(\n",
" (wide): Wide(\n",
- " (wide_linear): Linear(in_features=356, out_features=1, bias=True)\n",
+ " (wide_linear): Embedding(357, 1, padding_idx=0)\n",
" )\n",
" (deepdense): DeepDense(\n",
" (embed_layers): ModuleDict(\n",
@@ -1635,7 +1635,7 @@
")"
]
},
- "execution_count": 21,
+ "execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
@@ -1646,7 +1646,7 @@
},
{
"cell_type": "code",
- "execution_count": 22,
+ "execution_count": 23,
"metadata": {},
"outputs": [
{
@@ -1668,8 +1668,8 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 25/25 [02:02<00:00, 4.88s/it, loss=128]\n",
- "valid: 100%|██████████| 7/7 [00:14<00:00, 2.09s/it, loss=94.5]\n"
+ "epoch 1: 100%|██████████| 25/25 [02:04<00:00, 4.98s/it, loss=1.24e+4]\n",
+ "valid: 100%|██████████| 7/7 [00:16<00:00, 2.33s/it, loss=9.26e+3]\n"
]
}
],
@@ -1687,13 +1687,13 @@
},
{
"cell_type": "code",
- "execution_count": 23,
+ "execution_count": 24,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "{'lr_wide_0': [0.001, 0.001],\n",
+ "{'lr_wide_0': [0.03, 0.03],\n",
" 'lr_deepdense_0': [0.0001, 0.0001],\n",
" 'lr_deepdense_1': [0.0001, 0.0001],\n",
" 'lr_deepdense_2': [0.0001, 0.0001],\n",
@@ -1712,7 +1712,7 @@
" 'lr_deephead_0': [0.001, 0.001]}"
]
},
- "execution_count": 23,
+ "execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
diff --git a/examples/06_WarmUp_Model_Components.ipynb b/examples/06_WarmUp_Model_Components.ipynb
index 70176697..3ee10f57 100644
--- a/examples/06_WarmUp_Model_Components.ipynb
+++ b/examples/06_WarmUp_Model_Components.ipynb
@@ -259,7 +259,7 @@
"metadata": {},
"outputs": [],
"source": [
- "wide = Wide(wide_dim=X_wide.shape[1], pred_dim=1)\n",
+ "wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)\n",
"deepdense = DeepDense(hidden_layers=[64,32], \n",
" deep_column_idx=preprocess_deep.deep_column_idx,\n",
" embed_input=preprocess_deep.embeddings_input,\n",
@@ -307,11 +307,11 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 153/153 [00:01<00:00, 127.54it/s, loss=0.476, metrics={'acc': 0.7808972948071559}]\n",
- "epoch 2: 100%|██████████| 153/153 [00:01<00:00, 126.88it/s, loss=0.373, metrics={'acc': 0.8048268625393494}]\n",
- "epoch 3: 100%|██████████| 153/153 [00:01<00:00, 141.92it/s, loss=0.365, metrics={'acc': 0.8136820822562895}]\n",
- "epoch 4: 100%|██████████| 153/153 [00:01<00:00, 151.56it/s, loss=0.362, metrics={'acc': 0.8182312594374632}]\n",
- "epoch 5: 100%|██████████| 153/153 [00:00<00:00, 158.22it/s, loss=0.36, metrics={'acc': 0.8210477823561027}]\n",
+ "epoch 1: 100%|██████████| 153/153 [00:01<00:00, 92.77it/s, loss=0.598, metrics={'acc': 0.697438128631024}] \n",
+ "epoch 2: 100%|██████████| 153/153 [00:01<00:00, 106.70it/s, loss=0.394, metrics={'acc': 0.758272976223991}] \n",
+ "epoch 3: 100%|██████████| 153/153 [00:01<00:00, 101.83it/s, loss=0.371, metrics={'acc': 0.7821172335542873}]\n",
+ "epoch 4: 100%|██████████| 153/153 [00:01<00:00, 107.60it/s, loss=0.365, metrics={'acc': 0.7943976659074041}]\n",
+ "epoch 5: 100%|██████████| 153/153 [00:01<00:00, 105.79it/s, loss=0.363, metrics={'acc': 0.8018273488086403}]\n",
" 0%| | 0/153 [00:00, ?it/s]"
]
},
@@ -326,11 +326,11 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 153/153 [00:01<00:00, 78.65it/s, loss=0.397, metrics={'acc': 0.8198073691125158}]\n",
- "epoch 2: 100%|██████████| 153/153 [00:02<00:00, 75.69it/s, loss=0.348, metrics={'acc': 0.8221936229255862}]\n",
- "epoch 3: 100%|██████████| 153/153 [00:02<00:00, 74.79it/s, loss=0.343, metrics={'acc': 0.8243576126737133}]\n",
- "epoch 4: 100%|██████████| 153/153 [00:01<00:00, 76.79it/s, loss=0.338, metrics={'acc': 0.8264502057402526}]\n",
- "epoch 5: 100%|██████████| 153/153 [00:01<00:00, 79.57it/s, loss=0.334, metrics={'acc': 0.8283059913495252}]\n",
+ "epoch 1: 100%|██████████| 153/153 [00:02<00:00, 73.75it/s, loss=0.398, metrics={'acc': 0.802813537054573}] \n",
+ "epoch 2: 100%|██████████| 153/153 [00:02<00:00, 73.16it/s, loss=0.349, metrics={'acc': 0.8077444782842372}]\n",
+ "epoch 3: 100%|██████████| 153/153 [00:02<00:00, 72.68it/s, loss=0.343, metrics={'acc': 0.811737005093031}] \n",
+ "epoch 4: 100%|██████████| 153/153 [00:02<00:00, 75.04it/s, loss=0.338, metrics={'acc': 0.8150868602075317}]\n",
+ "epoch 5: 100%|██████████| 153/153 [00:02<00:00, 74.24it/s, loss=0.335, metrics={'acc': 0.8180226755048243}]\n",
" 0%| | 0/153 [00:00, ?it/s]"
]
},
@@ -345,16 +345,16 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 153/153 [00:01<00:00, 114.10it/s, loss=0.36, metrics={'acc': 0.8323}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 123.16it/s, loss=0.364, metrics={'acc': 0.8325}]\n",
- "epoch 2: 100%|██████████| 153/153 [00:01<00:00, 113.50it/s, loss=0.359, metrics={'acc': 0.8325}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 122.56it/s, loss=0.364, metrics={'acc': 0.8327}]\n",
- "epoch 3: 100%|██████████| 153/153 [00:01<00:00, 110.90it/s, loss=0.359, metrics={'acc': 0.8325}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 119.56it/s, loss=0.363, metrics={'acc': 0.8327}]\n",
- "epoch 4: 100%|██████████| 153/153 [00:01<00:00, 112.92it/s, loss=0.359, metrics={'acc': 0.8326}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 121.00it/s, loss=0.363, metrics={'acc': 0.8329}]\n",
- "epoch 5: 100%|██████████| 153/153 [00:01<00:00, 114.15it/s, loss=0.358, metrics={'acc': 0.8327}]\n",
- "valid: 100%|██████████| 39/39 [00:00<00:00, 108.91it/s, loss=0.363, metrics={'acc': 0.8329}]\n"
+ "epoch 1: 100%|██████████| 153/153 [00:02<00:00, 74.96it/s, loss=0.361, metrics={'acc': 0.8315}]\n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 136.56it/s, loss=0.365, metrics={'acc': 0.8318}]\n",
+ "epoch 2: 100%|██████████| 153/153 [00:02<00:00, 75.09it/s, loss=0.361, metrics={'acc': 0.8317}]\n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 129.90it/s, loss=0.365, metrics={'acc': 0.8321}]\n",
+ "epoch 3: 100%|██████████| 153/153 [00:02<00:00, 73.24it/s, loss=0.36, metrics={'acc': 0.8317}] \n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 131.37it/s, loss=0.365, metrics={'acc': 0.8321}]\n",
+ "epoch 4: 100%|██████████| 153/153 [00:02<00:00, 72.38it/s, loss=0.36, metrics={'acc': 0.832}] \n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 130.72it/s, loss=0.365, metrics={'acc': 0.8324}]\n",
+ "epoch 5: 100%|██████████| 153/153 [00:02<00:00, 72.24it/s, loss=0.359, metrics={'acc': 0.8322}]\n",
+ "valid: 100%|██████████| 39/39 [00:00<00:00, 130.20it/s, loss=0.364, metrics={'acc': 0.8326}]\n"
]
}
],
@@ -450,7 +450,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- " 8%|▊ | 84/1001 [00:00<00:02, 416.73it/s]"
+ " 8%|▊ | 84/1001 [00:00<00:02, 418.96it/s]"
]
},
{
@@ -464,7 +464,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 1001/1001 [00:02<00:00, 400.82it/s]\n"
+ "100%|██████████| 1001/1001 [00:02<00:00, 409.78it/s]\n"
]
},
{
@@ -497,7 +497,7 @@
"metadata": {},
"outputs": [],
"source": [
- "wide = Wide(wide_dim=X_wide.shape[1], pred_dim=1)\n",
+ "wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)\n",
"deepdense = DeepDense( hidden_layers=[64,32], dropout=[0.2,0.2],\n",
" deep_column_idx=prepare_deep.deep_column_idx,\n",
" embed_input=prepare_deep.embeddings_input,\n",
@@ -519,7 +519,7 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
@@ -565,7 +565,7 @@
"text/plain": [
"WideDeep(\n",
" (wide): Wide(\n",
- " (wide_linear): Linear(in_features=356, out_features=1, bias=True)\n",
+ " (wide_linear): Embedding(357, 1, padding_idx=0)\n",
" )\n",
" (deepdense): Sequential(\n",
" (0): DeepDense(\n",
@@ -848,7 +848,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 25/25 [00:00<00:00, 58.03it/s, loss=127]\n",
+ "epoch 1: 100%|██████████| 25/25 [00:00<00:00, 34.36it/s, loss=1.64e+4]\n",
" 0%| | 0/25 [00:00, ?it/s]"
]
},
@@ -863,7 +863,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 25/25 [00:00<00:00, 47.80it/s, loss=116]\n",
+ "epoch 1: 100%|██████████| 25/25 [00:00<00:00, 46.93it/s, loss=1.37e+4]\n",
" 0%| | 0/25 [00:00, ?it/s]"
]
},
@@ -878,7 +878,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 25/25 [00:04<00:00, 5.94it/s, loss=132]\n",
+ "epoch 1: 100%|██████████| 25/25 [00:04<00:00, 5.53it/s, loss=1.74e+4]\n",
" 0%| | 0/25 [00:00, ?it/s]"
]
},
@@ -893,7 +893,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 25/25 [01:12<00:00, 2.92s/it, loss=119]\n",
+ "epoch 1: 100%|██████████| 25/25 [01:05<00:00, 2.63s/it, loss=1.41e+4]\n",
" 0%| | 0/25 [00:00, ?it/s]"
]
},
@@ -908,7 +908,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 25/25 [01:48<00:00, 4.34s/it, loss=108]\n",
+ "epoch 1: 100%|██████████| 25/25 [01:29<00:00, 3.57s/it, loss=1.17e+4]\n",
" 0%| | 0/25 [00:00, ?it/s]"
]
},
@@ -923,7 +923,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 25/25 [02:05<00:00, 5.01s/it, loss=106] \n",
+ "epoch 1: 100%|██████████| 25/25 [01:51<00:00, 4.46s/it, loss=1.11e+4]\n",
" 0%| | 0/25 [00:00, ?it/s]"
]
},
@@ -938,7 +938,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 25/25 [02:57<00:00, 7.11s/it, loss=105] \n",
+ "epoch 1: 100%|██████████| 25/25 [02:17<00:00, 5.48s/it, loss=1.11e+4]\n",
" 0%| | 0/25 [00:00, ?it/s]"
]
},
@@ -953,7 +953,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 25/25 [03:40<00:00, 8.83s/it, loss=104] \n",
+ "epoch 1: 100%|██████████| 25/25 [02:50<00:00, 6.83s/it, loss=1.08e+4]\n",
" 0%| | 0/25 [00:00, ?it/s]"
]
},
@@ -968,8 +968,8 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "epoch 1: 100%|██████████| 25/25 [01:20<00:00, 3.23s/it, loss=120]\n",
- "valid: 100%|██████████| 7/7 [00:14<00:00, 2.06s/it, loss=109] \n"
+ "epoch 1: 100%|██████████| 25/25 [01:10<00:00, 2.83s/it, loss=1.45e+4]\n",
+ "valid: 100%|██████████| 7/7 [00:14<00:00, 2.00s/it, loss=1.19e+4]\n"
]
}
],
diff --git a/pytorch_widedeep/models/_warmup.py b/pytorch_widedeep/models/_warmup.py
index 5e858222..5b53fbb6 100644
--- a/pytorch_widedeep/models/_warmup.py
+++ b/pytorch_widedeep/models/_warmup.py
@@ -263,7 +263,7 @@ def _warm(
acc = self.metric(F.softmax(y_pred, dim=1), y)
t.set_postfix(metrics=acc, loss=avg_loss)
else:
- t.set_postfix(loss=np.sqrt(avg_loss))
+ t.set_postfix(loss=avg_loss)
def _steps_up_down(self, steps: int, n_epochs: int = 1) -> Tuple[int, int]:
r"""
From 1f22d9ed7c1b6240a5fd62c880e77ddc2d6b316d Mon Sep 17 00:00:00 2001
From: jrzaurin
Date: Sat, 8 Aug 2020 16:24:34 +0100
Subject: [PATCH 07/12] Adjusted README and figures to the new Wide
Implementation
---
README.md | 37 ++++++++++++++------------------
docs/figures/architecture_1.png | Bin 110127 -> 109210 bytes
docs/figures/architecture_2.png | Bin 123977 -> 123064 bytes
pypi_README.md | 18 ++++++----------
4 files changed, 22 insertions(+), 33 deletions(-)
diff --git a/README.md b/README.md
index f97b128f..9fd2cbe0 100644
--- a/README.md
+++ b/README.md
@@ -5,11 +5,13 @@
[![Build Status](https://travis-ci.org/jrzaurin/pytorch-widedeep.svg?branch=master)](https://travis-ci.org/jrzaurin/pytorch-widedeep)
[![Documentation Status](https://readthedocs.org/projects/pytorch-widedeep/badge/?version=latest)](https://pytorch-widedeep.readthedocs.io/en/latest/?badge=latest)
-[![Python 3.6 3.7
-3.8](https://img.shields.io/badge/python-3.6%20%203.7%203.8-blue.svg
-)](https://www.python.org/)
[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://github.com/jrzaurin/pytorch-widedeep/graphs/commit-activity)
+Platform | Version Support
+---------|:---------------
+OSX | [![Python 3.6 3.7](https://img.shields.io/badge/python-3.6%20%203.7-blue.svg)](https://www.python.org/)
+Linux | [![Python 3.6 3.7 3.8](https://img.shields.io/badge/python-3.6%20%203.7%203.8-blue.svg)](https://www.python.org/)
+
# pytorch-widedeep
A flexible package to combine tabular data with text and images using wide and
@@ -38,11 +40,11 @@ few lines of code.
-Architecture 1 combines the `Wide`, one-hot encoded features with the outputs
-from the `DeepDense`, `DeepText` and `DeepImage` components connected to a
-final output neuron or neurons, depending on whether we are performing a
-binary classification or regression, or a multi-class classification. The
-components within the faded-pink rectangles are concatenated.
+Architecture 1 combines the `Wide`, Linear model with the outputs from the
+`DeepDense`, `DeepText` and `DeepImage` components connected to a final output
+neuron or neurons, depending on whether we are performing a binary
+classification or regression, or a multi-class classification. The components
+within the faded-pink rectangles are concatenated.
In math terms, and following the notation in the
[paper](https://arxiv.org/abs/1606.07792), Architecture 1 can be formulated
@@ -69,10 +71,10 @@ otherwise".*
-Architecture 2 combines the `Wide` one-hot encoded features with the Deep
-components of the model connected to the output neuron(s), after the different
-Deep components have been themselves combined through a FC-Head (that I refer
-as `deephead`).
+Architecture 2 combines the `Wide`, Linear model with the Deep components of
+the model connected to the output neuron(s), after the different Deep
+components have been themselves combined through a FC-Head (that I refer as
+`deephead`).
In math terms, and following the notation in the
[paper](https://arxiv.org/abs/1606.07792), Architecture 2 can be formulated
@@ -88,7 +90,8 @@ and `DeepImage` are optional. `pytorch-widedeep` includes standard text (stack
of LSTMs) and image (pre-trained ResNets or stack of CNNs) models. However,
the user can use any custom model as long as it has an attribute called
`output_dim` with the size of the last layer of activations, so that
-`WideDeep` can be constructed. See the examples folder for more information.
+`WideDeep` can be constructed. See the examples folder or the docs for more
+information.
### Installation
@@ -116,14 +119,6 @@ cd pytorch-widedeep
pip install -e .
```
-### Examples
-
-There are a number of notebooks in the `examples` folder plus some additional
-files. These notebooks cover most of the utilities of this package and can
-also act as documentation. In the case that github does not render the
-notebooks, or it renders them missing some parts, they are saved as markdown
-files in the `docs` folder.
-
### Quick start
Binary classification with the [adult
diff --git a/docs/figures/architecture_1.png b/docs/figures/architecture_1.png
index a9fb25df7ae99cbcbf5190feea201a37dc2b8ece..5ffa7c98ff5e1a06784d3145dce2d2c031aefd2a 100644
GIT binary patch
literal 109210
zcmeFZWmuHm_cjcO2uevQC@Cc=-5t`1NS8{tGIS#f0xI1pNJ)2t2#Dm+T~b30-TCgh
zZ{WU<|L^xa|1a;S_joxDnDLsq_O)Z}bDis4Ya8%P=_%H2lG{i~NLaEmlFyNlZsH*!
zp=_h0fh!r}^z}$cn1<#O63=8MBq*NQ*_xPJ86zRd1Vl#P(hS$Q=e@zhF01MD#OQsU
zWGJzyG&U|{B
zm(BO+jS?y=&5(>MFKTM5Z(D!WwNpDfh8=nldVH0kL_(7FPk8(SeVeiZ$vYa|Xj>fV
z5|c$FTJBH^h`SNbPlk-fCLeX4$6s{by>R#3N6*5L=Hl`PKa00_*6rWLCZa
z61`gPCxz$ZX(lK)?+xKRp{&6;`l=GrMd2^M^E#F5xRXhi7l(aLcmwyUIsa)VLTDsiDr)gl(e5cOR!pE>Z=B+I>SA
z{b(g&5lS8^@%!yOW?#UB=sK0qZ^%4p;8)H8B(A6M-Xi)33n?mQRQ!Zp(sz1Z+hbIt
zJXe&C>Bvua?e7UE|ahl2c
zN>7t@pNMW2GvPzcPoakabbnB4k&=U1TX|+{w$N&^8s1O0oNNZ8@;82(5#M~Yi5`wI
z`abskb8(tibbl!RP}jy+V-$RpqzYdW-Su%YWMK2>^{-5*jI9hF5nrR8qT3Ibp~erp
z^Vpj}em4BJZ2C8>Z*|`czXg4>>3dL2G#a+1giV(jnm&(bN8^HXM1Mqebl1f@RlX?g
zQ04Vw@ph|rxpr(hjs=wTcy`gjH2!BdvH}Jku25MJmL<7{xdt7J%gc9YcW79KMM+wI
zRgWCP(atPex2Q{B?oTk+)#g`})e#$)viv%kQkT-dyubU>m834D{++(Oh**P!S6WvO
zSz1>=kp*i{{*X;ieb(6v|5U@wrry{L)m+tVh$!rxQUQz9uzqw-$8chSPM(|0L7Ytk
znS^bEey)D~lwF;WtLZM;YWOaEuW9$gvHWhuYI08l^v8W}OcseBnw=w!ylqR|6ePU&
zH!nvN}0_^Dv0s2+s!%o+O@CLDWZ}7Lgk%`
zaoT?HaTg{Rggb~Q-mrXftyZg{>VwPss!n&`Ba9vWI~h`iQeRrMgVKnEqA)u|M`zT(Nx}6tYz76(Qj&L
zF3{_kZ=DFYbtDidPaLtSn|xd|ypO&Ac;8^0XH3NLiQ^0(fxrOYgxxm*G(IBUAzpn)
zD*mEGyF_jQ2i~-9{qEN=3)mf&te}dZX5$ihnAuOW>E6CxuC#)xK3%?iseFfgr`9K}
zlEIIO`*|0aHOGt04K;#=xc1ZHqji$Tu`A-|c3TziYq00LOhBAWoqXKNPK+b90*VXS^>>ba*=a>)j57xJe_S!Z_=OLf1e|#}yHBH{9IrKYZ
z*%Vqk8G?=%^w>5`8=U8#SDm|^k_S88XC-McJUv5NMe_Vn?wv2H{!)6T!beX`UFN>w-4&^h6|gwTZ^aGRUI{%GuVGDjl31
zo%Ngz-fJ``e2kJ2H+nI@^4n4nlH{$?u^KT+e?c9OU)Sg
z!wR*8hW8$QHve?9sCI;_sABHn?Cn|4B*_EekU3?VCwLijYqHLf_rogL
zw@kte!`QRAwCMPRs<@9F-Gh^
zeY-iTq#50Dt|oEIrM2cyGgcK~$|50Rzh&A+!ncyhI|}|Bm~ON481G|LzT108_w(f+
z?N`C{qAw_2BM3v(LiczDDBKhsgL!3iAFoHgp?N?9wWETRFV=Q{?wX7gh=~rXU`oeC#cg<*u-9evxsP{o>K!z^0&vcwgwiSyeo>ec8Ms0t2sP1*m1m+UL8S6A`w4`)t^c)-*SS1gRNm@SZDP}7s`qMtNu->;7KHP~(b|Nk3*Y#-L{N0{5
zkS6~Hn~YI-Xqw7$St!Zl)N^%rD4vcsxpIO#JMYYBpsl~*{e|5u0c@bCA2|~zc}oDY
z#B^dc$F~8&g4K1CpAN1L`@*N-SohpnI2{fhIi0?22)eQ@&2GtmjrOeStZ`qL>x}Hq
zk<}dAO)J+t^z=e>?`g@die6kCnzPEG&DNp)?_mkF#ow{!iU
zb@2x`50V9+2|7>L%~PJeT6om)v@Cd%oO~T)WBJgqoqST+NEyvl{$k(+(y!W@DRM@5
zI($^$<}nGKhoU93dcE3yJuDB6&O3X1=A^&SK;o>o|7*T$y<)8*b1J>ojq}Nl(6Yzi
ztxTLQ@-*Qol1}o65nBtVUzL$Dxj7l{c*AOtQZj$q`eQpQ#H0%EU}2;RXJX$yRL&K?
zhaJF!RPzp5MjKs}^Hh2iC7#rXWKAu3C7=;$Z}Yx6iHea+WsXPhOo0EU$(zj&d`RC+
zkc5BVx^;^i2iw!z^u-z)67$WKk_@?zn^v1}O+p8Ik(&mA#}ALSz8LI_68g{`7GK<}
zoBY`5UYhh4L>+;~nzAN}ibxFLJvtI9G6~X6@D3UL3L`(belLwok96a&>nKP_{^m%i
zm)}tWuZW)z@Qb+T@7Eh)en_{#zd&q*g43a;>5vXYiG*J#>dCU`h=a8ot+tc
zgBjvtWBF?KR){Bk$>N+
z1~Imiu(bwv+6(_P>z6nG`@{cv<6qCz`sbNET%7-Ysa#1FCy_(T8qE8_i37^&w?2P7m>Bw0zZ7tY9Q
zlec0B)b=<3NZb#j_NS+iiOQl1^Q8`bNFzgl`{d$?-&pgVb|RaD>oy(ltkn7qTwYmYN`?i7SePjQXE{
ze00#MAB8yIA;qTjMnXZS_@95g?^1;PzK8tZZbe3YO-K=PpYbl;f86zVOFokIH~zCl
z#AmoUNIuP)0lfc*cG0P~N&gSs)x3#L{Y$PH}1*
z7ki$$@;Ps&L@=nywMRUD^ih9H%Uce(imDw^oTs
zq|>yAhU@OqUAU&Z(?i}jqWO9?CUI>Ed?iZ--}IFdcqucLles5lV%gK@+rsIxYX}?<
z3RKekn6!$U1rKUncILNQ$s3q&oUv3`j}Phg*Erc8LA}l)I>Qk5%jJKc0Zg~Yy+Dxg
z@&a|3w~4jT(R}#!xNSp=Ybz16_V3Lw4fYzfZIC)`$6)GK;}5sUV_QQ#W|o@HelEbEn;eL;bj>3&MpV2xpL_43&%x}P&>!us4tMoiPuJ_MH(Zp&&v=TGxWR
zkEA~mIk}we4KEZloU^yC4Cc^%e!#=M%1@p)rQs`9{Y^Ppf2_jR_O@APqJRds%>@0;
z+XNxE2^nIJcG!)6zwCxwaCn`&kCvDQj}C}Gz#`#%gA)r?jOT7^k7Vqs7KRMx>vbi#
zH=J$rRlLDz8tA0(qHT3BLhwj~{y
zOBjc#5>y^u-K~->;l^V%^78idQjdkluczc5FYNQ>pi<-dg)e<3(}|tU`@R*8!=T9(
zH|6a!(E_>p2b>zPt%uK7YTxmbe=TRTo0l2?QlAONI2Lnpx&t-&Sm;UqT|JLsp*^w$
zR>hd3Q4p2vdE92T*b%L~TW2AvJsmGREOanoSg))sv={MBF@C77(;b_HQ%2u?OR6N_
z+EOPesM~jNpl>b2;TdQ(S#Lwb@2{cL-oTWVt{xa0vgd<`FT^`Dd
z=UYe3UlWFUR<0@`N72*m^d`-rzV9r@-z_ystS)qryh7z_5C&
z@fP{WoRT;6;YMG2&?KxJew2MyH8_h}xwfa2^2B2ByP8V3dzfSLcoc_IGT-8Olq$18
zI|iq;fI&e=l8{@eWx|h`;VPxVp&VH{??s0R470eo?t}>alf)E*2z;dSlYue4-4O{K
zA*0|b=#1ye!7PB3_2tEW;xAUv0)qz6fy%dYsR>BQlQ8{N)JJIvD>M4T+?GR~Ntx3s
z&=w3yX+EE)ZWGq6X9F8P!ttgjOMmeLVmEoeZIR*6=>l*|Mh1RPcWg^ENSk0sHhq=yZc&eF7r0K|V
z@OT(UwHPa1Jap9pi^>&EHnEB!%?{RWee!jfFZP{E&^^}j;UmV7C8KSZ$yO}t^W)Wm
z#Uz{LJyqY7`o`Mrg^u`x1;j(B7ti3ocKk0~gY*^4sNLMmQ23fqZ$NvlM3VM?P^lrMr1lcYx9EwrMM=^2#
zcn;i&rp`o-Q-P3yFCR%t?}E~efORY(Ut^^~6Q_!!-AFII2oLO#=a{ST$qBaBNsUnq_pAQ((wiStOBRd_P9@<^);4
z=x!ARw#KG@1=y#k0I;V8U;$TqFbUx^iy8^BYL_Th+^jO
zKO>YG_>nN4;buVEx3(`UVhT-ylNBd-KlI;EI6c}KN0^|gzNDLihP!`TjhI+h`)+L}
z$Mm;4CExg@R?eC1X(6XxicHs)g1Oe8fv9b_%r)Fug6xrva>Yc|t`~v7hei`C*e7J{VBW-27ZO@OT@~I!TIjB@ZG{zxC?@t*ROajmN!dZZ6G#+%qF^&+gMg
z%3rSCa}VF~M9OC()GDo%-z6@Y{DndW1rYxRT6O8!E|lkEBBaNw!v3E8YMzP}Hz-36
zVaZ;ucyZzsNgfUil<{-?5u*?TaR%T?Az;0LR+#kNEcqX@AJ39(>zz6*^%n6R~_!ZJ=$JK
z^4EI$H#ziqf)9AT32Q>D)FoTIKzAnk+C%XP?X6AO(y2?5*aN4LY+H)Ahns(T;~uN$
z#?%c}*xBg9WCo57?eoPzmo$>j`Y5Za0hCw{eqV6YdNEz^0e7$&G&!L5>Iz-rYgQlM
z&x=|^!xNhRfY#3MwC>(vupVMQEt;xszOU;DA2Uu?`8@HOt^O>A&Cm~KY;tyXFkLac
za{7@+$Nqj%W}N`fS5pc$gL?N~md}qvmIQ{sp0Ef#sC3>+Z4)1)V%C1s0!fmF2ier@
zn5{U}69>eN{=Vi+|G6ldRd(qbU&C&ZyFXbO$X4aCRw+3xbuD_p*@wx6wX~EP`zfjM
z;WR_7HkNkKx60uua|)G$>B_f;rRSfCIewMr!78QXv@2|^4=mGPO)mh~R^A%UACe_y
zsGCeB6?o{#^~-wNJK@2E1FL4WeYUm6c<|#8Udth-1H6-EJ}a&10wVD(J>iQ}$auZF
z-B2o9^@KUaTliR6xe&ZSIJss?Fp#8l$RKI7WURpR?AS%^m*;vFY@#Ezv@*ytCz(Xj
zSnP7?HW49N{YDbq)ml2=wqG0RW&EAmDA|PqxgoCoN8LYg%3)YfA66A>zYgbm21B!K
zk7nIi=T3IbcT*6tC6w7s%z;)|lx)1kVgcyaU2c#niQ#68W#&5Nbg(|TAJe^%DWy0u
zS8{oiD=S!
zv51yWKPELk(1QX~dI4a$witI3Lc(ozKTn{{a(LfDqTp;PEm*-M&K9?iCq%Q@NY*V6
z4bB<z^_9Gw;CBrL40r+%d^~;+E70}~sXf|>_j_@Hx
z`%aQ;Cr`*Bp#H9UI_YweS@-mv8`ZVSdA78w`bAT@)0$!@&n>(--(*0K{`iwiM?O~l
zMaHO8o?EvH!jUvi-bMY)SYQ5cIyn+`sY&&)s`0&*!JzgKN?#$*8XrlIVJ^#|
z(%~Aot7nEx=ucErp%a2+(bqZZd245!l`AZ9$jjsRJc5%X*2@e)j2YYqnOrDpB`blu
zV7oX5?*v@ML^Oz)Xo^QU=<)&vMT(Z*M%{sa1d&wl%qPTNwz$k@?AV9
z&u)0R9jt%eekcxqAT#jl7ub10eO8jtlPRW|6zX>j1vDaH|SIj!r0DGtdR_5zMvfAsB`v^W~xUJ^;A8wlyrc4GxkavQ6on02Jj
zwmVo|H_O;hlPf}9yMW#|>XUN*`-!kJvaB!b=9P#m}X|hyKRPHoBsd~Lh
z7fHeSlC(QYKvLrz%FWv`CpA1hrXG8P8fk+5eKktQ3H%9XpnL1fzxp|1+)Jt`;BC2A
z5tJr6b$_P)IZERd`k_{m*=(~qgGP6{g!>c`SCfWQV+cnvjDQ}+eNB~8@I4O
za@1-Sn?lJlS9huD-q6!2CE%J0i@)_#4+@v1>)up*i{f97m3jXY-y*>TuBpD*+jmcA
zJ0H+1m46e0nK1g}kQ33N(*m(pBTQPk`RhbP&`76o|JBUJsB=
zmE_-kcbO*ol_KWb9Lv$Gy8|q#TsDGPzXbaVtu{!6KI+Rz`ZuuXm_?5_ERT!e*AIVl2!7;Ot3(TWDD>2fsL
zs+mzW8}&NHMr~Alki`i1qwO{zE77$C-HQ&Rk(ZmQb-6KHwS;;Zm{H&|AoRKg#qEbz
zrX-4sq1)i8H&kH2I_+_gn5$VNu3n(e-1>!dXBFS5D^3k?5LUo8qQ=TBJ3!pe1RUbQ
zs*oAx)$T&41FrqgZn=y4mAFZPaT6$i^gA$UGzh{vfP^yN9DpA%77viryeGw{)T~#0
zWhjpkNF=f#`^9m6-w05$&`utFySVP0_nlm=5^3qrBsAf)ieVu7y(wwSVcbq_JysgZ
zqE{_bWw(Hms%(#P-5Gn(8J}~%CGzVLzemw1&<74Z<2PNhI~9q{r;ad63GK&f48a@?
zf9jrp``GOgqqaRCV81A;z#1C5Ok+LvA`B>_{R0af8Vf%^bSSCMX_;cTl-O;~V3;bk
z7;di?_$Rs$;rxzbzO~R9d*^Lc9tL&&;cQUfcQr=Y*ljO(>$14g^;Z9>05Bw`?hV-$
zsNl-T_%LeZ|GsD7PI-2`7Y-=b^tL+5!(6qT_Z(iYWV2sn7ZNZ=b=X4guRQ!TUU9MB
z*-)uU#B!=tf8m*f!9Vp~0-<>EOgT&NJFd~+y?eKUKhp4e$}|zuT*q*?#HBQcgmQi_
z2B=uI$16F-5Loy92hb#;v{D;Au{a(t$?xjSD-}Oped0O2xW!pmX*G&{w31WC?y}8r
zv;*yD(|=z1<`waim)@KpQuzhpy;3~T-=~3?|Bd^qAjj22&ph=3l9>!>+!zoHwsP%;
zbKkzSr;6Dc8_1@IZvyW_P-C*;RT;|$KAV+6JFB}raNsMgKl=4wcdEu+MVccJoBSUi7h4_
zDA;9YdVrWk60e^w#@n)cz2+o!yz;-57+{(zc)EA4RUA*;9;W7f_;^RP!)j_-l%NYc
zP`LbR_tfz-HRYl}>u*~deOnhMolM(4-VItf-pex6V*(uU6S+vzTC*AZ+GPsE=MC^T
z_mynpO|QZ<)KiKSB3=i-huu0+1(6efHpwm*ffkP(qZEy#d&r$&~2yzTNz9>SXsm
zG3~PNGj8e0K9MO3y`3)=IX4$g4Wa7l$cur9%uLgyZ7}i$~Xq5E1kQ
zeg`?Q6qM6m7apZU+T~1i68?^%zkl!ce!02x1)r{)U&=_jt<>ByRY*EdyUgf#cR38~
zEwk1UpR0bo{|-#!nMaA>y=&8;^8wmvn_;jJEl_55EZ7v|(2v$iGQUA!7WyX=)`q)F
z>DvpG(dW_MS0j4q)g*Y06mR@d
zNCb1i{FzCo!k%Y8e-+DDQUMUK5j0jnF5oB~$4yGlsJ^!As@^4|TW%GLtFZg})r~8v
z@C+9VFdt??wz8RNG69m7R39{nA|>
z^unUDcoO=mTc645$~ZqFjI-!o``qOPB$O?}o}Ubn3~GVgbg%uf)rPP1%JU<NfBZXptM9dZkMc0%ZwFVf;OE2GD^6N#-L}by@ft*-uGx?da+PqKl
zcYCDAyw9!emXf{AO85Q@k1wyttDLt2J!XoHJNW$=X+Q@Y$r-PDCHmwJSlq>mr_)!n
zhb)4=3m?}2TWM}EOx<*SqAG4!m^<}OXJU;C8de-@iOF760KSIIaDG#r((~bpY0q&>
z@+;!(En0{G{2`vF4gYFyDP*xPUA_bG1ZzuXov84~s``cFxf8l6UKgX=LprMS#}0Wj
zx{92K-&-^?6m<+)WdKZCP1VLycB{X?4%$tC*)*Q+F9}?2WL#yRI4;Zf*_I${Mm{^E
zusogVXn+l1bWx#EPR~
zTd_h#3|cXsYNxgz8q*xKJl1Ucb%EIvFDm5x9
zsx{#&F&D|_B|?<5vi)@aFGq5p&+eQ3>QGmC1n?rG#pw;epO3E_F8KYJX`F8BU=5B2
z>77Y1l1P?B4U1O=TpD+!oJbnSfhZu
zbug{$x*qk3xWD~y3o6&-hvNbdHlK?%l?Hr*rx>pzk`Y!4*MswASdLz$UC~kFfDMSc
zJ=(o>H9Q$nWGo5`ieAWQ(VWC%UEWMsexW`&$KoxQ#n|4|rZ}N!Dk;16&e*zz^Am{6
zdi}yNAQg$P=DiO1pWX-{d1iE}=^>-OQv{e_SjTpJC1{{GOQZmuUCD3a$+-C0F-|+r
zwDYa%wZW>Fn~iKrfG9LgDDP=7S+{$H
z1hB_zxF`gr446?oQX*H|+sFB~ZmqM5-D2lsAW#%Dcf}3rkZ>^=Rx~**9B!5LRqs(~
zm73{rz8luJq~53-mMbmZOJpiBwGE+FL>rSw&}Se7L~)w;jd)zi5lASF2!3&Y3eQ9G
zii4d}JON6m`Pi~zaHAZi8R0NZq`}Mhm
zd2eMehT~I3+9Y;sqUCa2_r|5;oTRSfBhW^j@3{J&`&+L8b
zY5}>}ZnN}``iOSku?Jx%=6*9QOB{ujLm
zp0s59@%*|_#}ikx#xYMLeFi{d_p@>6PxcUp=X^WkNJ|!y}|7THrSiqw6OyizO{Ie)M1qLE6K)$k;R~A&!r&V4z
zu4pn|7&gEW*pbAJFz`H@Tnz=G;bY%hPEq-Kq!=5j?ZUsv*T@gurYf9O1%{@9N11t`iA-
z9BsSHtgrVZk}&CAU68Fed-t&lG7Ya15F4tn5u#6*J8LRWQPSi;o&FiHBjzw5bW`}*lB8Z20Ndh8k<&b!@8fWtL-06Mk*zKN8ly3
z!wR~S#yigy!2qY}Nr~=+s+SvS8??4lwf{ES5Cpr}9{*~4HE+OZ7rPS~`9BTmF=((*
zbwr&d*AN#cpX`%{n{)|>eSUD@B4ijy(6-=b@W_1fx1cAtiN?ufZ!x+AskNbbJrb9%PnRq=0oVgmLQGL7x=FMHYq
z#Rmd}0k_7ZgikkRmnQ}6OJ>k;&Xx2%U21jfT)Tuz@*@}=3OW-;b^h{FB5XWp*a@qs
z*mPq$!Uq$O?dgm2;eWgJVg%rN3hZKC0q&-+Xqxor+V3}%6OZ44qK|GI=3DCcK--Fh
zB|Uib=qXFUP!SO`StuZ(?Rrk>UmTg(oD`Dle;LVvl%B;{i7C{P=ja-iP_)53i8he&
z`(Mpdvq6g0XfdLyX}kw;v2X52;*J6Y>e%GM3#d;;q9Pe>J4LdfkxW|b%i|RvREw$t
zw3Js&_3Dm!vNel>;ePe;Al)$bFYpj00$cjGt$+Q$wlw{-6mJk~w3e4bU|!!|Ha>I|
z7||HY9i{1t_hM?e@a$VvVK-4=ocks8=rc*=)>v}HgN8Fl%|L=GHoDj6r$@%XMaz!T
ztbe}lHDbLJvU%@bO%uA34?@ZTxq8;|Bg0ldIU^(28q!Wi!&Vl2=9hNMeS`(A^uS%k
z;uL8X>FS?^yUsNSl!S3vDvy5#w5g*ld>P@xo<=zb+wa?Z7Ku5AWLv*T%uiIw>)TS=PBF0?
z{_5E7kvMRH<1}OWao?KN`Ko+*53UAi<89=u;5Ej`+;k7hLtQqUC>_7_7_#M7#I~-2
zJ&w<y
zT2Jq^rvQ^w;OXQ(z1R6^vD<-7P1iqg_ZtCsNAMn=tFHXOK{4)#iUj3bCtuTln9uG&
zYh){Ig3*-xXGg*ZbG*U>2~S2!_9k=nYKH!|i3J;VKHj!|OG4hKzj<$U?nWj@MQ`|W
z|L|{oWz1p?gkLa$CRzVqajRME<;GN<-E1>PTRcwzh@iQ~2oY&QOftcd^^46WI)q~5
z7$4b;f1meFKENn^I99UTI^WTT*M;T()-p&;J^w0x6Oy5BHsqZOD*!bG?V!NNT7I%-
zIO3~B4Ilw{$|+BSL)qOAU%LVPJbiY)U(qo7f8E}13Fv_kxHeJ6p=6pV7x~-|bD2t6
zNR3$u1_`@B{r+N^kx_lZSsAj}8G5|ehP{{kg!vL0M8!e$r68j>cFoE0WM>kcrb0oK
zCZOch&(I2k28R(mewR7+Hi3-khjKL_Arl@>I6$p^2VVv;tipcQzn$3+#OfX{^l)9R
zE=4{H;N4;3A8xno*&||!2>MXEZW!eC>CN&>r^x+nM+>M+12%5?4@Eq6?bqcGG=R`T
z148_y%)+Y@2XtzLU!%I-x^(IsicE<^~Q6U+IBFxn=XRuF`g}
zclYNk2z9kQP#)$%DNu2vIFDIYGN8B)%HaSFJ2o0Fl+dyXUtMJ|!{h*{J#T{;T=$*~
zgt2|cFWTh3gYLEFc&Oton%W>`j&p^WfXR0$JYYBah(Teo7+14YxR{ybDhJ}|M{!@5
zs$S%3svI9|u!4BKqUO<~{*GiXPg*d>iW5y)USzCO6XL{ZI0pbU>X!UOVZq
zCGS6tqk|of@l`JVdbY@K+LP6R0+e{9Ss)Qwb$}|)&;NH5V7hr5wjbG0LR4VlZ(GU5=LVyTV`=E4$M>O<#HeVdD<_xeYDWf&{T=|hVx^~
znhg`iIN9$z2F2H!j}L;9<4&P?T-QdsqkJ`ZYU%T7Er*p1tbP+CCRH5|mm$7Ckgp}NcqFgpS5U0qkC(I@6+YHBKC
zv(t|7IId~xym2}eKDlS@^FgJ_b*
zZT}OH*E7S|jc+^MOU9r8VJ%l%cxE(5#A&Iu2VkPKADy>mrLqkg>f1=lZW(I!il(9w
zjF*}}I+%1az%(uTr(?W`WiyrUxLP*R1k_UWK$F{^=SQ#%{sOm9v@e{HJTP-$L_|(b
zxygvahc@9|EN=5222h7(w=(c}{8z44UAjsdJ79npcRG{m8Zk()OR+4Y{`_h3r%{aK
zG?@k^UEgdbs}Xg(V-R}~l#p{usnaSYED@N;GPDMh^R#l^DFYI8s+e(?umguu9gE3z
z+$)2G=%cq>_;nwxQ2en;*&)OhenwTjjNlKC@*0NE5(yhL%1e7@9
z>U^=5Rx&pQAho3GbzonEi7-O{O=~T?o;M~GZE9msBxSO+{T>5f(*NFS>dr%p9vOd@
z)y)@OO!B!>h=|=j8q^o_6Mn6!jCVg)nE`?UA8|^MRS+mb6Me1F0cxth0VOo!=RPor
z*Zf`U!#kM@v9I3+GOFip-d3ux7;HCUSb1VLohn$4{7;BE4nj=l2bu|i*Njf>B|u$T
z=d4H3YaFrci>GIWanjtFliBV(
z6m-_TO|%9#!aOgSElQ~N?NqSbIsQ~Vb@fj$NaH-^yJ>G@^oWT5nJAD^L+M8|K_P1jC|&V<
zO!Cq`8jnUUhQbaExTWHLup4&r^_nPvMKTO~C2}2~-9k?ZX9bYUq4n%PZ
zs4>2wTTI<~W5H$C^?aZmnlxGj920wNAb+W_PPtXUDD(P<%f%2w94nDAcsO~Lga3wK
z>3&qqq+dINisRtPpO=*rq~1A5bx+%&%Dg`V@X+mVt42&tL&BDhai)t#m%^Z7Ran#8
zyIC5x-m03WB|>Fy{`@RvRQxusnSKwb7&(^=ZMeac(;7|pV&PKOoI@Y
z=4HJXGO7(_;j1PzViC{UPh>*c2$m1YK)b_LBJb<3%`3$SL+eP5^or-QTpXN~4Nrs?cw^Weq1Knt}5nDzPOWM2#n8O>k0I?OG6#+Ps
zB{@_OC$qTWsCVOgwI8y
z62?iIr(YD57AH>dh-KB|EbRo6L6M0AyO2Tnqo+!v9To#wltDz3F^s)r=pm7yGIYEh
z%8#k-FR_Y8OuUR5d7*Js1yXJ=Jh4L#qz4l;58?qsu}b(abj1;x$t(;ej_P?VfF
z`$)nVBtgrAwa?W3nGV)Q`(xz`Vf
zh%f)8AIhM{Fk0(U^4P}E32n0-z=`z&4AlFMBw2(~kRInY?4JC*(whG~2G$@rEuefg
zF~TSy2*M?u&6bFExW59zzfR&vjPh
zx`ODwkZZQ8f^p@r?8@=}xUC!fP83$f_Rcria3jEY%g2F5fbty~5bnV}uYiJL7m;Q=
z7g)r&8a|p22>m<0tK}$cZU(W|7q-v=oI@ZGfR4s6Q0Ca$BW|`fMD-BlaUy`Vjoy+A
zrxjPvUwec|ZLJ&9nD_u8oX^h#Ee$2Gr7=)98`WhAs*{DU|k1Gq$CqSuViErrcO
zP;6l0$?Haesc!!f0%k}u=uM^k`RS<9-ii~om6W0sTYfh!FRc^zC(11x
zGEOxMhp8sO>c{B1A{nDG$o7|dC2kYYzdZ1t3q;c>+b;bpH-|_ICFFLZLnm`DynN)N
z^JVh1&k{b>CF;`x8Ifa}u^^GM{HvHL7Olmz-imDqxTyFnv>i&h-{)~ELd@kfzksx5D!WE`hz?Qj-I!GbFpc%hq@mo@vMV!
ziHWh|RwTjWFgG~P!Dm1x1k^s?;)7UB-N(zPwcvH$oGF~`ZHI!A1uikEX|PGwwERU>
zkN9%y@dTL=&?x#5k+%eWHoiJOYASwLAVcjq@zkL>!;ji5gqqZ;*whm(@@)W9Oe(gS3d&>Gw
zyUd~ucnV3_DCV#fqsZz;9Z>P(0tfSb)je&-%k#2NbNkT?uab_Nh@@k^lXUQ$##jhc~mMIQTAzP#ov*G&kTr>
z?+b1OCSCFl@8BQnlQj$AAVk=!Ccie@hBK33ieLAN28o1=qSSY_8zbOQpk%u
z?ZL+s#jUHUi(TYzQ6!*#@Pq(@VC2S`DV!$bwGm6Z-mA6SzH;(75z0iIiZn
zI+Yk{MiCKkJS<}yvcEQJtNVlG(qW+{fH2A1jmG|}=*x!Y4Txo9h1|;9gmo+5d==VD
za$83*|2P*MVs3M-*|JnI$-fK@(u=~xVY-i9W6`ah2>gmq@9i9;&KMPY;chf6lHc;I
zb@K!U*$Ztg)xq96M_Rst+PMoLj9UBzwr-%h=TJ)OxqilW3C_dIpaT_DdNm_CW7uCE
z$h2X80~j=iX|g(D61R2SlAvd`S;`-f*+*oqoaMIX32s!C&BMtDU#6n{a_7JYDbGbC
z-+Dw&d}IFs)=x$NPjo=BTH~Gg{jI;XXKfhy23m|2|3Q&l6w_nY`omP>>UeHzZ6}^&
zp1W^=gL(EAfZ6^*c{LX}!-Gm#whj6IBHPA}tLBpTQxpMoqb=t>xTG9DyclL|K>)O3
zJDY&b1a^;S$_ElW{stJJ$x?B5k#q{+$Xoof9C+kzYiwJI9RIEb*s>7mVFiI~_FtK+
zH_!`|5gRXuq=uV!h-AP~5=oep{hgD_))J$(*X%}RfgBP^dXX_AT;t9_aWXmF0huG~
z$URVGgr7C2Ak`I;KBL^}_wa8OhT~Ye1)YY
zkdULX3Dkey-pQ(B2sp_0(Iu_U*_k+EL1a*Yv>*cnxoHWpI*-|At>sZjMEK1b+A)Y(
z)Z*@Pzao+f4yc=&d~qk(F||a(e@%F+8J||Z2oz-T2z>AmM!ALk@X1Z>(q|ouUGZ?0
z#GvPQN|$i%#{B$IT==(_5={mBPl40I=KUpwVZ#8K)8<%+ZN}rc?S5sE(S9&M7$%)H
z3#b^1KG|rPUN@zAdlMvG8Gz&-hM>=wT!XXR_3~dH#?FT;^ULI@6T0KvT-c9zoRjcl
z8c_@46p&tpQEDE(ED7R7xNfA7>n^T`f4w|{ZO!~}e4eZMMxAx4{w!p7
zU8pg(U_+WH;pEeDUpj}_4^Ws>Q?r)o8NE7~V_IRT?Q*mby#VALBVlNa_v_Jy*Dbeq
z>INu#k|L9V^D}!l0jg7|4Ecy8u>W3hTyj7{wtDBZFFRbx3<}UKC`A2PEd6>xFxR;k
zH>#EkIA<2DnM$B}o3EJ7SZP@z7rEC%~0+$nbMOQtjbc&(k;SWyV
z0L<~aP6z}zK^uR(Cxk)bv-V`2iJ0wm5&poJz@b)92@sd!)SD`W$iITKu85+&akOR+
z<-U!|YDbVMKENG!qDYBM=8{nNos8>R17N`SLWohs^moV~Y;73c)ilcjeMNzN?TS@4
zV3_sD`uab5`XvIG@wg+o>k=itAP}-jFn*5Q$fd6K?)JzU^*8X?i!=*aH0el{3R3tn
zER+EQXK&n0WHx|fo1EO|prZOkq|M5T{c5A;
z#M}%H+Zl#uXAMpsAxqU}n_s4}C_{cnUuCrLBt?U0%$}|6Kt&F|@8`7O*_&l`yTyoj
zougLqeEy{<${`8DY1_qh=?#
zpLpYk$lslF33xlRGHA(eJ4I_6vrQE%z~edth5MScVH2}UzHS${heAXQod4WB0`b+P
z;z>3_p~7eG`59J-qJ887j+)SBfm%3r9gGUr2&BvZ3*bnrM5=7&Ghj37c_fj^`1@Qv
z{C_!ebVLH}PjpAx-;O+F1ckpA@>jg`jKeoI^_)kN6F5>@4jI^>+kEI#yc(T}4I&iZ
zTR7Z+f&Tl0L&+V!OpMoBC|amT>zu3_R2v*C&YQ%z@Mt0m>w0f`}Hr$wg8HaBHNX
zDMqti3}%d1FF4B&lfl`zh>KI0
z2<@CB*l^Fmsb;r?$oGT**jG1AosyL6(*E*7;Ecq}pN{V?&xE40ft^CRFa9@ALPo{Y
z5%s@op=Ll@4T2=2D*NTI<##6e%euzeoKiQ~i?Hq}u6aTl9-siJ;>~yF3U*?Do_*JX
zy8SWBz6LL^jYOW&zKteO?D)!00FdMpTGNRULgLt|wmwG6GRx=)xyaiBH?{4KmpNNnqvd}{(~AOF_!b&uC=9NYko_0El1^pyNI>XG+6NX=bbo-VjE(R3Xe
zbf2<-o9sBNJUb#5TKOvoDINIWE1YEb?0d|^J8wNVufD@0Xc&~gMCo<2WD$5*G1a>7
zsW;hUK@&ythpwwdn9NNB?Bg72q(O>*Jx4VYdWiMEVfM-jEZ*Mb$%BTFyKgi8hZOX`
z1MR2=$Z}7!vOaa!hSkiA$eJ6@C=5iFLlnI0xx#!6B|EUvDf%w@%O5Do<%WHD=cN;%
zaTB0$yjB9wcdKu={-NO#1z7$x84qyJbK`YKIf;
z|EZepnMKP(G<~1-26W5o9)fWwM|3Y3l;v*`Gz05NPaFm_GBMb7%Ii(DBLCh^gmC6?
z=oRWqWmJr!mQF6ks?r|qFBo+BEo#cY_nvn>Hd_Lkx4#9VXF+p{#HG;hy#3x&e%rrz
zJy#woiT?8+OEo%lyAF(BvlRiN^<$>;5qrP)4B0D;FW@=OxQM1;a!JhHH{aRaxN)1|
zaRau>bs})f&ABNU^Hc7(G5a9@2?;|Jar*pS7N&(r%R`*Z^$NC7&f3iUzoG@56F}-C
zVWir@K~$Vjc77%i*vSvHd!dkH;DNoP;Qg9B;F9OMfQ1{myT13VMRXb|l81
z6GZ(*WWy$);+%$DAg*o>=;1Ggf#T0zGilZAvhzS{rEM_^CbPGKgy9#pK#Eby^gdlc>FQ$-#P)3=ASO_Dt-675<8I_t{UQ7BRZwf$)yDW
zR9A8eVzXw?Q`uZw#cBX2IY8jPxN-*^>9jSSo&$avioQ8eWyj07+4U3MfQbt8d8b6q
zdl4WmrcCTZ6o!&*JybR%Xx}^U6S4eipDIB9f2;n#dpAqT9Bxy>Ic7Sea-2olx>v;X
zbqy<
zgwY{1t>m`{+$AIxE@RF=4F|l{6ryMx38#QfAhZUOkdv;f_R0Or#FGf4kRD+EPh7#D
zK89HI?Yf@WM85E{8+JIGWS9P{Htd=f$&b_r_cJ{7q`M_7DxK4|2H4-HOt-~LgIz8o
zN#N4UC)KJIXU44ilxi3qVw(`h`@F)t#B8ac}+_+#cb3y?KUq{2-;Jz(U$-F$b
zhZnbGMh7Ggbr>uT(Wj==`~GuqLWkNx+*_ZmJHRUX&)#5oB$DE7Twp8=|H%IMS~hpE
z-+0B=`;#v1&~5D^u90D9{Hsp~;#Qxx77)WCSH@?R4z^-e=fBH2$~Ia25FQ8MqjdA#
z-nyAwMy~NumyxPcjKCM@DN}O!A*}mvY>gJO_WWzrQvdcfhAXpGKw`ypAkP5dvF;}!
zU~Ak4yiq#X4pwV!Cv-VYCBub&8&K|hxXC4nMt6a*>Ds-Q(t6guhb7;v(wS&AQlv!#
zA9cj>eX;Q`OYrt~6Ahx|wwq6;85-m=WG=h;JgR0uWG5Uy!jFf@Zv68j;E2)d3uB^J
zu6shuZMHh`h&uUoh)lr5B}Z1w`zbc-kGHY;OC;xA+4BN(>__l2EUJ?A@%n*mF`aR+
zn~QWJb-gozwhN-a{NHOVh)*>YHQG`N-0RUQNX!6%<7!}}kzc*VrJlnmsVRU^Y23ZO
z|7vuwGeQSQmrq!VZp(E~(7ME=1XSHZ(B20)ZuebaCkk#3uq2psQ4Li-uW3YUW)SMqcsTS=&7kK*zk#*S{q!-1
z#x$hthHm%gK##nJ5<#GybTMidT9$RWXFxB}f=z$DC7hag#H{8g74e=zjq8L8tsz?r$rl1VC`lV)1d@O2&NyZcFRpJu?oVx^<1cs5(E_)
zk>S9O%>n4-^^U1>gUL8F^CUrMUFyWVtoeHV&isl($X_Db%kTW^_*=3reL<}D-q197
zq%h2#Hw!Hxcu73eV{-+Fn_2-SsT%|BXnM&4X`op!pGm&{Ybh9i45!egqgnZ1T%fTU
zA1b{n=Jr!3!55657rzmwJ$VPyW9&v%yM?YQmXn=ju$WO0Os>%1T=r=+p
zBpvy8WfXoK%)oZD`3$=KxJgi2b&H{!14nuCtb}TkB)NLfF(I-Rb$poEpIEz7XY)VX
zO3pUWn4|V~Hj*HFf2U9%WdF5G#9lR;p>)4M_4^xq6e&IQYVv+7x646B{p{|G7`}-9
z-us{X0fTgqW?ggq=jSC{Mwg%e1DHtnXBH{8_o>X)B4mP4hgg4Br|hs_iiD%BV?;BV
z7rBE0y>L;sy78#r&Mnv-F&h{sk_Q2#0g?eZsvidnO*tV?kf+pt>JO&g1bw56>Zp&P
z5~J`H80lh8Da0UP#AoBIS8%ukY((J>=H8)*{-J@224JQT#xVgQ$>1gb{R@=ot)B`=
z2?k>5=XrY)!Z&JZ=$Lf)Jg9Jg{Yh3SoD7bHwYx+|=FGc|{Z{)m)MqLSVJL;q!QKWm
z9=#;`!~I=LfOyZ+;r$ZuLM>&>Zm^ehE*ONh&4AA*zozZq=~wx`bR6IoAgn+xncYS%9C19RXa*tsv!L5;KlW-0DAy
z1jZ`FAh)seY~yg9h=p9&=S;!;`H;B==r?mY1HWt6awMEbhOV*wR$9D_T};A*{ZF^z
z4^*NzLmT5GrbLHxTfc{rDVgl`e$%J|SrytO_{2Y~^*iR7k(Q~j#Q<|N=~`U(=@1Af>YNpOYEBUJKOitjICD%mrW?Y3va7%WmC
zCTtN}ZkhMw+wd*I=8Z5W8srd$xBW(`?*mM-g08kEG`+{Hd}2oPVrL^KQgL|l0mFTK
zqUyh9Ieh{m?t=c*J^_DlJpM~*oy~iSnpWOvtiEnmY2-3ilZ=>>Zr0pkJMELTApmZg
z$+P9mL?}3lS%{9yV_m2^#;MBiG2T5Fa
z+MY2)|1d87pZo&E2>0xJuR`-zSYgoAcRlkA5er#_J!h^dq#e5Hp~HZt(Z)wP1B-kK
zDIW5nPXUzjjA|)bawVeqXNG(Znf8Eqd4imFH4?FA`lJmkvrg{
zCu3s1NQRz7;=_Ju`9H9fjgq})6co0N6jDF=Jyf<4#Fy2%9rN<-e2t)5Mta+^Yls+>
zs)CVa%Ra7d{^myF2=s3fMy_u^CPL`X@EgjryFmA!16;X$R1HWgQL~z(f`tx*Rfp?+r&J68zB}P#Ky=f0MkJ*
ztcq&`hIe`uHiq^Jaj|r$D^99W&JK$g!nnnfOfS}kG*so1-iJ8KVPIfj*JIqxbXB<_
z110-B%_s^5V
zI+H{zJ!^q^>Q;((A8iH&`S7}rP=HN<#;kc_(y#PT@N)Oc7AF82pfs77vwn70p78?-Gp<7s_IVK(IG;-2zTd-!8{o?bP_%G@V~Se1Fk9J~0d0g2>O66+
zCxJI6e@f@4#8^}?Lm$XE8QJ^Gu<;E2XGLE>r`;CrQ-sJ9i$osUv8Qr}>IGL^K#ySV
zCA~L%g#BhDWWV5rP{UJD;JdSa=*f}EjL(Z2VIVM(tg`*SlC`n9^aET#M=6-FKdM>C
zl!%3oU3z=>mMOqc`Q>)bWhn6$?M*j*DdXoNAX{DCeO!0@_ZHs|`Y-2y3sEdVjKH;+c
zk|eJhM-@6uurZ+JNgy)tAb1f-=G2C6K+5t|We@Rz>Svf7rUCV|
zHp3Z{e?1Xoda(YDTcfj*2u#BZp-!A0=$O!mW@5iGJYWH%a2#GA2QQldoaH;7D-rkx
zsN#3vMVr?j?BDkW#j%UU3nDPp(Ei8L9Qq+>#|IBrKiYL4hW&kzTc}MaKQ%dTBZ$-p
z)IYLs|V5(W`I&8sr<1y4P!zUf~cdA%REWCW&FU;=mS_2Va>;%B`Vt
zN5cf)yhLtOEEx|0_QnT*lU(5av746@MzsVsyg~!ZVlQU0){E~x@AUcQ=T~|&OelG7pMGHqL?n-9sKtm__}CN8r0-&d8Z>0gsw(E#@E5iQ^}cFw;IT$Zl+g9?5V9
zgMM{!mh#Jy$1T3#vU`Hs1Mwvo9{>O!VVZyzm}|GvlLSpfhFA&vJaG;F2Rl~3J{Kfy
z&CmKoKyj~|o$hLet|b}y(0jrNTDI#HW?>yNImS#nn-F+}bw#q@d}1H_VfL#ca{?5A
zrh+6Z2wf$TNe&^`(=_yg!hn3adi+Hd{*GS5G1!|LC#0N(;CNq4B$+ciHuunDVf4Gv
zMh{4@9d_L?anRcH$!JJg1I(!tpyknt1!eLgu#=7%Rs{-w4%$HV6I73+D+Qp7@VT1z
zkn-f<_$=|^24EpUiVt{2iI6cO_kT27xEHBXvflmmQJrc1*ULrbE|hGsJq*0ly}snX
z_s0r4(_Bbae)Mf{(Nx`;j_l~N1h5Ja=)I9U*n%u*)rnnMgzc*iobInD8+u`(B?wH?B_il2&Qgr{^Ee13S#EtdU!rj6Z
z!QWuY`7Iv_b<)9s{`3sqH)s`No{9y$^}KgB>4ayJfN3tG>4rKuW(R4xTiv5kqIB?I}10VL81A
z<@u3a(Fa}ABhXK!vv@(gWRG^a>Bfi0MKY*s1Kqo@SBCG?Tbe_)-d
zK5I-@)0mC}F_i%PY6s$vSTxuxD#c-SP$|jMzT58oAXUbyi!*uvP<+;LVPK(r9~a6d
zM*7L@c`;ly8unt)(yM-ZyK00!o_<(GdB@%$Nm~ty(
zkT|!tKn^1T&Ll_1$S;TghkpVNpj6-PjemStwM_CY!pN?aLR7cBd8utkodEQA90ucT
ze(-Kq!Uo0-Ksau>l4d0Ch*+G(+En(Ez3X$Ry(U5!r$B#`RLblZay@3m-F<(&XrH$4
zMlaxiUKOV+APms0F(zj0&FE?)`SyPf=ZN5N-QV?w&aNWnn%!=xu_qHA_pYW1)dBlx
zH+u&TB0nmY&m|}o9UpMq=3gbC!6M-hyZZaHU~G+sm5L6C7zI2R>Yyt1UTi#yINfxZ
zscjdepR#E{?CF~U#Yv-Jw~$ypjRSG}PUy6hA!%RV!
z{xdQ_Eg#YmG>iy?)+H!xr;^G7f@9
zc{rfoV-dS6#Q#>R+o?3j`}t2)$(3zhbO&l`0izURP*7lhV3`2Gl4d%|f%
z+vgEYX+t;@8bfR7%@K1Rk!FVC9m(hGUl7m=H}`oO7()aRU>~ARTrM`RehnR|rjVAe
zirqDQp~L);PTq}%Y}WNc8s`!KD!{|V?E%C`;xidXXb8cKedl|xEP(hj6AFo2p(3J}OYq>H
z)oFWe*DU}_jredFFte1=Js-)@D9v%GNgsnNXXx6`-z*X6;Ulxt!KNmmlqY$hpp+Ok
zJ#o1ZNY^Kr#i|Jbq$&_mKQ!HZsfN4IoSt#rySq3-hI18!*j)K>g6S|cOAvRP_xn5j
z?wbbp+c3Kd%;JcHOgw_7ItlrJqCfgv+i`Ss0~{F<5NTuK_eU_);)Z${IrB;GZOjWJ
zk_-5KEXw2E>V3B2YqUq$2x|0wS^HOV)cw67F&WTKW+V{hA?azYyH6Ae
z2@PljYy(M1GzlxGvjC99?zIW@eB*9?t4NGy7AL)2{OdT1z`Y1uO=`ef(y6v+W=dd5O?dj3o7mOtN?`pZifq7$;B85
zif>@;OZ@ia<>6i+hal+2MhS2y!=7W^TRQ%WYUS3EAUpEvvx4^l{u6TISFvWLw(5o>
zli*lSg6VGGC!LP7$5x?aJ)in1y}4`*bBC^j-y5+Gw){nVs^S9Bhq?KwqVK(OBuP8<
zXOlpIJe$@J1ezubEPuq0E(_#DWKv7to(%4Ka;Etj8{po7^&P}f0)wc|LzAD1vOUnPu7a&az
z*oXOVH%!r?ebB2LhOpdfM^=#bd4L~Uz-jsP*wPXjuzd^pepYMu`%C
z?lii*@9F+&O|Z0VndXxAspon^O>)8rJX9~BsfXjVtd1xWhm}ApT@~l+MoQ8~j!gQ;
zAl6+E4BJk!IrZg5)PCzmzXqG6YLeNT8|n?5mo54BC@vE~tU{)4c0IjvrJVz;JjCBj
zWJ3dvIS`dX)%OC|2g9X#fjm4y3MNYq*Ud$nWC_Q3i~#)DmvjIg$HkMfN$H_E7^!fP
z{1^yZ;euC`{1Id;oJ7f%-+|ID8yvD@@5U@mYup_{K_4o8Hft3CMm5|%>If-C?4afS
zZuFESt${cVfJRaRqS7jEjvwhrkt`^*th)jZwL`*yj4L@{fA0oJNXu|)BYPtLh=6+q
z+9tmyXS;)(^6qUaQljD<5VquR!LuL8D;Xhq$Rfi5GfBlw4NjhQDq%!s{!0A0FrzRf
zT8YrFyBwLH(6gtdy7xbi`PcLSZX|QaiU0vVb!$A5QB(xR$w;V51wB7lr6uIhAM$^f}jA?!e6!{u6Gzl%_Dfmfi3i5*sBI$cyf^O)sUjKp=aY70KTpr
zhKey#)?Gcc1n%9n4jNjc0dM*P@VkDB?vb&myXOJ_LgntZr#Lc=0(Ti{QtM33M*PFD
zH{F{Sfb0MTHXS;{Ft)STVJ@Ip!c3*h*r-voF6>B_D?jMa*eY5A;DqL`2oDJ2zXtLX
zAY}a1OPM%}_D(JGga@*N0Y
z@ECxpPwobnYA8j;vKtsy+KG;T7A5Kp!N($&f@Ma{4Qf2jhXh0|93;Rq>V_50-*2!3
ze&hgy?krP&fQO&E%V`dZP;@P*rX48&`jRlH
zbuSn%Pg3q0Y|J*u=0Fc*uJNlY>T%&-DR*R0XE4aH4<4N-*01TQPRU-=zdxH5*7Zoz
z$ghH{a93%)2krdg_k$n$m^k6{>uIkO3WqCWEi?tLyx~)Md@ZTYDe*jcA{l$4FK>3M
zi1zyPLUwI|y@MG~=}*4)-ebvs(gvS;`pvA^*v*ejk1V8ZRL|`9l*JNZxK0xvi@;Dk
z4dW!H!4a%6xV955m|Zy#az`mH?O(t!OtO>4wm)w^hvIVe#o+u|?wGR2?E{4&+^e`V
zxAqqsqldELa-AQM@n!p3b^94Z7l^c-^3FC>-4+Q%=v^q=ZyNN!U3P<#>P=pF-Xk-Y
zy2KLA0JJ7fne2k54z^343G7yn9JqnW(TDV;_?8Qt$88(G`M(JQb@dg&E+5ZA70j-I@ut;XP
zuxPYev}+EdzJ&!|gBs1pg-1>B;MvcN1Xugh;D~9$gNSM=&ooo?Hbd&U_oXMEJ4$3^
zap-D{R;g{PeCJju+^ZN_=5ITIy2u2yz@t^g_1lr@rg66&hoaE9fRdkZ&~E6D0Bkr9
zK$3PmPachl4PXzr-v`y~o4i?zt!lv6SB$Pr+p9(u!}XW60#s~$K2FsiE`%nk-7Vbp
zba)t+E%qoiW!48^4+ciVpu}Ma7AuP*!lz=7@r2#&P;#0?s(hohS`
zi7X8kA8S0c9QkzPOCj_7XSWC88U(TiIi|#nC8=M9yPzJOgPVZ5q;Qvoj5R5N2otOW
zx6s~RMU1R|6=%S)&i3b|OW29dyoTt|qa|>KKVhXuS-tQV)IuJ>3cSYDE;f=?exvoiZ*w`JC$0ZdhQSsy
zzuOSjZi5cUN?!OaR3Fa8W*YJ^Q^eUwl+dQUs-
zHjjR~IabdY{D5+@J(lr!CNk@IiUezMyRKLe({9?Nsk1c^rM%@LW<+1uV$SB^DX9}w
zQoW!yQ4jrY)kES^?C_@TP;@lUwtlaSXa%5~;4@@nehDBG{?6tH!gGV=sHHYj`I}qc
z9$TJd74T6-tz$34%%_0@6MgJgiUr%?sM0umIW6MID4L4iIVC>_1b%Vd-)Cv$y%}hZ
zkbwMM3Il5IYX5^h&+l(eI#(%!{+F#le}=t5D?qc_cJ3%f?JPWcoUiW?^6XJ$(79|0
zb=|905_vUc2Yhp8A*4B3cjh2Tim%90&2a`~3oI(CT?w?TyQg)qzxP)Tv;G9#q&5y3
zc}@HZ!hk@@mf$(+&8wp^=?qU*^xthHW`N%$&oc?CRtss0^hls$tzV|yBy`=C3gbAUnJKeXKmvQ?H3Vv4
zXxQe3A1IoV1fp6HAM@D33lfja5(0_fSH8{6^J~Sl0Yn0A93e-8k9x^{h8mvu;RjXN
z*XDx6hR9}!3{SQbP*OD#0Au
z2?2xbJe0~4gESh-vNFx{doaM*_3rvTD=yXyIOW!TnaZHn>ubSWq;*Yb8}ed`d2a!u
z`ElS3uca|?r)WBY-lSj_<9e9z!!#EEh#F7L?ZwaW{OFym6l0jnEX=2J83{ol#a!4^
zXg;oUZ5}^O?$8O(mucJckOB#EX{u50k;Aag63ysWDhD2{HdNP?vK$^377Fr@<
z9z}XApj)N%n1=5VL(QpB?WYjgPr+{gcR{hOk>5E>qwvTlKH6~{t!uNaFv&5Y{FFH(DsPvf*jJ~ed1Vv!a1EO#
zX0mYC=aa6K1$_7fS+XU4h|as>)2whxIG!SB56#ZJs0ez&o7_s|>orJW2D#|DLw|Pq
zH0)0Z3s(iKS0V%mC{Am1>6RmW-_ky_^b)9YPe7n3yx{4Qq1Za1`X5
z0l)(qK#UuEp30&Jo_`>#6HEEzUF&1P4}P%R7#MD)i-1~47X=K|*=eI5a5x1o$pz^X!~yqopl<*0b946zY78CHo*d)2%TPKyna}DRT@n$*p58$Ulk9oh4t7f=bkkzZ)u
z5cm+a03xq8PB16ngOSCbU=05bk3pt0jws(yI07#NU*Guv-{C1RIj2_!$jot}8LIW8
zqr~r!+66C
zGxVf$EHL1j03h1|*#)4{i5b+_gE@qu>{!AoU&)4#GR>+8(+e0a9-ae2FfWAZiR6`V
zI0qtRg;wt@XB1j=xSO*378ePG7Er13M5SocYnD%%0u;8CFsqjmQc5;MS8V#h!(pagxr=c6KFnb-fj
zHalF~MtTLotRiGccZi8+s`TybhO6!6AlGVBv9S8C8gwL!=N@J?cwH$CNb5g%8;iik
zrDYOx{Y`sj|7~@ea1&H}LinTuG+gxgM|qCi)~KD&kVB3F3xby{BcYh1p1LeaNsFaY
zSdC?1@D`Oa+B}b%;D$S>^{MuRgoVZ62C(d%WEN0AR%h0nJi#i2?IBudhEy0_Wm2Sv
zJkbF)ljMy@wW}Ys?*I-rtPfHnEP^nElek=j+01+Ydebi0BLM@!cTo>Lq
zqg3d+oF<|FfDeeCPm>ei&uDmheW^Y5EeFAYx5RtJ&Vqj(>g`y#k~Z{9XP?3#FflY>
zWx=f>fOr{_&^ev4A!=CIc3yb=W<%<&m^gCgZsb5-1}fax@8Pa?2kR9)gaZ=B2n#Nl
zlwl3k)x9s+t=L%na5+qTbX>w_bpqjWHfrlUviwUbM3->XO%yS_@;aHWw8*}@M!g9_
zyT_Vcun5OB>Pu^yOGRJ5*ls3nM0<-x+W?TA~ENH^@QU3c|x$EwsdyPTOkn56z
zgcXV1thlf-=uxj9l)u1y1ZD!Th9hQzr7F+)NLco-WDU28^L20n5d3C7Bx*A{5c1kT
z^!Be8jBi;A0#+V8ANviKe3RQ1MlC28(S(P!Ti7;{!<6P{3KZksmw~$JL2o~A3qhV;
zi-iTxVYDg({TX9dW50TcA?0~P@Qpdxhc1gF8K2<<
zT=Cr+@f@FFwu7c73i?kIp)NkQ%n6@Ty8<5s
zYYmo{k5PxiOofi^rSR5;ettn{Q12gYT`Dn~4@4Gkmj-{>!0u>p8w@$=13Hx>ccO4F
zk4^N3zpnZ9B6ZifH-rl-jDW??#Gl^r@HF_mWu(a7m;losNmiGG>y8GA#FPSJ>G(0B
zIYM$+-txMRHxOH7ZO!&STZT@vcKF=Z$U?Lkg2hZwi*zqxh6k&gHL48U1cPssirREZY{K`;&$50TVa>m$Z~ci;tu0YG-$1=HQ6T%Y-gabNEu7
z->I5tlPTqE5MFVFKe|Mt=qG&vx$aOKc$D(qJ8r&E(DWmr;SDvDxLq{j&79Fvt5?g>
zLrP~HA(*2C)Zf3)$qa4-hnL=*9ext6zKhs6?PbPd@wP}hDz&mnXTwx+ErQ(q0cMD2
z3#PHk-PKuW<-(mCRqbkd4qsxM9<63Z9==71*h`BurXOS-VbAX#NpJ+t!TDr7I68@-
zOPZ&6z`0WZ9X`=NHRxcb|1UsfVXU+7z&Vp`rRBis2wo}AD|h~$l9)k>t>?J`(U^8^
zdb%SH;^O-J_X7KGINbbDX>-|aAktPJP-k^Qp81zQ0U2E*wt8|1K;*EFY!|CG^a*vDeq5a#QnmM}Q9y7Ra_HKZzMa
zG9#r)gfACBlgtmxNWiP3G8pcYpgZ6A;5W!N^mG>v4;8T)2FFIdd>m#mM29ds)rHSV
zVaz#+$s(|H3spakH+u2jU!NPS(r`Y+FcZ81fJ!e0mjH1G$jHkwr~%+w+<7R;ax9tG
zqtGf`;1?uOzGD>(W9V*!UDcSEOJlSd)QCPaUbP}~gi{d1$Ri*61mR2*%0(F7;Q?du
zzO>Kr-A6*;SyX!zlqD806*Z1d)Y7~jx(i-7U{2nee*!;Sk9mUNAA{YGZ;=8a7|%(U
z@&L^d0lQ7J=ENL1CRlSEEZUzZdEiF#B8>lw+`?$dQ@7
z?XahX!0ht~Jc}i-eQc{s_ztj2CsFlSSGT}W$_j@`9Ct8
z-8haWEws#h+$c#8rxTn$xLhskfVU3ZF~WMu#*o%hNx6*94Xc94yV{M;>I7+j?SG^l
zCp1#m!GqFKlpLiz!39FGrc-YF3KS@=TGlXz>g&y;6Fsn*2evXT<|9*~5R1jPVDZ!T
z!3wHINO_&EsXj9<5m;g$9tm9OnIyp*vMTInZiqc53{vle0%{H>?Ec-
zp#Bb7R}`|UuYHjFUh(jRYoejw)C3kh+WI01$uk^X338Qyga+`g2=>aaL~}uwfMHyP
zY(;5fD+)-A9@4P}cJFH`Iy)OC5%A|>4B`?qZq+xfucoPFD-1>|JfJ~6(aKQruRm@P
zs=Wj5khNsaUK{AKF82y=nRLG%6VHa$PXNaIn=)OL$|YkjE*1DJv=DO#{PNfUDgtn<
zA0YWnLpStb$P?&%pDsJjc=-SQMPsuNZp>QZ71ZXk85{yipZ1vrd>*(!O!<+@x1oU&
z!DRmCTV~Zu7E_0w#G;Km&6HBJ=R4c)lMSxH$lj_EIjO?~E(aWzEe!tfxD2e9_cdT9
z3oRWGc1{bpe)yWA>F>c}QKYL?{sJf6X{3ALjuR`COt4W?h~8`EU3P&AknRL!ZyZ!#
zec2ow!LlDQ*b(>roabhDn_#nZ{b;XvsI7j$x3{{!UJ0*}%?9Nz0
z5ZA=rz%*!|49Pi7v=1M6a1Uw93u&YUK8Y-Xi07t_>S<=;UY4^6rVi&py+BTPcog4d
zle`OA4k`_9l$?NhZGt;35(3zfdI_0Fvleg{8h4i=eg7&3Srg&?`zLMSV_HaBv7<$3
zs93xaEneCd;ylSZ@`bN>=z0`+>Ag6`nL*V0vi;q;Id&+q0{Z$iUCKq{{6dD5XcoDx
zgKjPT^>EeZ<05T~xEY6UM_Wr97n$d`HUkOW0af&xB97q;5DQUZSZc)UvRq$%C>C(p
zA){1u8oluw^N7h`-bwFlbCYbe(@q9%h|Uz!?Q(pXW|
z>Z7Z|L78GUHv;(w-nbN@#pj34h>3_;kMnF_*2lX0otr^^6oWr#&wRnC_8f>
z?~(Q)lc`Eb#MH{Tjxy0-enY42vV0@LGbrSgQOeXM9TjK~6+h$b*x94U7?E=ZknSY$
zNz&=TyP+epBj^jkFM-D{G-h+|8}YBD9uM-X&6;|Mh)+YSCHgz@I3)~h!O4&S-HO(~
zn6{|c_GFXF&FWLgFX9qGogIgj=_@6I8Oz?)-a1&4#c5N+~^Yrj1|n$XNr%=U4>weaE3QG$oG%D6cXc
z8H~1&Hn0g_Ohj`eAvYkb{(lN1IAS*B>M~3l)lTN{_}UGmskY<&70AVi<;eQ#hi<10
zY15QEIQ&7g|Jx64l`OaoR{?tnmpa?p@a)zZE-2^kVl#b9f5?_5aao@vPIZbhtDvL#
z9{=Mmf^_gGMYL1Z?PZ&>nb)_8k=s4#6iLwdW~&J|IETaU*tU7tifT2SMV%dHsC44z
zs^M3$GIX=R-E+`eUlOzV|xN^x>CLXNTyH#8kA#jtgRjlmpgVAO2~vj1Rblc6u%B
zJbY2pUHjp;fA4A&TQc#!Bg{9W^F0X{wiNAD!cWS2D~i3*gr+2j=x@}i!H7C8Kn_FK
z%tl^6kDYzYQh69^XcpP6n;#@l?I65Vgvw
zs>59b;g|TN^HZOp{}*25%~usmME}hK`2Sz(|F>VNY5n&Ug`I0VE*jSl^2fM+KRk-x
zSf{usYogcjL^g5S{;})#gTHfZcP5J81u9xUT&?_|D>cfrx@{hZcH;WC+0X939-47D
z+zCc-!6XV$dBixYBs3S1|l2@XNq$No^_iK
z(Hv<=TsfWd{}flsT;KNkUlx6(w5zti@$TT)=+%bQD;RD2gI)3W!*rb~>c;iE9O}7D
zMm|bIIIhVQrPao^nw1}H&ipZBW<48fRS=ou{#~@~2gyv4Ca&u+LNvY^dfa)iFKFh!
zr|UQ5i9gG>b!j;-qV4{jpRw`7Wf-1&&BJF|WjQnet_l76cs@=jx!F4}cEfjbm3jE3
z7uUOAJoy)KR(;<%ZKraNgiYOVbzA5#j2BeB$DbluHc(p-@Z=BOYEPAIHaC7p1^$Y6
zIH)cD6EO@?C=gQ3!HQ*J@TT=y?+?8$(;1qV4QZ*E?$pfcO1AymkU-z5{gqCttwztsUyJ=reZ(FA
z+td&c9a%X!^@iZl0DpEbmw%2
zy~{|p+M9%n9A;mbvPXgz^dC7YIb)-*D|(eDnksNu@)nx%>ifu>iskBZhJ~(@ztj(B
zaFkmTm+{yS?A?AM#5SC*RlB%4Ts;^V<$oZ&@XGvEQiXs;SCuZ-|llIEti
zj*l-AQJ?E05w)`-wc0O1esNTzW{^M{DHp?co$(gV_YhTA-Es5E8o^`Z(6(8-~
zKTsC4Zup|WNcdZhcRr0+p`&W=vZfd9%h;iV)U81O@OougX0j5J`6`Rk@6&eoVwPvd
z4m2Zkw61IAu3zLad03J@8R0fF{V>VubK7RUhJCf1Mnws0j2u-O(O>ev+I%V;f3;`*
zulCo3AaFtQzpb#Au3%!B`WC;jo%~_rgV`(A#rvd*p)qlPL48>;cK0?oW>=&7?!aN~
z%HQrMN{#A>`-3bA`mWp1hB}so%`^TL!5s3
z=i~dI-(C3n=2(QkRlSf~in4SJFBY9^*72c7u7;`
zHs$=?PQA|6=Up4{P}wo1|7BY0R_Ql3nNIVeV9m}qIz(x~XkxKr>e*SUUki(4>thf3
zT?S9pyz^@KIFn1dwk19~Z`!s+8Zd{rqNtQcGgdh;2UOGKeLhrYnhxDmFUW!b(_)G-b$tor-{sx#!+7l
z$M7RxFco?b2&E+H6}-VXCWDXx_`3&7Ne5s@JXEJtW&5WE=ch1FpYD(%o
zcY}EQ%)92Y^jNAcjYR*hN#;vmEv_Kl?{-sR+
z2BB~3REwEcVs=2}J7bNA*H-2C%{SOk<8BGLTV}eKX6tqDt}o=bcG~WR{r0fTGd4Y!
ze}TIcZ%IgMa)++Yt9k`Y&_o@yS9*VT7we5+(a`M@Ht$Ex
zW6u~~8D^7)0g(W`u?~gi=1&wC$tlt?+
zSB`mB|Fv9wulv*h|IPW1WQ!YlT2?C?q{O%CJ}^oag@wCia{o&fV8x)3$`bA8s)^i@)P3rtFdcMhI
zRc|d8-y69zyXx4Ds^(^^dRrCNnVsQDHl(okYv!Im)4M6-+B2q4d{AP#WyIcF+6X
zi)W~ykvUV4@A1@E<6idjZ=U%}78e~a*M~hR50mkKuJ{t-NvB$4--)!Vxc)YM#m1SzDxF>b0oJ
ziHR~bmGELT=8rXrNanv5_n|YKz;GoYzV+Jnr)|Nh!ttK$*R10lmB
z*FI6Lx|q(aD~Gttg|zVNSM(oo`_8U(Afq5Tfw^2YhD{t|VGYsBd7dIE`jk6ieg5h4
zp8j&o@3B(?ydQ6He@qTL7CiNcNm^%oIO3K-!O@$$%}iUJlxFC%YYqy(2Obd~%n!2g
z_-)j&GF`p!AhhweBKNv25qYs5m&ZF}c4{ill}hKq9Sv4quZx9$N42(nBuz+rOf!>I
znbe1*iYB@!Q_GV$>Suiqd_U&;ovJ%tt5)Xz_f_vHm5r=yPNmUdW~{;uXVai$`dW;vs;8ZRxw(~PX*iySa&89XZe~#
zkT9hBc3YBSjvd$VtavWGPU74Em2PR6u=-lu_vkuCP5ndos>^t*wCVIS_lo!m?f^C16_LRc4$EvXw2fsTN6BK^luaE4j5AUmQNefC`
z4_W6ny6>293$r%TwM|Du8NuhJZu#`_58myy&A{2v;Xk40FBe+Oim;tHr`f{DG-4BL
zZ};pBZcf7YakS}c6_*Y3#7MFAM!Ri=rsUMx6@uS_OQyN!+}kVR!Cvl@Ys&ZT((j0~
zMhsymr{rFlX_8B7-@0>_VRkP?gyP3uEJpvy9o4Z#E}<}{u;II2gNdxGHBQp*m+w!#
zGP3!`w3tlVPd>@`HK&cOe>CFfTYRw|`>@0pFeY-w$haJPK4Y0
z>eQcI3lU>{@)!TN4gHgj#>W1LUVJ3+=9GuhFAT9)
zUxV8TOUY8n73+mtKdycY9plzRa)YWO-M7H|TrE-~3)IvB?E!d7+8j-VAiMXHukU=+2cB^H7|2-l<*?yeBkGM-%6d<*?NdBqYDjLmQ
zk{DnM-0U4nwGLL(>1h)0P%{}x5PUnQcvq#SclZ2EncB+?4+Ys@bh0}}Z)Rr_b=8`y
zC=;3`7itW}|KxDoYM{B=X!7UtW0>f5L)6tPpW_BeLP>i8naDZK;Jp^L-Ep3^H@P#?
z+$~MC^ppu*O!i7Ic<$b1Q_6ooE&k^6u-4M4(ZV>(u!Aj{Okabuu$g;o^^lNX|Alj%
z`|R-Jp_{f&%>z
zTQaY$dz5_>{7iId%rZt)5B)A?Nt{gd3L_o_$eHm5ZSeD@^epdE<
z@2jgYgY5;$Kf}DXbh3>?#h94Z{rEBW1U-?5BY1}=Ya8p~jre`NX=Wltrf?UpHw7!M
zeq(Q`X{cG#OSScd_hm>-W{AJ)JmJbH+EZTnCSFomU)^|<##dE!h^pn1y}0<#MY0j4
zyqFomT)foN*}7x@q~b*05MIeQk<$7sK@I7j>Ik6bgnh%72TSVVc6yPq|u-NBOg#
zV{dfLUwzC*&(KIqNhO`3u%sE?$K!QdK=`tA?GmY0E@{70oHlS!_Iy-3aoV!A3T}Y9
z>t&i~IhbejWZP0x`U3ujNOcQc3i$9Vf|8wBg-k_gVtD^s38C~w+-a(HSXb(p^l1dy
zNUD?ylRZ$+AwKc`OgI)ZfZa}greU?W7WRR>>H$qubZmIYBFa5-rQOED!n-7knM%5r
z>JevubgRx8#bCpkUo!Yy!mrEEB5U{YSDlJZ@}L)(B~&yR@u!0~G7L2(c256MwLV#I
z9nrJrb9eQEf(=C$E$VW_oaxo6OXv7p(+ey^+Rjl7{0OiPX)%aO(zch(f3O}V`!IR#
z>|Xfa&&c*{sZ1)4XM7~Zoh^5qV*XF*gRz%Szw^}Yym`OzXNB?+=MtmjlG){9Z5_1#
zOP=Dm%C8bSwRCfGno$7@`M1B(-Wah9Rx1q3V+j^|#{QaHyK;2f_qmU^`&W)V<-Lu*
zqip2w1H#FamGNf}fPqs}dfQ*$Vo9G4@wN3G38r>{=NrwYknKUJitU^a%~~Zk{&8N(
z1c^Y#F_plyEWsy|-}BI_oR^u|sV(j0j9A(HyS=_nynQ8*GnB0TzAz?CS6aVF;DCT9`o^%WN)IXaykobN?IT6cb59C@gqI+zmo5{1`}cP@~Bv^#QveS`4x3ZL_1i7
z@vqdt|H0i`MpgOs?V^HoE*2@>jihugKw6Mg>5!6;k_C%WKtQ@11VOr_MCtAj5Tudr
z?mCnI`|Ni=d%t_^v-kOUhJ11`823Hr@1Ak}u892!rN$*5|4f$kQV(l!IYh@bcm&Dk
zd+9$1=)-bfLsO$Je=&PY9@yV6G-@$QTRy`V-%lHUE2v|6xYJ}Mx^>onA@#kv*7`A=
za3;E1=~0d^uh?$U2aX!m=*xG@zEee*gxt(R6!kT}Grml?DD}BKJjiqU$pO80S$2%2RFBPA^55$`Z#&GFWyk>!)L(U`ZJ7>$a?@K7zspf{+rI8w`|6bG=&-PX(>N4$0Nn>-cQW8*&$N?K!Hut-6x`7H}`Bp}ZUqYW(p2uo3-z7Hj
z^W&d_d&5I*VH2s7Ayw66Z!@U!nx7W66y)GjeigPgzqV(@(~i)Tao5YxG>OZN9-IIq
zvut!+k@culyftfb@kN4H7?HN4<;L4I8S@Ndf=QgRvNDCEy!W=(yxz70UyVUgE($S9>YL~xtZPbT(dO};N!FGtg4<&Bg
zo}FUP!Wv;I>6`iTE3)dhF**n1;ESf`q*eZUyv!~QsT=sWjf0U?rR@hCrc*B*>)2P<
zi2kav@-*_p=x?79cpM~SInCu{>#7FefK^VPVnBQtZ7X_R91j*sV7OF9XS*^RugCmm
zk3(BS-ID@A2v%4u7GNYo(^SF{@6o
zF=o*4i+8r$mg-$@Xso;Iyh>uBvN7fP6QSfzI2UnSe!oc*q&ozkGQFdVzF!$9Jlh-F
z!OD1*jB=X*GcyqkS)~Y&6fd=O8R7YIb<2Q{CSKcE=t)c6J)DNB5f-(c?mtw=U_~uO
zsO+Bx4WIWgMwm#0YkGYW-s|^P=C5^Jb9VxU#4ik0S+UTPm=(+nL=F~u&>ZVm#N3P0
z0Oajo_%yghgxqq2)D`}0CB27X$VX5O|GJzK-Bkf72vlZg`3j3#`U+ENdBadvu
z>djF3=NH9`hY-uf^%~#a?k-7HK@^h%^A)B~mkBA2Tkg8`5V4OWW
z^|G4Hlh#R1hvjXEx5hv+P`E#{uqW_`-ZWZjjH5Ih!LqU$OW6t)eSRV$se@C)({FhC
z&dFc?YmuaR=}K5C1ip+#H)g4lO7wEIq-~kktF1Ss!MIKyZ+mb>BhA~c~hp;c;_hU(dUSx0CW=ZEY(0yIN9kN
z4L#+;qd*1oy|UGH%yOX=p#;3~qIGXFIs)pCSMgnN0Pj^PEMwbdqsl`eDOh{-IUB-d
z0Wt1zYvYL&LRWWoxc0PJJ(@X%zUlmsTE^Fgle7Z!uqZOcFxy<1
zeqJG_S&qQ_=5UhYDC_zlCvgltNJIL-a?{fkhAHC@x}}cEyuL_M!%1!c8@(BP%gO7x2hm9!^64zNT{Vg)nmpsG>UV
zdIjoIJxlK|G}H%wv>7Sutdm_Y{;F`sWR5++S1K;6t|Nd(^Z&w$SqwQeJ8AA2PrvCa
zbY3v&q{c6hDtyl!lO~p0a7_jltwjP4_%Kgs_#Znfm(@9$xz-EzesW;hvD6?%cOXgp
z1A`8N{}&GVp73%0ap5~DNvCft+mCu?pHl{N@!f-wI47o3d`dyJdaE)*HD5k|f5_f3
zq`BBWFn6O-Ms6GJFMS2}Te&1ASV(1&D;7qPKsLH~yzv^<=hPl0Wh7L|qmAh^dXu%2RLraU
zS07%iai*EJ3u~{=3gx?prTLu?FxZjXX!OssU6K_?1)I_AyUOZkeHx`32(C004|Iwr
zkd_7GsrkTMS?)^`kA0=1wpQuqcAqMZKsmdkWfm=0Yk_w0oJkpK**!~V^w#Oa_N}rF
zB`6>e?aE@Ly!q+=sUjrg8xM>grH#SX>;W4Da$`pR=|YE0R9PV?Dck9NfSfshbmN*I
zmVM)s*UFU?H=laYgM*Ub(x1q%LU{Mvx6uDVUjyW>*R73v5x1x72ir0P`{8OeObbfw
z2dIJV!Vj1d%Aymo`1`FPWwd4&Z-!J;*t_}tPu~@-eo*$d24jcdmu^Lj?w7~^w34FA
z_BRkk7;lxJTiJ-d8^yO+(XN&ZfQwMbWzX~?a3AUl_+|~EG
z26a_!e#emK=Rd(HD;58%+#$QyzBoy8LH8!yQ~wM#!-s6RYkUSHgv^d16SviLVW9FE
z?u!BcAQ5VD{Kh}^YN6HhMPR>fTd5cQ+|@4uEfJ%r=iAf3FBG1igr!D9s1{qkilE;z
z>t$Wwl|9?*(Fl;^kB#s??>qQ@Fic>Rjk4Ugc`0RFosGBFDtmn%ujk_qL0%YGvV$)?
zWs((G))t`K8gx6CNHo_Q
zyzMf<%r=}ooUV~oi<8cgiQo~?mI@q@XPV|TM7&9jjIvS{Y`GHs$!_t!Cw54?^hW}_
zUOJ{ZOwteoI{5MAh%Fkei}!CN0rY0JvR@9rzSgAe&CE;b-h2c-v;HI?Re(ygA}F+P
z5zYsw`jiUE2LV_VfwQBy9}w}DLpGv~$(!Vt^9516z^-V{fQ$QUBAvPp1p7^rOO$MaSoxX6F?v9^uV>gLek
zu|zpxHyehLf_Aj9oX=;GE}5r_0_K!^W-f!sPb_1Imnw16jYh6sO)kDH@XHzMCP`Id#7STv_{CAa^scTQGKyO$n{^`e(}!o1_#(2Z??S)ikztR|3$45Tn1i~
zG7k|JUP&0|X3r#V;3<8DDQU>yWsoaA2uH(j#qISUX8hoe6+*UcHtcV}*B$9!uYn(t
znJ+6<&Q*9NQ!LvijhW$?^!_Qe%+fg0P)n(C|0yN@7Vzyi=;JMq;Hdf&Fk~i%V{!2I
zqvR6Oo&=L(GLH#k##3Zl0e-^B;YT>MbcJ+w==`T-FX)Z&Q<~UEF0sq*K>1FhIy)>P
zJ-{8uo!7{IEROR$ne19j?Wd+2q-Ts*-&bjcTKP=Vor)Sya-KDM6t129y?IpmT7HC5
zV0&$h!v6$u9W;FC>DX8#CB*g8Ku=FK8wH8~%3j#4`LZVEq_YjRUT2Yb*!dbIvYX;l
zRPcL#w8Nwj1^7{3q*SrXG3|mEfMgDnu&fm5-EmR~ekJPRo<7e(>L4HHSu;jE>iI!!3aFmNM$7&a
zKK0TW7MUPPB(JF}wtN4$9**|pBVl7x@~pMzUHOlFv;F5C(26HU6PuMBtF
zyyP#Qq^*_WY#hhVPNq*iO4T>nJ7UN1hgW{SR{=eHI?c_M+
z{rzl$V4DD`6LDQM0nz~$&26nXt`*dW4V>B@m}=_+&h;jzzXbw5m@WiUYC@jSVIip3+7&zpK`z{|(A?5u0`K{g$7h}r`vr+cj
zePJsxZH$k|nG>Q)A7&^uY#Aqq{W6B%p!1@|r-etY_ouEK5Wl-bFvW0A*Qx04&E4JfOCtcUoRx%qhSF{c~5WOtPi1
z$F6xPGY%vS;rLiLS*=gxUIcqA{Sk(La*W2$n09^q5
z3v55H<)pu-%dP#xm{6&eMGAfILIOv#865S?qmCJKFMO#slpxx~j~~)abMzjoL>@!n
z^fftJ$v&(_HfdBJ=b_GbsYo4qX0(*ArhXsaLoEZNyQkpcw8d*#zjFT)K`S!r^lJ(z
zwT{Ny{vcKA-(V3`)?sxZN}Rkihk0?nMPplaUaCo0)LjxPSa}B|CzXt&Y_0OSoe}uUD;}qVp3*5H3uzc^0&ZpT6@?Hm5o->@J>a>
zr`+xX|2Khx&pkF4kUF&GLCs8(qC5<^A{)W?Hx1DBKAdc0uU+NPYi*3r59WC-5NnRJ
zr*@7vhp1HxitHZE^xE|>;hkqte^lB~X~y2@&%)r7is2;KguW)2KE4OVm#~Xg1E@is
zhwzsuvh!M^!l>XTL-;${wCTyB)Y5>>26wFjqU%Ujq#@Nfcv;YWMKilj>bJXiDvJin
z%_JUjl-a;PgxikD5tHeEra9#G@c(OI&8$bUYu}|%Pr`6V*{JXX2ZLjuy*=O+EgeCu
z$kr*f#s|N{PkJ4dVQ?gZibegUvH%SZUg%%sRNwn1!RXO>IoJ8DgU*~w5+&E?_9vC<
zE;vVliNp20sPE87ME;){))MKMAp!bjBQt%m_i@}J+v!qx%
zeLu%wwz5vce&_P>So8_@Z~nFSylmnpr7JJaDFPMUH8As&44R#g585lD$=4E>x^!r6
z$BnX_s3-`VleXI@ahG_Pi*UwxI+dK;!ZRRFYo19Kvp&c97AlF)cpmj9yafoT*Jy&D
z8f_13i^u~qv>r}e+d@)QsD^NcX?GW8IO09Y{LfL#Vl>Dt
zCwZg9To$t}o+Um-9JOrm)}T3d?-cG$lK2bu2LyS}OrFy8x?_~wz6^xSeih%xvgprm
zI77CNG>DrX`vG~dSREWi#n1Y*G(9seQHGiL)17Z)J#g31ukZ0o8+l>$zeG4U
z$zE9W71iVM=VRh8$D9n#ziC&a>=vo}F!WG+aJ-TIjNOiESG}Bfe@w6m_qcM>FH7`E
zS0DQ|Trn1HQdNlX6lafl*{N;$C8(^$td2|k@#*BWFg^D#3@8vbKP6%!Yttk3d-TcE
z!AA^laU+V3sTGZJ7rkR;+u-q5JkoI%kJ=x^sn;i?XfaUFAS6UHU;qWKMjy1{R6g0
z(%JWQ%lEk&Ny!n=HVNFTYS-^yg#Z8QMOX?Yv!R=hSHhF7YNoX>gQv}iyfM%*;4u0o
z%3pNL+IMf$quVepUfn$x8B(?wDs&M}F{hv-@F0k$0bdsH{+e;*CbA*x-wy^+S%S-L
z^2$+C+CFkbkIj2OE?`WVX!R!#|L@r0@_*jtKMSIKAu^grx*U2h#i2#=y+1yHQksts
zEq$%8%^P~(6VD=uf2m(!J8BoLbYH67|CgNeY)hopYRD$ekhE$9aZk)kpUpo^+jB9`
z9}45J*n+!Wp4$H%qdYVP^+u<9)u-?kKPrAPBbD?18Je0Taz
zneXF1Kv7z`@?}&G`ntGFQUTk>O@E+=M!U=PIs!lM&Rtv8+Br}SNRc3Wv}%D<9F*B
zzRXKr>CYox?)`CDnW(hN9hYD-*VZ{R8oCg<;i~PH5UCU+4LrFivl{HR)Dgfc$!5ok
z2ehQjCDh7@p{J&=OnfE^92;Nu78d^H_PGuGAt}_`y&BH&I2>gsRFQEyk>^Ive0FE3
zi7Fm2of4-tb7OwGScJM69D%Avdg9l#;M$0GwAD3TUPa695MAO^|j>><;lAK_)Y0{*Zp9G21pR5kNyj%F{QSQUm|z9B$U7Y^p$%r90`_
zE&GC%IEs7bMf~mUOp<8~y^Ut3+ryIB&5lyh`TQX?BWz3YqwfOjMq_UZpuCs3zhSJ6
zWLGBq9;M3inQpfiXU0mmURIeamMvU8u}T|hA{Jp&jc->~EuGTJw&F1KebyE0%JvPN
z&(HWj(5tpoZMqXj1yW_;T8>Bkep&CGw=3H??HGF%N^)u^W;2U_xULX;yq120P&PVg
zaL2Z!+Ouyx@fe;7{PpIPk}aO$XYtmEQpV-6%fMV7YJJ
zPHxZ3oW1~RYagHyD&Lp|f^3A$Fc-(90JbsrauTYEcY8TUJW>}uL=CfzGTw|=B^!Td
zg6uhHCtMBI$ofSw-R8V?lv9z|tJOT>e_4+AQRNH-C>?H0(JbF6P{(z#$Kk37-HS7j
zH2!CC#-mLj>riqA${*pR
zg+dN)XMc0$yvzDM<6Ri;fY*;$D*CpCYzPmGfeGkez2Q!aYhMf2_IT&b=&;Hdz{
zCmQyV@$$oM;pS9upNlFJw2yu{BuVGUrOzZ5L48#0aZuK@a$e_h>GD;efa#CgT`ugq
zp2xH7DTG(;yV$S2S4F=SM`Jo(gX_P!1iiezS7q6
z=P^w7ul`7sCw|+@@;E?u(MYvZ;aOX9q{0fh&42rkE<4JP<6n4$JL(4tActwkz8Bnx
zjA+=0YAIeH4G=heWMn}3{`nAW?i0qEOuk0RCo{3gNJ9b`#*ozoy4cIrid
zF9)kMu19;-g%QF%QTkXmj>Up?0A4t*xj44pd8HS~bC+M3pV*!7q|+e{_Sf&@$#VRU
zYVmo7u(vHWJBMQ|nQs>HO`G66$D~hp{wOxQKQj$^-&S2a%nkNU%(|QZK;pd{Fxv-A
zWGU0TFiyP9h@mZ60I&OIIV5?ZNc`Qi%3wodB*5N#D)DakX}0%8nLFm$V=U-Sjss9&u;7RVZygJr)Y@VI~&VW;i*-;N|@kvMX;+wXkjddRLUOy&J
zWF?nnHTnUF>vv0OIhr3FD~exQoa{CnW*k0kKjf=(?pAqH*h|$*_)Yn}*JXT4uJ5qVtI#(UH4bvS>cM04=b_r_iEK7T&k8Nd0vSuKqH`OBgH8$_}0fji%
z;b`^Jg%JKIg|su1tHNh>Bsd$mnycK6uiTuHdpmfmNk~V0
zxSW92i@5$u;TU(rZrE{MB2^R;F@Macu=Oq143Jpx<_nu%t2RoK#v~s$*wnv$M
z)qM5+_j`p)cf@I5G_Zgz{-#v!cFXK%65EXBVP+a@Qfteas;#sVdNN`1vuXOLy5XG)
zdjm|+ChKq>FTF|lsdZ(gZ+yJH{!~FjlcC`#-+cuV{vTE#M#z4zzcqSR@wE373yr1A
z)pA~=gSYhtn9kN7+K(jykh{i_LjneOkV}?y71O?3riLy5
z?s~-$6^{aDBc~;|J$SnOovSD2$6(fFS9`IICtK?$PTsZ=p85!rR$1=&snQDsq1iSz
zjq2m446UXw^F^QsklT8!Ky*hdf0ivPb#bVy^+z~-e%&c0)Wxt~Ven~ye(Dx-%hz#q
z!q>YAndZfx=Jjz`Og}-qVi^cHwsiyXu#SDyf@Wzbcr_fpduXJ$z*L2W&@Fc3u3jUpIF8`EqRG
zx+~c`N8<)MUdxsuC!Hoo@vAs<3%hoatUi0y4uCIs>K-w8av(V@nk46WT503y+){^o
zd3d2^k4nAy)gDkKH^Y3oVl7)ayEru6y>2b^)iZgLM*v)%*=pdSJ!cXe_`NHMGGvn4
z%q7}Kw#p9yIGIzSG<{1&|8PF||BE^>3+M{d81Gf0jAZ}FEfHkTBAg8(NtOSX)AgrEz##R@M~{yaloo|3rF7
z06oymTyEZNFweTdqhG0I;$;<>A>2NavMbsKY^-*p05)U#{WYg3Vkuo)wj2k7hQXauhl&4a$%(ArAq|
z6Q9svNY5E*c=~Vc)9k2NOefy=9mJ1o-iVzmr@!*dUgUrB($66K{AzCBR*&s%*Uqcr
zp<`!mF25|z^CKOO!j(s_OAg^%f-W_u!tL`+g}=U3UoMy@U$%L2!5Utb)f_tX6-k6X
zXY3}b_yjr%T3+(e#9BSGnSOwp%DXsR=3~f~z!eHTZ#u)Ug^18i))r|SJ!3H@9e09Qpj^O30u`TTOumon^8$90(dP~1s4WX@7aGZ$hdt)_Ti#+r74e(K
zh08rB&TGD6I)i^!dSi(OUb=R|8|UKLz8u#zMObK_1#t^G=w|eDRP8+FC4#^HvZ7_W
zgov*dL79u_Rr&cgLG|0lFu?}jjS3f;SX+#7(qUkIgID~Vg5Hxqy84E1;C821mKk}U
z6SWlE8L@Xko1bmZ8*bZaRe2BOJN_4OGHAk_i6}h=DtJYndx-5JgZ4DQG6y8o>D&}0qRY@fS2zTGB*HUPfX9{`3pN>uj4eT*UCH`6w+aa>kUhCwd59$n{
z(qMff(lSz1ie-%E^8V2Up*!H2H^#If*6E%-9OPb?g}o@wt;eT3TuhlQpw1w8hH1JN
zq$k=1e}K=dkbNYM+#m$|`}CwaoSp^pxaMSsfI;rtbXb`MpnXkUS!I}o@3&rcHRicr
zM6idMmtvAMyAV-_59yUKn!gf^k6jy5NhocohMW6je&vP`RC-7YF5QD1^GkI=ZjYYn-`d9BF={~29q3kcqzGqT(t*U$V#|xlZDDgvT{AM{y
z<|g7J`w}%BlB(0i0Y}h>K^u`ym?6)^(72~@YG<-*T|U`8l?Hs%GPcw?!#}P9s%bfU
zW3qaU3Qyi$@1*=*3FWR{*yDtqEL;)34ocB(%?an%E3VMnSC%V%Dp3AVR`qW`;2x0N
zR8V$tPWc9ZR)0oUF-76H330Z%E0<(u`-DrzerIs2H(f6lFTEwMP)w-DPZ{<-S=U3Z
zjag)rBZyT?xsAxazOd9T;FXu4Sq>&!P>@XowYS@1iK!ZevelD1Q*t}~
z2K?MSD%%}IDg$WRRrNQXgap!^e
zn5ACEL<0L7Wqoa09sb10dX7B#kg`+#dyIe}nZ1ueFX1u5fpWp}_r3g*Z^|AOpDzfZ
zi-ww<`u+55t8n=^Ql+6E)dLD1uYXU7TE(CF9}%tCjJ*nEY2#lbU176s{pL>$P0v8A
zwv4-IbWt2ZaQ2!F3x_{7SyJ~Oq*qR2i5o3ke9)+~0kD|4*0K#}nYiEZK2Kzjr+|2O
zg0@j`_ufY|b@pXUxo0v~*nncR9{o7b(QO;Z2W^MAwueB^28)girpS{aw+i1UFD?yu
zSQYDtbNGiPKpJl8T~?na%FeAWo-bus@F5J5Uq?bgE6VsBwtYZQX2aXMO