@@ -134,26 +134,26 @@ def load_parameters():
RNN_TYPE = 'LSTM' # RNN unit type ('LSTM' and 'GRU' supported)
INIT_FUNCTION = 'glorot_uniform' # Initialization function for matrices (see keras/initializations.py)

- SOURCE_TEXT_EMBEDDING_SIZE = 128 # Source language word embedding size.
+ SOURCE_TEXT_EMBEDDING_SIZE = 64 # Source language word embedding size.
SRC_PRETRAINED_VECTORS = None # Path to pretrained vectors (e.g.: DATA_ROOT_PATH + '/DATA/word2vec.%s.npy' % SRC_LAN)
# Set to None if you don't want to use pretrained vectors.
# When using pretrained word embeddings, this parameter must match the word embedding size.
SRC_PRETRAINED_VECTORS_TRAINABLE = True # Finetune or not the source word embedding vectors.

- TARGET_TEXT_EMBEDDING_SIZE = 128 # Target language word embedding size.
+ TARGET_TEXT_EMBEDDING_SIZE = 64 # Target language word embedding size.
TRG_PRETRAINED_VECTORS = None # Path to pretrained vectors. (e.g. DATA_ROOT_PATH + '/DATA/word2vec.%s.npy' % TRG_LAN)
# Set to None if you don't want to use pretrained vectors.
# When using pretrained word embeddings, the size of the pretrained word embeddings must match the word embedding size.
TRG_PRETRAINED_VECTORS_TRAINABLE = True # Finetune or not the target word embedding vectors.

# Encoder configuration
- ENCODER_HIDDEN_SIZE = 128 # For models with RNN encoder
+ ENCODER_HIDDEN_SIZE = 64 # For models with RNN encoder
BIDIRECTIONAL_ENCODER = True # Use bidirectional encoder
N_LAYERS_ENCODER = 1 # Stack this number of encoding layers
BIDIRECTIONAL_DEEP_ENCODER = True # Use bidirectional encoder in all encoding layers

# Decoder configuration
- DECODER_HIDDEN_SIZE = 128 # For models with RNN decoder
+ DECODER_HIDDEN_SIZE = 64 # For models with RNN decoder
N_LAYERS_DECODER = 1 # Stack this number of decoding layers.
ADDITIONAL_OUTPUT_MERGE_MODE = 'sum' # Merge mode for the skip-connections
ATTENTION_SIZE = DECODER_HIDDEN_SIZE
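
For orientation, the commit halves the word embedding sizes and the encoder/decoder hidden sizes from 128 to 64. The sketch below shows where these sizes typically enter an RNN encoder-decoder built with plain Keras; it is not the repository's model builder. The vocabulary sizes, the state-projection layers, and the attention-free decoder are assumptions made only for this example, and ATTENTION_SIZE (tied here to DECODER_HIDDEN_SIZE) would size the attention mechanism that this sketch omits.

# Minimal illustrative sketch (plain Keras, not the repository's model builder)
# of where these configuration sizes enter an RNN encoder-decoder.
# Vocabulary sizes and the attention-free decoder are assumptions for this example.
from keras.layers import Input, Embedding, LSTM, Bidirectional, Dense, TimeDistributed, Concatenate
from keras.models import Model

SRC_VOCAB_SIZE = 30000              # hypothetical source vocabulary size
TRG_VOCAB_SIZE = 30000              # hypothetical target vocabulary size
SOURCE_TEXT_EMBEDDING_SIZE = 64     # values as set by this commit
TARGET_TEXT_EMBEDDING_SIZE = 64
ENCODER_HIDDEN_SIZE = 64
DECODER_HIDDEN_SIZE = 64

# Encoder: source word indices -> embeddings -> bidirectional LSTM annotations.
src_words = Input(shape=(None,), dtype='int32')
src_emb = Embedding(SRC_VOCAB_SIZE, SOURCE_TEXT_EMBEDDING_SIZE)(src_words)
annotations, fwd_h, fwd_c, bwd_h, bwd_c = Bidirectional(
    LSTM(ENCODER_HIDDEN_SIZE, return_sequences=True, return_state=True))(src_emb)

# Project the final encoder states (2 * ENCODER_HIDDEN_SIZE) down to the decoder size.
summary = Concatenate()([fwd_h, bwd_h])
init_h = Dense(DECODER_HIDDEN_SIZE, activation='tanh')(summary)
init_c = Dense(DECODER_HIDDEN_SIZE, activation='tanh')(summary)

# Decoder (teacher forcing, no attention in this sketch): target embeddings -> LSTM -> softmax.
trg_words = Input(shape=(None,), dtype='int32')
trg_emb = Embedding(TRG_VOCAB_SIZE, TARGET_TEXT_EMBEDDING_SIZE)(trg_words)
decoder_seq = LSTM(DECODER_HIDDEN_SIZE, return_sequences=True)(trg_emb, initial_state=[init_h, init_c])
probs = TimeDistributed(Dense(TRG_VOCAB_SIZE, activation='softmax'))(decoder_seq)

model = Model(inputs=[src_words, trg_words], outputs=probs)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')

Since the embedding matrices scale linearly and the recurrent weight matrices roughly quadratically with these sizes, halving them from 128 to 64 substantially reduces the parameter count of such a model.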