model.js (forked from tensorflow/tfjs-examples)
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import * as tf from '@tensorflow/tfjs';
// Hyperparameters.
const LEARNING_RATE = .1;
const BATCH_SIZE = 64;
const TRAIN_STEPS = 100;
// Data constants.
const IMAGE_SIZE = 28;
const LABELS_SIZE = 10;
const optimizer = tf.train.sgd(LEARNING_RATE);
// Variables that we want to optimize.
const conv1OutputDepth = 8;
const conv1Weights =
    tf.variable(tf.randomNormal([5, 5, 1, conv1OutputDepth], 0, 0.1));

const conv2InputDepth = conv1OutputDepth;
const conv2OutputDepth = 16;
const conv2Weights = tf.variable(
    tf.randomNormal([5, 5, conv2InputDepth, conv2OutputDepth], 0, 0.1));

const fullyConnectedWeights = tf.variable(tf.randomNormal(
    [7 * 7 * conv2OutputDepth, LABELS_SIZE], 0,
    1 / Math.sqrt(7 * 7 * conv2OutputDepth)));
const fullyConnectedBias = tf.variable(tf.zeros([LABELS_SIZE]));
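// Shape note: with 'same' padding each convolution keeps the 28x28 spatial
// size, and each 2x2 max-pool with stride 2 halves it (28 -> 14 -> 7), so the
// flattened input to the dense layer has 7 * 7 * conv2OutputDepth elements.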
// Loss function: mean softmax cross-entropy between one-hot labels and the
// model's logits.
function loss(labels, ys) {
  return tf.losses.softmaxCrossEntropy(labels, ys).mean();
}
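// Illustrative sketch (not part of the original file): the loss expects
// one-hot labels and raw logits with matching shape [batch, LABELS_SIZE]:
//
//   const exampleLabels =
//       tf.oneHot(tf.tensor1d([3, 7], 'int32'), LABELS_SIZE).toFloat();
//   const exampleLogits = tf.randomNormal([2, LABELS_SIZE]);
//   loss(exampleLabels, exampleLogits).print();  // prints a single scalar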
// Our actual model: two conv + max-pool blocks followed by a dense layer.
function model(inputXs) {
  // Reshape the input into [batch, 28, 28, 1] image tensors.
  const xs = inputXs.as4D(-1, IMAGE_SIZE, IMAGE_SIZE, 1);

  const strides = 2;
  const pad = 0;

  // Conv 1: [batch, 28, 28, 1] -> [batch, 14, 14, conv1OutputDepth].
  const layer1 = tf.tidy(() => {
    return xs.conv2d(conv1Weights, 1, 'same')
        .relu()
        .maxPool([2, 2], strides, pad);
  });

  // Conv 2: [batch, 14, 14, conv1OutputDepth] -> [batch, 7, 7, conv2OutputDepth].
  const layer2 = tf.tidy(() => {
    return layer1.conv2d(conv2Weights, 1, 'same')
        .relu()
        .maxPool([2, 2], strides, pad);
  });

  // Final layer: flatten and project to [batch, LABELS_SIZE] logits.
  return layer2.as2D(-1, fullyConnectedWeights.shape[0])
      .matMul(fullyConnectedWeights)
      .add(fullyConnectedBias);
}
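// Usage sketch (illustrative, not part of the original file): logits for a
// dummy batch of four flattened 28x28 images come out as [4, LABELS_SIZE]:
//
//   const dummyBatch = tf.zeros([4, IMAGE_SIZE * IMAGE_SIZE]);
//   console.log(model(dummyBatch).shape);  // [4, 10]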
// Train the model.
export async function train(data, log) {
  // Ask the optimizer to return the loss value so it can be reported.
  const returnCost = true;

  for (let i = 0; i < TRAIN_STEPS; i++) {
    const cost = optimizer.minimize(() => {
      const batch = data.nextTrainBatch(BATCH_SIZE);
      return loss(batch.labels, model(batch.xs));
    }, returnCost);

    log(cost.dataSync(), i);

    // Yield to the browser so the UI stays responsive between steps.
    await tf.nextFrame();
  }
}
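// Usage sketch (assumes a data source such as the MnistData helper from the
// companion data.js, whose nextTrainBatch(batchSize) returns {xs, labels}
// tensors):
//
//   const data = new MnistData();
//   await data.load();
//   await train(data, (lossValue, step) => console.log(`step ${step}:`, lossValue));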
// Predict the digit number from a batch of input images.
export function predict(x) {
  const pred = tf.tidy(() => {
    // argMax over the class dimension gives the most likely digit per example.
    const axis = 1;
    return model(x).argMax(axis);
  });
  return Array.from(pred.dataSync());
}
// Given a logits or label vector, return the class indices.
export function classesFromLabel(y) {
  const axis = 1;
  const pred = y.argMax(axis);
  return Array.from(pred.dataSync());
}
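// Evaluation sketch (illustrative; assumes the data helper also exposes a
// nextTestBatch(batchSize) method): compare predictions against ground-truth
// labels to estimate accuracy on a held-out batch.
//
//   const batch = data.nextTestBatch(100);
//   const predictions = predict(batch.xs);
//   const labels = classesFromLabel(batch.labels);
//   const accuracy =
//       predictions.filter((p, i) => p === labels[i]).length / predictions.length;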