-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmnist_example.py
More file actions
102 lines (79 loc) · 2.95 KB
/
mnist_example.py
File metadata and controls
102 lines (79 loc) · 2.95 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
#!/usr/bin/env python3
"""
KortexDL Digit Classification - Digits Dataset (Stable)
========================================================
Stable training on handwritten digits.
Usage:
python mnist_example.py
"""
import numpy as np
import kortexdl as bd  # project-local neural-network library (provides Network, ActivationType, LossType)
# scikit-learn is an optional dependency: import lazily and record availability
# so main() can print an actionable install hint instead of crashing at import time.
try:
    from sklearn.datasets import load_digits
    from sklearn.model_selection import train_test_split
    SKLEARN_AVAILABLE = True
except ImportError:
    SKLEARN_AVAILABLE = False
def _evaluate(net, X_test, y_test_raw):
    """Run the network over the test set.

    Returns (correct_count, predictions) where predictions is a list of
    predicted class indices. net.forward(inputs, 1, False) is assumed to
    return the 10 output activations for one sample — TODO confirm against
    the kortexdl API.
    """
    predictions = [int(np.argmax(net.forward(X_test[i].tolist(), 1, False)))
                   for i in range(len(X_test))]
    correct = sum(1 for pred, true in zip(predictions, y_test_raw) if pred == true)
    return correct, predictions


def main():
    """Train a small KortexDL network on sklearn's 8x8 digits dataset.

    Returns:
        0 on success, 1 when scikit-learn is not installed.
    """
    print("🎯 KortexDL Digit Classification - Digits Dataset")
    print("=" * 60)
    if not SKLEARN_AVAILABLE:
        print("❌ sklearn required: pip install scikit-learn")
        return 1

    # Load digits; raw pixel values are 0-16, so dividing by 16 normalizes to [0, 1].
    print("\n📁 Loading Digits dataset...")
    data = load_digits()
    X = data.data.astype(np.float32) / 16.0  # Normalize to 0-1
    y_raw = data.target

    # One-hot encode the labels. Vectorized fancy indexing replaces the
    # original per-sample Python loop — one C-level pass instead of N.
    n_classes = 10
    y = np.zeros((len(y_raw), n_classes), dtype=np.float32)
    y[np.arange(len(y_raw)), y_raw] = 1.0
    print(f"✅ Dataset: {len(X)} samples, 64 features (8x8), 10 classes")

    # 80/20 split with a fixed seed so runs are reproducible.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    y_test_raw = np.argmax(y_test, axis=1)
    print(f"✅ Split: {len(X_train)} train, {len(X_test)} test")

    # Small sigmoid MLP: 64 inputs (8x8 pixels) -> two hidden layers -> 10 classes.
    print("\n🧠 Creating network...")
    net = bd.Network([64, 64, 32, 10], bd.ActivationType.Sigmoid)
    print("✅ Network: 64 -> 64 -> 32 -> 10 (Sigmoid)")

    # Full-batch gradient descent. Flattening is loop-invariant, so it is
    # done once before the epoch loop.
    print("\n🏋️ Training...")
    epochs = 500
    learning_rate = 1.0  # high LR works here because inputs are normalized and batch is full
    X_flat = X_train.flatten().tolist()
    y_flat = y_train.flatten().tolist()
    for epoch in range(epochs):
        loss = net.train_batch(X_flat, y_flat, bd.LossType.MSE, learning_rate, len(X_train))
        # Periodic progress report (every 100 epochs, including epoch 0).
        if epoch % 100 == 0:
            correct, _ = _evaluate(net, X_test, y_test_raw)
            acc = correct / len(X_test) * 100
            print(f" Epoch {epoch:3d}: Loss = {loss:.4f}, Acc = {acc:.1f}%")

    # Final evaluation — shares _evaluate with the training-loop reporting,
    # removing the original duplicated prediction loop.
    print("\n📈 Final Evaluation...")
    correct, predictions = _evaluate(net, X_test, y_test_raw)
    accuracy = correct / len(X_test) * 100
    print(f"✅ Test Accuracy: {accuracy:.1f}%")
    print(f"✅ Correct: {correct}/{len(X_test)}")

    # Show the first few predictions next to their ground-truth labels.
    print("\n📊 Sample Predictions:")
    for i in range(min(10, len(X_test))):
        pred = predictions[i]
        true = y_test_raw[i]
        status = "✓" if pred == true else "✗"
        print(f" True: {true} Pred: {pred} {status}")

    print("\n" + "=" * 60)
    print("✅ Complete!")
    return 0
if __name__ == "__main__":
    # raise SystemExit(code) instead of the site-module exit() builtin:
    # exit() is only injected by the `site` module and is absent under
    # `python -S` and in frozen/embedded interpreters; SystemExit always works
    # and propagates main()'s int return value as the process exit code.
    raise SystemExit(main())