2 files changed: 44 additions, 7 deletions

@@ -22,13 +22,9 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     unzip \
     zip \
     libzmq3-dev \
-    python-dev \
-    python-numpy \
     python3-dev \
     python3-numpy \
-    python-pip \
     python3-pip \
-    python-tk \
     python3-tk \
     libtbb2 \
     libtbb-dev \
@@ -106,8 +102,7 @@ RUN cargo install fd-find ripgrep
 
 
 # Symlink for pip3
-# RUN sudo ln -s /usr/bin/pip3 /usr/bin/pip
+RUN sudo ln -s /usr/bin/pip3 /usr/bin/pip
 
 # Pytorch
-RUN sudo pip3 install https://download.pytorch.org/whl/cu100/torch-1.3.1%2Bcu100-cp36-cp36m-linux_x86_64.whl
-RUN sudo pip3 install torchvision
+RUN sudo pip install torch torchvision
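Note on the install change: the PyTorch line is now unpinned, so whether the resolved wheel is a CUDA or CPU-only build depends on what pip finds at image build time. A minimal sanity check one might run inside the built image (not part of the diff, just an illustrative sketch):

import torch
import torchvision

# Report the versions pip resolved and whether the installed wheel has working CUDA support.
print(torch.__version__, torchvision.__version__)
print(torch.cuda.is_available())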
New file:
@@ -0,0 +1,42 @@
+import numpy as np
+import torch
+import torch.optim as optim
+import torch.nn as nn
+# Data Generation
+np.random.seed(42)
+x = np.random.rand(100, 1)
+y = 1 + 2 * x + .1 * np.random.randn(100, 1)
+
+# Shuffles the indices
+idx = np.arange(100)
+np.random.shuffle(idx)
+
+# Uses first 80 random indices for train
+train_idx = idx[:80]
+# Uses the remaining indices for validation
+val_idx = idx[80:]
+
+# Generates train and validation sets
+x_train, y_train = x[train_idx], y[train_idx]
+x_val, y_val = x[val_idx], y[val_idx]
+
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+# Our data was in Numpy arrays, but we need to transform them into PyTorch's Tensors
+# and then we send them to the chosen device
+x_train_tensor = torch.from_numpy(x_train).float().to(device)
+y_train_tensor = torch.from_numpy(y_train).float().to(device)
+
+# Here we can see the difference - notice that .type() is more useful
+# since it also tells us WHERE the tensor is (device)
+print(type(x_train), type(x_train_tensor), x_train_tensor.type())
+
+print("=============================================")
+print(" x-train ")
+print("=============================================")
+print(x_train)
+
+print("=============================================")
+print(" train-tensor ")
+print("=============================================")
+print(x_train_tensor)
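The new script imports torch.nn and torch.optim but does not use them yet. A minimal sketch, assuming it were appended to the script above (which defines device, x_train_tensor, and y_train_tensor), of how those imports could fit the generated data, whose true parameters are bias 1 and weight 2:

torch.manual_seed(42)
model = nn.Linear(1, 1).to(device)                 # single-feature linear model on the same device
optimizer = optim.SGD(model.parameters(), lr=0.1)  # plain SGD on the model's weight and bias
loss_fn = nn.MSELoss()

for epoch in range(1000):
    optimizer.zero_grad()
    yhat = model(x_train_tensor)                   # forward pass on the training tensors
    loss = loss_fn(yhat, y_train_tensor)
    loss.backward()                                # compute gradients
    optimizer.step()                               # update weight and bias

print(model.state_dict())                          # weight should come out near 2, bias near 1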