Add activations to models, functionality to acquire & train
ange1a-j14 committed Aug 9, 2024
1 parent 29e7a5c commit 2117900
Showing 3 changed files with 53 additions and 20 deletions.
22 changes: 13 additions & 9 deletions acquire_automatic.py
@@ -14,12 +14,13 @@
rp_s = scpi.scpi(IP)
print('Connected to ' + IP)

-def run_one_shot(start_freq=1, end_freq=1000, decimation=8192, store_data=False, plot_data=False):
+def run_one_shot(start_freq=1, end_freq=1000, ampl=0.1, decimation=8192, store_data=False, plot_data=False, filename='data.h5py'):
"""Runs one shot of driving the speaker with a waveform and collecting the relevant data.
Args:
start_freq (int, optional): the lower bound of the valid frequency range. Defaults to 1.
end_freq (int, optional): the upper bound of the valid frequency range. Defaults to 1000.
+ampl (float, optional): the amplitude of the generated wave. Defaults to 0.1.
decimation (int, optional): Decimation that determines sample rate, should be power of 2. Defaults to 8192.
store_data (bool, optional): Whether to store data in h5py file. Defaults to False.
plot_data (bool, optional): Whether to plot data after acquisition. Defaults to False.
@@ -33,7 +34,6 @@ def run_one_shot(start_freq=1, end_freq=1000, decimation=8192, store_data=False,

wave_form = 'ARBITRARY'
freq = 1 / burst_time
-ampl = 0.1 # good range 0-0.6V

t, y = util.bounded_frequency_waveform(start_freq, end_freq, length=N, sample_rate=smpl_rate)
y = util.linear_convert(y) # convert range of waveform to [-1, 1] to properly set ampl
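For reference, a minimal sketch of what `util.bounded_frequency_waveform` and `util.linear_convert` might do, assuming the drive signal is noise band-limited in the frequency domain; both implementations are assumptions for illustration, not taken from this repo's util module:

```python
import numpy as np

def bounded_frequency_waveform(start_freq, end_freq, length, sample_rate):
    # Assumed behavior: white noise whose spectrum is restricted to [start_freq, end_freq]
    freqs = np.fft.rfftfreq(length, d=1 / sample_rate)
    spectrum = np.random.randn(freqs.size) + 1j * np.random.randn(freqs.size)
    spectrum[(freqs < start_freq) | (freqs > end_freq)] = 0  # zero out-of-band bins
    y = np.fft.irfft(spectrum, n=length)
    t = np.arange(length) / sample_rate
    return t, y

def linear_convert(y, new_min=-1.0, new_max=1.0):
    # Assumed behavior: linear rescale so the generator's amplitude setting is fully used
    span = y.max() - y.min()
    return (y - y.min()) / span * (new_max - new_min) + new_min
```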
@@ -57,9 +57,9 @@ def run_one_shot(start_freq=1, end_freq=1000, decimation=8192, store_data=False,
# Function for configuring Acquisition
rp_s.acq_set(dec=decimation, trig_delay=0)
rp_s.tx_txt('ACQ:START')
-time.sleep(1)
+time.sleep(0.5)
rp_s.tx_txt('ACQ:TRig CH2_PE')
-time.sleep(1)
+# time.sleep(1)

# Wait for trigger
while 1:
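The body of the wait loop is truncated in this view; the typical Red Pitaya SCPI trigger-wait idiom looks like the following (a sketch of the usual pattern from the vendor examples, not necessarily this repo's exact code):

```python
while 1:
    rp_s.tx_txt('ACQ:TRig:STAT?')   # query trigger status
    if rp_s.rx_txt() == 'TD':       # 'TD' means the trigger has fired
        break
```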
@@ -111,7 +111,7 @@ def run_one_shot(start_freq=1, end_freq=1000, decimation=8192, store_data=False,
if store_data:
# Store data in h5py file
path = "/Users/angelajia/Code/College/SMI/data/"
filename = "data.h5py"
# filename = "training_data.h5py"
file_path = os.path.join(path, filename)

entries = {
@@ -128,8 +128,12 @@ def run_one_shot(start_freq=1, end_freq=1000, decimation=8192, store_data=False,
rp_s.tx_txt('GEN:RST')
rp_s.tx_txt('ACQ:RST')

-num_shots = 200
+num_shots = 2000
+amplitude = 0
for i in range(num_shots):
-if i % 100 == 0:
-print(i)
-run_one_shot(1, 1000, decimation=256, store_data=True, plot_data=False)
+amplitude = np.random.uniform(0.1, 0.6)
+if i % 500 == 0:
+print(f"{i}: ampl = {amplitude}")
+run_one_shot(30, 1000, ampl=amplitude, decimation=256, store_data=True, plot_data=False,
+filename='test_30to1kHz_2kshots_dec=256_randampl.h5py')
+# print(i)
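The `entries` dict passed to storage is truncated above; presumably each shot's traces are appended to a resizable dataset per key. A minimal sketch of that append pattern with h5py (the `store_shot` helper is hypothetical; the dataset keys come from train.py):

```python
import h5py
import numpy as np

def store_shot(file_path, entries):
    # Sketch: append one row per shot to a resizable dataset for each key
    with h5py.File(file_path, 'a') as f:
        for key, values in entries.items():
            row = np.asarray(values)
            if key not in f:
                f.create_dataset(key, data=row[np.newaxis, :],
                                 maxshape=(None, row.size))
            else:
                dset = f[key]
                dset.resize(dset.shape[0] + 1, axis=0)
                dset[-1] = row

# usage: store_shot(file_path, {'PD (V)': pd_trace, 'Speaker (Microns/s)': velocity_trace})
```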
8 changes: 7 additions & 1 deletion models.py
@@ -3,18 +3,24 @@
from torch import nn
import torch

+act_fn_by_name = {'Tanh': nn.Tanh(), 'LeakyReLU': nn.LeakyReLU(), 'ReLU': nn.ReLU()}
+
class CNN(nn.Module):
-def __init__(self, input_size, output_size):
+def __init__(self, input_size, output_size, activation='ReLU'):
super(CNN, self).__init__()
self.conv_layers = nn.Sequential(
nn.Conv1d(input_size, 16, kernel_size=7), # Lout = 250, given L = 256
+act_fn_by_name[activation],
nn.MaxPool1d(2), # Lout = 125, given L = 250
nn.Conv1d(16, 32, kernel_size=7), # Lout = 119, given L = 125
+act_fn_by_name[activation],
nn.MaxPool1d(2), # Lout = 59, given L = 119
nn.Conv1d(32, 64, kernel_size=7), # Lout = 53, given L = 59
+act_fn_by_name[activation],
nn.MaxPool1d(2), # Lout = 26, given L = 53
nn.Dropout(0.1),
nn.Conv1d(64, 64, kernel_size=7), # Lout = 20, given L = 26
+act_fn_by_name[activation],
nn.MaxPool1d(2) # Lout = 10, given L = 20
)
self.fc_layers = nn.Sequential(
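The Lout comments follow the usual stride-1, no-padding arithmetic: Lout = L - kernel_size + 1 after each Conv1d, then floor(L/2) after each MaxPool1d. A quick sanity check of the new `activation` argument (assuming a single input channel; batch size and shapes here are illustrative):

```python
import torch
from models import CNN

model = CNN(input_size=1, output_size=1, activation='LeakyReLU')
x = torch.randn(4, 1, 256)        # [batch, channels, buffer_len]
features = model.conv_layers(x)
print(features.shape)             # torch.Size([4, 64, 10]), matching the Lout comments
```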
43 changes: 33 additions & 10 deletions train.py
@@ -21,16 +21,39 @@ def __init__(self, h5_file):
with h5py.File(self.h5_file, 'r') as f:
self.length = len(f['Time (s)']) # num shots

-def open_hdf5(self, num_groups=64, group_size=256):
+def open_hdf5(self, group_size=256, step=1):
+"""Set up inputs and targets. For each shot, the buffer is split into rolling groups.
+Inputs are groups of 'group_size' consecutive photodiode samples whose start indices are spaced 'step' apart.
+Targets are the average speaker velocity over each group.
+Input shape is [num_shots, num_groups, group_size] and target shape is [num_shots, num_groups, 1],
+where num_groups = (buffer_len - group_size)/step + 1, provided that buffer_len - group_size is a multiple of step.
+If the given 'group_size' and 'step' do not satisfy this requirement,
+the data will not be cleanly grouped.
+Args:
+group_size (int, optional): Size of each group. buffer_len - group_size ≡ 0 (mod step). Defaults to 256.
+step (int, optional): Step between group starts. buffer_len - group_size ≡ 0 (mod step). Defaults to 1.
+"""
# solves issue where hdf5 file opened in __init__ prevents multiple
# workers: https://github.com/pytorch/pytorch/issues/11929
self.file = h5py.File(self.h5_file, 'r')
-grouped_pd = np.array(np.hsplit(self.file['PD (V)'], num_groups)) # [num_groups, num_shots, group_size]
-self.inputs = np.transpose(grouped_pd, [1, 0, 2]) # [num_shots, num_groups, group_size]
-grouped_velocities = np.array(np.hsplit(self.file['Speaker (Microns/s)'], num_groups)) # [num_groups, num_shots, group_size]
-grouped_velocities = np.transpose(grouped_velocities, [1, 0, 2]) # [num_shots, num_groups, group_size]
-grouped_velocities = np.average(grouped_velocities, axis=2) # store average velocity per group per shot: [num_shots, num_groups]
-self.targets = np.expand_dims(grouped_velocities, axis=2) # [num_shots, num_groups, 1]
+pds = self.file['PD (V)'] # [num_shots, buffer_size]
+vels = self.file['Speaker (Microns/s)'] # [num_shots, buffer_size]
+num_groups = pds.shape[1] // group_size # needed by hsplit below; this non-rolling split assumes step == group_size
+
+grouped_pds = np.array(np.hsplit(pds, num_groups)) # [num_groups, num_shots, group_size]
+self.inputs = np.transpose(grouped_pds, [1, 0, 2]) # [num_shots, num_groups, group_size]
+grouped_vels = np.array(np.hsplit(vels, num_groups)) # [num_groups, num_shots, group_size]
+grouped_vels = np.transpose(grouped_vels, [1, 0, 2]) # [num_shots, num_groups, group_size]
+grouped_vels = np.average(grouped_vels, axis=2) # average velocity per group per shot: [num_shots, num_groups]
+self.targets = np.expand_dims(grouped_vels, axis=2) # [num_shots, num_groups, 1]

+## FOR ROLLING INPUT
+# grouped_pds = np.array([pds[:, i:i+group_size] for i in range(0, len(pds[0])-group_size+1, step)]) # [num_groups, num_shots, group_size]
+# self.inputs = np.transpose(grouped_pds, [1, 0, 2]) # [num_shots, num_groups, group_size]
+# grouped_vels = np.array([vels[:, i:i+group_size] for i in range(0, len(vels[0])-group_size+1, step)]) # [num_groups, num_shots, group_size]
+# grouped_vels = np.transpose(grouped_vels, [1, 0, 2]) # [num_shots, num_groups, group_size]
+# grouped_vels = np.average(grouped_vels, axis=2) # average velocity per group per shot: [num_shots, num_groups]
+# self.targets = np.expand_dims(grouped_vels, axis=2) # [num_shots, num_groups, 1]

def __len__(self):
return self.length
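For the rolling variant sketched in the comments above, numpy's `sliding_window_view` produces the same [num_shots, num_groups, group_size] grouping without the manual loop. A sketch with random stand-in data in place of the h5py datasets, shapes matching the docstring formula num_groups = (buffer_len - group_size)/step + 1:

```python
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

group_size, step = 256, 1
pds = np.random.randn(8, 1024)    # stand-in for self.file['PD (V)']: [num_shots, buffer_len]
vels = np.random.randn(8, 1024)   # stand-in for self.file['Speaker (Microns/s)']

inputs = sliding_window_view(pds, group_size, axis=1)[:, ::step, :]   # [num_shots, num_groups, group_size]
targets = sliding_window_view(vels, group_size, axis=1)[:, ::step, :].mean(axis=2, keepdims=True)

# num_groups = (1024 - 256)//1 + 1 = 769
print(inputs.shape, targets.shape)  # (8, 769, 256) (8, 769, 1)
```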
@@ -152,13 +175,13 @@ def scan_hyperparams(self):

self.train_model(model_name="CNN",
model_hparams=model_config,
optimizer_name="Adam",
optimizer_name="SGD",
optimizer_hparams=optimizer_config,
misc_hparams=misc_config)

-def load_model(self):
+def load_model(self, model_name='CNN', model_tag=None): # model_tag given a default: a non-default arg cannot follow model_name='CNN'
# Check whether pretrained model exists. If yes, load it and skip training
pretrained_filename = os.path.join(self.checkpoint_dir, "SMI", "f63rieqp",
pretrained_filename = os.path.join(self.checkpoint_dir, model_name, "SMI", model_tag,
"checkpoints", "*" + ".ckpt")
print(pretrained_filename)
if os.path.isfile(glob.glob(pretrained_filename)[0]):
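One caveat with the lookup above: `glob.glob(pretrained_filename)[0]` raises IndexError when no checkpoint matches the pattern. A defensive sketch of the same lookup (the `find_checkpoint` helper is illustrative; 'f63rieqp' is the run tag from the old hard-coded path):

```python
import glob
import os

def find_checkpoint(checkpoint_dir, model_name='CNN', model_tag='f63rieqp'):
    # Sketch: return the first matching .ckpt path, or None if nothing matches
    pattern = os.path.join(checkpoint_dir, model_name, 'SMI', model_tag,
                           'checkpoints', '*.ckpt')
    matches = glob.glob(pattern)
    return matches[0] if matches else None
```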
