From d77811183647ee2508e739cbfd23af198e23a50f Mon Sep 17 00:00:00 2001 From: Ran Wang Date: Sun, 14 Jun 2020 21:29:02 -0400 Subject: [PATCH 01/14] residual+temporalw+attention --- AllSubjectInfo.json | 42 +++ ECoGDataSet.py | 786 ++++++++++++++++++++++++++++++++++++++++++++ checkpointer.py | 3 +- configs/ecog.yaml | 57 ++++ dataloader_ecog.py | 310 +++++++++++++++++ defaults.py | 11 +- launcher.py | 1 + losses.py | 1 - lreq.py | 115 ++++++- model.py | 119 +++++-- model_param.json | 37 +++ net.py | 422 +++++++++++++++++++----- sample1_1.npy | Bin 0 -> 131200 bytes sample1_2.npy | Bin 0 -> 131200 bytes sample2_1.npy | Bin 0 -> 131200 bytes sample2_2.npy | Bin 0 -> 131200 bytes sample3_1.npy | Bin 0 -> 131200 bytes sample3_2.npy | Bin 0 -> 131200 bytes train_alae.py | 126 ++++--- train_param.json | 50 +++ 20 files changed, 1915 insertions(+), 165 deletions(-) create mode 100644 AllSubjectInfo.json create mode 100644 ECoGDataSet.py create mode 100644 configs/ecog.yaml create mode 100644 dataloader_ecog.py create mode 100644 model_param.json create mode 100644 sample1_1.npy create mode 100644 sample1_2.npy create mode 100644 sample2_1.npy create mode 100644 sample2_2.npy create mode 100644 sample3_1.npy create mode 100644 sample3_2.npy create mode 100644 train_param.json diff --git a/AllSubjectInfo.json b/AllSubjectInfo.json new file mode 100644 index 00000000..f8da3f3b --- /dev/null +++ b/AllSubjectInfo.json @@ -0,0 +1,42 @@ +{ + "Shared":{ + "RootPath":"/scratch/akg404-share/ECoGData_Mar_11_20/FunctionalMapping/", + "ORG_WAVE_FS": 24414.1, + "DOWN_WAVE_FS": 16000, + "ORG_ECOG_FS": 3051.7625, + "ORG_ECOG_FS_NY": 512, + "ORG_TF_FS": 125, + "AUDITORY" : ["cSTG","mSTG"], + "BROCA" : ["parstriangularis","parsopercularis"], + "MOTO" : ["precentral"], + "SENSORY" : ["postcentral"] + }, + "Subj":{ + "NY717":{ + "Density":"HB", + "Task":["AudN","SenComp","VisRead","PicN","AudRep"] + }, + "NY742":{ + "Density":"HB", + "Task":["AudN","SenComp","VisRead","PicN","AudRep"] + }, + "NY749":{ + "Task":["AudN","SenComp","VisRead","PicN","AudRep"] + }, + "HD06":{ + "Density":"HD", + "Task":["AudName","AudRep"], + "EventRange":-100 + }, + "HD01":{ + "Density":"HD", + "Task":["AudName","AudRep"], + "BadSamples":[[1,2,3],[1,3]] + } + }, + "BadSamples":{ + "HD01":{ + "AudRep":[1,2,3] + } + } +} \ No newline at end of file diff --git a/ECoGDataSet.py b/ECoGDataSet.py new file mode 100644 index 00000000..add52832 --- /dev/null +++ b/ECoGDataSet.py @@ -0,0 +1,786 @@ +import json +import torch +import os +import numpy as np +import scipy.io +from scipy import signal +import h5py +import random +import pandas +from torch.utils.data import Dataset + + +class ECoGDataset(Dataset): + """docstring for ECoGDataset""" + def zscore(self,ecog,badelec,axis=None): + statics_ecog = np.delete(ecog,badelec,axis=1).mean(axis=axis, keepdims=True)+1e-10,np.delete(ecog,badelec,axis=1).std(axis=axis, keepdims=True)+1e-10 + # statics_ecog = ecog.mean(axis=axis, keepdims=True)+1e-10,ecog.std(axis=axis, keepdims=True)+1e-10 + ecog = (ecog-statics_ecog[0])/statics_ecog[1] + return ecog, statics_ecog + + def rearrange(self,data,crop=None,mode = 'ecog'): + rows = [0,1,2,3,4,5,6,8,9,10,11] + starts = [1,0,1,0,1,0,1,7,6,7,7] + ends = [6,6,6,9,12,14,12,14,14,14,8] + strides = [2,1,2,1,2,1,2,2,1,2,1] + electrodes = [64,67,73,76,85,91,105,111,115,123,127,128] + if mode == 'ecog': + data_new = np.zeros((data.shape[0],15,15)) + data_new[:,::2,::2] = np.reshape(data[:,:64],[-1,8,8]) + for i in range(len(rows)): + 
data_new[:,rows[i],starts[i]:ends[i]:strides[i]] = data[:,electrodes[i]:electrodes[i+1]] + if crop is None: + return np.reshape(data_new,[data.shape[0],-1]) + else: + return np.reshape(data_new[:,crop[0]:crop[0]+crop[2],crop[1]:crop[1]+crop[3]],[data.shape[0],-1]) # TxN + + elif mode == 'coord': + data_new = np.zeros((15,15,data.shape[-1])) + data_new[::2,::2] = np.reshape(data[:64],[8,8,-1]) + for i in range(len(rows)): + data_new[rows[i],starts[i]:ends[i]:strides[i]] = data[electrodes[i]:electrodes[i+1]] + if crop is None: + return np.reshape(data_new,[-1,data.shape[-1]]) # Nx3 + else: + return np.reshape(data_new[crop[0]:crop[0]+crop[2],crop[1]:crop[1]+crop[3]],[-1,data.shape[-1]]) # Nx3 + + elif mode == 'region': + region_new = np.chararray((15,15),itemsize=100) + region_new[:] = 'nan' + region_new[::2,::2] = np.reshape(data[:64],[8,8]) + for i in range(len(rows)): + region_new[rows[i],starts[i]:ends[i]:strides[i]] = data[electrodes[i]:electrodes[i+1]] + if crop is None: + return np.reshape(region_new,[-1]) + else: + return np.reshape(region_new[crop[0]:crop[0]+crop[2],crop[1]:crop[1]+crop[3]],[-1]) + + elif mode == 'mask': + data_new = np.zeros((15,15)) + data_new[::2,::2] = np.reshape(data[:64],[8,8]) + for i in range(len(rows)): + data_new[rows[i],starts[i]:ends[i]:strides[i]] = data[electrodes[i]:electrodes[i+1]] + if crop is None: + return np.reshape(data_new,[-1]) + else: + return np.reshape(data_new[crop[0]:crop[0]+crop[2],crop[1]:crop[1]+crop[3]],[-1]) + + def select_block(self,ecog,regions,mask,mni_coord,select,block): + if not select and not block: + return ecog,regions,mask,mni_coord + if self.ReshapeAsGrid: + if select: + ecog_ = np.zeros(ecog.shape) + mask_ = np.zeros(mask.shape) + mni_coord_ = np.zeros(mni_coord.shape) + for region in select: + region_ind = [region.encode() == regions[i] for i in range(regions.shape[0])] + ecog_[:,region_ind] = ecog[:,region_ind] + mask_[region_ind] = mask[region_ind] + mni_coord_[region_ind] = mni_coord[region_ind] + return ecog_,regions,mask_,mni_coord_ + if block: + for region in block: + region_ind = [region.encode() == regions[i] for i in range(regions.shape[0])] + ecog[:,region_ind] = 0 + mask[region_ind] = 0 + mni_coord[region_ind]=0 + return ecog,regions,mask,mni_coord + else: + # region_ind = np.ones(regions.shape[0],dtype=bool) + region_ind = np.array([]) + if select: + # region_ind = np.zeros(regions.shape[0],dtype=bool) + for region in select: + region_ind = np.concatenate([region_ind, np.where(np.array([region in regions[i] for i in range(regions.shape[0])]))[0]]) + if block: + # region_ind = np.zeros(regions.shape[0],dtype=bool) + for region in block: + # region_ind = np.logical_or(region_ind, np.array([region in regions[i] for i in range(regions.shape[0])])) + region_ind = np.concatenate([region_ind, np.where(np.array([region in regions[i] for i in range(regions.shape[0])]))[0]]) + # region_ind = np.logical_not(region_ind) + region_ind = np.delete(np.arange(regions.shape[0]),region_ind) + region_ind = region_ind.astype(np.int64) + return ecog[:,region_ind],regions[region_ind],mask[region_ind],mni_coord[region_ind] + def __init__(self, ReqSubjDict, mode = 'train', train_param = None): + """ ReqSubjDict can be a list of multiple subjects""" + super(ECoGDataset, self).__init__() + self.current_lod=2 + self.ReqSubjDict = ReqSubjDict + self.mode = mode + with open('AllSubjectInfo.json','r') as rfile: + allsubj_param = json.load(rfile) + if (train_param == None): + with open('train_param.json','r') as rfile: + train_param = 
json.load(rfile) + + self.rootpath = allsubj_param['Shared']['RootPath'] + self.ORG_WAVE_FS = allsubj_param['Shared']['ORG_WAVE_FS'] + self.ORG_ECOG_FS = allsubj_param['Shared']['ORG_ECOG_FS'] + self.DOWN_WAVE_FS = allsubj_param['Shared']['DOWN_WAVE_FS'] + self.ORG_ECOG_FS_NY = allsubj_param['Shared']['ORG_ECOG_FS_NY'] + self.ORG_TF_FS = allsubj_param['Shared']['ORG_TF_FS'] + self.cortex = {} + self.cortex.update({"AUDITORY" : allsubj_param['Shared']['AUDITORY']}) + self.cortex.update({"BROCA" : allsubj_param['Shared']['BROCA']}) + self.cortex.update({"MOTO" : allsubj_param['Shared']['MOTO']}) + self.cortex.update({"SENSORY" : allsubj_param['Shared']['SENSORY']}) + self.SelectRegion = [] + [self.SelectRegion.extend(self.cortex[area]) for area in train_param["SelectRegion"]] + self.BlockRegion = [] + [self.BlockRegion.extend(self.cortex[area]) for area in train_param["BlockRegion"]] + self.Prod,self.SpecBands,self.UseGridOnly,self.ReshapeAsGrid,self.SeqLen = train_param['Prod'],\ + train_param['SpecBands'],\ + train_param['UseGridOnly'],\ + train_param['ReshapeAsGrid'],\ + train_param['SeqLen'], + self.ahead_onset_test = train_param['Test']['ahead_onset'] + self.ahead_onset_train = train_param['Train']['ahead_onset'] + self.DOWN_TF_FS = train_param['DOWN_TF_FS'] + self.DOWN_ECOG_FS = train_param['DOWN_ECOG_FS'] + self.TestNum_cum=np.array([],dtype=np.int32) + + datapath = [] + analysispath = [] + ecog_alldataset = [] + spkr_alldataset = [] + spkr_re_alldataset = [] + spkr_static_alldataset = [] + spkr_re_static_alldataset = [] + start_ind_alldataset = [] + start_ind_valid_alldataset = [] + start_ind_wave_alldataset = [] + start_ind_wave_valid_alldataset = [] + end_ind_alldataset = [] + end_ind_valid_alldataset = [] + end_ind_wave_alldataset = [] + end_ind_wave_valid_alldataset = [] + start_ind_re_alldataset = [] + start_ind_re_valid_alldataset = [] + start_ind_re_wave_alldataset = [] + start_ind_re_wave_valid_alldataset = [] + end_ind_re_alldataset = [] + end_ind_re_valid_alldataset = [] + end_ind_re_wave_alldataset = [] + end_ind_re_wave_valid_alldataset = [] + word_alldataset = [] + label_alldataset = [] + wave_alldataset = [] + wave_re_alldataset = [] + bad_samples_alldataset = [] + baseline_alldataset = [] + mni_coordinate_alldateset = [] + T1_coordinate_alldateset = [] + regions_alldataset =[] + mask_prior_alldataset = [] + dataset_names = [] + ecog_len = [] + unique_labels = [] + # self.ORG_WAVE_FS,self.DOWN_ECOG_FS,self.DOWN_WAVE_FS = allsubj_param['Shared']['ORG_WAVE_FS'],\ + # allsubj_param['Shared']['DOWN_ECOG_FS'],\ + # allsubj_param['Shared']['DOWN_WAVE_FS'],\ + + # spkrdata = h5py.File(DATA_DIR[0][0]+'TF32_16k.mat','r') + # spkr = np.asarray(spkrdata['TFlog']) + # samples_for_statics_ = spkr[statics_samples_spkr[0][0*2]:statics_samples_spkr[0][0*2+1]] + flag_zscore = False + for subj in self.ReqSubjDict: + subj_param = allsubj_param['Subj'][subj] + Density = subj_param['Density'] + Crop = train_param["Subj"][subj]['Crop'] + datapath = os.path.join(self.rootpath,subj,'data') + analysispath = os.path.join(self.rootpath,subj,'analysis') + ecog_ = [] + ecog_len_=[0] + start_ind_train_=[] + end_ind_train_ = [] + end_ind_valid_train_ = [] + start_ind_valid_train_=[] + start_ind_wave_down_train_ =[] + end_ind_wave_down_train_ =[] + start_ind_wave_down_valid_train_ =[] + end_ind_wave_down_valid_train_ =[] + start_ind_re_train_=[] + end_ind_re_train_ = [] + end_ind_re_valid_train_ = [] + start_ind_re_valid_train_=[] + start_ind_re_wave_down_train_ =[] + end_ind_re_wave_down_train_ =[] + 
start_ind_re_wave_down_valid_train_ =[] + end_ind_re_wave_down_valid_train_ =[] + + start_ind_test_=[] + end_ind_test_ = [] + end_ind_valid_test_ = [] + start_ind_valid_test_=[] + start_ind_wave_down_test_ =[] + end_ind_wave_down_test_ =[] + start_ind_wave_down_valid_test_ =[] + end_ind_wave_down_valid_test_ =[] + start_ind_re_test_=[] + end_ind_re_test_ = [] + end_ind_re_valid_test_ = [] + start_ind_re_valid_test_=[] + start_ind_re_wave_down_test_ =[] + end_ind_re_wave_down_test_ =[] + start_ind_re_wave_down_valid_test_ =[] + end_ind_re_wave_down_valid_test_ =[] + spkr_=[] + wave_=[] + spkr_re_=[] + wave_re_=[] + word_train=[] + labels_train=[] + word_test=[] + labels_test=[] + bad_samples_=np.array([]) + self.TestNum_cum = np.append(self.TestNum_cum, np.array(train_param["Subj"][subj]['TestNum']).sum().astype(np.int32)) + for xx,task_to_use in enumerate(train_param["Subj"][subj]['Task']): + self.TestNum = train_param["Subj"][subj]['TestNum'][xx] + # for file in range(len(DATA_DIR)): + HD = True if Density == "HD" else False + datapath_task = os.path.join(datapath,task_to_use) + analysispath_task = os.path.join(analysispath,task_to_use) + # if REPRODFLAG is None: + # self.Prod = True if 'NY' in DATA_DIR[ds][file] and 'share' in DATA_DIR[ds][file] else False + # else: + # self.Prod = REPRODFLAG + print("load data from: ", datapath_task) + ecogdata = h5py.File(os.path.join(datapath_task,'gdat_env.mat'),'r') + ecog = np.asarray(ecogdata['gdat_env']) + # ecog = np.minimum(ecog,data_range_max[ds][file]) + ecog = np.minimum(ecog,30) + event_range = None if "EventRange" not in subj_param.keys() else subj_param["EventRange"] + # bad_samples = [] if "BadSamples" not in subj_param.keys() else subj_param["BadSamples"] + start_ind_wave = scipy.io.loadmat(os.path.join(analysispath_task,'Events.mat'))['Events']['onset'][0] + start_ind_wave = np.asarray([start_ind_wave[i][0,0] for i in range(start_ind_wave.shape[0])])[:event_range] + end_ind_wave = scipy.io.loadmat(os.path.join(analysispath_task,'Events.mat'))['Events']['offset'][0] + end_ind_wave = np.asarray([end_ind_wave[i][0,0] for i in range(end_ind_wave.shape[0])])[:event_range] + + if self.Prod: + start_ind_re_wave = scipy.io.loadmat(os.path.join(analysispath_task,'Events.mat'))['Events']['onset_r'][0] + start_ind_re_wave = np.asarray([start_ind_re_wave[i][0,0] for i in range(start_ind_re_wave.shape[0])])[:event_range] + end_ind_re_wave = scipy.io.loadmat(os.path.join(analysispath_task,'Events.mat'))['Events']['offset_r'][0] + end_ind_re_wave = np.asarray([end_ind_re_wave[i][0,0] for i in range(end_ind_re_wave.shape[0])])[:event_range] + if HD: + start_ind = (start_ind_wave*1.0/self.ORG_WAVE_FS*self.DOWN_ECOG_FS).astype(np.int64) # in ECoG sample + start_ind_wave_down = (start_ind_wave*1.0/self.ORG_WAVE_FS*self.DOWN_WAVE_FS).astype(np.int64) + end_ind = (end_ind_wave*1.0/self.ORG_WAVE_FS*self.DOWN_ECOG_FS).astype(np.int64) # in ECoG sample + end_ind_wave_down = (end_ind_wave*1.0/self.ORG_WAVE_FS*self.DOWN_WAVE_FS).astype(np.int64) + try: + bad_samples = allsubj_param['BadSamples'][subj][task_to_use] + except KeyError: + bad_samples = [] + bad_samples_ = np.concatenate([bad_samples_,np.array(bad_samples)]) + start_ind_valid = np.delete(start_ind,bad_samples) + end_ind_valid = np.delete(end_ind,bad_samples) + start_ind_wave_down_valid = np.delete(start_ind_wave_down,bad_samples) + end_ind_wave_down_valid = np.delete(end_ind_wave_down,bad_samples) + else: + start_ind =
(start_ind_wave*1.0/self.ORG_ECOG_FS_NY*self.DOWN_ECOG_FS).astype(np.int64) + start_ind_wave_down = (start_ind_wave*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + end_ind = (end_ind_wave*1.0/self.ORG_ECOG_FS_NY*self.DOWN_ECOG_FS).astype(np.int64) + end_ind_wave_down = (end_ind_wave*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + if self.Prod: + bad_samples_HD = scipy.io.loadmat(os.path.join(analysispath_task,'Events.mat'))['Events']['badrsp'][0] + else: + bad_samples_HD = scipy.io.loadmat(os.path.join(analysispath_task,'Events.mat'))['Events']['badevent'][0] + bad_samples_HD = np.asarray([bad_samples_HD[i][0,0] for i in range(bad_samples_HD.shape[0])]) + bad_samples_ = np.concatenate((bad_samples_,bad_samples_HD)) + bad_samples_HD = np.where(np.logical_or(np.logical_or(bad_samples_HD==1, bad_samples_HD==2) , bad_samples_HD==4))[0] + start_ind_valid = np.delete(start_ind,bad_samples_HD) + end_ind_valid = np.delete(end_ind,bad_samples_HD) + start_ind_wave_down_valid = np.delete(start_ind_wave_down,bad_samples_HD) + end_ind_wave_down_valid = np.delete(end_ind_wave_down,bad_samples_HD) + if self.Prod: + start_ind_re = (start_ind_re_wave*1.0/self.ORG_ECOG_FS_NY*self.DOWN_ECOG_FS).astype(np.int64) + start_ind_re_wave_down = (start_ind_re_wave*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + end_ind_re = (end_ind_re_wave*1.0/self.ORG_ECOG_FS_NY*self.DOWN_ECOG_FS).astype(np.int64) + end_ind_re_wave_down = (end_ind_re_wave*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + start_ind_re_valid = np.delete(start_ind_re,bad_samples_HD) + end_ind_re_valid = np.delete(end_ind_re,bad_samples_HD) + start_ind_re_wave_down_valid = np.delete(start_ind_re_wave_down,bad_samples_HD) + end_ind_re_wave_down_valid = np.delete(end_ind_re_wave_down,bad_samples_HD) + + + ecog = signal.resample_poly(ecog,self.DOWN_ECOG_FS*10000,30517625,axis=0) if HD else signal.resample_poly(ecog,self.DOWN_ECOG_FS,self.ORG_ECOG_FS_NY,axis=0) # resample to 125 hz + baseline_ind = np.concatenate([np.arange(start_ind_valid[i]-self.DOWN_ECOG_FS//4,start_ind_valid[i]-self.DOWN_ECOG_FS//20) \ + for i in range(len(start_ind_valid))]) #baseline: 1/4 s - 1/20 s before stimulis onset + baseline = ecog[baseline_ind] + statics_ecog = baseline.mean(axis=0,keepdims=True)+1E-10, np.sqrt(baseline.var(axis=0, keepdims=True))+1E-10 + + ecog = (ecog - statics_ecog[0])/statics_ecog[1] + ecog = np.minimum(ecog,5) + ecog_len_+= [ecog.shape[0]] + ecog_+=[ecog] + + start_ind_train_ += [start_ind[:-self.TestNum] + np.cumsum(ecog_len_)[-2]] + end_ind_train_ += [end_ind[:-self.TestNum] + np.cumsum(ecog_len_)[-2]] + end_ind_valid_train_ += [end_ind_valid[:-self.TestNum] + np.cumsum(ecog_len_)[-2]] + start_ind_valid_train = start_ind_valid[:-self.TestNum] + np.cumsum(ecog_len_)[-2] + start_ind_valid_train_ += [start_ind_valid_train] + start_ind_wave_down_train = start_ind_wave_down[:-self.TestNum] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + start_ind_wave_down_train_ += [start_ind_wave_down_train] + end_ind_wave_down_train = end_ind_wave_down[:-self.TestNum] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + end_ind_wave_down_train_ += [end_ind_wave_down_train] + start_ind_wave_down_valid_train = start_ind_wave_down_valid[:-self.TestNum] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + start_ind_wave_down_valid_train_ += [start_ind_wave_down_valid_train] + end_ind_wave_down_valid_train = 
end_ind_wave_down_valid[:-self.TestNum] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + end_ind_wave_down_valid_train_ += [end_ind_wave_down_valid_train] + + start_ind_test_ += [start_ind[-self.TestNum:] + np.cumsum(ecog_len_)[-2]] + end_ind_test_ += [end_ind[-self.TestNum:] + np.cumsum(ecog_len_)[-2]] + end_ind_valid_test_ += [end_ind_valid[-self.TestNum:] + np.cumsum(ecog_len_)[-2]] + start_ind_valid_test = start_ind_valid[-self.TestNum:] + np.cumsum(ecog_len_)[-2] + start_ind_valid_test_ += [start_ind_valid_test] + start_ind_wave_down_test = start_ind_wave_down[-self.TestNum:] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + start_ind_wave_down_test_ += [start_ind_wave_down_test] + end_ind_wave_down_test = end_ind_wave_down[-self.TestNum:] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + end_ind_wave_down_test_ += [end_ind_wave_down_test] + start_ind_wave_down_valid_test = start_ind_wave_down_valid[-self.TestNum:] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + start_ind_wave_down_valid_test_ += [start_ind_wave_down_valid_test] + end_ind_wave_down_valid_test = end_ind_wave_down_valid[-self.TestNum:] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + end_ind_wave_down_valid_test_ += [end_ind_wave_down_valid_test] + + if self.Prod: + start_ind_re_train_ += [start_ind_re[:-self.TestNum] + np.cumsum(ecog_len_)[-2]] + end_ind_re_train_ += [end_ind_re[:-self.TestNum] + np.cumsum(ecog_len_)[-2]] + end_ind_re_valid_train_ += [end_ind_re_valid[:-self.TestNum] + np.cumsum(ecog_len_)[-2]] + start_ind_re_validtrain_ = start_ind_re_valid[:-self.TestNum] + np.cumsum(ecog_len_)[-2] + start_ind_re_valid_train_ += [start_ind_re_validtrain_] + start_ind_re_wave_downtrain_ = start_ind_re_wave_down[:-self.TestNum] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + start_ind_re_wave_down_train_ += [start_ind_re_wave_downtrain_] + end_ind_re_wave_downtrain_ = end_ind_re_wave_down[:-self.TestNum] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + end_ind_re_wave_down_train_ += [end_ind_re_wave_downtrain_] + start_ind_re_wave_down_validtrain_ = start_ind_re_wave_down_valid[:-self.TestNum] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + start_ind_re_wave_down_valid_train_ += [start_ind_re_wave_down_validtrain_] + end_ind_re_wave_down_validtrain_ = end_ind_re_wave_down_valid[:-self.TestNum] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + end_ind_re_wave_down_valid_train_ += [end_ind_re_wave_down_validtrain_] + + start_ind_re_test_ += [start_ind_re[-self.TestNum:] + np.cumsum(ecog_len_)[-2]] + end_ind_re_test_ += [end_ind_re[-self.TestNum:] + np.cumsum(ecog_len_)[-2]] + end_ind_re_valid_test_ += [end_ind_re_valid[-self.TestNum:] + np.cumsum(ecog_len_)[-2]] + start_ind_re_validtest_ = start_ind_re_valid[-self.TestNum:] + np.cumsum(ecog_len_)[-2] + start_ind_re_valid_test_ += [start_ind_re_validtest_] + start_ind_re_wave_downtest_ = start_ind_re_wave_down[-self.TestNum:] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + start_ind_re_wave_down_test_ += [start_ind_re_wave_downtest_] + end_ind_re_wave_downtest_ = end_ind_re_wave_down[-self.TestNum:] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + 
end_ind_re_wave_down_test_ += [end_ind_re_wave_downtest_] + start_ind_re_wave_down_validtest_ = start_ind_re_wave_down_valid[-self.TestNum:] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + start_ind_re_wave_down_valid_test_ += [start_ind_re_wave_down_validtest_] + end_ind_re_wave_down_validtest_ = end_ind_re_wave_down_valid[-self.TestNum:] + (np.cumsum(ecog_len_)[-2]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS).astype(np.int64) + end_ind_re_wave_down_valid_test_ += [end_ind_re_wave_down_validtest_] + + if not self.Prod: + spkrdata = h5py.File(os.path.join(datapath_task,'TF32_16k.mat'),'r') + spkr = np.asarray(spkrdata['TFlog']) + spkr = signal.resample(spkr,int(1.0*spkr.shape[0]/self.ORG_TF_FS*self.DOWN_TF_FS),axis=0) + else: + spkr = np.zeros([end_ind[-1],self.SpecBands]) + + samples_for_statics = spkr[start_ind[0]:start_ind[-1]] + # if HD: + # samples_for_statics = samples_for_statics_ + # if not HD: + # samples_for_statics = spkr[start_ind[0]:start_ind[-1]] + if xx==0: + statics_spkr = samples_for_statics.mean(axis=0,keepdims=True)+1E-10, np.sqrt(samples_for_statics.var(axis=0, keepdims=True))+1E-10 + # print(statics_spkr) + for samples in range(start_ind.shape[0]): + if not np.isnan(start_ind[samples]): + if samples ==0: + spkr[:start_ind[samples]] = 0 + else: + spkr[end_ind[samples-1]:start_ind[samples]] = 0 + if samples ==start_ind.shape[0]-1: + spkr[end_ind[samples]:] = 0 + spkr = (np.clip(spkr,0.,50.)-25.)/25. + # spkr = (spkr - statics_spkr[0])/statics_spkr[1] + spkr_trim = np.zeros([int(ecog.shape[0]*1.0/self.DOWN_ECOG_FS*self.DOWN_TF_FS),spkr.shape[1]]) + if spkr.shape[0]>spkr_trim.shape[0]: + spkr_trim = spkr[:spkr_trim.shape[0]] + spkr = spkr_trim + else: + spkr_trim[:spkr.shape[0]] = spkr + spkr = spkr_trim + spkr_+=[spkr] + + if not self.Prod: + wavedata = h5py.File(os.path.join(datapath_task,'spkr_16k.mat'),'r') + wavearray = np.asarray(wavedata['spkr']) + wave_trim = np.zeros([int(ecog.shape[0]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS),wavearray.shape[1]]) + else: + wavearray = np.zeros([int(ecog.shape[0]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS),1]) + wave_trim = np.zeros([int(ecog.shape[0]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS),1]) + + if wavearray.shape[0]>wave_trim.shape[0]: + wave_trim = wavearray[:wave_trim.shape[0]] + wavearray = wave_trim + else: + wave_trim[:wavearray.shape[0]] = wavearray + wavearray = wave_trim + wave_+=[wavearray] + + if self.Prod: + spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_16k.mat'),'r') + spkr_re = np.asarray(spkr_redata['TFlog']) + spkr_re = signal.resample(spkr_re,int(1.0*spkr_re.shape[0]/self.ORG_TF_FS*self.DOWN_TF_FS),axis=0) + if HD: + samples_for_statics_re = samples_for_statics_re_ + if not HD: + samples_for_statics_re = spkr_re[start_ind_re[0]:start_ind_re[-1]] + # samples_for_statics_re = spkr_re[statics_samples_spkr_re[ds][file*2]:statics_samples_spkr_re[ds][file*2+1]] + if xx==0: + statics_spkr_re = samples_for_statics_re.mean(axis=0,keepdims=True)+1E-10, np.sqrt(samples_for_statics_re.var(axis=0, keepdims=True))+1E-10 + # print(statics_spkr_re) + if subj != "NY717" or (task_to_use != 'VisRead' and task_to_use != 'PicN'): + for samples in range(start_ind_re.shape[0]): + if not np.isnan(start_ind_re[samples]): + if samples ==0: + spkr_re[:start_ind_re[samples]] = 0 + else: + spkr_re[end_ind_re[samples-1]:start_ind_re[samples]] = 0 + if samples ==start_ind_re.shape[0]-1: + spkr_re[end_ind_re[samples]:] = 0 + spkr_re =
(np.clip(spkr_re,0.,50.)-25.)/25. + # spkr_re = (spkr_re - statics_spkr_re[0])/statics_spkr_re[1] + spkr_re_trim = np.zeros([int(ecog.shape[0]*1.0/self.DOWN_ECOG_FS*self.DOWN_TF_FS),spkr_re.shape[1]]) + if spkr_re.shape[0]>spkr_re_trim.shape[0]: + spkr_re_trim = spkr_re[:spkr_re_trim.shape[0]] + spkr_re = spkr_re_trim + else: + spkr_re_trim[:spkr_re.shape[0]] = spkr_re + spkr_re = spkr_re_trim + spkr_re_+=[spkr_re] + + wave_redata = h5py.File(os.path.join(datapath_task,'zoom_16k.mat'),'r') + wave_rearray = np.asarray(wave_redata['zoom']) + wave_rearray = wave_rearray.T + wave_re_trim = np.zeros([int(ecog.shape[0]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS),wave_rearray.shape[1]]) + if wave_rearray.shape[0]>wave_re_trim.shape[0]: + wave_re_trim = wave_rearray[:wave_re_trim.shape[0]] + wave_rearray = wave_re_trim + else: + wave_re_trim[:wave_rearray.shape[0]] = wave_rearray + wave_rearray = wave_re_trim + wave_re_+=[wave_rearray] + + + if HD: + label_mat = scipy.io.loadmat(os.path.join(analysispath_task,'Events.mat'))['Events']['word'][0][:event_range] + else: + label_mat = scipy.io.loadmat(os.path.join(analysispath_task,'Events.mat'))['Events']['correctrsp'][0][:event_range] + label_subset = [] + label_mat = np.delete(label_mat,bad_samples_HD) + for i in range(label_mat.shape[0]): + if HD: + label_mati = label_mat[i][0] + else: + label_mati = label_mat[i][0][0][0].lower() + # labels.append(str(label_mati).replace('.wav','')) + label_subset.append(label_mati) + if label_mati not in unique_labels: + unique_labels.append(label_mati) + label_ind = np.zeros([label_mat.shape[0]]) + for i in range(label_mat.shape[0]): + label_ind[i] = unique_labels.index(label_subset[i]) + label_ind = np.asarray(label_ind,dtype=np.int16) + word_train+=[label_ind[:-self.TestNum]] + labels_train+=[label_subset[:-self.TestNum]] + word_test+=[label_ind[-self.TestNum:]] + labels_test+=[label_subset[-self.TestNum:]] + + ################ clean ################## + if not HD: + # bad_samples_ = np.where(bad_samples_==1)[0] + bad_samples_ = np.where(np.logical_or(np.logical_or(bad_samples_==1, bad_samples_==2) , bad_samples_==4))[0] + if HD: + bad_channels = np.array([]) if "BadElec" not in subj_param.keys() else subj_param["BadElec"] + else: + bad_channels = scipy.io.loadmat(os.path.join(analysispath_task,'subj_globals.mat'))['bad_elecs'][0]-1 + # dataset_name = [name for name in DATA_DIR[ds][0].split('/') if 'NY' in name or 'HD' in name] + if HD: + mni_coord = np.array([]) + T1_coord = np.array([]) + else: + csvfile = os.path.join(analysispath,'coordinates.csv') + coord = pandas.read_csv(csvfile) + mni_coord = np.stack([np.array(coord['MNI_x'][:128]),np.array(coord['MNI_y'][:128]),np.array(coord['MNI_z'][:128])],axis=1) + # mni_coord = rearrange(mni_coord,Crop,mode = 'coord') + mni_coord = mni_coord.astype(np.float32) + mni_coord = (mni_coord-np.array([-74.,-23.,-20.]))*2/np.array([74.,46.,54.])-1 + T1_coord = np.stack([np.array(coord['T1_x'][:128]),np.array(coord['T1_y'][:128]),np.array(coord['T1_z'][:128])],axis=1) + # T1_coord = rearrange(T1_coord,NY_crop[ds],mode = 'coord') + T1_coord = T1_coord.astype(np.float32) + T1_coord = (T1_coord-np.array([-74.,-23.,-20.]))*2/np.array([74.,46.,54.])-1 + # for i in range(mni_coord.shape[0]): + # print(i,' ',mni_coord[i]) + percent1 = np.array([float(coord['AR_Percentage'][i].strip("%").strip())/100.0 for i in range(128)]) + percent2 = np.array([0.0 if isinstance(coord['AR_7'][i],float) else float(coord['AR_7'][i].strip("%").strip())/100.0 for i in range(128)]) + percent = np.stack([percent1,percent2],1) + AR1 = np.array([coord['T1_AnatomicalRegion'][i] for i in range(128)]) + AR2 = np.array([coord['AR_8'][i] for i in range(128)]) + AR = np.stack([AR1,AR2],1) + regions = np.array([AR[i,np.argmax(percent,1)[i]] for i in range(AR.shape[0])]) + mask = np.ones(ecog_[0].shape[1]) + mask[bad_channels] = 0. + lastchannel = ecog_[0].shape[1] if not self.UseGridOnly else (128 if Density=="HB" else 64) + if self.ReshapeAsGrid: + regions = self.rearrange(regions,Crop,mode = 'region') + mask = self.rearrange(mask,Crop,mode = 'mask') + mni_coord = self.rearrange(mni_coord,Crop,mode = 'coord') + else: + mask = mask if HD else mask[:lastchannel] + regions = regions if HD else regions[:lastchannel] + mni_coord = mni_coord if HD else mni_coord[:lastchannel]
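# A quick standalone check (not part of the patch) of the coordinate
# normalization above: electrode MNI/T1 positions are mapped affinely into
# roughly [-1, 1] per axis, assuming the bounding box [-74,0] x [-23,23] x
# [-20,34] mm implied by the offsets and spans hard-coded above.
import numpy as np
lo = np.array([-74., -23., -20.])     # per-axis offset used above (mm)
span = np.array([74., 46., 54.])      # per-axis extent used above (mm)
mni = np.array([[-37., 0., 7.]])      # hypothetical electrode position
print((mni - lo) * 2 / span - 1)      # -> [[0. 0. 0.]], the box center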
ecog_ = np.concatenate(ecog_,axis=0) + ecog_ = ecog_ if HD else ecog_[:,:lastchannel] + # start_ind_valid_ = np.concatenate(start_ind_valid_,axis=0) + if HD: + ecog_,statics_ecog_zscore = self.zscore(ecog_,badelec = bad_channels) + elif not flag_zscore: + ecog_,statics_ecog_zscore = self.zscore(ecog_,badelec = bad_channels) + flag_zscore = True + else: + ecog_ = (ecog_-statics_ecog_zscore[0])/statics_ecog_zscore[1] + if bad_channels.size !=0: # if bad_channels is not empty + ecog_[:,bad_channels [... patch truncated here: the rest of ECoGDataSet.py and the diffs for checkpointer.py, configs/ecog.yaml, dataloader_ecog.py, defaults.py, launcher.py, losses.py, lreq.py, model.py, model_param.json, and the top of net.py listed in the diffstat are missing ...] fused_scale = resolution * 2 >= 128 - - block = EncodeBlock(inputs, outputs, latent_size, False, fused_scale=fused_scale) + + islast = i==(self.layer_count-1) + block = EncodeBlock(inputs, outputs, latent_size, False, islast, fused_scale=fused_scale,temporal_w=temporal_w,residual=residual,resample=True) resolution //= 2 #print("encode_block%d %s styles out: %d" % ((i + 1), millify(count_parameters(block)), inputs)) + self.encode_block.append(block) inputs = outputs mul *= 2 def encode(self, x, lod): - styles = torch.zeros(x.shape[0], 1, self.latent_size) + if self.temporal_w: + styles = torch.zeros(x.shape[0], 1, self.latent_size,128) + else: + styles = torch.zeros(x.shape[0], 1, self.latent_size) x = self.from_rgb[self.layer_count - lod - 1](x) x = F.leaky_relu(x, 0.2) for i in range(self.layer_count - lod - 1, self.layer_count): + if self.attention_block[i]: + x = self.attention_block[i](x) x, s1, s2 = self.encode_block[i](x) + if self.temporal_w and i!=0: + s1 = F.interpolate(s1,scale_factor=2**i) + s2 = F.interpolate(s2,scale_factor=2**i) styles[:, 0] += s1 + s2 - + if self.average_w: + styles /= (lod+1) return styles def encode2(self, x, lod, blend): x_orig = x - styles = torch.zeros(x.shape[0], 1, self.latent_size) + if self.temporal_w: + styles = torch.zeros(x.shape[0], 1, self.latent_size,128) + else: + styles = torch.zeros(x.shape[0], 1, self.latent_size) x = self.from_rgb[self.layer_count - lod - 1](x) x = F.leaky_relu(x, 0.2) - + if self.attention_block[self.layer_count - lod - 1]: + x = self.attention_block[self.layer_count - lod - 1](x) x, s1, s2 = self.encode_block[self.layer_count - lod - 1](x) + if self.temporal_w and self.layer_count - lod - 1 != 0: + s1 = F.interpolate(s1,scale_factor=2**(self.layer_count - lod - 1)) + s2 = F.interpolate(s2,scale_factor=2**(self.layer_count - lod - 1)) styles[:, 0] += s1 * blend + s2 * blend x_prev = F.avg_pool2d(x_orig, 2, 2) @@ -336,9 +500,15 @@ def encode2(self, x, lod, blend): x = torch.lerp(x_prev, x, blend) for i in range(self.layer_count - (lod - 1) - 1, self.layer_count): + if self.attention_block[i]: + x = self.attention_block[i](x) x, s1, s2 = self.encode_block[i](x) + if self.temporal_w and i!=0: + s1 = F.interpolate(s1,scale_factor=2**i) + s2 = F.interpolate(s2,scale_factor=2**i) styles[:, 0] += s1 + s2 - + if self.average_w: + styles /= (lod+1) return styles
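# A minimal sketch (not part of the patch) of the multi-scale style pooling in
# encode() when temporal_w is on: block i emits a style map at 1/2**i of the
# full temporal length (128 above), so it is upsampled by 2**i before being
# accumulated, and average_w divides by the number of contributing levels.
# Batch size, channel count, and layer count here are assumptions.
import torch
import torch.nn.functional as F
latent_size, layer_count, T = 256, 6, 128
styles = torch.zeros(2, 1, latent_size, T)
for i in range(layer_count):
    s = torch.randn(2, latent_size, T // 2 ** i)   # stand-in for s1 + s2 from block i
    if i != 0:
        s = F.interpolate(s, scale_factor=2 ** i)  # back to the full length T
    styles[:, 0] += s
styles /= layer_count                              # the average_w branch
print(styles.shape)                                # torch.Size([2, 1, 256, 128])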
def forward(self, x, lod, blend): @@ -677,7 +847,7 @@ def forward(self, x, lod, blend): @GENERATORS.register("GeneratorDefault") class Generator(nn.Module): - def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels=3): + def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels=3, spec_chans=128,temporal_w=False,init_zeros=False,residual=False,attention=None): super(Generator, self).__init__() self.maxf = maxf self.startf = startf @@ -685,12 +855,18 @@ def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels self.channels = channels self.latent_size = latent_size - + self.temporal_w = temporal_w + self.init_zeros = init_zeros + self.attention = attention mul = 2 ** (self.layer_count - 1) inputs = min(self.maxf, startf * mul) - self.const = Parameter(torch.Tensor(1, inputs, 4, 4)) - init.ones_(self.const) + init_specchans = spec_chans//2**(self.layer_count-1) + self.const = Parameter(torch.Tensor(1, inputs, 4, init_specchans)) + if init_zeros: + init.zeros_(self.const) + else: + init.ones_(self.const) self.layer_to_resolution = [0 for _ in range(layer_count)] resolution = 2 @@ -698,7 +874,7 @@ def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels self.style_sizes = [] to_rgb = nn.ModuleList() - + self.attention_block = nn.ModuleList() self.decode_block: nn.ModuleList[DecodeBlock] = nn.ModuleList() for i in range(self.layer_count): outputs = min(self.maxf, startf * mul) @@ -706,18 +882,21 @@ def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels has_first_conv = i != 0 fused_scale = resolution * 2 >= 128 - block = DecodeBlock(inputs, outputs, latent_size, has_first_conv, fused_scale=fused_scale, layer=i) + block = DecodeBlock(inputs, outputs, latent_size, has_first_conv, fused_scale=fused_scale, layer=i,temporal_w=temporal_w,residual=residual,resample=True) resolution *= 2 self.layer_to_resolution[i] = resolution self.style_sizes += [2 * (inputs if has_first_conv else outputs), 2 * outputs] - to_rgb.append(ToRGB(outputs, channels)) + to_rgb.append(ToRGB(outputs, channels,residual=residual)) #print("decode_block%d %s styles in: %dl out resolution: %d" % ( # (i + 1), millify(count_parameters(block)), outputs, resolution)) + apply_attention = attention and attention[i] + non_local = Attention(outputs,temporal_w=None,attentional_style=None) if apply_attention else None self.decode_block.append(block) + self.attention_block.append(non_local) inputs = outputs mul //= 2 @@ -727,7 +906,15 @@ def decode(self, styles, lod, noise): x = self.const for i in range(lod + 1): - x = self.decode_block[i](x, styles[:, 2 * i + 0], styles[:, 2 * i + 1], noise) + if self.temporal_w and i!=self.layer_count-1: + w1 = F.interpolate(styles[:, 2 * i + 0],scale_factor=2**-(self.layer_count-i-1)) + w2 = F.interpolate(styles[:, 2 * i + 1],scale_factor=2**-(self.layer_count-i-1)) + else: + w1 = styles[:, 2 * i + 0] + w2 = styles[:, 2 * i + 1] + x = self.decode_block[i](x, w1, w2, noise) + if self.attention_block[i]: + x = self.attention_block[i](x) x = self.to_rgb[lod](x) return x @@ -736,11 +923,26 @@ def decode2(self, styles, lod, blend, noise): x = self.const for i in range(lod): - x = self.decode_block[i](x, styles[:, 2 * i + 0], styles[:, 2 * i + 1], noise) - + if self.temporal_w and i!=self.layer_count-1: + w1 = F.interpolate(styles[:,
2 * i + 0],scale_factor=2**-(self.layer_count-i-1)) + w2 = F.interpolate(styles[:, 2 * i + 1],scale_factor=2**-(self.layer_count-i-1)) + else: + w1 = styles[:, 2 * i + 0] + w2 = styles[:, 2 * i + 1] + x = self.decode_block[i](x, w1, w2, noise) + if self.attention_block[i]: + x = self.attention_block[i](x) x_prev = self.to_rgb[lod - 1](x) - x = self.decode_block[lod](x, styles[:, 2 * lod + 0], styles[:, 2 * lod + 1], noise) + if self.temporal_w and lod!=self.layer_count-1: + w1 = F.interpolate(styles[:, 2 * lod + 0],scale_factor=2**-(self.layer_count-lod-1)) + w2 = F.interpolate(styles[:, 2 * lod + 1],scale_factor=2**-(self.layer_count-lod-1)) + else: + w1 = styles[:, 2 * lod + 0] + w2 = styles[:, 2 * lod + 1] + x = self.decode_block[lod](x, w1, w2, noise) + if self.attention_block[lod]: + x = self.attention_block[lod](x) x = self.to_rgb[lod](x) needed_resolution = self.layer_to_resolution[lod] @@ -850,12 +1052,18 @@ def forward(self, x): class MappingBlock(nn.Module): - def __init__(self, inputs, output, lrmul): + def __init__(self, inputs, output, stride =1,lrmul=0.1,temporal_w=False,transpose=False,transform_kernel=False,use_sn=False): super(MappingBlock, self).__init__() - self.fc = ln.Linear(inputs, output, lrmul=lrmul) + if temporal_w: + if transpose: + self.map = sn(ln.ConvTranspose1d(inputs, output, 3,stride,1,0,lrmul=lrmul,transform_kernel=transform_kernel),use_sn=use_sn) + else: + self.map = sn(ln.Conv1d(inputs, output, 3,stride,1,lrmul=lrmul,transform_kernel=transform_kernel),use_sn=use_sn) + else: + self.map = sn(ln.Linear(inputs, output, lrmul=lrmul),use_sn=use_sn) def forward(self, x): - x = F.leaky_relu(self.fc(x), 0.2) + x = F.leaky_relu(self.map(x), 0.2) return x @@ -884,24 +1092,61 @@ def forward(self, z): @MAPPINGS.register("MappingToLatent") class VAEMappingToLatent_old(nn.Module): - def __init__(self, mapping_layers=5, latent_size=256, dlatent_size=256, mapping_fmaps=256): + def __init__(self, mapping_layers=5, latent_size=256, dlatent_size=256, mapping_fmaps=256,temporal_w=False): super(VAEMappingToLatent_old, self).__init__() + self.temporal_w = temporal_w inputs = latent_size self.mapping_layers = mapping_layers self.map_blocks: nn.ModuleList[MappingBlock] = nn.ModuleList() for i in range(mapping_layers): - outputs = 2 * dlatent_size if i == mapping_layers - 1 else mapping_fmaps - block = ln.Linear(inputs, outputs, lrmul=0.1) + if not temporal_w: + outputs = 2 * dlatent_size if i == mapping_layers - 1 else mapping_fmaps + else: + outputs = mapping_fmaps + block = MappingBlock(inputs, outputs, stride = 2 if i!=0 else 1,lrmul=0.1,temporal_w=temporal_w,transform_kernel=True if i!=0 else False) inputs = outputs self.map_blocks.append(block) #print("dense %d %s" % ((i + 1), millify(count_parameters(block)))) - + if temporal_w: + self.Linear = sn(ln.Linear(inputs*8,2 * dlatent_size,lrmul=0.1)) def forward(self, x): + if x.dim()==3: + x = torch.mean(x,dim=2) for i in range(self.mapping_layers): x = self.map_blocks[i](x) - - return x.view(x.shape[0], 2, x.shape[2] // 2) - + if self.temporal_w: + x = x.view(x.shape[0],x.shape[1]*x.shape[2]) + x = self.Linear(x) + return x.view(x.shape[0], 2, x.shape[1] // 2) + +@MAPPINGS.register("MappingToWord") +class MappingToWord(nn.Module): + def __init__(self, mapping_layers=5, latent_size=256, uniq_words=256, mapping_fmaps=256,temporal_w=False): + super(MappingToWord, self).__init__() + self.temporal_w = temporal_w + inputs = latent_size + self.mapping_layers = mapping_layers + self.map_blocks: nn.ModuleList[MappingBlock] = 
nn.ModuleList() + for i in range(mapping_layers): + if not temporal_w: + outputs = uniq_words if i == mapping_layers - 1 else mapping_fmaps + else: + outputs = mapping_fmaps + block = MappingBlock(inputs, outputs, stride = 2 if i!=0 else 1,lrmul=0.1,temporal_w=temporal_w,transform_kernel=True if i!=0 else False) + inputs = outputs + self.map_blocks.append(block) + #print("dense %d %s" % ((i + 1), millify(count_parameters(block)))) + if temporal_w: + self.Linear = sn(ln.Linear(inputs*8,uniq_words,lrmul=0.1)) + def forward(self, x): + if x.dim()==3: + x = torch.mean(x,dim=2) + for i in range(self.mapping_layers): + x = self.map_blocks[i](x) + if self.temporal_w: + x = x.view(x.shape[0],x.shape[1]*x.shape[2]) + x = self.Linear(x) + return x @MAPPINGS.register("MappingToLatentNoStyle") class VAEMappingToLatentNoStyle(nn.Module): @@ -929,26 +1174,37 @@ def forward(self, x): @MAPPINGS.register("MappingFromLatent") class VAEMappingFromLatent(nn.Module): - def __init__(self, num_layers, mapping_layers=5, latent_size=256, dlatent_size=256, mapping_fmaps=256): + def __init__(self, num_layers, mapping_layers=5, latent_size=256, dlatent_size=256, mapping_fmaps=256,temporal_w=False): super(VAEMappingFromLatent, self).__init__() - inputs = dlatent_size self.mapping_layers = mapping_layers self.num_layers = num_layers + self.temporal_w = temporal_w + self.latent_size = latent_size self.map_blocks: nn.ModuleList[MappingBlock] = nn.ModuleList() + if temporal_w: + self.Linear = sn(ln.Linear(dlatent_size,8*(latent_size//8)),use_sn=PARTIAL_SN) + inputs = latent_size//8 + else: + inputs = dlatent_size for i in range(mapping_layers): outputs = latent_size if i == mapping_layers - 1 else mapping_fmaps - block = MappingBlock(inputs, outputs, lrmul=0.1) + block = MappingBlock(inputs, outputs, stride = i%2+1, lrmul=0.1,temporal_w=temporal_w,transform_kernel=True if i%2==1 else False, transpose=True,use_sn=PARTIAL_SN) inputs = outputs self.map_blocks.append(block) #print("dense %d %s" % ((i + 1), millify(count_parameters(block)))) def forward(self, x): x = pixel_norm(x) - + if self.temporal_w: + x = self.Linear(x) + x = F.leaky_relu(x,0.2) + x = x.view(x.shape[0],self.latent_size//8,8) for i in range(self.mapping_layers): x = self.map_blocks[i](x) - - return x.view(x.shape[0], 1, x.shape[1]).repeat(1, self.num_layers, 1) + if self.temporal_w: + return x.view(x.shape[0], 1, x.shape[1],x.shape[2]).repeat(1, self.num_layers, 1,1) + else: + return x.view(x.shape[0], 1, x.shape[1]).repeat(1, self.num_layers, 1) @ENCODERS.register("EncoderFC") diff --git a/sample1_1.npy b/sample1_1.npy new file mode 100644 index 0000000000000000000000000000000000000000..bc9718d0da11774978f46470dd011776b88b14b6 GIT binary patch literal 131200 [base85 binary data omitted; the patch is truncated inside this blob, so the diffs for the remaining sample*.npy files, train_alae.py, and train_param.json listed in the diffstat above never appear]
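# A shape sketch (not part of the patch) of VAEMappingFromLatent with
# temporal_w: the Linear lifts z onto 8 time steps, then each MappingBlock is
# a ConvTranspose1d whose stride alternates 1,2,1,2,... (i%2+1), so every
# second block doubles the temporal axis. Plain torch layers stand in for the
# custom lreq/sn wrappers; output_padding=1 on stride-2 steps is an assumption
# that mimics what transform_kernel achieves in the patch.
import torch
import torch.nn as nn
latent_size, mapping_fmaps, mapping_layers = 256, 256, 5
z = torch.randn(4, latent_size)
x = nn.Linear(latent_size, 8 * (latent_size // 8))(z).view(4, latent_size // 8, 8)
inputs = latent_size // 8
for i in range(mapping_layers):
    outputs = latent_size if i == mapping_layers - 1 else mapping_fmaps
    x = nn.ConvTranspose1d(inputs, outputs, 3, stride=i % 2 + 1, padding=1,
                           output_padding=i % 2)(x)   # stride-2 blocks double T
    inputs = outputs
print(x.shape)  # torch.Size([4, 256, 32]): a temporal style map, not a vector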
zH-mN73DNY|by!y)7?Net0>%{IFN^)@2G7Pqg}t75Uc&q1In^E)LVJLv&+Gvou@7!Fr+lgZI0(*vWJtL8RX_=$*{b`0RpxKgZ46AM0a3>l7Dzdwe^3^HB64 z>G~KFH#gontTv0dX@~fa>Mx)e+tS{$4><3qR7k&bcL5!onVG-QHjC^c&NWT&O(0ux z2{fU_(eWyEF?s+}*C`*xuVm1v}2q-PyMM6ZuDpO&%i z+AJlsj&2BrdlBn1cVN9@1<%Gw^-8qKC3;a2>;HGUj>@j#xdi5mAyoIIi-2Bu)brOSYk@rtGE)7*xF5pSEBk!99>{g6OPS7IK<}&RQ*|c?Dd{{qcKcN(vK`1w zJR$r5&xECg_a^dSHaiS9ur9az1JN6-P4cEC$c_MNr zv1YeGAUMY=DEypChj1g)7T(D^5b_u=UE%766q02ul|VlX2P!D_+V){y6+wpl8lT5A z4>#2C{3a2ni8vALAEGJMFjT;PNqSYunFk9cV5@KANpPx!)L2=DFUOj|=fei$LB$q0 zn--rZr-nH+k3@P_tMU2&)YXumwH1sscCS75!~21+c0mfBqd;HIU^;ZL4P5JI{&tQzWm{AEo1ZK%?;@@ne$6l55!&LmU^a+>l7{reoqKh0KLQW ziWBT!km|c<-|?#@NJ9A7g9A%VXz7L4kB7`%NK2IdoSc0xBHIO=e`kSpCo-=Zxbb;f zJ6avImfMfQEj_CLT8^N5F~KEU(VOrKVK})ACuUB{#kH;=5qU4djIf1BNE zhNT`%7}nVLbPU7J=8pGzBG`X$?MV`!(E#?t)vTvayCd$CUIHJ5NT~kg$qSiw<7mY0 z_T{X;S(Hc?wsOYjy-scRM>@|%)GzQgAfIvp>+=u!OLNYm+T`wH9`}7s{eEi!Y=fM9Ry$p^IGDCpZ|fixxls zrl)d|x8~5{LPS+@mVC`hgjb~TKbKOvrZ-@qG z{tJ|ED(=6um<2D@I*g7ww?cIzqc8dWAXuKyX@|ugh@_4uhT{8daANhfG?_-ASvkU+ z5Y+}I0pb&U(jPx5}#D1AS-D zMb@qwCyTR72M^Mnc2b$;<6!lK_fW7o3=lE%S9tSaTM=#XLvc?0+FjrR;TUdBGCJmK5xd-1%6kn#_der2G^LzXNV!u3BnxyeOS0@z*?qB-SN z1&ItNXa=~kE?_zI0)=4>MBW!v<4SG?9*4xi%sw11e}8JZe#QEg;5|3G{cvCPr0KZ^ z+#fM#%(3R2#rcHZS^EruR#>2Ne4)1518FTSXP!)tf~(8vl`~FLkT)s3?HbnS)>^wY z3RnO8KD+ha|F_krYsZb@ghfzilI&`M1E6foBGNA za?ZfIU=u~FUR?i|*c|Y-JvsxdW=lUB@LXLc#>VSmRQUe7dl$>eDiYWqjG)*~#C4q0 zb8#%aiD)z5OU#e59eMN8A_MBmV10JXzZsC$8RH*7O7!~feH{bH^oM5jW9fbr z{Mt#&@j3PfT*w>$@sNlFCGJv6{!jk*$$M(a`xe$`)i5iUHNl&wC-v5chrma1RGG?T z3T7IJz3tfVq`#tGz=*kxQ9_(F)E^gspX2FMITOrNYmv&mC^-w3!vb9NA~SG^_wRIn z1D=apMRjY}J~D)SuD+=FX%0A58CYF^4dOaR(5yxrzW-0fy*NajkC2qt>ta2T z$kK@*w>@)6;uG@(f9wJ(QJC!A{df_PRxgRpx8S~C?sM@!l{3ipxX`!8_%Rf|qG!%a z>Oz`LZcJ945vb`ABeAEl5^T|0%raLlDoOlm{@$(~StA>+6YIT*ImmuTEY>L*oy*X) zDDOu3zTxNYK?|~VXX@qq5{1m4_nl&@Y6UfRV`8sy2fqJ)eHN@)0^SV^18=6RV0G6o zvyaqX$X49oVv}bEBpx@I46bd0st=`3zjFFupN#!2H$f7xe>?lh=K$uN2!>t=pKOHs z%GtbMaad1y-8?(QrWXvJeP(?;RSgu3DY}9?1CjqOZjB$jg~*v8{?`HfiDdYE2nDaO z52KQ+Zl(s`O9(e@-zZ`qU`58&irYQNrzbGJpcM1r;ve4XW3ENyJCP#aClirBx0Fmr zN+P0{IU-_+0zhzQpH!%26>z^z?yn|wfNP+Yss}3xT3mc*)oMCH=}z(@g+U{1{@yWn z^-whkJ~0+plCFh(zAxulZj?b&|MS@GGkIV;t&1nimqTjh!o{BL6_Dj&BmJVQ3`FWa zxw7H?QmKZ0;(j>R3GuCsP}(*@iKYElw~1EBk``ywaVLWOQ*jC5|KV2LeZP6SwH^0A zln{GLBSc;=sX2oAC#J>HN@fR};d|*2efaxsP?g%gQSLGd((ciGYj`fCxWonf_VfSt z|NrMXzjfaK=RV-<^nK?b-Zvy8Esg|W&Z5jmFu%2b9!x_oxa}311M)Pxa-%7pyR^qO z^wTLa)-PXY(WNFsiRP=v^CPnmu3FEw>pRvX+J(=5eKrQhJo^myo*Mw>0P6ak&DG%F ze7TpAEegqtcIka%twS+3G`D(pbs}=ms)@(;UX)reBwWWffb4G#+&&m?~L zA$FJzd?nY7Cgsle&~7K9)E{g+QZxz3pI9c|Smy?6@z(<{zQOO8^8LvogF#TfyK(J~ z>jYG}pI2_jd^i8Xq?E3`^RS%9cH3VH`}q4fPq6yq`N*9)#c^~vuNk`ki{|SbxCigL zvDXF9G5@JoHn!_*=l?R#|YdH%lpzBfbY8| z>NYm&s>7h88WXb{^XapnRMi(+$YI_<|54eD zQ5*+fTThD01|V;(h&_o^^&nDvk&#aQ^gtn#ep_OA+J4LpIen|ImLL1^ zXoD3=S6iUt@W(rHTqM}!*PN%+!W_ZxJ0G5}@dbL@M`4T88OV9`oZ#QWBqValxLtRb zJ2HB(`u**_8nk+OJ?S$E=VNyEO*~6oxLz8#CS^fHGORc8j5zFDB$8*1>##r3$YGJt zo`c9N2TM2>{qfw1-9V2jz`nM?HXF~0=pSzuWQ^-eMqF=+YGY#5}m;#uKMV0I82147ytYH2w1aK1jm6kB^5U zTD>p_sKu0BR<#9Az9F2Gd_IWxFuX_;34~#q>Y% zzZKvAE#9|2|8MJm{(nZN^U|miuuh13v^(C73^Fo@Ja7D-0eL;YV{Z@5;Q0}w4;HcB z-#jMpo^)?3Xi7T9m{|bW=AN|MA_bIAv;v=Y5_(;Q06xu0J()++MaNBDL&<<0~?iNXh3* zzL3jVP;)$eF07#us;SSdTfG>BgG@~YQrMT|-KKKuttpN7M(GV`!KTY8aj6~V;IMu&i1920hB26rZgc8pTCtZNnPJCuPSv{ zcz{MUc(Q1p+aXDW-_^~}*i}YhKpE9a2Fw8QhC~2w1)i7aW}KcPlZHgv+4Iu4dr@5G zhGW@FJU=Mz=+QIQbBHfij>hKw0%GOlt6pGPM0zR;it4-P5q%z&^GMn>k_y?u!!A3D ztb^YLYT-Hy`@_dW&0lc+n`ijNsf8BM8xA7|r2B!OLvY}Ul^Von*fP(Cbp|5NlvFSkEXIaKZc`o+~wG`F`Cb8!?Z6C8yJu 
zBOgqXqHe1~{sl1`9LI|A+^C#jF^)u_*l8-O=F$Xj^mbfE(kIM(};(?Y=)+=jwsg^O|8GH4!qBE;Hmm#m^_wmW1^Z#78-uM6f_gg=2eNMc&^Redv>%{dlK>?qey~z99*K^*lalilW$shI?vHqW<|CwubKhn%_A90rML0QPe(@X{D+pmpe zZ);Se80yEYj-P{&NQMhtwrd?+R7tYAbiN-hF;{we`i(vNCVkU7xp7a`|S&cT~s z0p0!TvvB{<$fplJvv525)EjH8`&*53ynJWJKXsF>@0-9`=BmI!hT@J7^W6IeVd>PI zr?*rE)_3jQkt|(;f|Y8;7^gZAv94K(I5vRzL-fw;JQ+dbXX-snzKx*%j`yn(4~7vt zhxI+4!`MeVeD0TaaWnG2c7o%6a1bIoT%WYs(EY4O7eWHw^#nto&sNi9{RH=M!w|COLZqw)o09!;ek zMI>YW^wT|m-cBRWHQl>1hNH-vRpx4#S_dMx6@B&pYzFkl#RUCGjS#+V1%!lCz%+87 zw7m}Q*Sodulr?QcrbNwM=~noj@`}wyHJO0eCA$6;Pnsg79U6XBQjHMQfHG;Z|AcCn z8jbPMcF1q?sy)P23`*axzEYm_gH$W}cZU=LKqiNDN&jRKNaPhs3N~SXtFp;R>i1r7 zI1X37;(PW^D=$OsV9e<&Ukc~PxRmE3*OZd7pZi_f9KP(waQe z$yA5vk7mX17|%uFk8WIflU{)+%=6S4UtoVCbB5>)eGxKWeb~^nBN?ncH2E96nqlY3 zpv>HncBmJOGxmGcfqgOD`l>zk5R+N9{`_YZFc)V&w*OlPYBRfn4pw1Z&+EemqcK$= zs1f?rub~b^41Y2A4Of9);kDN?3%I|T@g)24M_ezs7589cstJ^qCKO&@#JWGLtd1*~ zpAxlHtYcl*3FLvGm0g4`NEA*fWlO~z)?B9DyWAQ;o{N#w`#8=!$g2^Pd^q3gK6)$R z9p+G(#XaQ~$Mab??pzpA!{_i(3d(JU|Nr}cTXDX%ZoU6o>n2H0;!Wftq!=;o*l`8t z)7*o%&n4oyi!U~;uI!lxxi0mC;_8_E*Ly0$vuOZ?o=Z_?v<`xoZT$Y*8GXQaqW}JK z9M;V!x|@HV3`6~tB*sT?YLUEmgyJ8}yXic6*_()OakK{6Uq!dbbK}-+ggl@#%p& z_VvvAbv&Os@bcaE&{^2bCEjp(JqzOJjZ$3&XJOBX1d*k37S_h=4L)N1uf>PdKNVg7 z%wONSFICPFw?|knfYh&J@7eIV6M`#fazn zvM_f$T^L2ZTJr;A+i;(*)J*$%=?F@->&sfaJb-E`TihtlwWHL?{>#5R@)6(hH_aVJ z70^|3tu>jx4-z`;G=06spgG;_Q@KqK_y_zTTbab6&Dy!KQ-`_{E~Nza=wM#~Wy$(^ zMy&rqD%Sf1=MjIgtdT_MJnn~$zA5`ShxlFn@0F-bA-WwN-hVrX5%;T$I_xg3NKZ~& z;EQlBM0@MD)12#tOH{-)RXo2q+{BjJALRqzX(fq6EK$gN*SYtD#0=!kmhJYCArP^b zYhUjES^`6hHX=F>9neQ5rhWs@0jYm;D3)Q#NA zxK7v|UnuU6=YzgpyQO)nA6zZ{>qKdBJQOv|6S5j%iPPQPOdIoio{Y(+I<&(oLu$a$ zsaz1Ta(is3o`d>nR~Jm@%TfN6H)UIU9t!9B7A|nY45db0mok^mMg99NXJ#17G2iTE zxuj?oGH04PDnQDD+UbYQ+ds8J)pH3^@v?RZo%qeb(A)vG{FCSI#x;PsqmarDjVkai z>5tUusRO=ymA$`j<9X_nLSrY`Ye8vb*v}BpHS9ME?VOLvOJ_a*n|HbcH280yP+vYUz-p6 z4fs6y%-gb?q2+tk?(=Nza5yQWq09yxdGj49O7v^n=oqm$?4tJFz1z zc@9o^bsW-l$Nf{5vh5*pMDY2ZtMrHh_sh6VmgXP41^;6zKj|z>(4_K*irCI(#J(_F zysI1gw?mJO#XZLNe1|h(A43O`(YA_<5^h6CLZ4&f^dam&skkj~*4&FCwPMP1_I9Gg z4);hY<9ZasB>$&$HVO4Va2(XXQU&F&jA>KKdf}t3>G44MaVT2nd^`Sk4)7nh&)8rN z2nLf}moU%pFyHgNuM+;9Cv824t@tO!+;AI8nS<~bS(hSh24LjME1LG30RB7-vzHE6 zAd>oL?E|XVhx+Hl-XdenD|n?UGA)4lp9eJR|F(`H+ZTGuF5aVP)2iP#G#`HtPc{vX z%h-QAwf55KLm@H^&(n9p`J-1~jBmdP=0Vqq_zpBpfT)&9{inhqV6P2ucvApj5%s$v0|;UYm;f=9efCRQJ4`Yil*QXkKaD*p~+d7S0c5#iK5z z`24&D@u&l??G!8TR0(~JwL|R1t#DFO`xGQmq1;t4zYyP7yuW`u%AbMyyh;su6<+T_ z%h2M=>6COxok8C9nvTCqLLsM5B)4y&r$IH==#TdHqn| zM)ccnyQyaP6LlO0=RUT@c%MKI3+@S-1_*eww?Qzw2};K(2jNN)J&s^7c`Lcbf40CgYRF- zx9{gQ{oT*S>Cc>T|7SnvY;%tH=@{`OVE&-Xgix4dI2&58)>YN690aNQX1>nmK@hp# zered44dtzC-_>pF2W3-T-HweozLNW6Zw2;%QJA*f9@8FxIf=jDoa{h9K{;<>F~&ii z#_}kRPYAB9PLj;`=)eEvFw$XAPk3xgyLUUMg^1xf@hQ=!gOHy{mGpbxNrdz;j~-6z zAPV4Q|B?PQZIYs?8w2BRUt}hQ#jpB-j8BmVX)S zT1*8c&pI9r0Vcfl`MjyIf<;cdwM zJ=P4WV%%)Xu{0?EzSq|CeGw63cG|3?p^12WGQ#JYD3u62aeP5*NuL+U+?0+V|*a(#-?G6OS6_T5|nwKOIRG;|HJ1^K3q_G_xgTr8_4G;f7xi$ z16?`K+B#ahz{S8bHr*2aUq(qs+NC}cH9O)r(>huRWm9dNuk>!>h+es|b7~J^t&te` zwUUYYLLb#cZ}kvTc|wDxUERcNkDzUl7Hvd9@P|u|_b^^^p+DS>&w(t-x70qO9Tpfa zna{*Hi4rO6=gq}ba2-A)eohS6ZFeFhE2xcNo+Gi!^Dz2z$5-V2z&Z+Q!y)n4<}hG5 zJc_wj6YFb)Jkpr@)d{>`o+U1~sDX^z3+#irFs{gY*jcAN5BqzYv)!B$IHJft8y#H- zu@tMJ!#gRk>V6wn(6Mfa{|Sq;M=4-EQ|RUotiSFVZv8`7qYFGTRoYbT(Vz8+Te0Xd z6U6h*-10q!b!3x24J!Y{y6x|eNS5-n;ds5~adSl{jL)_D{qhU?4eqaDpQ*=n$i?7m zKjuEgyqu#aT+&{^==-Ecsfw9UZnftAM;~8;B|fIBAd*4wF09^Zqm@F$q$RwFmwW|N z7dlEav0h@u&0xL@aqS>W7dG*})DB5;ZCyc&>cD$Ko`3LH1@Kvz2r6De{ZjLZQP+hS z|HCDtaFUq~=kp4pHvj$zH8Wl{t=wJ?Qja2{wr#Hjf6)nQ3bh7WT(kqaC+p$K42|eX 
z^+uTSq-yD-lqOi?#J7`sOEXZ52S2^1wSZ!VRgpxe@&Fk4w4_fS) ziM=zLAb#ckI#-NuFTY#ocWZ@8TiZvB2fn{}JSJ7SAHG@!2j5=uPG+ZM|#^v->W0emjyV? z-`+x`uGR_vO?03=UD7p-+DTAVcmJNbvy)(Xi+l*V+(wja|EynotdZafwhyRhRbrf) zm*7RnBm_dDOtul3kR+7#EcISJ=*9)seOpRJf5QIzJ?8V zDr90x=dqy3Jy(AW*Z*2G{j$o_SWvTl{8R2xHXN^gwDkh!N$Lt6a!GvB4~ZsO{2^NH zAb;nkw8l%+Z`*BhZ%=A5F==qJU`ui{AukiE6#s-uNX+tk0}b{pt~1$VH^ z^`UN}x8>Hmwhq+&?95&-71T(~4nLJJupf1cLQSlj7iEB7kZ#8=t!B7V*S*Juw+qx} z-fs{pr9iag1G7}rqY_vK!LyFO1g>G3pGEbRgg{Qo+Im5ZkH~3^bvV>b%zHbB`k{zM z%)5SzT~mlUL#&IpzM^j8Xe*r3xQB7DJQ+c5sToAZx5bBCc9nsrOP8rEK?Ry(k%QY) z21LC|th#o92A8fimx>5e!0mUi*XD72|1EF~s%~!r=kNq$`<6E7>z!%0<#`u`-W##) zLY<_8qYSSXYnUJsbH}h8>vdKyd|NH?f(q*rFVNnP;Cg9+4Eo=xaL`xCd{=BYxCm}s zy?-0lW3XG<9*=by9@@Gv^w`}ndtT`%%a;mQs{(b!ZeiUH#hB5Am)$TqT=+_1DFc=# zuk^dTz8h}eTytMf3%~bKhdIm*T@aprq|DH$3+z{emfqre(D&SZ$a()>{NOn5E+jyCo7laF3fL8>kGTu2knYk#x-cx!85i+}LQS6nB26?>LEhJI~o-XzzH zT&!okX{go-l8yZ37``*hb51XMw zS8j$c1MNk#uLr(MXTZk8nnl*0EXY)K^H}Zu_i@V^znoStiy^PAItUI6)_A-e{P&;d z@Pl){zJ`v_t3=EbT%LD@=7GB4S~DXB)3IJDI4DQuFoE^7aD|a*H#}Z)_{oa4M#$Mp zEBa!L_K-s#{0thri1WVQEG73MqAj%NQxV$FRjb$Mv2h$(Gv8m|$c=ITJNcV)e_?&x zhK>uS1)T(6to`nFHmC#eg8Pe4WfRf3p>4v2T}_neAK&pw0^_>B9P^*pm;`~lG(CHT zYhmGbxg|FmC_s}NS1L#S0?zm5w157dKj%lBZ9S_sp>(YC?a@T_`Mk9j3Y>Irx|@eU zLBYbD+k>UV><-rYiv}%(M+T#%2FJfy)U2T{LtLN7mem?cztn zCBBL}^wr&ICe$U4Zl*0NC0u5y2Rj=_07L87-m9qZ#y;|5#`VnBC6YrRUYVvxRF@6+^G|WYkS)^SE9d(H#cc&5eu3k_H53# zLpzZDy00a+3`h<;aY4(94oY4t1D6{zz~96zEcpo5-|DC`y+70o!KdtEHKZ}`dR5zp zzvQrP&Dy}%`DIv#Tf~I_Ls<{Fao3$VTtG)V_C@8RialWS^Y(ryqp+ zHO5w*qr=<4M2dbq73?NG7eq8k0=K1J;!tx)M+zBJsG0vlhIm>&7u1%^TmDs}I%j%(3d*RE4o7u;(7 zzKkzDAeqywA+WLsxEO@$PHj5ux}Yzc$=40ipU=5zV!brxFX@aWR0=$f?>?`HcA}@T zsb?_eZbgM9yAN{+VvjnkeiG1cTPX&^yfeOKj-+L`#Ia3_Rr68_W#+2 zj+l!8eVQj4b>AnhE~mpC(Rpibh@l=>a-j{^@r?avylQ~dyhmbbP*nRQzB0u z?;;A`tu!{DfprW_8{a7hVjRz^*{d^Yp9tRAeS7BMdq`^kVTnOw%zM=eXsE2}2K|^# z#0et?Z259dja@^Bk`0V;9CY*EdKac?27{AXgjf`xt1`#G{@clXCqM} zpb>7hsgaO(8IqnCh5BfNw`I>x0`Yi%mAy3TT!eMo8@eerLUe46BmYPjSgu!&)5@m8 zyjrnYn>KU;LsR2HKjHuPn)N zS&4NpnG#K54L#r^bwzeL#;+WxF`hk}zXSRNbk{2lw}Jb2^$lruxb9!J%Q3!#23u8x zVA7-qMBmg5iOxWs>9_3-KR0%Pld5dCtqLpQ8HzCAgrg^G2$FNU3YPzPW08g<1F7DJ3!+mmf3Xx}*8t>!BK3C@es*o)Ec zM$x^g^7S;v{Z<%w$j@(w;tO?0qt{SiqPl4hWmhZIoH>3+f+*qf_LQh866zc(oMq8#E zXrK=D#4hy{bvD1nxL-g_zKGgWuRNmhe3;Fn*)^!o(h_6NkNH0yX3vkFM*Xk0 z=)ksZ9fYCrx-*Th+X)XI0f!ksS_rAh^Nq8XH4rfy9)IY3i+(h-#k*%!`ehdTEByY}09L+n4r+87DE&HH8qo63&*{ourKY*%{lNR}fg9bS2aYK_ zrrRnr;EeU4=hP3>2b%My{h37%Fsr|_+>YVE7OH+(hu|e4*>gpl;22ry3{Ad?F?(&`hQL z6_^#P<`y{Cg2IODXYOaAT}@5-a?I&&xM8$^9&d0LT&rGd*DH(TOR^+)SOVjZlIa(h z?|OwZ_&rY}oJt78{#Vfq>k8uQZTT{>R@BK{FTy3R{hmnj^vk(ilMO+;dji(tJf0}F z_A*?II)*uVUxtn8@c46osX#py+NYk%SERIo{XG%$8lwi7id~`Vn^^}DmS5hl4QYbD zQTlh&Y6>h`DM6*+^D#RAlQ#wPBt-AaUGlBMI(cuNM8;y=Ma~J2-bbjLb>Qal9X`OXJ8Pv!L6Yp*)wvWExZYo+gXuLT3JV?aH%7V|r9JiN?YU5@hy z!-dgVzBDL}bSk}ud2yLO*87IXx}mr4Otr^j23W=me&uf$%FII&djwT;QOj}u>9tCMKS>G&odKxXSg=P9g5SV1>@!c_)i*ph-b4fPe!BQro#98t-#V}sqD`C0$f`1f&&uhU*BLaW0;sjToFDS zJDa-7ep^4rx>o#uXMsnXx$kM*$E6}9lEP4|6HcWZx%R?xgQIf z=mn~0Ou~8BJ}5AIs_@I51(m^_vdcnQpm52oSMvlL&UBrlztb3mVaI@|tysq@yZlCW z$;@6DP|yv{LY?Y>Ede4KvwC5Lhnt%2Q5@F>aj$2@GeM5;$&6d#JrH_j?J~_xOd!O5 zuDid!A1)@cAhp?SNfBxK$QDdMt{1(P6QYL5?&q&UB%hM9|=g{J{dTTip<9 z_TDdPkO~q3Qj+@LusoLL?h( zZ8rzZ!{@@v=5jW9jQV*eXZmc!_#Mu;`ZFG_{A0?V@X$fv{%}?zVICWL|EhT|y&t*Y z9sZf$qz@8n9b+?e*uXiDa~;mO{j>dZ+;fiqpYHFp?BpTvebP{s7U?UC`kyNC*7u!T zz@lsE=3?$5i1DrMm9dK?_`I4v|5W-+SQGEgT?(%!zFyNkk|f!R@qnMYCgyh%XGUm@9t^@@wctMu@4#{#I{nR 
zl&2iH=9S$l-<$;2*PM(W?ZAB3;Zp63hdvTIzeJat^A{4(@St_8c6=Dp7a)?p$#+;B5-rFmY-D(z@OUjA)3``IZ%9DFpq3j)B?9#ShtYR>j85Bb=kVN&Ri$(6zz214Gw1ZbwJChQtsCJdazQo zwsYH74UE=czvu6&;V?!s-wz4tN%p9OTo0VUtXiB`$YD`Ocea4^~q_^y0~cO{eIvdf=9b%B^X?5N90tU0e{Z zt~~&QWorv}f9i*L)wx8{8?1BvA%SZ|ycZP12kc%?_QSC^CoQQh)AN<0d5!@=%LZYk z^_e42ux^%XW|@yyAJ*C0q_rAqSg>$l&j~S%qhtTIY*|{s0Ngs}OL>L!gpFR7cX__C zVPE7DJ4J3>kIoBdGk!S$89%#(*n0+H{YXm7^?7(*m7>TFp#cajEmCPO$2xvX4>dim zpT0lm`~P>_f8xNuyUvpHkr$)XFpl+e-Q9+~Hpp7FphP*Y3b=%-TMAF5081gO!?-w| zP-VtM=tWf!v!AefOA4``TYg~zb6Go~ICQYlz@0)!{O;Q_X+$N&PIJYnOH+xG(QE!Y z7GgeXK;!14rLDx{(E_3q^VJjX_DZ88j~HdOooFbHBr@KvDvQRvYwMf3{)2m(P&eCw z`{_I?9FXIxdoDpE7Rkh#w3l(gy7`@wgx(iGa z(&QrIyCK$B_lR9C1CCXVs%$yUhVc02wx9O~A&t){TK6Lbl0G-d#A$v8s&jeeXyy(O zdv*Q5muSqVll{oASX@Lz&q3Y|= z4!35EI&W?2f}letBb)CrKYxr&*5n((`-wED92&lOr?(B19s8C( z_o)RDcDmeg4SoQGTI#^RbvSgcL6P((` z?%a+3X4;H5#wD&axN7eBV@GNa*e7WY*FIxn9?08!2QK$Ok%{M%!#;ECgmfHuz3GG6Lr%``;Sbfi( z6}`~nUS#>Ykp{U-G&8DyVqL!D?{<%SQbAYtm-Ik1J_qN$MJE?f;KxXR*eW$DjEAe& zUeD=lO6|JJ?^0-w?8}g+B=$j{#Gv=TbYTA+51egIn^s_?;ek3j>D^zr zH^0Yvc~38Xn2Yf+t)F6k^(|#V*6n+=7Z_j4c`vV189KTJ*X_%k3w7l2_wspUU5&fM zf}Q!dpGKh{omuRWn%;$VDrO|F-@gs@=Tp0b))!63t8=cydB1bUC1;!Sy*MrBIA@#F z{#W;NKKD;8=X#v`^ZO*Zq+))(m~Z}p{mIS1HSz9H-km(GFV>Xd&V@Q`5nh6GAC(h) zX%EgXe22Qf`RAUUlSlhrOtRVDU<#4*`0F~wGgKnRs#5EM1eNee%Bw6)=_J1Lt2uBr zU>^II9TwYP)e;QXBPrj93khTD`V7gQIE0c&XinU zLP_}dT;%)?&hw4AZm+Ne*J)A(or{d9bhx>~Q{mvERILf~7lr6x#@BElo z3mt<97yGT2Z8uS8b`MV`l;)!DM3kiHlJ`V8%PM`ViWllQuaNcAC;*EIx6bV|F}|Dk zn%li)osf23b^g4XZm^MCxcE5gC0k!OotT=?4XMjxB>V7v8ffSzA+-zhJfpV_M(o7- zbnKnNo$5Wn7~k%6PoEBkYco$BzKiuj0yN?l&Sb#C7iZomc6Y(d%YyN9GpVqYu-)#k z0^g(8yz<#J zeRdtLulJj->KvoNOPRY3{Bd+pzbbzE)eQ!WkDi`ifc91I2D#mn!AuA$9gPe}eYpg> z_x(YvzcPpCv2#Nh1M-CUZY^cf;Y+){;Aj!rAA~DXQe;^mt#>2Kw-)EYhr}DJTX4Ll z-RR@NxP*l@?$Ud+8DP}2t_(9+#Z)!Rbewmb3r=w*7x~y+5z>=k}l9^WPoke16N=f!#G17!cFa9d~3M z>L=~EHpbXn2ctF<7FQl-LExzqr)<665;3*g7k3{0LQJmcbj_%5AQHxJRfueFBhsjI zuAjiVziIu_tFr3QpQYk$@z@9L=Ew70j2$`%iAO6QC?ugy;K-Z`#|DgtTdDc!*jtR_ z;ywB6QD7LTHfZnP7*-B*I(!us^jly&utK9^9@ZntG@US?!Gcbw1;6_rvmtheL+Zyv z17J`w)V=0^<1f8)xUeheD8@mZn|SekW*-FkJXThFh4ylgSASeq2T9eJbh+*FA#ji2 z=?UdPpe~AgqFxq9xMT;7ERKmLsB5nrUQvSYdktlC^}mWBoYK_Pe;o5l8$M3jrglQS z6VL5@AFPv1^?jVIDD0g1P~6BYfG}j)>U|Fxz@wYt&#m zE{vxnUsI!k4WV$^vPcL0_}(eOm7BUic+ksv_Vsq~m?h73(Z3bakN9+b;KTY2w2hjh z7|)S*?g!y|xfcSGCIzd+`hniCkLd31L0yXFse3TrYR9a`_gCE*;NoNZ`P>*2^e)#3 z-Nby<7R$IBzLZB`!Wo8-^ zzO7m=uIEIDEv(z01`BBLTXeCo$j%;EH0!V4TR;2Z)X=!W_A@L{lwdSw3^T!3`jDoB zc`wX+b!wn#(e(S7)1Nv0p3`#nbKVb}{r}W*KL1~BN1G;pn_kEDyVq)MYa2Gm8%?5h z6zgyF-J6`r$A+wEL-#59>G=rfzMSj-(>CY5%sKw=+JCy$uxN1nzgr53Xq?T%s3`E7KYXfAM8CsqvVP9v<~TDKv|aq7}^e z)t?R?ce1QzZbkpoku>Si>$uK4UB&)ad}4<9S>s3F`XIYZrt?i^AH?&VDizw-4|Dcc zt~`eCt7?_&mre2cvQ}Pj*_X%yecja>`L9qf*pso=^dTEUCKlbHI^(+VOHbCaYcw#- ze<>iT&;o({@5(!t;>+XHSN*N!>F`*tWrtj62$YKoJ>Px85yWI}E)s5f0oLuyj$0|` zgXf!HJiC&rAnmN(msZsKYlz?yKcLwOUy=*7+@rdomhy0j`3UvO4qUzwex?`JXXPcv zTK9pLTI0zPxjsmq8=O7t#DJX_-g$1g)DGdvjD{(V8sHV@PwIMt@#v|Vr=^`sAjmkK zb?Q0h&7D=^etxhSoT`Q!sodSbYnf>z9McE-15*Xp^9G=CoA%O&m=~c~eUl+B+zo#5 zYa0C6t?+gIhpJe51MpCbH!NF)acY*mpEk+0LF#~C!fTms$a}xG`Br%!goeW`ZwEH` z*~{xM$L}|1D57Q}u^0GalHHdnpuWPgQsr~)gK!`};CPM=zF%jw-#v->SP6!|jeJCU zp@iTXdUlNt(Us+gKXiA)n%j$x9!{XajOt&?W_uZ6v1>}ri_rr+7Y84($v_?FzgpUz zwy>apZz|=6DhsxV>G`uUF8#i`WkECgT8#bVhksD0q?&AiG2ORr_#TQ>gv{6EKw?AYYM zLg7Ie@OQZCg!X*-sbn8JjH|I}^)N0-VS$y1s|p3<)zse%ORPD>hJ$bWg7i8o-4#hn?Af2erF?gS#`RhV@NXN7 zmg@k+-~_d+6Sz(;v_Dsfbyqm|<9x5fwzp&#M_|12u5;JN^6B93O;KY=W1MX9#|?U( zbzod_&v%CQC&+lH{z_Ih8#3%vjyY}31g+00k%9_XXYTruFV8Tqc7*RG^W3aG`gR0>$#YYOfws;bEUlgcJG; 
z62gy%cz(m*FM2A)ynw}G`9D(lso4Lg{FR6< zu=ylvs~drF4Z|+y?rvv5pn zU0qxyz-)9a#t`aI|S z*Pa5aqYD4Ne&Ae>b3Ui#?EkanT<_2Qob&#v<$RuVKIe1)w9VQ7Pp!3Tx~8E36SiGG zIwvrh0vg<#g`8K{KqKp9dB=Cuo9@pS=qyhpyp6^yhc09M@6d^gT5pWwV?C)f=SKZ` zo2cgKtOmmP`PmQkS`CEy^=`*CKDER~)VXM=Dkofa=va>=0a0+`CRJ!p5ZF^c7NpyK zf-k&xZ3E9W!zQ;qPU_dXfv<4++6DN2`SW{n)~%f1$r;z2=kRCy=lPuT5%qFzRlJMJ0_)+44>ABmAx~l#;?mle;uqo$Di`nD9kDIexxhXF4XgB|Q>!Dl{ z6TEak6dy!CTpHzKg@ia0;~0y-UBtTX6Q(+PMc0^+6Mp;s(PTPs{StOmPs9B0gRgRW z$LJ8a!^Cn{B(4u8kJmP19T;<=(?6N$AHY|Kiwyeb&JW#KHG(`^6RLIch7=1n&Dmg; z9D?>&t@qr-Fxr1M|LoGmIC1M4{WjV8Y`Apw>{AnB0P}JM*zEtLv(+nJ<5wv=2qVuU z*PI+00OMuWNA{wA?4SLJ&!FgH#{?VZ@I7XISdMxW?z0$Y()&RI!(X#ePlL%t`BX33 z2fV?2T^qtMu6K2f)b@hEU+2&3{?j(6-~Q9_f7<`wJ+GzYN1XYxE+Cv4w)>Z%zp0|j z+J9~(Sf5MZ^!4R?C{VJd^gW6t0zIGI7~O#Nf0>W3?78rT;Ge&2yLw#}QNBU!bTh`S zjby!T%+anQl;aiL#~CGrK4Uh;IVh8mVr-EpHjKnNQmR&qlFGpGxY&xi1DLO3^v1|< zM(^M3QvdY3{@l+Q_kZrc_DpV$>TA>uS-t<>;dN|qQ@(%M9OuhwN=((VRlVTmlo)S} z^WD7LKVl;#Suh9!^U`six>THrFG>d74))U>a>97E58YvR(H{Qws%eWY`kA+0p?=+= zK0OcNaEWt&Gf|hv#pZ)x^)U3v5yij%?;F<>byS&ta9Eoi|09A9*DQi$YB3MIvYl0C zq|ShxtCt)WeVZP4fBwEd@5i|>r}czu!cv{nubvXA#Mzq{e;IL~}smqg@7gx;UFhj8x8X&3OF@D?r_087#LEN!s?pdI@0N|Cl_v1%!jGe zn6rtds6W3j)#pPv`!d;kA#x7B>r9DmpiZZ~CS<+rziqNVmt4@X>|4x6l6Uxs?K zyN(C~I|c%cefC;heo4es+iOIg&m#;!kAHG_SVWWycn{3B`9vg`OEkVbQbe%seK&ph z5bM(KI(@KDCYI3h4b9dNTCiak|Hfolfpr6v|M*f?~K=km%v2)OV}?6ofo(%Q0i zd>CW^qa`5dm;wzJiYs3|L+u6;PfwLN^mB_POuYCu7yGM?rIQ*z12XcqYxVBN_*N@H z?ZsGciqql=S@#Xilw^Y$y%QtKdCNQ`{=>mmR(fHTv-#>JJ@!jZ; z*Ne2%h{L){nf@=vC6O~F_D8b)F|Pl2=SnHmhtuC?H7B*H4+bWwd*&DSfUD3c%K^Tl0Y_F=>? z!I1*9_i!gZ^rL{sWP^bV#!nv9zuYuoO@{@un2{Y_7*{Lfaz&z)0RndE0d>`Mc%UdB zASZ+MVdpe>4Y9hR^G8_O5;sE4@gtS*FyH-7 zN2s(^KX{~{ePF(;A1aJQ;tP(mz&~S3QVin+SOSBVuMhV_-P`-&1nSFK9F?=rvqJx1 zkkS0~XBa;zx?k`n*59&ElzQ|{iVY9XEKS&r`FqKw4yC0S$A7VJ_RW>4)Aj@Ll9XDb zH`5&Aa7AC5{j=jg#T{@sz&Xy@{-18wy&DZ!q{jf!Q>SbyWEpVWQ7o)*PB$dGRw!xn zv_Q@Ghpo1#cRm{RyLAx{fLO2SrL_AGL15L(&F7y)5L~{tv%@aECX%M~p3j{Bk`PGy za3_(+m&nMr{24Kt0F#2ezx(nPNR=sl+g&pt%@_Q(eM)VFl*Xhzt>{ zw$+2i$@L*MO%1@b7#En4)&f?BfuD5KaUHNUOuoFA3c+0MTYi6UhJ<#zzzY#I7=NyP zV5}eOT;7}HRhlXTi4<3Xji?V_6J2~=Pq`M%r*zj(ZEA*6mDd9cE_TA5?rY=Y*%Xj? 
zd|{ceK^vGUFU1l@jgY0DarYs!9(L5s2#ajRxc$+b34@s}(EHJgd;DiB%vqCsaw@(F z)FfW!ZcAtYuZ4Q9vke+SdfSE-$pqj^q= z=pVn7U`6|ZdX9ru4@M~1s*TZ@ZtgS)-Ys-c591XMT0|#sFJyqj74!c7dK&zcn}W8f zZZH~7uQJDRzEp72nIl6?7-MA3_d`7vns;E%7XvzgwvL$dd#uyoTNKOErozfip~)*} zqn=G)L(4=u6Rxf0vMyG_`S79>Uychh;NpF1-c5|luo`-AG=cervwNJ9_p5b+8~>#T zr*%+oX63J3iI#R~QCU}0cDxgW&6aCCMgNN2e9a~%>JJF{DbMz(s)okT9T(3Tqpnuu zP-t6G1?rEFe;s~}`B&@qF4N!Jf;t`RPJj7Dfg3Wn{L@r14*SyGb4Lo%|GB_6Z6t>V zqdlteHRumzZB}q|+{J*Y(eG<+3G{;DdSYfA>R`pn>FikYlmV*GyZwJ+T=PImaoUb( z8Z0-sFD@b=SCzeHftcAgS0^ntG~Qmc%a(5d zc+-r;j?JCE59j-RPUU4@zl!-xCDCk+w^%Ps@hN?*68*6=68k^=paNs(Ic^(_L$^Q7 zFnv{k^-~T}s!%z&ih!D0*adO)kJH#4>a{mz;Ww>FPr z{@&C18MeH17>>IhqV*E%rB?qi%E15AgB|-VHiizWnbgdv6Fs2uDrs&d#+kV7Pwr7D zKzpmm)F+Au1HvDDF+YIyf0r7wZwBq}fX+a#+a2ajVDyXTwDoEYXi+vkw&N=Y!?@f% z59gpBglGxsg?$0#+a6Xm?NyMcq2YY@SRJsBg>4xgYy_JR<5Nm{EfC;zM0VhAD{v1e z2ESO-2EKtxPGOiwV`NVhuQs9rpLzqW=Q#sP*QhTn+dn-Y=h!VWx2?#C;6?w0fUnI^ z2m`Jjd!c<{1?Kawonv>UiUy$yI_#86IxG)esH|;61GmTfhWAovAbU?(CS^Vo`t0^O zxZ?cM<%zvn;uf4|`n{iW+KzS4#KR`1FyGEz=jHF`XlEbEiR1bpG5vpM)3e8=(SP23 zOhWdDTR)6{>KP4kWkPTI(WGtYe@~AXNXq(!dDpS?e^%l8ee87NFN^yFVE^=|k2Bh@ z5AQs%)cy>{zs@a>OuC9#;~3YMWOZIX z9XVPVJ{Emov>j3+l*GIjc7h4DfsfAL4f3P9d)Mw}K(^?>YIBobuzW3ZLKxr2NjJiD zPGP+9EN#;}bftc9R*~vf_=^6^uTOpMr%vlWar*I}E$4Z2#@$8_y2JOK)8}#4Q=IY1 z`Mhz@a=}`87Vt5f`iuMjUSIyt`*6|MNDh!3AUQyC zfaCzl0g?kG2S^T(93VMBa)9Ij$pMlBBnL|MNDh!3AUQyCfaCzl0g?kG2S^T(93VMBa)9Ij$pMlBBnL|MNDh!3AUQyCfaCzl0g?kG2S^T(93VMBa)9Ij z$pMlBBnL|MNDh!3AUQyCfaCzl z0g?kG2S^T(93VMBa)9Ij$pMlBBnL|MNDh!3AUQyCfaCzl0g?kG2S^T(93VMBa)9Ij$pMlBBnL|MNDh!3AUQyCfaCzl0g?kG2S^T(93VMBa)9Ij$pMlB zBnL|MNDh!3AUQyCfaCzl0g?kG z2S^T(93VMBa)9Ij$pMlBBnL|M zNDh!3AUQyCfaCzl0g?kG2S^T(93VMBa)9Ij$pMlBBnL|MNDh!3AUQyCfaCzl0g?kG2S^T(93VMBa)9Ij$pMlBBnSSd HIq<&#U!gAx literal 0 HcmV?d00001 diff --git a/sample1_2.npy b/sample1_2.npy new file mode 100644 index 0000000000000000000000000000000000000000..10e4025517b4d898fd37457226e4073d18b7f9bc GIT binary patch literal 131200 zcmeF3c{G+?8|XDqL>e>~DMCeq20~jjNK_);fQj`}yO!);;a}zW4Ck*R=14dk-4#KRiLiQ^ZHf z?xee|o6?r0O4{}sN~@PD*}J%TxY;;cySUk%{MUXnxAQ0QZg)qUb0_hp^19Va*R9w1 z>rZ9%(#uQ#H+~W#fBpJ_kpHRyq8^BTK=cD*91t8JI6!cK-~hn^f&&Bx2o4Y&AUHs9 zfZzbZ0fGYr2M7)j93VJAaDdi^PzO$Nt%X$3nB{&wSN?|;d;gDz#4BBT#&G(u&1$LB##+!kirBJvC9R~ zi42fxElsQUq{Gvd(e%@Y=^&ZCAiTSq0fxnMu1WGZ|GBU5{YUZoSNx)n-G` zN+V^LGfb$xCi#doKnHX8^5V^^3`mill6q(<3%=hrdEAi6g?sM1-8L7Fw~z2Sg!K}1 zrJJAaz=3=3YF;lY^(81}lZOp@XHtf67H1#OrgRtI(?=9RG%2SWe44GxJ z;X*vTpES&XsWNXWgKjaw=|H1_Xg3GQSN+oy=>j0}MP|LD^C2dF(ab~v8$$LLi`yPz z!mQ~FE@~*Uz%j&qM~gA=FlnW5DwICvpKilWxW3g%bQ%LbEM}N7ovCFzNgy30{>qh z#!i;AApdOri6V#b`wQQ1lh*CN$xHcgchFn4ZVv~t18pjGS99R{!J4?ieH>^xN#{9e zaKYL)KBB>l2QDM7_t^0~*wFAou-=vr37Pd%53hfVfaDdo%KZrk z<{ip$pcQgJp=jGyBRw7jj9U9_*u;bHE0Y|fdvHDb%719y!-BEV*Dgo6Y*2`t{^LYC z7yQdCw7rvfu*6~crmYz~@R{;q*S0l$Fxf#r`OZTC+ZsMxW>-*PU5w`H{KGU@Y`Kl= zs>Fnm#Tx5vFb9O~E37A>9R2IS__!B7xA3^|I-wNq7rv)(JMrhcx;2IzIQi_xF_S9X zKTY>O(tk#Skl*5A>$9m)KJCtp|3zEnCm`%KBOP-a8@$*r2<4F2Exg|OT| zw(*i*VCvinx6WpNl>$5rA_^a!Fzv52D%}-gTeg0{8u#Rh=We z|Mc@amu$6%+VK9VYnIM?i`%zJRc>}P5B_|=Kkv&jFVs(4#)Rb|ySBL&_rVd-OIpLe zWGI+_-$Xl&0;->+{0xSeFmq%i<0^T)KFh8q=$cL8!<$`tqsE%!{ZCl_pY?d3ZGvnrF7S5ecZwYw_R@HAruCv951#%Y)Xgl z%b(I8>M`JWPQSe2C=*VzJ8x=p&hg=_>F6`7dM-p6FP(gR77y;~9O-xt&2zF_z;;}-@2hg0QJFV4CPw+Ajzt~UN%JlyQ|rY-|9v99{(gsjSeDVCj^kWbn@6ILmp&hR$a{e&4Ky4PbJLUIUw@> z%zn~i%-PdAo&HnC+vCsj!jwo3Mpt`)xl5|8OOgR2lFX*NL2UOJpSza&sAe?`O|LTYW82Yyq?iaqVkP}|- zW!EhA6+l$)t>lb&7M$W)$(uZ+!R9&c7KP1yu&KDr@vI#lCu{ZIF%`+6V(~V#sgn#H zU*-g-zM%kp-(*p9PZ}6l4w>9eqyts-+a78%9hOfCER^Y{K~7T3v{}<=AgvvKMZJ;= zw0@dpiYN_=Qf2!4A(5*=<+I 
zV3hVaG@eI>nuubwxGJru1EQ2R$Q877;P#51 zR<*7RRlH4?$`Lqwx)@qnNdPIkv zjE9TA{$hhghSkoY7i_q>Yqars2^~C_opIP7O@&O88t-}y8Z23F;@IZQgvKTb_36KP zFuI7NMLsBi!AgnC2h!QF;>Q4mz?x=)fXHPNACx#ERaNp!GS%PZE`WWcB@bIY7< zOpyCX?(;F`z@PJmkXvpIvY$6c@*(lO%W(VID`8 z+bvka0O{WbN-we)5bAAT8r{GI)vwB90W&#J(H%WwY8eld2g*aP8*pLt@%tn6QU(P5 z=qR3bodzRXVjm02XmDhagei3o6W04qJd%RPUDHXwwZl(%;2+Vopd?NJ!u$U@?j8?r zkkYRcKw74ptfoC5R7{IkUfL*tx$`fi86FbA=B!{NpN)JNx*axf@eKzgw)a@X53wLf z=Y$pNW`VTQ>`luP*)UuE_*{+=@3t~DIPcEDf1e$d&)GMLK8 zcFqQmrJL8%;mX!@Fn!*V59YmaRyTLT zuq+v(XY-6?H8Ce+1zj_F9Z)jM;zz4P3s8^meeLSh2>MB4rq|~;LIumtW^`c-7{7^R zUHaGsPmK0v?!18eHKn&-1JPje{ff_93n^gQDeAsdjsi_@JdXvxqJnf8`+8Ca6?Ri& zqDHa4z;*9xu#P0bq}@7O8XWq-@v&)g?lu~9rBy208{&RAl;*JiGYt+!JR9@F<$vj^ z*>-NY2PWtpKQCX?2?^C>JB#Fd!0@Spr?F`t#9X&srbMFxXAynUHE}95hn@a>eJ2%O z-;@vQeL{!D^KQs)BC}!Ts)*M~-aI(;$$KOxfeS0IPR_4<$cCj4UcNqY3+olGU*9Ec zWIRGGid*#N_t@^*I-J+;jO~ON-wJ9H15T1&GeoyBK*Z^Mh(ZbG%i~WnNQDV^vMtss zzGp()mW5%K2RSfowdz*wB>@-@h0@QT5rADr{};U{d@ys+F6h}c-p_^YE|d}*{hf3K zd}uObgp>67V5v3b#7fMc<+n_-tE2d!mA$}hY6uTj`zg*&UCe{H=y54`4qSSyaY{$J2Zppi#(NKSf|JOST=S6*kg1uIb;Ga|W@(7~ z%FA}cy3<-E>GWRUgvDMwD%JOm%4K2_!F9ZZFv-wZzW+4li5@IJBQHW~U3P&+?Yk)dbx zgH?m2eNekNIeUID70PDseY>WJ4mTTQ=9S)Iz*)J&+fTe^fXrAmd5$>~dVPBB8j`U+ zdCjnDM(X(UJayi=SsCkr%*=u-Bsv>@RWbKp(_=x>Y$^LQK1?vT%Ux#Y!GPgoneXT2 zFyO$dy>k*`F=zhz{dYgn{J=N!MqCB3UZ;2ypT>j8ps2oZ2R>{UQ{~(%<$&+ixPz+L z&dpqLz)CEU3tF6{M#*75%%|D!N_Gu=ierACFnKkT6LpB^`q?n#! zvmh-#*7GB_qvP##78@J0z`$T|>$dfH-s|>KGsJdkmgbY+%g(YPYZ3opyDl4!2c(+4 zsbYcLjEuWa;uxT2dv$3|2OVTuqLc5xrbE4fX3{rZCOlxA^RZVNw`Z(uERP3Zu6kYM z9k$8D<2!FwPT6f5tcksn@F9r`URIYPT&w6{J+fYOVgv`ezQ?xpwQzu~xbjZ?7dl*y z&KN7KphA#?@x&0^Ut#*q{aW30_-WUo;)mzWshPbGcnm7sO$t`MBT0uw`K-3ZH|Vf8 zs8n8jA_G3juh}E}jtXxJwW6w{DR6T|q0IEdRA614WkKzt!RYLVMnxH{|K)ZOwx5-w zKQmib05?7F%=`9S06N2MuhNe2K&Ey`>m%-m-~GG0nxtrO>vkn|eme!yroBlxyqgA2 z15?mp77H%kseiF)lndWqdpDX@W3GDgIhST^3*NaW=Rgbku0}%{)qis)Oo5BUbr7l%6Vzx$cB;^U9l}k z*bqI*?Te@*13Y6U4+e7jpsVy%wwgT&!o4rZF$E+@eg5^#!MhX)c`92xv6cq*t=#o# zwhY|v@$kvxMUTgLwY5K3{)K6YHbD)_Ib(+MxBVtfQA$Gd$(CQ>kYf!PF(z zuD_`PL~HDl)Y2MYlH#0M=4Q?CE9_%=ML;KcfC~ld@wFpip=@|#8!u-f#)M55 z%_jcr!S%dkS~tC6d^~cq6z)G=z=bvEl`HxcSTK3x%=O$BCdfbkRQL+}_k6AvYBwKX zLe!nYyGz3uaP#`z{c|=jplhx|d({*sNIrXUZny>4|6hul;6L{MpS%+GpS?#GhlH-g z`eE&SzU5ISXirm}PJPUP#WQlh%*&vI{xk1Dmoyrv&zM*_)0_sV%reYmI~ACA{I7v^ zBsgH`TO-Knf_)RF{5oIP2|=fx1*hP7d1y|)s^!8yc(FfEYJWK$a?f?hzGpKa@X=L+ zMga}Z9$vIMX|NAu?-uEDfCN$MzwHiV{XWTaW>wsbK5+ihib4OYw-ed73r4R-D{ zSuqY5g11!3MK^K5SKFwvMVkXoE6*$~YGQzG@|tB@4{*OJ>fWOnN`s7?4J#|IF~A^w z?UB8o+2FtYht!fieBdwQ7V|9xFzJ>_|GYfxSG)N93|oZ-wH+n}k~u86WHRv(4fDsT zy}ztr9}BGHZ&W?=qCnX}(eE8+NHF+p{@sxKBzV5bTI-0Mf^-9}(E! 
zdB{{XSrT(D_SUsA*V}ZU$C4CQKcGXzByB50T?R~uIHG^B7$3hHeJV(i0a_MIhieTO z(9l1dCAEYF@)dSv7{$`0Y zk%3sh$a9jM`h^Db)@t6_G)4yv!4$JD?2nZ;l&UWhXT#2owW6>KJt^;TcEg^}1lEqcP?)y5GnxVGK@RbxKxU-&2ezr)xkl8*gvHTFNR z4>xka?n$(dAGX8X7GGD;-N%O5%I2hRV@wb$_2-(JVZEu>-+8})0*`drBqwagomx=n zaaoH3)9D2zR&`W}8;)pMgzfWvZ9Z#TuQK82od()ENiJ0DQoU;L@}S70LT`d52Sj`J zs&odjz)6y>d;Tcq%=Q+~$G%j!kQ~_N6^+NQe|WNiV-H+SD`)p4cfh8}%u%NaZD8Fsf|5vr7H zU_*2VG`TvDe^d;F#Pq~4zX`E++QZ{ z*yBP4U61UVb-4bu4#${}38>I@GBMwI4GroqeQne^%7CjTg_ds3-2d?DPfiK#D50N4 zD24nIO5t@vjtQl3zwkU^c{0j%x`x=lewkI3@n$~@LYjxC&OCwrDt>_p$rGty_{4^- z?oNYk88p3M%-O8k+U8vcar}a%*#4n`4mCc%NmqTSFlkppj9)VDU*0~$A1^Z?t{M2} z=5t{1L`14Il>^MFw{J+lotyf)2`w8 zlyLu_@vN(3=hyFig#D~F2d*xh#fOa9nHHyyjn4zZ`myG;R=Lj+z^2W|X3u=agVU-l zx2&wkpHo<$@=r-wL(15$XxaO=2DjS*=NZF8#XK<8vz=vt<0hM%YUVx+A1_~co$x-w zZQ=6@xBry(8CQk2BQ zUOl&qJ*f|ZwX-Y@FOxyzR$Wd`XCJ&V@#!7arh%Ba;YljiKO)Euf_uyBAj^HhgTd(^ zfUNkqn!hFuSuyV^ZF*jgiYA?xZ!u^@R^{DB9;t0;)XVhfv9(=DeWIe$ zbZjr;evmUyIo6BfTzSVF=k_4_Gv7=7SGrK^14ogB+wCa&#K@wyq81c4cf+)jC59xqXWbM4v!=*^B`Em5Lo7pJ%>E_NO51%1>ORMl2xA#B})*sT^RRBTDX@<}C zmV>XpO3w6|wZM|TTC3|^2LrEW$H%;`1?%!JUu#tBAYg%Ma@*2IuoV<>*>5^vaiPn; zpT*4(kaH~7COR2>_!WtoBiX1rLp(j@TRF1w;K+#xs*$}*(bIN*B?=z?`sBWS3F5B2 zi*=t2%&oPfd(H>L(6;8aKWy@Vn`g+~)QZ`3F>h-8@m6^EIH`h& zPyPq7o;>%_Sni`;m?sJcqi)`Bh9KMYwRbyfKxc=8zyA43ShzQ1*`3+75c;jSCT&e`YfInQHLCeMZL##(XYpKJ`5?Y9HGhxS*oLwzguNC%VVL5+F3CXCY${Yb4(38^m!sElJMEk&*FXQ%LCzQhH5^n#K6rLxP!t?B1 z)I2TbyQ>elw61J-xSALfudpnBTvD*rmMu2@^%%V)S&y z^D?EyS;#8w)?KsNNyw+`mbJ$BU_|b}awbdcEwmnbNa1T2KwM3&Wl>lyOr6mfDSE#R z(q%r4jhK@`K4JLax=|X)*v?IzfXEQa-+GF%q6w z+1J;_c0jbEHdno_4J@d4ic2agplBm_a-JkZbZWpynK#9tV_w51x<;yL>9T6GX&e16bgwh_{PTYl@&Z-cMTb#^~-@4@AyXQ$`( zfqqNb%O5QiXnCt29-N8onEM{9a;g{ZynFLcdmww;~u4yCpa!>P@cZ?74|P?{93Q_WNe(zqoyu^>=+kc)jp>gxf+X z+%J4Tp&s&2`-Rv2(|N*j{=7bN&wIv)yuWisc>aHrf0ida|Ihu`^Vj@%<p&*M#>wdTLomP@HBKXiDEQ&m z%SoAyNd0$j_cyanhp zmAUgN`#!a!lF&_+)vk3Yy5KNZDm@D|T-qDq-CF`}=f9Xz_V<8Ct-G!J8xl)N$k^6%4$7{oWqPfj{3*_}1sYK%Pb|K(9WH6x0X_n*232x>8(^mAF?-I zI-QDQjx}gS`jAm|OvTv2(spF8wRl!zQaRFTZ?Lwj%7BK?(_bCN^<}@Eb8=3=c>DF{ zZ!U?TabeC?MW3xX6i|EbnD)E45x6!h<-QwcgSu^Y!s*Aikvb*f^h2XWl(o}V@gd_G zDq35*T610otlJ@JZcVR;`Y;7;*#~4O8OYmXUB!g-z)ef?U2wm)J{{CBfdhr#N>)4? 
z!*Rq>g=LpMx53@3FDqWPG(yaV?Om(xHN%F~yy@!kJs?WC>z><$^Pc|t^~25kRR%7T zusv>6IBi2?FDyJ-)<=8P3_Gq$ylpUTgq1QD)1UEMK~;Ke!E;A4JUD&&+oeh-c#Uj5 z%9CM3x5eq-gE)R$ta3ag0_Qbs4!n^UYflA!a?h)-mvmS?s69LP6ceUCTk6X&!1_YP zYH6>v90*%uzgQ02+fu9g#ShicVfNwa()k8-$UhWJf1OT;?7>QnWC<3y?~wIgr9aLM zVLN|1eOj|HNdQ~aUFLLTaAB|c$E)|SpPO>Wojr!*a3S$#OWd(vzwy=E>h7NcsQP5( z-Th4fk|)IUd&LCsyx(u}BFu@jnd0&dr?Gy1OgH;ypa7)ij7*6r=0FLt(RDt(|NQ+f2p8WM=mR?bkb>V_0n`XtVtE*LuOR~37< z9Yjr97G6GG20k4&voz)3BK7F)qa}AMkm+;rkO|BdG&E}|Ni?S$^}1^3CoUwTxDazA zDTO|iIBL#F{z^vNs+d`~u9A?G)2{g|o^>L!v%1Ru;09#iD(}AMcP27e?=F(FAse<^ z)txTpwu4By(`Ta-T~MGfdCJ=@T~OOu82v+v1grBndRadykhtHm>{2}w!oR<@d@J?$ z`ghT_3n_fWgQQsX@b3ZG-?2saMO_rm8`yQk=gVjVkVnE+>N8AGij%Ce_56A?+8w=r z&y{Xu*B5gpzON6htFM&g%F~e9Xw$N3$7m=hVpgAPJOy1oclWu-P7*S9ly+IXz6JR_ zNTAK=!5mz9QR*P)BMh8-6n-(R4U9E{CWll}LH-GQfz%NkZvoB9C2AZHcle^P-3R-_ zke&J|(K_H-pPj3IGYwP}Jp8IJha&Z{pH~=`QOI>2<>=so7vMVo)1_@Ui{V!p%j65S z8IE)r#H79>!?WiS3!6%)5aUpPU`Z(rkES`eT`zHqNV9B7d;$Z4*e_J(8!s#r_r2 zE}fp-OdxAsU$HMT2O^Z}!h4G=fqVR&`j)zOFi>&ZGU7)C_c=S`%wN!9-tt2~!cA~K z#TipNZQ4Nif>_S zvmXdxr2Ciqa10N2zE!N+KamSFrITM8z2iVc>{0%$ChULgNsYn8frhe0y#g_Caag`9ROC9FeKFz322J*o))4j(EkWE31 zXDAs3ZLpfVNDJ%x;#apP6p@kH0uj~v%~&ru zCu`;4-HC#`cOUF%ZA4{G>%7cVOOR`Czs>6YNTfFG{N?tNVz8L$)xRvQ5q^4kO&i+L z4JOg^<-JI<9aykuE zQa4N*;8%mh=I~vcey5`dDcAb=$Y#V}QZ{Xt9SK3{kqn)8>%foPc&1)cCl># zjX)fqwD52U_V&i{ZL6z!LwLNc^x0&jp@{jmd_v39oN_Qo{WjusG#y+uFXydT909EX zS?(^s-$BIkxnJkMDuRJcR_;HQo55p=mihM2ebAdwQoQ657YyZ%r#z!)KJ zFm+=UadBL(M$K1R=~Fj!7a88rd)@}>Nd>Dut!sy`wRcPAAMJ(g1DZOqi)f%wRp&d5 zL&06R7JKT<(;526CUH7xs0>!BA)Y<@6_|Fz?fk zAG+S{z`wnHu;wQfUKq@v7VTxi-dl`=RabD_+23dMODPjBF8u62%bX4C{*mjw&R@LqI4>dSR9C|qm(T`<%Ltrps&TCzzH6sNXw za$+%3V<}#!@NPsp(H+ABJ3G;Llg9^EDU(p}v35Dezd++n1=nA zFXWP<&GSJ)X5J1_QZdTf@k=LvUpsQHn@^XxPey46&Pa#rQqe-s1kT4UDzek83{vdhLVZf8EwU9b_rf%{=oe%tyY*@$_!$ex*&3@! zbMShK(11rg93TF5t0I5DJ_Gn@>CWvDBv3gJ>8v@o8CoqzYA486gO$nUn2muI5IRt( zz2~Vj@r~x-yqfxj zCzgke^+CAIy>Isp;(Y$@$m{D4kYLk5^TRioo6|?1J`()Gc^a2Xemur`yS{-fiWxRE zV8{2Yl$7oOk(+YjXHzP`p35>EvHu9s5B9B1RQ&)tX`)AL3Jbx8EdEhlsvhQ}69+cZ zJ7Dq|S@z)~G6dV}fBk4hg#&X%M~r4*ec$%mV&CUfNSZ+E&J(A@WcjcCjVU;uN1146 z9FOx3XQh#svKSC^e&?L4+t{F0^)0~^w-373P*64r=iQKlPV#VGm&|(07TPTa*oCee zdcTRw3b*~Eui^v7?UaQ%( zm3f_VVO&2D-v7TzAs_zRdH=VLD+}h#Z~H<9e%#rtD0`fDxn;<-0_UIpT0J>QO``!M z?pZ4N-phhuUqO$*MjA3W>ub2^TMd#Hw+&nLxdq9bGA^i;>qcD;KLZX2lF*J;+19tm z$S7JhewOwq3E8AwexOj@gCwY&wti|G@^t;Krs#(4{q)6>vjgK%z>=xXyl<6|aWOBl z|9&r!PM^J^?D6;Y8N&Ddvwr@G`@(tUg1pk*GG>0f*VN3{8+wt zRUI<+Skc}1up3?85j!DrxDQQ&D;`;kX{dynr@qUahEBB_ZS{zvplB<@tb=MKlxXv% z_Ct0H3VQuUY0j4-lvTD=tvybM`Lj>7oa?89Li_i^yO*fI z^;K(8jw8bs^W;k(t-HX<=U3^B;Z``BP}=Jj*$Udm7L>HiPDuH_{@1QI*nZ#BsT_{; zb9U&%O!osEDA)7|J=n&9$U9%oY}|wMh6Y^>qipzKaB#n7!V)G-xiq$G#z~yNtReOD zTwf=osdwr|ZRv#8>Xwbyx_TixFy?x&BNe{?o|v=Z1kRVz-R#`El@41@UO0Qku^YHc zY+s+RX#@-ByNeu6>VdBP)px`DX2|Gr&h7~5h25ejU2PBc!I-A;+F2`lpf>x?M6-%E zP%e0Fl+w@&y5@ngzUw+5AU?2c*H#kjJgcAZuA2f;v1{hu?V$nvhE=h`F&gBvGMV2e z&|ud2Lz0zRG~ixvHju)36Z}^1*EwvfZn(x^aKJ5WBPH023&)oq_AO85L)HM# zPeNV*0(()3%!A{4wy+;{8x?&?z9E1_wSq(0(|NG@q6U3E_BS~0>KXiffB}b;wcRd? 
zv%oET&y5?A<9Wrx{#c;5Z~aZ2Ke&9iX_Uh!F0dOG(vSFXfq8O4oHh%u7tz_?^$^?p zr&H{&%uOEG7lhaUDTU7^l>hCx@Vr0epV$4<_E4L`KG|y>aLT`SRi;@F3>wS~fBysb z8@jGT>|C6$^Da$nra~SbH(|CjRZ@_F`7AG4*;*u*>-olVdMn~aPF2=F*^NSN{f%T^ zl2Fw1tNh(($f&pL)KnWuGBP%e{BdGoFOoSqZIOFUJM#3p;5f;u0SyIzG6|ZThl0mS zr{2o_041qDhAWTP!yGf;g_j=m!9&-TG1|BvOn%U68@&y;i(2A=**pHuQ{j6H-!JjT zRpl3pIB;N=_qEDPxsgQm*{t?NW8yUn-TE-pu+Vllh) z+w&nQiV~D(+72UYn{F78$sj59waJ`^^T_^sFu{NHULlVne%`599%h1H$EmDIEhKm- zt7FuJ^ZP>|u8vu`6~Fh^4c3~*P>hE?~y;R|m?9Mj0r7k92o7V|7?z`Vx^pj!w<68YqIFIIf@UNG-`ApC| z_Ud5uB@))>JT%>Wo1jbX&? zoEoyH-2H^}G#&5m;h1+r(2Kdl7vHu);s?ty1(hzarQV+9nM48gHPAMO_4qG74ZAN* z7_Wzais%0m$A#@{{jB6w2F^D>`|!A#FZKt}l|~)C+BwiD8#BB?Z+yPc*%tXA3ftFT z{d-sDVEg>Yy}))EIRVh`(3tjxe0XT%FUzsO>lmuq`?X%-@v7AIv8PxJ@HR0x#018;+NGuJm4LQyt#VXcz+Z=zwmkQ z&8N=&RgL|!0n^0tYT1x}Jms~~2@XV0;jP!&JAR$8eH#y{nMwXnaYbP{E<;I;`fUP; znH?uH|9|o)3$Ob>m8z-d40n;*L03O;)3N?;a9!vkK6<(jDB?~Z&F|oNwDan$a(Xq$ zh?;bf*%jD7Y`Yv#140ORZs^V|Jm$mg5hf`jSxf`W(^kr?8*jlvI6a zJsA~MPRwg*?nMfsk~>GWIuZTu-p5NOH=|ZZ{F@BPg`LI+^Yb^PWkQU_u3tOZ6oIMR1l~v6BS0 zx=XaC@9PFBn(@Ks5^ca*M%bT%3#KM0d1p+j<7s8!i6wV{HzIa6RJm^(&$5 zj)v60)O?VMJXV>h^ad4KO)mQNvk2*Hn4eL#t3;tM3le?|RG{FbzIk8ui;(Huf(>Oq z-yn(SYqnnT{|Hp2@*x9*dI;4UjwUDcz+2zLfxbBZy6Csa;`t}LK}F9stg*ZaSZ5xD zjcscHt!Fz7J~}r;psTMs^F%jzT)*OUeH&i)8&?=5ZNz|Q=Q2J79-u(3$2&JcQVRrS zfAv?guYh2^+ZM0*#h|t`t9IqeDlnN_R;9Y76<%(4RvMs@psd|Ry$#PM$}>@NmOdVEOp2OMYp(f7d_^VjpjFwHHG1i4 zs)X}+Zf-T-*NgM}gHC0v5NE>5uymyf-{=sPOX2rpe@68gNyW@;HcY+7rjfRDU`e9e zxfY!FfA3=E`88b}*g(JHw|%kzF06Xa5z`Pr0ncu2nlul@i*#44#CemU2EFZv&f|PM zRk^%f&$v)}NHw5H?e84;^ZEYV{-^hdaNSt=^*6q+y()_C5`cui;l}NFUA2z#tVSm< zK8U7STdF+&d%Z}CIR@9^b;HHiS?9bvs6e9lGB&Gr!v&3}$^85EFhnZra(eI)5^pVd zvF}Y98uHsvI}%fiT90?xWG`t&XSwE#^>zuH@A9C@?Q&}>y@LjdrC6%%{tC{1{$n(Soq)}3t5+)P>s}MBWvklTK^^%Dv?OpYWjkXEIkdLx@7ku zn=&n>p`s2H)Z(hnvadu%NbpId{1xbn=^GR|RYHkWNQV(YN3ud(KZ3_*Yg_NSaY_HNT;+a53ozkO2kX)Va*?R$3o^+yQ! z6?Nl#MFJ%5RJ0$ylmkymi;Xv#)xlYHH85J%4Z%E=6ra!la-pXb3p-Q5^hzS@;lx9j!sUS}{of34Sc9c)kd+u3s>^q4?dbZ663 zTl^h^+f~w2t}vlH-k++6zeiBx*I1I^$%UK76=P)T_;?hKYoQ+TrxbEPc-}v43-=4< zpXUqr3#G+<+vuCKnc!dLa``ls0oA?sEAC+b&3EOz_XoGNg4O+nSwlDTLA_g6)N@fP zataJeDXptTK6eXB^ES7l>NQIIu=(96?Y?SIm^ulS7<2RLKakM(tmP%aQ?R|fTg|}x zR2OnxI97Y#v;|d9TB_r*3;X@vO3yiaGZKwP>31;5Wnl1DX3ejhc2J#v_w-I52Gkqx ztUO8{=i|bA=`WAQ2=KbU?5nzTE~wI(_N^<&j|<=DPdQ?1`m^4H3gjKzs*l|-ho^~@ zOHo=SC_wzu=Lsu2k#G0)!}&!N6f6NlWkC#N@j%ApiW&e&Gy#JvvI6 zJD+qV65BH!Kc;dOT9A6L_q;c2k`ev#Pkkf5T8L*Ck2*!`pLl4a_g4IV4#HLeQz;ZR3`))xmIDL8cs`((! zc|NE+xlS5%?Yd4TR#w=RgW4l;Zpr0Js9wMxOUP{m%08_Fm7|@I zd#}Jgd_ynLD$m?lQQr;k4N~|&jC&yc_w2?glpe5&b55n;^{m_-TOt*3e{6hdEER#r z-O*)-PkhAR75KB?3dgOG1AiVDZo7WkVDm174^J<>j;(CP>%a0F^tKQ4z}Pxq(?vW! 
z)MljYjWy@N0gdPjkMMo0N)4xdz~3t{u5q)p#eULlFOT4_t&l;@XRJemBZZOYB5L0mtSac-4%GesW5FcvYg-lk-j=^?r>S z7MjeS+F1Y6k0@y(=kbR-PSPm1m&uA~}7@=1dgkC()$r&p@t$ zk6sx@Qc+jCbMHKyx12aFI#Fd>BT_M^4Mtpw!*Xu;^JDM{!dt9C&%fR!OWK zR5DyVV#i{^)BO2%U(ZHlxb50~I7mSnccRw$o3W8k`guC-HXr@F9np%|SeyGP0%Rk3 zp;Y-Z7g;7=4*2nufz+p$`DM!DyhH!7{9W;tNNtVloogQoAbYOfabr!qF2m=Iv956m zSWR1-tsWeT*EwIi^_Kh?JuP2Sopd+?p6amQUmGlekw>3f?C-ZjV@lKW6c-W{ot*ec z-Le{jPh_i9sjWWcPfOPnoZ4zdxZ$F}3(|&n>W1PnsK67`!bq&NNR;KvA zZ-mi1r(WOOi}O;=BwP9(;PoeoU)#L08{yg16<_DOwu0Nx^&9yjcpX$nq0}?HPI{m_ z{rkdLGPI=>OrNMl1?2f0e~ItbcaF5&pR42rg_U!bU70Ieqs#9tg5Yd z6V5})?+|Ua!QVHzqL&zMQb99OmPi->3Ab$(e^ zsN8NB{(j5GaEmMdTIbVLskbSQ2lmN=fD03OQ1y^fCa&uwpocv?7fk6fj>$SiLJ>vTu>q5NkzDqGl-n$v?-P1V zaX7CD>Dc+7-BeVDWSk2*-n);Y2J;T7TN|q3!kq@Gp3Pk#F(qK8V~S#=GUJ0$l0l2I{jV5YZ)BBxHA?*vFJv3SL>hH zN5}gAL(QeOR%Fat7*n@66Myp`+Ea-n}p8QIP#tv0;z89u)2Cq1LdW5&7s< z&Tj5bLDKukDkE>pV4!t@GFOHS)2u~GZYoj0;LBm}6!9{)~NwiERYP(m!hK9 zh=lN$PdG@lH{PS-r{F*1U&sNU+^&;>4Lr13X3{k72_~8v^)|ou83~CTFUd-NUWL>? z)K(ry%7@k-o5G^Ba)|Edj!b-f3h7vPw8(Pv(bGn+9HopBr2fNHRIw`$sTUm&v{Up4 z(~jV_meW-*q?hrsGPDh>0z><(Pn1GYeNN_N-M7d>e5T^;BPFP{^+~n8dKr@FP}rl$ z$VDR5opa;f;PyWfqk15`78+8`uPW!2gZ&AkxY_65fygX9pOy z5WI?!rBm4kVcSioEj>+wGvDY1wmSHG70&i1qKrOBQTTF}jq@fXoXt*g0E)sn5LEr-7c*Yq*i5r1E$!TiMcU1B(2V%3=DR{Z^tvSZ?# zCVk_9MG7zBIrc+DYn4i@En`BO(=x^)23}`MjlSNQjN_|K)t_d)!hR0-MSI0@e0XKg z?X0N_v48h!)B$&o5)esz9dDqUfIFC9a`Q-MZ zEKQMTHxUVSou3##$m~I`%2CCSXLq9L-M3%m@S2cm%+A*(AIp(kisk-6@4@W2Zb)$i?y;CC|Q;@`myD=Th>1f5tr;MT7bVRp# zCn?!QMIrgeef@{9Uz1MnY^OIPl_&a+kIrQw9seJjryZ__WIxK}hZ%jaZ|C>BX;)~V zXld4=;ETWSBU!%l{O4zgyNvWdNPF{WuGjDXTc(7_kR%b243Qy`beWQpv7$m!5}}eZ zBxP)n3=vV0%rejO#@jr58{X!5_C_K^!+m}4wfxS#K4*Q_ec!*|`R7{i^FF7Qm)E|o zYw!JhKDO1IN)PHk!|aZ^KesakB#UmmA7^Ppt1ABIsd9;kE9pku zv(Y9b-Ny4SLoFXgc59EgF_nScr?E+6oY%?)DcgN{kN=#bsyyB)Sx8{#{d`m*R(t5J}=a{20Nyk84b)UN8|MoYnIEKyoFJm_?dqrvoA8q?T&)moPL! 
zZ*lee(@&r{+y22hJsU{+gz1KETxWT%tJONyjqG;ou2i)QB4NvSLrwVuDBntfW&8dv zls^-{GRi|h!eUXh3M(%me8)1|m2Dq@tfemLuUZJ+A1Tcq3^jwX1zXW;?^Z}me;w&G z(E!POO;4pBwZZ1WYZty4_kyYOmu%v}Vf_9+7=1lI4CXb~*DJj-zmR|CQ5$&}S_-?2 zX`c=P$#c?k`C1>C8rZsT!}^Q$zuwnayxPF-?0|x>lW#*e0@lNPz^aANjTB_`AT+ zlOgP0UKbn>c%9(s*A0>q2HvGa%yG(nK`){=1}jriMW-I&`hXd`gLKS4^@*)G-+G_3 zKSoaMH^SVWlJtWqpZ}>BjPW$wp0Qs9|C*eMOJewZe^y7lgXSPrx@=H_Gz)eTr{el> z{0y8BW6r=jSnq;-S*aJ;?|UQoe2DlIj0BCBI+Dl1Vz1+jsU7y0w`z;g(T+oiuKy90 z6GLFiSMPO;*oOW4j^B=UX9N2w-$I4B0z}kqS&^k|L*cDqLaZHF$73U2Fe^EL)>m0N zzv6tKFaDP;v%xS5=Q#Udg<%M3Y~D7YI^Ksi84MoR3wI*_gM+7i{p-<)&#|RDwvosz zPrHYtQVlxeiOJNpU64BFSD2)T`CREfp{gLzHeq>WR|uCqYA=j7mq`Hx%AjciD8}^f%);(W3lnMC<9W7^zU_n&{YrQSkg1d$Ti zZCiHW`lZgf?>e8WQ0g;k>Q?7Gc=bNBH|KXZh}(n>JR{G*^Sc`fJUZCdQ!&+|>6(Vf zljl}Bi~5kyO8m&Sb5n@wHBWs{(SMJB$-akAgZ#*7kG!kN#`Q@=;L z%7gNp9ORw5E8C_d7sv~blQox#K)9F7m^M%i^uJk0?tV&yn8SbS;p}^enb8Z&a=?1w zBg_$JlQY3P`sk`>a~)E(*D~~J?MA9AXa0_c4It)8Iul+yTzApD%fHb=L?h4k9cgLE zMH*K0_bbzU(dyO6-YLb;C`PU9=+y5R%v(DEUK`aAJenhTm{bcsm)uX^t*^p$3$fMS z{ALL4wcI8k(gWG0*X=AC2H|39<2T0fA#k9q*XpGm24)4)W45ck5bDRg#>n3dbUFu_ zE-ChaH0@@^G-VI){bHK)wC#eCQh^tpqK%OH<Tu-dtBL|md1xMLdCc+=WI)tJg49s6X~J(UMCVml#A z`-{ENU?03pTRD1Y19LleYM%-T7zeq=^xvbn9#QV9qQz(Y&$+*K&i%Kw-N5?l{%`;6 z112^Ga)G$s&wE%*Ur7@9k13XALj%a-hlpb@*2nBIVEA@dAL{_O4L?wZLBzYi zay>Ju7m*~N`rX#(Kuyy198dXbkxR$(c|xAk316W1gh77t1Pf%PLxf=zEvw_%<6Xr#n!DE7O`bo+bT|D!X1cDnTN z^z1ww2{nF{Rfl<6nI+05No~Mx!GBfNBoJ{uay0sM4abF|vM>GUAX>;IKj7dwX8%~wBWp7-#6Yb^cU`H=)IzGRw5x`iLaOFd6olVgvo8a(S0Um$n65v8}jEt6imhG41&{0Be=ywfaRaO zf9u>&e8q9$z4jt9bd0ks8JBN zGGBcUmLTD@>KEy5?MT&TQJ3{+56X}3yp$N-i&ihD$@EQkB4*Cho_jcI5rNeG;W$MS zuxolO{FTG^+@5WF!*q&(T|vQ>oSThe9{6fY;Jm?%){yO?P9^qrFmuVZBq54kkMPm( za`>27u_^Wm`;yl@VozIRAIQt4d7r^dTt{GWai~v&{#D!L8`mp<{Po&vfty*toXKIs zgZ+iuXZrj~Dyibff-+hWfw$&4{9PVX4nQ9T}_FD%TyzLL<#m zaLFqh(Z~OO*GmGb&2=_F(?h zoq8gBKRijha>3bQ0ItS27jogg=emsE>6|+>tdC;n;*NueD`f0ZJ!SC zr_45T+}?)!A7sLX`vmab6{bKhmw|bLGph;KS_Ae=>LQ2~SgjmNn1?5h zS(Hz(Z`v>>o+%BV`>8?U4YAnocy>=@#3s`mXdE7~5&19;w)f8{37x>ai`vgml?o^S zSKaa9*+<}kj+$Qf{X4DveU_G{}|Mcniz~I(YH)gV&(T-?@}W;U$~#M^YREdWj*3k zjGKU3J;Og*sx7ek;Ueq6auuSA($MJ88%Bv|BC;%Nr;%Ic?;FB5{~iCHr=sViM-~u` z#QS_L)mao?{xX8W9P5r>q^}&%=|H4*I@8G#XCUYA+RM0F44bc7x9?o5Kn}$3BdDem zaVb-gCfWNDyUF6uSeJfOV#L%{^n`?B$k#fGy6Tbf>$PSs_DCRJ$*b1i+YF?HA0kBM z4A9tk`{B$+0scMer|aCn=l)*pV51OxALsYY<9Hr}`~~TfIT;AxWB8`}g(j~5dSYQG2#XDv`HC??;E!*#jsPY=!t6G2#&GHn3+om|d~KKUAk`B6sR*Oea) z09DBeh1^?%ATI8~m(0-vw9b8kSp#LS2^p&OqbaI%O zI^MT~++19)e|Iy;wcB>dyu{C!`;6*jY&&rC6qHBDkU;8Bl)kgcAn+F5$VzU+I`*&{ zQz@K>v%h^F=xjX?H`l6YsBbMo856DSF{~rr$^*96!}8k?@VDZ9!r14q8}=#Ke2@LG zs6mF+igoKZmY6#f-e4Dl`GGWR0*@GQ-$THr-%`pG^RR-GexFvD1&Wc2_0ux5V66LV zNT_fcsD!e&**1P3s=1Jg_5jm^9Sp|wovmxduZlc=A zPQR17mB@!XG4l!Ldz9Rg{qDilgAC8<3(PwYAPo=B&|;S%Bxf6*ed82fPgT7v()-40T8pqz9@_AX3o42B*XX>YyVCX9*`0${n9;m*04XZewX~kiW87VqVj*bIG6B6r`Hvb+GSn8RAu4|B`euA5jSW`hGhw z66E@G)imBVL9Unc5wFkK|9x|pa)m%T0qvtVa@;A5^{Uh41$)$qSEtda-_5!P8l^5A$ixWs9kIK}++uoW;0KxO*^8BV=(8j{QOy`{C}VOTXDaY2W)-zw|(q9hx>QL z#KxaxKQWgy(r>is2kz%NpLqH32(I_7opTUAig{nhZb^4y?#NQg!*2PYMIi7-UslBa zcbZw$nsR>@$giJA-AI~&aNj|yUyS4Mu(7^1=*k!*iu_sAIyeO3dwAG7p0~oPtcQo2 zS29@e&|UCaE=4v*KW<&jY(rYVSTqC4Bt+{NA4|T9^mK+eb=CDIMUTZ_BG@It=Sp z)qcA|C!n|9g5RZS9IB%&HVP8P!F<#Cc4*oJSUDeK{U|vN{!h1`bXxvr9>>;mN@9EG zei-xlYduQI8apxXI^^tugnLa`XI32q$NP9@~y+{&Id|5gbCY^HMrHti}+x z`ls(9dnb_Onbi)TYTWNL|2{JKdK58dS$8-m^ds~8ajn#yt%z(|S9tv}NREb$YF6ncPj3YCcqjDv2&*~^LzV#*fWK=tf`ApsLyUGnlyzaN=E5?A! 
zaHhuHJx!?gEdSwp%puTFGV9uob%_+7cG+6GgNWUui*WyHHxhTCr(i0==YMqGuWKm@ z_&rwT?f1p^f7;v19AgDQQK>$7Me{2neWXq=6f8#Uf63h;Oqs|#~bekdYgLbd3r0daHCt+NW=PX|KOTW0@a95htn=?TN~mt zwX^hWJmM~pz$)BHb}n!==Ty2H`$ee_W|YlHlaG8sa+2<%V`9zKhLVZ zlG}g=uG0I|w!_EA-$ahFcffk?;eelkZNQvxfTP$Ce@`j0`;>7z^e10-7dqJmQG8;z z))xjKkh$_47ikP~7slFiC_Ux4e{f6u)>@hx{DB28ol)aScuRo($Jp0ClFgx- z#)f0_4**-XGNT}{9>>nYe^zrtCBd2H7NBV162~0A_ni5V z-Rp&UnwxTC0cgDuqzM+SUv<)u*1?|Vx*Q$IKZf#Mk--q6i&L%C(;q|mwh!e!8nExr zN;Lf2F|7ah;gkx9$DEHy(jND}IIljbSKrNBk9g%LmCH)vpzJ^%)g}8bSSnd5O?^8K zE~T7ad`sgH-uc(;q+1!#oo4!;qlfFH5p*|pHH@Mewsbq+`?JVL-fojZ?cedAyKCDi zk@$J!=6Mk0uTLZ1Dc(J79m5FMukMfCZ$POu--@w^7Td69KWr;xX z@TJCws62?_zfdch_!Y%$o@h89S%9Pkw2l zTS5Wum+C1hQl*2mp%*qPrUCCUb&Gh*TnNsT+Lq#9557iD{B^0_nAi5_WbRx)^csvC zr-}{0caf9;K^+pDc2i?Ih;`tEkyKB42OOVM+xgco*Mml6ETJs19CIGK<}b=rfOMv} zH=ScCj0|W0J?fkTTKmiz(s2B{aP=K&r^EM*b)lg1R4tH5y==+u+zxNFpQ@J_c0z{v zhq2pNJAtpi*qPUr2s^E=sD05OLap?jh})PK$vpSB(zz4o2N#$Q77z7-N6)L;d(`+m zBlrs@VgI8z&3=nx{}0~S);Yfw?+Ine`#^HsU-x;gAjA9_>k}ORVtx95W0Gu33_|#X3P%zK=_!p#qFUSx-v6ev2%Qo&G** zUxf(6c`QP;5ymq1w^{3>y z$f|ZB!vJ<6CY5Ha|J7U~{mn!)9qHRof$nNj$mv33Z`JE=EGIRuCLzn_Z2+)m|9 zOQX^jT#p=%N%AMmfe#hkbJNs$hf4NA=cc9gHP9@&QL+Gktoyc1= z%=uyR_MGn-N764U9?fWsBXZoAXu`HnkrBLpyz|r-0qfPY*m_O+y*hxZ=9T6jmNp1KcdT_NsTJQ78Z?4wFu$=wok}OA6+Roa zTtBx-0>?AA?%Rsv`7UtRV-WKN==q&GqwD_7FW!p(t(Eaf;`?_l3%DMh>L56bISDgf z4E*Wyz!B1J9)#=uOA66u?+dW6>B-P>wpGkI<{EI@jW9n=yZz}Z=E1jV1_d3^ode!t zR^qRnbD$-x?PK~K`<~Cd)je1=4l=1>B9?UHz-35ZFSpVSiJR$~9S{^7 zNPM5mKmLxXF-a$0F;P@lQSE0e6Yf>tyV2NYgF(9x`xpYyb2^_VDnDJY8M{ z8Cs^nhAzz4)T~XMf6|D551}RDtrAI~)QspM(|wuZ>s-lX(`(dMUUq@G7qR z6HbO^$(I(A(*qYQl{MVbr>0k?-Tr`(~G2udrmCH;J9L&yWx2=0led-M5n*i zgW|QU0tS-+5S|XIxFS-DA{)kuv$F)GSfuYV@Vo#S@95%OnTiKqEyDaY{Jyv7m%Lxu z-vbI$=H)iHpOV;OyE5{y1+=JBB$ZGH3>>-VpN!98VRdc4ch9S!X~QH*eJ%lUjqSWb zJB4*Xui=u>-byrbWii!)Clm4R++)3vGTRe|-?h zEUVhw5rbe_)70T~i3E{mdfWJNiD02L<|%!#4Q!Mdqpp3d0%@L(MSYh7u*f}>S$(e* zY{U%QJ|7~$s$6TQ>PR{Ei!yx3`%nX1xgkOK%Nt-xYKdm;MGGXedc3AM+5t8#3c+*v zn19XI`nT~9*8SLJ*c&Xgf!It;;cX#Y7f`%D@^`cahKeXs?7v~%gk^T|cu*hwJa_Zr zzJ@WlXi~vSlEi+Z&+$AA=l}b?|3B{oTl>L-HKA%F_XTiStr3#X!#bH>OP;fyxXwRb zzk#riv3=>5QHC1k8E}5eyu*vDmq;@$I~9g9g3G@G1Ml$IBfkQ8a0{eQP};6?Yk& ziAqO_dmgK+UoQuRBaO;FR$Y*p@lvPNeHgS^4b0tdVD6iRlhkd@`Oqjma+Es`&wW~R zmMCwW26czFZ=8iQP&H{^QSfCJQtH|MXyds@cA}ML`iIF7T*ysRw2JE*GmW;h6eBPa zO1MA!tp>cAea|*<=A)*@))&GNo#-4**#$M3Vf^=7(+T@JhN!x84*Jnfppi&|V3Y;s z{c!f)pP#~gf2Y)$xqf{9^Nb!mC)$MQ9k*q2WjzL)Gwe1;Slgl8p{|5IeGJ&m@l+m+U z?@P|qx;BKG&MZiryN7*O7Y}cYo*hOGKs^-6i2E)!>}qO%YVp0M+BL^67eWh{Uq3Wz zg(h|}mgonGkV^mXY2c$=*L`pAlRQ`zcl-3Uy%@6ULp}N~V*l`k?V?o5I3Li+I2P8A^}jQ{ zlLFK=*#FVF9MOX7p`q5ruMKjL%UuepX;Q$lUEX&(C@gOAc2)+Zu=_YM-%yA~8P zyuI&EP7(564l3O_m;e@&kN4iBNC4w2?|L3D=7PRp^KOgsMtE5o)Wxrl^LxQ#o2i(K zxU^_T-0&U(dlAd04M82Cs%-arrnnjOdA|JM#`pIadF4O?OD)J5yh_c%=R221wR+QL z1?HFp6+U}Z1!m7Xd--($S^-Hk@xz{9Q^7dzGOTw3ynVHpZ9pCV28N=X|)H~ zFK2yZ_Ad_+q(yIqkW=&Vd;>X)ZxN|Tb**Wygl!$N(Ospp!u9B;?~Xh!={=}+pgpvj z7vJ}11yp@;om~I+vy09mxF7ibd-G`u%*S>7Yk$PO1I4@v`hLQ@4(WH4(tfT?KugTr zri73x=#samR{M$TGIvNWKCFBA&+nx=%V|G|%t7_*qdjG~ z|D(ks;x>J)62h4VlPO;oA`0(`u=(d$CqtE57&nLGOV?HUs@fP@KC|PJobx!c_|vu8 znl^?a``b3C{|+Ov)l|2-H`cG^xvD;$EM-}jnoD61NDnP-)h(1iN z3t1fTCY>@GLObhp*!5LMP~;?Orx5k$%-kvBiyTIk^xbO?NO` zD>VYUvE<%hwOC*$eHN#f%t928+eCP|3z5Im*oP}$KBEzBqhE8nxc`-Tfqdl`*0*JT zw4@j91UFBn*q`22pmFWu@7E78_lz|D*E*#P+;qrY#q)R$1WR^ef>H@6_HMH2vlb%h zV;qO?o@_vFv#l#(iw!70)0;IKY7O9h#^}J{tQGd-L_e+TebEU9#e#y2M#FIQI28pu z<`ISOMG4Z0gFxiC?_40ziRaar2+N4zb2;M9IhA*R!xK-?|5F>HGfM^XVlf3%@R0goz0f!;SY?FLZad z$)BwayzPb-*J8d(U^qBNX+*lyA*vt-snQao49%Z!fX||d=oox`g#tM 
z=G46%_@)yH9DGVWvpR^pZ;ic>5*kNyZ(Q4nT@|Y_bnAg~z$K>)1Nnfm1_;9iU%APw! zN@D%==IQ9W-v%Y=1#G2ySG((1uV`<(ErtGLb;ef&5*_617s zEvf8D%0S{{%Z9dERY+jP&uZyJC9?Z+Bj>ig+Xo4(KB`*a;3zi7*A z>)ntgfANhxp4Zs!A}hwL)(_$*BPnOF{+>qZw#`{JT(|qWt4r-i3kXC-$S_t`gZ?Tj zdwEg`a1|Hp(6Lp3-5<`#qFpsG8PsJRZ&(eJmQkN?&^CfYdh~;OGVbU9I93-U*b1rU zEf-SHw*d3$^XYF@n&B>gg5@Ia2XKddw#fU6>mNI{!fL)ZgH6^+k@VMfSXUnyl4aQf zCKTT#Z&&EReeV%w;!@Gcsh5Wb<_kaKM|2|jl|Mb7}SAOmqTetw0T%x=CF(>)# z>tMks9A7dy)gHQ`J;2guexH}v2REBjzSMsl1eZV3rQZTtK{)u^0jHyp;BwJwmX42r z`qLEC-?Cu6*$YY6qTFsovwL?L?;{-Va<$54g#(DR^k`)*sUI1ZQxiMgdXPY@tmYl+ z4m5fDD58kKz7j(@e#gRCV1i zn|N;fducLP>omB9_ApZ8d?s8{mVc-3EaWO%vAK=mIb839Us_d=0jl|*aNznVqwTZb zS#PI7jOxwon%W=;oNGP%sk#!_pHG)Bf6PaQ*Cgc4J@9?R=Eu)P{Xt|ySf}m6oZ`q1 z3z5|AxDPn4HBmp0`K8CT0%Au8P>E;P7wK;7_hEX;aeJl!k#D|GG2KVNI!;aF${hn> zJhg5Z={F7fr*hOkKf%7A6Bg}{46vVdYA<_xd^>vcK;#hV+87eEFxfe*I*Yhyhxm>e zET9;>(%!NU_})(`pMLw!0y;D^Gk?8p7THH=HcjwOAUkphr*3#JGX5c#C>2wHgcp;) zx^NJ{MTU+tVzUU<&KJsl)9pef>ggBgS%(nO>m$vlWvsh4PYJD~8$#h;#QMw~Sg%;Y zy>Uvd5^e4kxuAgc|2thrW!CUq0*l2Es=HD}Krb}v`E$hukqGXWovwzW5h>u-e31;o z5(ho6S2co}Yi$zCn|_c&MMU?zL@p*ADOZ(u%|&+re@>-CxUpFa z&tx44dW@H@?CpjWl2t4fe?JTd$}9HT^ZSQ&;d$D6?C!v^Cag%&uM7M~}pia9h7g?m=3@%jJM z)rgO^6^zw)uRZd^`+=`^K?BT>WxHgpjRI9~mC#FX?87@_IeTFDKllItJm?MB-`3}Q zFG)v|mKH&8GbHw5I<5yan(WcT{eN;!h^~Mp_5m6T-a0aeeF07B0*Nu$2ax+g>N6+i zFh|Q?wo1o+N4{pxFIhbhca=->nHsKBJURG#La+kp9iLa6WcPwp-#z;0APPphW9-8*C^0oaCE-pbJWeNfIhT-NFMlDmAFhjzshd7DIgfpb zZYyTi=Kstw+S>n4PArSq{J?QPHAOXu=UP7ed_jjt67ypY9DZeWWfm?rtvZ^O&q6J^ z>9yBh%$*rDbcx-Cbsd?{j56s#|Nrb$9@m6*OKf(EGtnbxk9SGhK5aag zcNy@jeSqYFzjsZc&{&v z{RdYcC-E8&U_V^Vdiu0G;yUHU|51>H>Q9~0&9om!Blfp0W%bRXM6!^z3qJ34YO_Dm zc`l-U{;vV~lnYp&f7oA&a~9PmcNcTBkK^b3hPt?mgjRidYkLF}P}7gVOy$D_2>;+g z8_ipayvfeBhiSS{7NvQ7A^jjyTGk6)+>3pJgAu!E@$+wrD&WvP)PZU>T+EcQSobv!W?=d;0y)mPJ`8-Zr!C{IFE8<+-&P4G^Wu&>B>V5f0C zzW;trwU+z{F-hYx#u2f|Uy8Bd(TY3b74E)SR{b96J9{p$cGV#N&38cpN4n6GXVN_- zz80tD5y$EE8??(CxPNc5XRz@S_Ce(d z-CEy==QV_sf1va$0}XDnM9C1Y|H;ZuE}9X*?y4ZoX|F0sWH?DPz=d@I%b~gyMl}$5 zPe65Vaw~8LnRG#hY##DX!$hI1C*C-l!bWbn7b z0+rJX)zuzIYiT+AczP6EpPX4a>pTT{lS13BVtsC{ja#Eo^}qAkt@r-Ftv+2lt`8?H zf*O-VR|_m)PG{`eGy(SqS$|kbljdOLkNW|0c|70LPv)0P2G#|eDp>d8`oH+*fVbVT z8DKSE`q6;r>M}7lUJIkb`RndoET^hS;BY8{VmlGnan8)evGgXQ&3rFWKgxQfx|q1r~4c5T-++Eo4fXtA>?!Q1%*#@ zz^TH(>iTOC*D-=-)#GsfKNa`lFm*m+mUd~FdynUp=xz?n_+c)Iom0}2TI^e3e>$Wf zH-gMoX0%7_hmeDu-`oT~k5s?Fkey=_VxLT_SnGhZ@A3VDlj7I$ z7oVo!fUrAbN!}2Q(Cd6!zW5LkszsEn5=e;jjz>uk>xqPyP6oN{nM2~Am?!vR7f^}( zWbf`ri-@#(QDnXa_x*C8i~XsbL3Squzct2>q3{)b3m#Gz(r9vHvhIvPO%EA~J(ZPU zht^`2_vWIK#IF|b?c0$JvfX=fy%#YDIqZnVIwfPx3=PZjZj|pEu6Y+)kexeIFYlKq zWbwT3G*eY8sIr?7drdlU{`>V=uto`ZH!KXinX-n}UBApfQhOmgF~bW@o*9sM!f-OU zwh5{}lsf;)>4W{!4maHdNWlK>+$W!dn0F!&svADp2=$e-dB5VYp75GQc8F~+7(V;V z`e>>eC>T@p1a<}@|6N?_KX?j}3qkC!BlZ(X^ZF19USS_b<=(oP8l0C9ZrHt1z&^l= zj4KtldXP^~V0=L-=EKE5xY@^Ci^#Vlg}+ZGB7ZJP>5h~{L@#|**a!uHz|KC&P^&87 zdYjx|P3i#GKx-8bRuZ&4@tswz=>)~w$%_<*jj;K9$J~{})gbWLgnvn@7V>$&oM*XS z22K6XW4F)bf!(woo-AJusg(;CdbU?UmWQp>i>@*duKVQ5hWAUw8up2M;aDfcyEZ~; z*90Y24qx3SS|LkHj8Vs(2y#!w#D)HcTXE<8=9$)Z-2YHS>?w^9d99@8DCVD-6-z0a zA8Lm0r9<@L@4G=oa{EU4lTnazkLF#&b0NjVbsgHz|J(onpXdD6dHX?CSXGdy=`k89|s(`2k)zQ&?QO@Wq9C&`8Lkaw0;i~QTWD$AM(b!nMGgcw9O&3`bU7r znr;~JM%MEPVBI3&%SkcX&~{{}{uP`ea6c^XOK$+qcTLr7ZPiqUK}RJfW;f>3XFsm0 zFSNn)Y0!%c$JV-mJT6>zP8s(%K1U?7&-S2AelOX=hZCrmrM3R1{TzCEiht6oVgZ@9 z49H!%v4{w?$BLf>&7-?-Z!GoQolA*z+2FN?WK!Cf0!xK3f5$4B~QhN&V@-P6 z1#ojI`-34R_WjSkT-$yV>v%8JrFR+T0hvabf8cu%Tb#~7E~AZSFh&m?>w zvv+9XUh2a2(!e!IOCpkHy@6-MVc#N=JZn;i{fWkoi-h(ZL}ocu!m;R&=T7VfdQ<@p 
zwFP$BcuqwBc(VXwWEUu^6_4sCW4-sid&Z_0Fz+Bkg-`5C4V0Ww{rr;;$!JROfI86dbedK7^-2$NDm-qL666Sq;JRH&Lg*iYiX6!O5EpR%=xQ5~q z5zd{pxO~2)9jMkEX?1Zwr2Mo+3wK;Ml&O#|hG9O59k&j5t6~dioX)iV5>XFy_q;Cs z?Zmt`jtr$nm45tO{s_GDz;hbRemyDMg?Yw*+t)0o|B3&t`2KJ4zV-coTmSR_Gd`1- zMwNhdLR_QW@or?0mOkux{r3#W>H8gjdw2%Vj~Knbi1q#!F@blbdRsw5!YRhw62LAe zZQ-zOUl0@=l-+aZRN>I8clEze36~C$avI z_h!XuGjlxe^ZY@lN1t&0sj=hMvKn!XK9t|~r!S!$M z;TNYDT0nm|j2Mva2Lg`4fhSjL5U)|oJR8;-2)j^H{Zwd24g`JbvcyK@(rp^lxSoM5 z77ps4S|h;NoMD`?K_56d*~C)j;rbs9=_^%S5$InWdy@My6bM<(61Egs;xQe#%{*G;n#^Y~YCI(<3v!6Yf_wkqUb5VOH?ya>;Y z$_Wi#XLI{?7`gcLO*R|V{<%MS5-t6(zXR>ewJ71+FrDBmgB1WRh(x1?g5 z!BFmLkEAW;ujagVT<69-5@uo*aRmDbew^scWFtWoi;51V6V?TAVO-J(&Ntrf|D&Z@ z53HV7jRL8OkdbtWA^$0UJ_*Nj9v&Wt{C?NCu{+rRBSgC83;$jJv-O_;=eqU2|L4Eo z`n>f$@y5b`*y|K;VjnP#)Va%Ti8F9}y@T+cw&*XCE5>y3NLZHsm-TQRN_}scxy!s0*Utp_eQxw3?{8lNh#BGrSkkg2D+GQMxWHkP@iUX5a?AF(=p4no2iPw28; z>)?WNlI_Lw{cw@F($mv#9I{%Ud(?)^fnL8bIgfG<-uw#a?pK?Idw)hgeejuuThXWA z*kIk?YMj%h+dKZLn{55O37loF@*HF+?)WgzwSN$nPS1IIOIBcg*S;OeQY9!@u~w9E zssj=0niYv-1Bfp~|GduQ5j1|b-oy0U26zZ!9W7_oEM+~q!keYC@xzqE^+k^j|` z9QT5Q5Yh44r1g##NbvNkDfSzN&{&PryAF;5*)!%xpd<-+&C*}kUCTzol3!nDTlXS` z#ivh;A50+g5fj(+qjN}dsUp4MEWZC=4k|P*UqBYoRN7HQGS*K&-Sg+|H1b^2yCZEh zio98+uY{>~AaYyLSO3rEKz~A1z>m}j;oDX~P$&h=BKJ!<=-__6TkB34vqoe_)Yz47 zjq{XOY_=-N1jH`h^{05!3@PqV_p6d@grEkLNsIj_RJ&AZOpdifev?=2;l0J6`2ET& zrAa?XwWfb}SUv!xb4V8rP8ET8UXg@A6ZW?%nU17>?*+#baQQ3Fvv*p18EFS&PG9*_ zI3Lyp(*0Z(FZtdL%c;MgJ)Ir~zNff_9rrDh|JIzA&b=q4NW_e!Nx;_ufeMscAg5#%pGZmdVx3-zgHdD7sF+s z(o+vHnPuzGe^vo=apoh3zjdHGvn%LO71s5Ok1&7jxfm73ddU zeJ#C!`+zqcHRbu$X?7M~}>P(LM!@!^|V zBpN&!`i?IIh^Jm|;*9VXxt!&TB^&aH%#YX+J zLI;xmFmo_1vliKHFZ7ihk42L|sPb3uRDs>QFODoeJy6HKo>{++=Tiq>y3-yy3!Ayb z>rYrAGE&`rVO_cu%}(?l7)^u9B;*$@G1Y&|$CP>oo?= z>E@rxZF|5!;0M{-G!AXn&W)Wu+>LN4C9p>a`wA#a*3UCy{SQ*M*)K4U_=;tW#Y5+D zKWy|(*~dA==jwmAM0pC)?eOsa+c}K5UR}^(f6|KdWyScv2<1Yww_ZDqW-nZ%BCe_6 z`NiR;cGUhTA9&9wiXUc)Lf*SH-wzTqkT+Yl+XIF`#9pp_srzdQ3@zFU>o|5mAC;)u zbvy^8{>}Akb5A-!E(&SLZQ#E5AKQC`^RXaou%dizPYjrCcfCh-w+x8-D=vEK?eLr6 z^EPhBFkGgRej#+JAM0$=k}gMK{_njm1>vbi@HeGy}s9~Ov)d)+R?(XK=nBVhwOeWQ_9ab4q1CCAQg0QvQBO|pO)K9y*U^ZWl z@~6Bh+uHL`_}*{f{3p#(YScAJ3#nYxf52*HhOr#;&0dyEh-4uPrm186q%5eNe$c%A zQ!7+G7Z(vLYlqN@-wX`R9bm^dsd*=^0W6#Zm3OFDfp(lSXC0f6cvu55UB^sbUTXm*ww9fh=kUJ5^!dR(?M{f% zW6NVu>4FfBv#4P=30#rYVZFf~{P(cm-s#j0O)&=Ayx4ER>&a`;mfZ|3->Y_?XKRNe zNnvIR_?!!Kc%`;Ca17W&UY_f_JOlRv9t6$A|5Nwdis!9$E1v&z{cqo&r=pNlv%_&O zADpjKF%OG%%#yAyn1{i`#dOq&4ADwmrzJJOQp;G|c_VLf-;KV>P~9uh|c zpYOSf4=HfJjLURs{{CC=Kd$_f&Z-1WDt)Mk?QBNu3$w+$y0L#d^!QlZBb?_uo(=mL zI*5$7Ra_8v8$#j+92;j2WB*CTEjjb%UKFVnQ=YT06D4-IM@pL1qZlT+|Es+(kEZf_ z`&LrXphP64qB1rSQo5By358NJeo3W~G82UiDH)11m_-?iLZ*uhiOlnS4(A-laSoyo zsrULlYn|1z`mXQ$d!F}wpY{IJUwhs2eeZkkYwvwspX>AaQOI~fbX|4oIdQTKiv3OI zrxta<6I%=AU?mFVO$miiehvZNZjFiI5R@3wRv93lp{(fb)&5C;pC_^Ff!+UGhZH`5^MQa6xK`g1k@1rQ{%h+!c6o@hM~ts}M{^H6{6S)FHsd z<hvYO$8}!v&yHry}(~_-|@C%0vHzg-|eF{5DkJw7loV21o=qG#P*T_f~$J!aT?zc zVdgjHdIfMjAX}H2!NnldN^Lndj8loA@tY-=UiA`&J!xCif@=sB+xrIR$yuOSL)R`g z>V`p^nnmVUND$KWteRs-J-BMvwr*h*L5YRSgLz5FODVRfXmJwqfNMD?$T`;nqqT9A zwX73POK6>eG%{4X2^5#&c*XPc+oR$+$j__TQdHyp7BmelY|mz7L*`u4#7toYn9WZp zSy9*qmStbs?3F37ldt8B+aU^we%DQE4eo(Gop~w^<{j{q=d{@5xh_!o5@E3xd3iqu z)%(vyo=XnhkvP`+W}sPH3_U(m3w#0`N9-Kxz|35f->(#PnD z)=Rd*8*yQgs~S3Y_ueFU20I_Ix?_P!dgyn2Zw}EV;J?B}u$16$T~_(1HlK*xxzfJy zW)#fy+)Wp0LVu)r$<3ndcF;NfYG=AY8;DCSYR^1S138z+b|~H}hqSWHz3cf9_v~Oq zpOveGiaQmvu0ds>H1p18Iu3ah?|=KNyA}N_E}KRfTQF|F^OmapcjR#xSrBQ5c%L9o z3(jfj76^QGphY0R4Js$Ekbe}kgZAl%%4xyfU`;u?dO7kAm`e?09JfSXA=4#$d`p@k z@XUCxhe#VZ?izDR?nWIuRns`)IRzxVJZ3dt;rJ`@`t8EDzsI@Q?U~*0e{bjPZT4}7 
zwh?zS<`23~3r0vpv7qx>Q(fbl5s++d=k0AD0pT0nSH_H4P~ExVP1DXHP%_og>Dhw) zD}5;8cF+JAMQGXYHyr?&pYrq7=^l&|RP&UUBM#~;%A+_vCOCFDOVHn9{QigIc#nQF z;kh&G{+)sjBA)xyhZLJ0LT)-!!vAeA5jH?Sek`kpD1p;M2C<#QMUZcgRmFJWjRm2u zR@H>htAvt*z_)~Cmwr-DTp_#;OW6HEpamQ)|I**Pj0}oiP2B4Iba?IiackLyK{)v_ zL1HwG0oLN*i@rgKYM#eib?h*ok0lWX({|U*$yhAoGj9bEU5l^z|QM!84+)G z)~u(cjfgxQ?R#B>Oaz@exp=U>pWs;YHoMf5La5jYP0g3>$GF>%H;G>nA4s{mbqsN7 z){;g7($5MBi{ppB`MxTKONwt&gA2Prt~mYE7MlU+D|p(~)7b~E`d$gymKgssN;Bw| zd`~p&PTEH4=^&I$wQN39`w0WxYGaqo0m526CFpZ49r=actBTwnAS8oFJY?jmgOT`urLA(un1e_QYb^Ky=#a?N@U6K~TVWh&%CwblCI z_r87vQ*2U4UO0!~SyI2pMl*wm&q{utB=HhvFZERBqF!Rntx(=eiQOPW6*BR;(hX^e zU40=-o4{vUj&I~g4e(l+2q;`Z{!;Vl3AZJP|KX69KTXevi$x`|+kU=>hB+_W*6gYV z$w$$#J9pJWfXFmCgWLceu3CZpGtKaLj(XgTS}V+XT({y;MjNbm=H0`&y&cHqBOl&U zIzS=Es?Eo-9gc*koqofO`p>np3kP;LgC;9}`oNqvNLq8K$qn)C)%TkGZ;#U;ku3J& z{IVek9^|^Hf&OlGKW4Y%e=6DMvCGNk`28C3Be3xyeZvQ@U-Jm|@r9qdy51umIC$Aq ze5TS6e6|b?y|WDG(Vm5+_u>%;JTjL`J4b;rnqAxSQp{s9x&Nf|uMRN&u6TXl2dt-v z177tdcM0C_#_yjsloJL4=LX-EAedeEOP;TA#eCCDoKe$L&~ zOE7(e--TW6A}V%$)N4G^N^pca1U55k5vS%Ya2aw5{;*h+okT9A3FbY`yx$Bui9tG;ZpOepgx)SJZhzvf*3yy`3_H0+xC zPDjB<4z^AG`(1hZKy_|#i(n-Q;v^oJWg;IH|4Im*cl-r#j7fhl zYpx~u3o14=3m`tCpf$nqXg{&=^?dTXG77Qq#%)$ZDe??4FW>%*yonQ?a7O(;;$XRR zLfkWRh@3CWj=Jux0x#D-QyGE`6ooQJ_a`)neVI~s{V)ZtTyL)w79xTB&rt7eQ#k%D zb_%KQZUdL7WMhYpE?^AKwcq}%55n({+w~z&(vb<8_w)615RSiVSdDs}^-EsYi$5pB z#*|Bxx8t~8S}cw6cQPFD6ExqO&=0NxTh<-giFyq7Yr2zAm*JtED@~Wx4_pf?CzyU@ zxKZX3Ub8CIDt|q?samV@e&3zD+ZBS)s z)CUghKvQ>FGw6BkJ8C_Q{94)yUnUxoAyPbMdh>K9@Tea2ZD&*fZ{j!QK(7)AdP1-7 zh{SwB=FXi;I!_?y(50h&3yNT-D>>V#unF!I$452dcXX+BpMLa$43b?57sA?bUi*{f z&(qid+XF(!KjS*#v*^?GNsMcgi)J{U7owi^*2x+f{v6={ znLQ~U?NDoMv#b#Fb|+k`N(xPTU>RxFV=I#kx9^Zney&12#AEu=8}9Z(3wfzVVP*%o zZLL`E|E&!;)zn(TH@AZF*8^8G9=1b|j_e#i8v2X4ZUnu_roonD8fDgAOvqJn_gv@m z_kPRnzwA;sk0z(3G6If^*L%Jg`S<6u?FajMJ#}rtmnoPhxT?sO;)%TAnsZ|WvQe)T z9F<}V=)inZy4pyjA0n3@d%U`<6$*Ax%03yRKji2;fBhD3;-a4qQ_-W0=n5bBP=@|< zmHJIZEbK?t^tU&*a3b!14_|xX57ftP>A7TD(o66rIPBYKgFFDwIY0ToDMsi^bq*+%yLxT6tDoONz_n1?}0$&!LQBb5YK4|CIH{SLx2ht|=6{ogEh-e{j8 zuFn&y8V#ke|L?G@vXP_^uU!^T#9Sm3pG6!QO?TS~HSy!yC`&5|*LiB8F2>P7)BJJZ z8uGib44%)qv5f@xhq(8>NB(rl-7+8xxP z39RgB3#Wk2+SE&RI~cI;^?|c)oiy;@|Ajg=OM=wbS9h=6(g|f`_c?1lF%EGvrF4~Z zFN`$~c5SQ0coR=y+U!y$v_wdK!rF z%h7gC-RgbHip<_wmHL+(6 zP~8;i^L9=RxQ2}6@gm=!W|Z&u;qg)ke;sVD`n(?FR+t)m z>;pr=7Uib5sN-7p+O6*l>VjKsI+*im03-_9)%n*900)gw*`q~;y_fW4a(Vkf>f;4> z4b)4c|B%XAP9{NQQvXE-^bMwhu-t=|)+A1pC6QPy&p(|`QK)j~Wltb<+)Tz1})!C2w*go6d^bFj`_M9ZxAPpE7 zoLt82rD3~1wID-ae-j~mMoePLw33*(FWS9%@C70F;k=CNOaYV~KK@|zEKK79M4*=b-9!e1W&@j{qu1g zl00-we8d>@UNr+-YU}zzFMca=%7_NrKV48|HBe!ETQy6-tP}L?PAwb$k^>gY-v5>G zyaerNH?h6Fkubh8-y`6AD^bC(9%Z$)m5_5Cm0B2!{AeS0WX{h35qYT2K?->;BKjQ+ z-4$9PE}_ARZ@dpIHz_4*=96Jzqv*V?n|pz#q5h!tNjvZ^_s>|hw*?xrPpoiqXoa#> zUp85|b%Siuhs7&}kx%&wd0?ME14Ne9bcQ5TVZD+QIiyb|PQ<-~r=oNA$@)2y;^0nZ-E|;eTqz>5hD5=@6o(x_x{`J%!REP-p zNa94D9t&M*Ayd=?43D$S)>lU!>gl~|rt#Scv`ReT++^MZw%=FoVwu;1&+!yx3(n^dUo5Qp#Jh-Sy%=Hhh^qnl zSvum)`7r;-)9l&tv&jF|6&KXCvxhJ=-gvGxwVUwd=69U)t%H!9x!5{yWeX9%Ir3fa zYmB3rE!#H-^Ud`*F6`QMDi#{l8pRd(FfOw!K>p{K7O?V5bX28MK=H@<%D|3)t|!|A zifs$3hk)nH19z(90Gv>A%C=LY!8z*@ui0bQ(_fv}jpx1w8x}U)DSZ_u6Bfxce zk*({i{+C2f$~V%rHsm9ruHRy=-%614#eQB~-APz<`sMX%bQ0E2b?)pjZX zgw-m3xlIsl`RVP3ur^>!P`{eilVIr@aWV<($LQjBo(#;B5DAvO;#Y?{d9NPFBp~jh z;FRa!Bjn9GeCy;%zZK1}jv4Z`A`bcL;-7t4-b%ta@FEk7ZFCsEx9zDJ##Q+HEInRh zeub)O_2%$56tM7KSgz(t0oC_ESPRY~pS_1%?*p8tvfJ(Tmp!uiVHUgwaL1YZC8v>q*w4(WM|kM7|f1U*-+W3)G@tHt@Q)bTkD zZYZ^XeB;><-GQxeTbT@%SM`qQ9VA26Rxy{CN@$0eDL+ZM7sN)6G~33MLB(_|9h=0b!K@F!Ih&mmYmi_e4Zbc1b%Hq 
z;#sNqDARg^M?Z0DzQ;R4Y4y!xgFMAh-7m4YD!LtFV)Y|V4K=|+>!uf9C($3(;wBsA z(FhH<$$0@y&5$m^wbSG$`k^Q5Paj!`erAr%fnV2lgXI^0TZusQKhI4boa5FCcS#D{ zFV!_eivNPO(iY9otJdZ{$khVNH3RITn$_3kir)Ewj8AP>e zg>6|P@>Ck{TeUU{d9v1J`0XAkM4g}f#P9BY_#a;#rW^)H{>!&o5^>{UIu@adVdYR8 zcf_k1^%2Yt$R)r7%Z8y;Ls0rZWseJ`G zWVjxqFVavpbX!6i4(A`vj0-uIN$j=LI<4l2l%c zZJdOtFMly&!H(h! zxt>3n;56xRB4m0H$X@Zu7u^_8V)jJ-hX)gCLwjXbhBHC_irJvXDHfdTyFh)TJ_2J- zfw4PK$11=2W`4!oK^T_T3C}~G>cH)R!a4H>VYR2bs?Krj*ZPU6bCT#F%lmlFZLt9e zx81N(V=Em9(eE3BHx0p+lxgC&8WTP~;iVk59|HH<(npaZ$QK=vAQ9q30pna(s!22j z9?Kmm5xw0H;bw3B(?-Z39w;fH_XTzD$iCZLawv#nR_HNpp+Up8wF*hcFpp%Oz3767 zVYpits{%1Bu(R71xDe}wna|;35{dkIr|0@^LHrK8U;U0ptA3xcKPr3#INzNYPhQA^ z!M_@wNgYDF;1l(c&x8RfjZO(U+ALsS$G#7{-~L|y-tXDR|4;Y#TJ~}iIG(iBWySc( zApfUwl6A042Uzs2*jCP22JwE4gVOdf1h047$M1?C32Wlbg)33b#OLce25AzVhzI=8 zH@&ErIA8hbh1+4o&#qW6&!V`i$&BGVwk|IF-riOad)?`m z@SqjK>}tg;xvPO=Vb$&GZE0YA-P!ojZp?QbtJJ!D^gW^dLu8dXUnybjD|J*a4*5yt z^X+%8enc=Q>-~)zUjkX2&ut?;6ZAY@e#}iR1kX48)3JFUp_N0}LjiFgV{1Iu1-o}a z_uG^sz7l;ff7{CxzWZcQ)5zZPEDYmai}$W5dRqw0V?mk8ED-V)xsqTgdDEfIf`8Z2ddtZsdQ0e!8#vM{*fG&~c`c zv$MGwtQ4&6-FMaltuxgB*_(PehG^!SqOEYHscfr~NG}{Yu07QG7a5e!+>-IVhd71L zgW{YA2jKi}sU5wGsPK`;HGC8DW~^)Y`I*Kb!81x{jJHZZluT{k`>A#SaAnv`kfgGJNPC4k*YA=!#>ECfA!^t@b!G@oygF@3?)WudDO>0i z7_xE%)>xl2c#OJPZn;&y-VD^)+N!w@8kn$Tc>gI;#L=<-TDdYSa2Rf%@FTs%dBPTN z%X{2kSa2|AxxE4>u16OJb{W4IhMezxf~@@`uxUJ_G>-`!{V#hA;ns)%F#*mSR@s0;GeF0N2YtOE|g`i{~w8Nig! z>oG3RCRFJ0(YmoU1lMEcU_~kFxfPct(^qyA3Zq9_^*u<0_)o_686z?wdX^(mO^QrZ zOk5Axy#(`716#Kpuk0iuCrXH3%vVpoHz6ritp4?FUn&{OFX7py#qy-7wq7BDaZ6;&>SKT3(Ph_yOHF~q< zY#*2;XURq<^+ST6j)8q44NlZeC~rT@f~chSuJ89pAdA;1PUk%d(mu9HCu)2IvP*UC zMDA`7eR<>Xr#Q@~lX=glP+mrmPtu|i9%5eVV*%IEIq&d$B$_j%qoH7{)OydVYS?Z_ zQ}OfchTC%{Tz0heLC8^)@oo2MAbPxed#5?BM=mp+w2_A?Z|0Ria0&%#N5?MT9qa-n zC&r3rzKtNvYF2O#M7&A4?{fJ|6~OZfGD~h&L+>729T;ea?xGVW{dY*vF}!;2i?0}u z^%dDzgYBiUS=jrMC=-&u2c^c~dcI)unaUb}{9jX^$wxjnLrz+#%CUP*u)cYG`A&s4 zIJ1}4yAR{dlsT`AE8Hk>&D`nR?#uyjNYfZ=d`ibWkk|JQUmbul6R*d|e5p|RRz%l~ zn+gp%E!JKSs9;z8?G*FG04VQ_qtsZU&PvjCt5M9eqz60f;a|W4$CE!?xt&-*@|`@h z;}8o3uL~c4b{_fZmwg%W=|P_P@2h{jYhZv2x#D}^;1GzZ8yi*?GGHvZThQ<<@=uSc z?Z3Nv5IQ`{EK^%4P`F$pr~W(Y@|}FMZ_0}dIx;_`hU2gvTn?0-UQB{-<3kZ^Rmm_F zrPg?(pdSvZZi!VmIsj|5nrgpFqCb+6BS%VM07HDl=U+UqzxN0BHoMF!F;e$L9-Zv| zPn_G{qF&yU%kLH-9;Wj{{13(oCgk0@PkD~`QuesKQF-+Ec3iiwbSc%A!|&zw%)6F& zg$a9#?>vdcI6A%DGc&snbt>khZ#uLS`R6nHLpGKD!mG3I!yez+{gS=SelK>(KF;1| zm;crM?DPJqWZ#edd_Lba*G$Z>7xgPXd?>vgIHunmExKET`eJQ49vsNS7VRyt;88Wf zoAu!0k~hfvTYTZ^1v&KZ#iyGc2qh5(k)JmzoFfzQR<)X!#L0wbT2XCjMlbQ1Pt}p5 z1@qWH?Y7wUvXP*<8DxAND$;5eg0Xp+tsSnv&Q)Ap zPD=jwc9H!Z?D~zr;UK>p*J+X^y-SVARJe7ZVMR6C5x&rF)*Cm(aa4WXBU6HT-^KAS zmpFwGt`0jayKW)R?0)WCs4PU@iC77d0P zMIh!qOD)=6h&od~m%?3XF{u0W9~JehK6PV8+c`&BklwB-q2-SJP}A$`=VdYAb5OTK zeiN>*51Fp*out4E>3c1FiBwR#CU*AaO&Uy1oLy9c{#BnA*?lvibO@=Oh>1dexn!zO z@CfR!%;%1DX^Eggks$Bw6?7_m>UI#AC`12)P)$aL3=^buZ|3W=8da^#EG=`H!Jb&>&m zVFwdya(?wocK=|vBX-H&|9@Nlzq;S=_xXMM_xJpF$Jys^pFF&;;SvqvJNgq1HX?u0 z?(37Z15GetGi_n}Fb{&xoH}FY{hEkx+_kLt_$Oj!b+21aa|@9?b-PA*R~L~*o`2&M z>i%X8Nv+Lm!g!Xlk42;}`pr)kyBa(85aN$kKafvDp1|?>HBK#vhg+lZ;lyjiaq*n~ z@hB()R9dtSZHcIc`8|FL@_HRG6;z{Mvk>)2a!ser=P;qydGXJoNERgQcFcT#bQtt& zM*G+QZ{nqQkCpa?97i0~h3V&C=Q1F~H&RLUCHl)jPA#&k3DWAX=y2K>L(qPKv(rjJ zKwg^oSgk6NaLo@HUlt!nkT=*KTU~+Uy}FXQ+FxZ5MQUpsI*ECuE$?USGJ7G(nfp$$ zFX|+d{hnwm^n;G+jjRotIDhp|iU?pZ;PUn8{QN-{6x9#c+}(xz&SiTn`E;qE9o6O~ z!c7L1Nzlk?>Viv-`bu^c=pU#%{6_yE<`HuztLdZNh+28RCz^`}W;-rwPU!!_g>hFD zYpB<-ARMk*mT6-g-zOuqc55F9jd;6o-RK6-d2$?=13Dqwz_;%mFX}f?wrETso+InR zH^S}eAOxn(2-JxV0k!2I(ceFSycDZ4_hY`*?s=_muesB})z|Lhg-JT-UTqM(h54u* 
zmWlIjTMWbUw;~4>D@P!Mf7@ae!x8Ws-n-UOodKP07c~Yh4!}KmUh3T50d!Ny>p3rC z!1~d*`?;3;vTpM5{}@f^zIbE?CG5F)!KjPGg} zkkuziU8~A5j?3ZUe&s1)YZD)F;8+DAS$FjYWfb{*Uj3}zt&VYYzs2WM$58*fz&-kk zA;$f-$*T6;>mh;;ITwdEbP}#{%chhlEkuCW%7)A&%tw!keV`bgM@Uji=KL7S2G6^B zR&#e?{K+6oYT^d2^Ul_>{)JD>5j$`E=nDh#tE78h9Ixt? zZ(KFS`em-Uyp1fT6YJeIjeLh%gH8=ft>Q8Wy#JGE9k#JmWF`dc(Hp?>gBTHoT&>V(gm z-qj^gTY#HfzIo+Z#Hm>he%LD81)0PC$*I!)Q1o^~`|WB5gh#9r~ZeM*#R0@?44EhrVYTJWub>{a*)UQua0i#?Mx`)oz1wZ z!i4Rjx&bW2r3b57mV||}AT^w1Jefd;hmp_ zI{*>P+!gyfsZc!?urNku06N?Q)LtMyR9VjUo_#(8bc-qtk4_I^yyRoYmm9zQ>+JT; z?*HuVOB=$4j$@wb_d|N+qWMf{d=TJ6zs3Yh@0C8=xBUJ3-}{Tq z{(QOFbYFYK)!1};8kb}+!AjUonS^*XwO3=}>yNVF$m=1&mpI=F9W$EK{r;CdV874* zuhP}}rl0mBIy9+XeIOc&?dh;kPKR(S2=5Dve$#<|`iq6~4z;mF&WNm|rN{@OLA~$b z17n=WOBHqoVf>nxo+6Y#(m`;ExJ2yA>?Dl$HdcK#K;F*;o-@ytF}^)_t;LW~1CdPT z+Fle?PE5q+<=ibo9tVbu(jtRGFv!f8Vu&|F!d%|kmknLeT2G|PEv7OMO%C{q=dEx~xY+Rga~{rPJ^ z?XKIa%&rp9d*_peZYpS)U37Ld6TiDAO_9a*5nQYHC9B_PgusBV$vD{_Fbqvry*7>O znL(`FF-oZQ-T$LTOX-KtRQA>s&id`;&a-uPzZhG~fY}BO z^ZIv8nEzCHsq^hYa9AGo&Txi?ag(0W+AoNgX+0rx^dsV5^l7Y;ar6hAU&VhTdJt|V z^I!0n#`s(A=!LIekdOMu!;ELmbTHn&DQn{}4dOVP^wx9^faLw2yV6UBK==CEh0%wG zvAz}=Y<`My2>QPBu};7G7rWkmZy)Uc_SwTpZS$L7>!N=;pM8C*7yr80(!Z}C*!N>! z&o0^fe=phh`+YzAx_>I!=d-V8pZ8DO?EU{#TB~Gh81mC$=hfr$gVIT$&bdv{WnBZb zGEZ0cd_}(Lp<@2t>J-AqXsUMfD&l`fPt`Q~AdZjuxYnE#`R8q7+vDSUjBYP`?{3(1}U}^46re4yZaY00Qw^G=U)in zc(R)>Pv&wToDR0OIfi)l?~UAfTQ~GV+fh?yo+TM#%uQ7IHL1Y0r@5|FhXFyUZ94Mk z#}$9s(`R>N1n#ApW^lC(gDurvP30rvGwl_F-KoQ%+v+RRdw+VVXNo&$48}(du&;uT<*Y)jOX=f*8v1 zUCz@q5Su$GB3t!~FVdX%T1UePCREnDdTTo%-+S=M2EEOLAeic6eZ>j=ZREKJZ7hGa z(-ZoK%MK!5BE``wT~uQLJQJ*|8_@s2wYj-@W(N&IvSlHy7TsUIu$s62sx=`VgC1#mj#29R0!H_VmU7c*9S8v8{1I_ z#$531cRI!gaOiNA#`xUD(VJ_>(H^Z2*F1ewk_lVqZ?;MgL;tJhTTWsO{Xg5j_vs)` z+p$^q4T?4R*UF5*__LVxr$>juc%`+$0pySUy&duD zmtF3eX2E>kNanj$$VcHZk9ID52*eTonvZ-MbPm#oW)TMPg!1-njzC=Rx(3NzC4axq z@Av(uZFal;r{n*$|G#@(N5!{9^Ob!-xX|nlt;Bd!O`mnZf?BY?kiGTui?>jsXiZ`~ zN+5!~p5C0;jQYRyNZb9FJ`sG2R_;=3sw1j5i=J&q+}e2F>(&CTIzlN)-eZbZLFm!A zNG>6{gd}adc)4K=>PV?rElsNer<04Ow^S{;`cLa90YxYL4?|RYkf=)!IRDcTPz%#`$j1oo@*-5=4)K!L_XNv}=kim_79T-crsHm=OMJ<@;dF?{2v^#k~mECDDb^ z;laQBA?)X}%f-B>e1yt|!BXTcQ%iIhC`Z3NK79S}|AGCz?DC)L=lA_9%b!-4&yK+E zTQ#a``~KyJZ0|axF${scXSVG^{XE+buAIo253`x^=Tl6Pe||~EX-?`t*BAS}|9_V| z8$N4HJ?sbfThjB2ySqTaQtOtd6RuCkOxA9yLcZC(1_Ho}hah9$0~S|b5b^a6>M<9K z2*Zz49~>W+5!L)Y!(28Wh-7o|))xk41oQq^(}xdHmwxZrBMj*TLerl^{bdE}w-%?k z3z=17J{V`veco1>)zZ1SvVjaWK1YmY;s<~?`}y`TrC;^M{y)685|w=9aoyxYa?mZM zLwIA_b|DYyr&bT#Dv4#y z){@}5zFMV;1qHT@Uf{mEh5><>o{FaWF(IofZ}+TKE$_L31}@ty(vo9G*zUA1fvT@?aixDKjG4o zh6Cn(;30TRF{u-u=jHk;YKjI;E=LOHm{K6A@hIW#Kn8gZ+PVr?5~P0*SfPRIl8;-T zjF(IH!{ZIU1OAeIu;I~z108$Gkh{!7K+%m1)s_~XZxJ7tVi|RF1=^2=B_c#>Ivo^e zH$Q6m%m9wAfwPGtbhxWKeMf&C6~`ZHfY?5a$Lq$}t0$taQf|QWDG9VQ6%GdZ0f_7W z*}Fy(`Qh|-TFuXFW5DnXdHxVwIA+f?>W_~NfE6oEHHI5?VrH*-a~o2@ zuTg8ws~`$^o`3U6vx5SDmgj%m#`s1SZOx9c)ikJ)ci2*yiS_pUMw~6?{rId-+5bMg z2L`pwiL;Zvz*Km6Rce|HR=%@t=F9rP?A^G3vJ(lo_H(8@^e2JmOpCrN;wO*jU2U7T zro!TR^q8JL#MMf>+KN}wfZsklu&JI34;17AWu;Lcc7BWZD6=1WzeQ9n7odXG`%@Z& z$rz7qCg*%((!t2@u&1M|H@P-7&VRC1CD0Xef0 zqKFe<@{d@i9vgzD*TGQ)^2=Eqmvtzz!uVi_(W309h@TWWBybD$w;WO=AAONz!NYSa zlJ{Z$UV5ctWhLVHFEhAqt;zi5KM#7cL*M3K8d)*Hdk*P$FFTdusisGcv<|j;W%&-P=Y|q)`x~+GfQ-M>) z`SCpCe%R%|?0Eia#BEmDk;SUXU{Y-QT3DeE_C>vWT{B6BO&X~GquB%8_XLkJkeBB0 zE>(L|)MJ)Ri7*OTfH<7`3Z6@=F`q_wlvum78Prd23TtR<0lLK$|D3E2urdt#pp%X3 zfE5vP)q`XR<>=o2^J_aKciRVDif%ysxz^#yA=J6NKf|LqTLt17Zv0!2AHE^3{D!Vl zBbd+XY?|HL4wcHO!;3HX!rlJsQ&agQ5RbgHQb@lG%#>E3gi$NxspZ^zNNdvi{G?}Yj5(@)PPwSlVmi^84BE#SRG*NscR6{L1F!iPNR`I#Y@x9NQKec=ZRG#)Q&E%HY`7K%?$ 
z!6$txfR?tX%UjfG@GDDTYLQ{h*6{Q-T*zm`Xz7^Fro;6O9MeER98e9%0 z7u`Z!hSlg>qiM`9&wlzr zf}7H}1F}>QhkfPV1%pzIe=fGm8ZV&0#DGdt1I9y{+vMGy_R?T>;_Ld`{DWY)iI|&+ zJXi^`+PjxOp@GV?{($d@YaY%h&)OYFfmQm!QgVtE80X;VQ1wQ;hb-4F7+2QZ=2*YZ z7uPfQ-dAZgF~G8R_UD3mzvhSRe#;)m*!{YEgGJ&G?hy#DP;X~!#r*t595=+P|85^N zQ>7Qq8V`eP=@Xvw!c4HeBbN3J*Lm~wt!?^T=@7Z_1=9olFZ{!^CYP@df=^nh+;-U^ zDA>@vwknkgqGmhYoHbd{dS~fgJKka7$ubf>vEbKp*zfl-lZSre8s;-q#Ie+0qh6T8 z6Y69w#$)HC488kC2HKtroHmF1WI>2ts1)Pph#Wa<=%-VV4aBA zJ~q+{Ht(ip6?Hox(Ahv{_+BS)4l9H{U*83OL5j{1m`7veK$NdDA_K2l3uWLL4Jy~G zEvY*6Yd+5QTjcIsofFN2@d(Xk>=ie~j-nNbc;quz7j9MzJ3SXk6 zWkUh?$b(}CNEDE{FC?9@hz<<3bSlRpyRQ^S)Zl9 zo>OW#YJ9+M7!>k$>^zCMzBH?ga@lA{YojLP4o`GLMzo@+&yrp+A-C{S`T9X_Lg&DS zeKg1y8D3{@G6a%LdwyNVUQe<6C;NQkfmH&Ha!lZ*w+)pK{as)F_vf&$A6#|d=K~hT z6aM3Yzn;TB&c2Sl%`Vyd8y1B*hXz#lvC#}53l X1ApwmA3N~J4*anLf9$~jq#gKQ+!`+n literal 0 HcmV?d00001 diff --git a/sample2_1.npy b/sample2_1.npy new file mode 100644 index 0000000000000000000000000000000000000000..4f3472a105f029f297dee00a4080237b5d4df45b GIT binary patch literal 131200 zcmd43c{r5s`#!F+B_b)3q9RL{vL)M{BoZwuvL%IxloXYe5=oXsDNBgTzQ;sd82i4@ zFk_j;n6aj03H5z`pX1x-bsW8q_wo6zzu#~Ea3A+Gk6E6%&+ER<^E$65^7vWPla{O4 zyx4rC9UMI`-H_Ilkk)q6kd~K_cDi=s<_&vyyK6Tb9RHu=N9^4^9kJt{&i28vpS}R$jta;(z|Z;~SmpsFcEl*@H!WlL@1+Z>`rg{6i)P)fS}@uZ_d+|9DKp z@_!%y%-R0q@fH4zuVXJsd0t~e!rC!U6_;Vq3v;7uJ|)0GzT!Be=uRk${bYJ%O9ybb zSBtVobb|5n*&De2Ua0P|xU6$&2v&btXJfpX4i4@cyBZB>K=jkoh!h-#YgYt=v_21j ze^rlxi|YWW551QTw;F`jTOG4c?FZnt-v!;7vm{7m-?_KCwhxAcjdCPdePA4MK{Y9! z1TWU+O)Q@rgnB2HpC7&tL%p!O)$$}2bh6CX*vK=$jPLeHd(Idv{q;TZ@3$)4Tssao zl#M>e*p7liLpo`M!2tH%BFRE#Oi-TuyoW+!Lb&{O>ONf-2$N>mwePavNpA^5dW;6Q zd8^s7_YZ=;#Rg?r<6h7>Wpe1JP%Aj;cq79)JfyI%)-?W63Mt5%${HUAD73WqM`WR`;0N*d=j>;w}b_|8Jl~ZvFoIjn6`OSEE7_S z4tYLY7>6CrcaQz59)k+UJN4f)si3mM?#kt8GK8-F{!($VAJ{49Uu1_4!pt3)7m;cd z=s#*DTyT^MR}9yxt!t*j2~yR3k;@3^QR7`iW*J}#MQra?S>RdPWTB_Pf(rXpH;?3x zLO|4tO6pZ6q$#&?Y|k8nsjXM-Z#zta$i4l97g-EQe7L$sR&@ZPm$TQhntFgW>}0kw z*998WZ@mSUtdm8r4P*g@AGBwq02p**yW9?>C3wrHAJ;2qSP(VS3pFM+IYAppkAJ1=O4IqOI>Kz+YaobXSQ6-?#JJb3Vxc#!_(R zdA{-g#Lpib{TWyO`+EE{|9kv*IQ;v(zvqAd-|P?3E4&A(aCS$?%wb$Bshel(~NL{OJ@#kyll2 zp{IdPs7Z4gvll47CkX9^{)CwBsY7CX-M%AW5wp#I>k=KZ!hpx*02M4wE>^v99)cYn!z<694MO#c zBXK2LDc}(kQTg454h32H0-jYP;BzNDVe&o$D&DMm%SU8_dY*#B+nW>c_wN@@d+Hv0 ze*|hW6M0<2i6CY=`C(U62Uy%`^I0(Kgt_yQc~2C&fz5Y|$rN`hNWR)~OJc4O{C{oV zVZ_-ELapDdoXvY6%fZ;=ei0dnL5wq~f(k441zcScG%(x}&o@FIhUK)xKx5e|1p?p9#YuK2a&=g2pdw^>U9*aJl!v&7RKJEI)rMFoiYItex@ka)%VWc*QRnS4hM7FFwas;+= zy3Jh>8HKdGyC*fH=uq2{@=Y(E3Ip3)uLWG9fEgH?Dq;2zA(ON z40P7AYtNh+fCOe;a-W3N&T})pbV$yT;0orZ!Y0ydHqL#TYGRz_ZS?L7-K@?``ZS`7AAn*2`^PbnEtnM;BUPCjpzT} zxz`x`-q_pykjf?{D}A{S4wukezXlQ^jbga)!h;ATkq`U({rh0H`^+|1*M8vtu|IE7 zhX{5mA&TZ&?cme#QIb|#2tM^n)p1&Rh+pMR(whVID8PL5B5kr0*>GnJD!CI7{!ZS( zz|4L`KI_qV&X9}(3UeJV`;n1ZM8ELpc@h$L({)^O>_Z7O@qsPey~uv1|5ebIP9(`3 z=dD|5K$M!aklWLMDCZM5Mw`AxY$x8-jy}i*9nq7s!v+noHZdpT$Xg=Btt#A3{7eTQ zw!H$auf}0RVY_d4KMOd@_;Gf_OmGQVDa!uF1hMtQ1r1-Y_47LOXwMI9JuCk>SL%e} zTi}W%NrMc=ilc&!ln$t0$1@r{*Y~J85}5?0`>Qeb&hPU z0E1iomvh&3KKzA}byiN?oA9c}CuCH|8+76^Z}uGo(Tzul@-MH5zb7oz+v48vqS9 zPC*+bB3Pst>YAqzVMF&di=y}e*m_1(((eroJlsTn3VX0XuCngqxkKadFlO+|-Of=U zt}jmZx;p~MP@dwOM+3#*n<;m5si1UujjR`7xKuwM$`~90J`1;HR_Q2+Xyza0wPk^y zK*f#KEG#d&v#)GaaRTz4D_XOAM?vh_Go5*UDu@Rw-HewQhS|OIc{Tf?;ltdMZNru$z!9ctf~R5eqMgiCv>k&Gi+O+9 z-Z3C0)umRWQ$k4^qty#LSQ`QOEdzmNZ!{~7uAQVD?{-3Was%V{4i|$c9h*fei(~;eS8Yh?I0dteocAY9Hj|GC@6?F zAdW7hXuGdHh`%$1vMqi9G0J54)~Zp^ny1*dKTbuZw6A$q)ii`VZmi?eKtsOPPdC}h z(2yYAY41lBDoW5T?CG-}MjjU_$Fx?+$fzww_S{(_a#|avdQ`9*3FS?GGa6|^*^>_i 
z?V>9XPl7>UY~veb^sA?Oqf`+DhZrwXtLuT>Vv(crt^yA!`<&PF^M+dd2Y#mY<&}?{3?;oi%161a&686n;OK^ z9N1i*)rFiMn9AT3h{$jE?oW8RKIDWK5WS{BK(Z!E1~2A1k&z8?pJ-GgiV@I0c<5~f znhV)Z{92ul!l%ZhE}l$BPEOD8n(xDq&85YcwWqQ`C$MXeV|69?sOK$cov4PG&Y1Ap z<}#QF(K3Q@Kt=8zc8r*KSjOeb?bUy>Qs4!OX z^o_ka6)t`;%7`b>fPR1G^IkI=JS})aTzvt1-t_17xSdqcvfe!Ci1D`COB++IVhQkA z!!G;5X#yxVp1%F#Sr_Q9z4`8jTL*lX_gX!s)eXo#EHb@-2ooPnG{z_UAaYl8;^+|) zRQiu7Zn#Q<))ME|D6auv`p#_Kjm5bXbG-w^Y6e8Vc_Na&jR^rZ>sWAP1jOgQlh`~* zV8g6ILpYTIVLy7av;*kyD!R|rQh*M)YO!_9aXKt*bl6tiO@SNlnS4EKhp^*43T+L; z5J6e#bX}%Ef?nub@m>lzi#Z%<*hqsglfCUz_z`H1g;N2BOmMjNi%->u3AZn87pgeH z1W)0KUg1+L=*qA(pUC|8`TZaK{ofw{#{1vz7jrsS0?my=|0oG}6edC8EtmMi#wl=r z-ug;3fdLM&AsyyC7;iq2AvdtG8v-;9E7>K=!06gU`yrJils3rd9?NV%DbsDN+cQ0= z^OlGFdgTFh0{5nAT`&c?$?(iwRHPx6+N-8@KWWIIhS(m3;h*)h__wzY6}54CukKJB zMuaGtUA~llBsuWIVOvHgh9Bay?#nfZO}?LaMKJ~8mU+IlmZd_r&RiOde*wQePVWb+ zYJlUekKnB@&Cv4k+_lF=ZQzo>mQLN*4t6FP$HKfiU~cKUtiw<@h}qc7_#EqpoKwds zmHA|7IY0Aw%cCX`w+Kpkb}kv2Zs}a!T-$m{ckd4}qwbTdC%6nMSnzfGzYDG>4HWtcz$;e${bNZ7ED$;P3_%$3c zf(ocUYAJLE(qi5_!Q;t5`wnH+*zTnxv6p%?#%qR={VL8zHWTx`fLWW(cIXr zHC7Aw=QjP~zbk-p?{TDFLKf2CE9k(r)gp?QZ`%#o7KFE1Oo)hTLKYn|K}pvukyCBQ z>c#;_u)n2F9=ELpM$$sD`u27(l@DN>Wvc)ufidRA#zZiBQ)Ii;6%9H`vTw7t;6SK% zQC)4N1uEznc|wy!unNcVjtcgGXeLKyj!QL!kFSwmv-K0ml6{hijzy4?e5?1XUO9wc zwiMRH(w2h!lDh+5so@<*xTq$eDi$c(sTp4s{g zIjLLXpL9RRKJO@fx;`4R@BN(J|- zAC@hz(&grekWD>iDy2yP?y#p7$0@zABKEjwG@%PDWB|v1z72#nTXwYnYzC+6`=70D zYXZjPht~$i(dOcAYhV(Tbig#eTgxofEZ6y^7SfeX} z5_UuIp6M66d~5)WY8KMFFAjlBvUtzsM^tzrNARf_9)WxQc#+1VEGUuUU!|w^e=ZOB zgKM104*y~PX;4hE*|WMG;}<8THFEPNz-?8jghlHl&l3>JCpF39;P7tU>Zb!9h0!VGzn8NKkP|Z7ZQh%=>xm5`9%s!%MuRIXs4p!O&=tc?og3B zW4%N6@GuI#7^`VvPeLwK%?zWv9f-2)czhy14%HvO6msut6DSM5j#JB}{Y#$2`DLr; zPRU95Vu#yAUi#YM>ZBiRa>GM zsDV$;N5`@u#>xV0xlwM~c6-1^BAwauLf=k<+EKQ0`lWYj5*3ipCOc~^XM z01YJB3l2QYrNB&u(}wC&OwY*Pm9Y+2ffQ}q_Z`wIE;WV{o;u$C{*{gx#Zo_>OZOw4)8+2Uzw;5R((ZGcbT9aQ z{W;<2(gw2Blx_p@JR}r&L}GJY8=6bvzh>3egQCu{-Q5(?gO(g##S;x%QHsr%+v@w@ zBYVnkB|=gQaQs}$_ISP*@ZIIRrQd%6qqF`ej-GvoCbr(Jn6@uOF?zZ@$93Ky9xmnH z)$SFrA}#%z(a{6y)%W~s**YNHK)RUJkPqS`GrFyI&yk1k`k@?GYb4$p#4HH;3?8yU z5tnA)LD{LvR|8vHkZDv1?>-J9nt5u)pr7nVW%zB<@l`#j%&}|h3F|r(@Rl!fMKK?# z1V2m;cd9~?;|3AM9hC@wRwS@J@eR`8J-X@j*E$H7oTCMN>4Ao%!@b$v1TdZ9-gjxs zAav-gq1+-2Lbv(&tuqt-U?I*Wxir@gJlvCCwfwpOA7N)mv~Pf=8n#o6rwt%v!Iz{Z z-U)&V3ji@>Z$_MKGZXT+~0DJDq7@T+{B>8ah13RYLh$?**% zd2WG{lv)bXcRqP>U@sMg=SmoISyGT+!_c)?A%iHvW<{Y;mVmr|Y`p(Ir2*;adYk^{ zO-34es*cIw&0uUS+w$IK04yfmq#aTjFy(km=G~8YdsmJLh7diyNhu&2y(1BB^o*cp+dI{4zn|%@HdIft3ZM9DMeO6 zKOXeeT&mOuY7viTp20>^KU%ZP@J;@#O&l!YmD zu#JsZJZT(}PvY7_Wf+Jj^3Kt4<{%5~4h6=&MS0U_lVhy6CaW`<<6VaUJ z9>-1Z2T_2d^}@BK0c5&De`grii-POU(3=bKC}sI!x3X6}GE2Sm`nzQ|@|YCgXBXgy zW`_52|8}YXajBrNk^?Q!xw>cQMtLjvU2`%x-`WYmpWpEw!FZln?3w2YoZTR#pP!y_ zv=jV8@4oQ-)Cq(Q#eMyyb+E*C80BPCg5R-vk$vmy!6&DBi{@A}bcPR};2vlM(X;nI zIA^y(3&;5tu}vMY60Rj=8rcq08Z{r(xA#EE<*<#10?82BoLKDI)(5`)+gW4t?O;>t zCc9du1-`#E;W%@t3mjhGxFdOX0FKZ{wMQ`>FgRwGlRUtJnzkOA3XoT>7Vod%>PG61|`RR%yJZt>^Q)_CSVj8p%#I*w-^wz zCO*RY8x`7gU-RvrC4pj}@rvTl77%iOWZv=UF%bH<`|b$GqnUtu`Gc#v5&2Ddvix%r zS~FrT>)}jLnRkZnH;8z0FKh{|B%leq^K(~@ zw;+PpJ=5I|pAcc(A{!~-Vd!>0(Qt$a&JPqKtba28B@g)X{VdooWUXLzsGNc#mK-k& z$g_>SckEc8`n}zjR)^Iud17eqYNo)LE@EHo!Gyfw1BQXC2@qg+ezEmM9PCivo8~^z zjsjk;Zqs-$h*HYK!e4jL(9qtxC)+ME5Lw-0_(3}Z*>{%TI3PEI1U;@;Mt`KBrJJd5 z%+-m=oP!vj&DqxmP*)gaVu?khy)(qKf9M7lfdo_ywW9g1D=)jY~#j6lpVMz zG;y^bCG5X}U**I=f+d>!YeFZ`fB!!JIZEC*Kf57*8r?p2i8D2J9Pw3XhG|?KK`}L> zM}Ft^p%m_5k-_7+Nc>d;I~l{jlXl|4(hHfW^xZAH`$ioIr;}uu=R`!Z2X-E5sP995 zcB_dS^?Q)0PwZpl$wSHUx>k7wEmS z8pEy7T>r*P>42}1x^_V~3anN)6&`MCM?R6HMD_wQB3#}cKlK>nPx9&Bmd^(fewKbN 
zys!rmx&w{FL+~j3278v8R0Uc}TnJS5szgG$JI^Irry~`+hwKNJ${=lQ6L*V6Go&2i zdAXav8M+rzww-bAgr!|aC-`3Wz?oi8E__r6NVe4RJviL~XO4fr&dluqopO4M&DJ__ zdhh!oETIx&*zEfzS^>-iJR``4$>34O`J6K)0Pr3yS5{XRfc@IJYc2`3P{y@KCGA@a z)bO75+5z35v+DF=jgCGrjdixP^6Ujsd6V>eGFW{!T<|zGqaEA~Z?ZAjG2SV8pug=n z0bFDg#FO?9fcP_Y(junIhV9(ehI`Hg`Ji(V<3|6A=RfQF|Mr;oM(W8nM~wfUFIYP3 ziq%a^zhwquIyroK-bqv&2mke{VOz8?-S->!;3?ZqFxrsVl^OdCD5u{)(y6OPW?@W! z_fOrZe%Fb#XPlUxeBcasAg%c!m_tFq_iLN{izukz!fe8sH^Zp2RII0+MMeyJ zZrhEl9+a?A;=bo%1H$c>K5}_aJ~G-KG`1iMP`2zQv-?mde2|kU++#2R-}J8>RE?#= z+WV27MOF-;k6z{VI>-R4lshs4*GJ&PuOrB1{T1Ie&uK_M*KzBn)InrXM<*QQ!E}lT>Qgl5cgT-_OJ~nQ2c*vQ z{45Tmz`4_!%E561VD;(@5%FXH9qkk_kYY5naL$mm*v*A|Cz z=oCIj*4WSkHtSki6}?JfPT|$f!C_3V3_jYuP8E;JI7RgQ(<+deNaUG2)CeT|@qv5Y z*;?Rm(W^Ju&!9#gF*c!EOb$`^{z@(8yg`oT7}? zQS&RUqNh55V~VT6|5Z1{Ieu_(Iot(NW-%*o?lu5E;}`y7TP@(ox7Ytj0E`dp`+520 zE6|y`yy$|Fe((8UH^1@8*9W|NH;{JpcEYwtlvx!C?%RH&>=~-eiG-`x|2VW~`30 zRJ;!v70GlSe&2?Cy7!B+ zl+ux;ifid6K0IjK#VNEZ3_@H`Y27MOI*6R?o*OtpgUWW2&Ts)%r*cTV@yU7wmVSH^ z3;Q+-LAXzfVVF>^d2vHI=ug9`s2z}6H3hRO6(U24lW-xhRf%zT9G%`W!9KrVWo~o6@V|0YyG=A#J0|uHR&xUhOk07=k z<(V7@X-KiTR^Ic<0D8g0m20@E9kGeHUhKsAL4{27+Uy)`x;!4oh_P5|G`?84f*MJHl7ZZv~VhRK-k#powXD|8Xd z)9Y%6Wp5QChIG6HPM{LuvVLFK=Td|y+c&H(h|dHb!gY(o+1NhUF2%j8wH?CqdP@&j zeFB|EwjDdmFTvcsDdTskv0zk}=zBHuGX$6Wg#LEMBU4d%TxV(@5?xPrNR*%a+lfk5UUG# z{yr;I1E$>?zYd{FPzlv}lU`p19GV4v+LoS zW!>z98QIuV+^TrM*?hVcMk_#bJCzRXz21V%UT=yK_((u$JRb`61_n^lWpA0;A#9(2 zWu0Pzt#iAgsuN2)C}{4xmGN5gAmWeW+cK)&huUP!Gz;|F5zn_wn*($C$fLymk?f1l z@OxwJpwnd{9Q$RT$&dLm^5S@dOvG4cNrzT*z3&q67?0WMkAMq#HUA=y_6>zts;v0@rAx6PTSh5-O6Flxz z8`f`t;F)x%dEQoJ_t0&_rBhTyk7?7n%4DG;dD8DgvHyzyw8u*AM%I%k7IZ*N-|n&4yI38NO~>Q& z`x2Cv>#TXtuo}gzD4ErzmLZjm0vD75U&6$NrzgGlV|w8yqn+cJerCo^VE!`s|P<=H4Y=u zL-9Y!Y9y4xT-Qu+Z$T472%L1Cgw~V)-7^}134YHy6aWC*Z%a%)v#pc{_K60@MEFSNL!Gt#8R}LXJEH{E* zvFd*NCWH0Ju-P8vS>b zLV{#W^2{8j7g7BSgg0WZtN(IpT(}tsS1+D+*w+LTCLeu2Uatqygi?08bSpIOeA35N z)&(owm_y(_!Ybk z893}>Yz04Id3}Ga!p~P@K*CLLY8adPx7X`WJoqyb2h8kwM<$^yzqjHbnF1Vr;%&zk zs^@URMN^~oEG1_KT1f5FXJ5}mTInHFpZ8SLSm}TM-b)3fqk^pDL7A*dzK=t`bD=Tvgk1kPO>XULW_SQb5Sn zDWiU%8F;9jl^1c%NRa2)F=aWd?m6D~etjtmg}UVMC7Jz$f43>NsPh@qs9(xVRP^Np za>&}XAiZT2Np4y1DPw@u8>Dme=VR-TPN`}?DI43DR@n&K1m=Qh3edMr)FT{7O>SE0 zMV;nn)VD~Jko_ayVaiL)r?Bc*#%=>lr*t~>@St`vN{eN^dhx6V$o=x>XWun~(D9|i zx2`-wPA%_3(n|0s_S7&!klu0i(Z9VGI_Ya8{I?+dMm(mY6Lf%_6_3XN=Vs# zbhGi7R7Cb|T-Ut65@i#_;F3T&a!Q%^Zh4!8X5MZ3#AV!o#6{FBSRY7ex83O*>pLmP zKd@>^RELTvgWFh=-58%@#`xHaVfD<%^J(m-J|XdE$FJ@bc!zkpa=9*;M1WbD<)fH= zcqp5`(6*7Y85B+3-39~ku=HrN(uRO)@Hp+Sv7FKdHfQ=pu1Iu1%1>>!1Hx@kR!6s} zJ=g(+;9E8K*VF+|-ONeWU@?S0%J`DHssJWBLSxD6{82Vez(zOV6PmMXUuCjA2k~^Y zI;^UC4^Ae%#D^{zKX@m5_we@`5M6aA$9kaw(jFM}Dx7Nq{?24~1AnY;7@=N&VXhwd zTO8DKvAD#T{}67U(gDKSaW59I`b>=#pP&|T5Oj_lF+LPS2eb7%<@cztK+mA%e!*(2 zZuK9J>Hi%6=z#y#@jvn6&v}35cxIef8EqOUi^kU)HKs8hZ1kW`X&UO?_rAilOkwNv zz!BP&DTvPBdqSCx`L$-)M_jgzgQ@CT@0Uv~SjI(uPspKxwaWc%ls3$d!Ss@Sup19^ zXAkIlClw-o|8*BIGL6VQlNQ@FFn^B2gEjm?gNRK~uEr8kP<^U^x%yfv5;psGZhIO9 zxjY^CvQc6f@hoKf&2A>4sIT^i?om3Ck6oShn`ad$+-#qPk=Q#hJup`=WY-1TrG_b* zm~UF?vCC>j9wzW{-_47xWPz>I!LWC}6JSJ|mOHv(8mr@RO5O6ChMI3~eyuT+utaHc z<8B!Tuk1X7MLo>7S}Lg4o5X@2`$tk=B@IK0aPr$2=L*Oc-_29-5dKV<_~p`lHJhQ^;@UnOQt>8r7R<&E2e-LTB8MoI9>Qj_fDJ4({1Eg3Q)# zsn~y{AGKVSy-pJUgv=BV1qh5{{RF!Etaa~(p`>@`HqevWk>p--j6zILYcP_HYXA#V8^N65c2F5uz$g7q1hWuC%~mc! 
zLK=cQtM*nR9yi1K8xQkQK+~7=wP}SQe63S^KA{~vk~=RrpD%*&NpZ!L(?uwp`suPi zTQ#CIG~}!#03sBJ3hFJsM@y>0<#)B)k>3%kGtw^xkiPNq>1h%LopHDz-Rwv~DsDa7 zOB_gu@Iyx4r?v^P`SBQ+cwYmG`pYj1%?aQm!0)Y{gaiGs)aWdFE0nehJ8f=n2hqf+ zjKm`99OQSP(FX zy-&~$jjdRopRLZ#`-P(gbk6r5^|fjRl_sr#cbLA&D%&SNalQlG4i-K2{@evI^L40+ z(giBPlbDP`0A>G^XCGV{h9bj)_Io%wD1G&5%G}L_Nc)FwEBgP`-~Pt&A3Xm({#WPO z+}Lx?%X$hpVv8mTCKF)0@G06+c@lU;dE)ai-27Ls*XUZXa_7SoT)ncByoC9(v#S-X z`H$Cw>>Ghdm*zY~2+n+P4tgcSGM&#L^u^3vO%_T4q@KyyvXmzm0+k+|5{eu?~Cs2|^4)^ed6 z#@>GwvpPkAlE7aX{1 zhdkq8%Pqed*T#a+-WTQgp46y&Vi|081GQjcfBQ*PeeH5Sj|24 zIVk&r)BKVW#&b8BSl1``pa8XQi8@=Xj?r6m@Xkmd64JaUdV0?Q;xHLz=dSNZeuN)g zAqAZ%<%8_x+H*xHz^2sIY_JYmjG6j)y_AxL&HeF!SH7+br3052{*b<_M#X*3TJtJ z6>@TK?%c357NyjdQ+#c+k<)eqojJd!NLEA5@0%GwLV~)<>e3ddU;difYuf}tpF(AZ zW7;A3fZHW+_Fm{*kQl2cb$~c4)Q05O2I{r^Em&0#TtXFG&b{sgKgS7Ce&se`YiMJZ z^f!Sl@6|{JuQF&+e3-dqAP-V5|Jbr|2*9Xitz&p`3uuHh&8F6OLQ)i6s!6>YLN9(> zTvgNs2dQ^X{(RXDZn7^01u);!gw2C{FMnhDVr5sG?%^Kb=vt+Ev!Vy$x_tOAzU>FD zdxYm#vA%+PQ+g*!YyQFe-#Gk#d+yJs5p88a6!Pf7EJ#$yWC(BnUsrk!td&O zca^~gPlvDelzvDYz5MRSuMxOrq?}Xh&Vp_06n_>SWC73qAd8`R7UaaMM&)KF31P|;1ltlKxHkuVRX;nd_L;+F~_ zC@rv%e_ty_^CcE9d{SMnd-fvyHdnRBA3~7_*<+)^n|25&Sn+0&!_dU=LGiW`5eB^oCu(e; zX4;SHj}S`8mkFpq?3tl>Ml%Yy)W-);qEU8^RXdwZJPK!={}fbKh{W%>kChiDAzWso z>i+3ch>9_vy#BNumbNTBxue$(El0ywSw89lGlit$+zBGgEUZn}n!$WHmyZ`FoW<&I zX&W4r>U+T6##+zuOB-lBJ5|$@SPyJ8r8~EN*TG!d?ekc*6e#tV*K%Z}f(?7jhhyXt z2w>ll-V@mhotqpwDVUCzD$F=!vyTY$M#2y{_dxwi-sjfZJz(1TG?FK#8yIdkc!zg) zLmo~kHa5E#=F^>8I_-#1yfoJBbZG#p_ld5$f%&B4w4SSlEVDrDc>~=X%ftW1`+v2s z|9$@d?p&Y!fULnTEKl9-JC)8p0y9KSk(wVPpw7h|ayOXE#J|0#ADlf3le(d9$8Qlq)`*LCvo#tKQ18W`KQX@m2QO~R z#SvuRpy55GJ&uaFB|qC>_?MM>72>gL8u{h)vh%u6A`6F(5*sHL@@cr=`U%TB!h6>_ zuj=c;d^o4qT3>vD+)S@L7_r81>pbro`zQkZ4EA{Tug3WLobP_QCQO%1j^8!$tsCiA zX4>6%Yey+6KNY2daLBCds$N-bB}iSTI=v3T{BnN%&9xHsFef7E-k9i%oJ!bEeP}2_ zI>B#JlmGp52KX2xGe{M07|*BA%StQ z3Q-RAd9FY<3a@`TS9C8AS`w5#uOIq^)Ym=sKX#%W=?m}BA5!Q>QHOKy$Vg$x^NCE~ z9|kD=PDxi52M%Ids|>g-t02bC{CVotGDtYG`TO->H!&l@TN|)lo*jl9Pd2H|BN*TF4)~d~ZvdhKxi{C|!u;!B zh8j+&Vt$_eOMoL{y618Bpv=n+(DLiL-`m1!Ab&Ea?{&oZd>21ghe9uO9}-hyDVgAC@{lA+Z4uXoCI$Jvz2`Y+nKHO#R2kxFTPk&%_ep~tT1dHE8 zkl~$&kCvo?va3woLJJc_70Pfrn2+aw7q|bX{$HNGP{bQuD!Pe4y67yb9@d23mm4xXsuWweUt*eD>~=ec69^A zX1im-xsRx`gR&@is2df1F(1&$8Aj1%NvXt2ItAOvP~+#EKt?eg zkvBRx>P!uzaF39hd?T#iXM572`F4}#c_u_h(C!+6{b=79>iqJKT!)`P1rIpctmY?BIPQjr z0%;7z?=oL(UOU!?bZqHw6|P}?AnNPc?-{iyd;SC3cKbfc)-_IAh4EttdWMTd#vmBo z{k{-?ALH}!8$!T0`A_tKvq1aZ{Kw7wrH<` zm|IT+pXa|qvXG9mB2^&MMrFbs^(xFSI6jl>m4(a>W$z`(VZPraYNOKoWZ)?7{rT9&(+)xcgABfY1SJ>blat z>?FX(u2`GTP@GplYs7Z`GPZs%R<4OFV$ygkcE|Ms|!a#%emM- z;pr$WJ>`>-XrMyD8q*sm4aiXH3WrQ_eQ-{B@XebA5)?g6CHNbXfnAL6>MZ8x`($}J z$Z#zMK87aV+fhpewd~KX_a9K8^w*gpsxbu?-rZ5rY`{_g;*)*eSpV|wA4{Kq>rr6o zWx)IMsT2sgP*c5GiUxD{12=_zrU8GujovqD1~mMA9|v=O8-t{~ zIZe{9C$Rey5Oj_9VD~dfQ&C7wC4>IZ9dpUbdeC8P`W6uyjiQ>)g|N>xA$(Nd(^2VO zRDX8c?C~Ws+IRZ?)98R<6k~oMF77Or{|kJw@vX$_WQtQGsvQGp;&s^KM;-#grFS0Q z-Q9>LT;}G*i!uL7z0afAQ7nIYaPDpucE5?V%r2IR0uxBAg&OM6B)Fd}9;!|m2jfqg zNjAd&Ji6m4x|BM&xJ57RgAFxsb+{Kt(R%p_tT<@$3^e+{kPMx#`Jmm6D>P zY~!aVi_c;8444r=bD^U0K_%wPOgg>` zQfXHf;WZiXRZec1UKk&CRXPyvUjPKpquW1-`#`{1$sLb=CxOS-`K`Y8cqj;uv@&Mv zfi5_A#d#=5S9M9ppZOPm1SB1?5DTd+WX>?LavQUDV*R!nc9SiLXBH z-}51xCiFeIs1zm8`)*2G)*)O~$@J$9cqIP$_R&7;2sHOuIrd3#BanFtT<;S|U=`;c zrF3lwvVBT74eq31eVwDt0TEQlYczM;okNAL29Y0)<}}ck)~juOK!^4-y2`hku={M7 z-!56i`c>_QWM-ec55o?)v$1F^4H~RQA4LD4!iJo~<5vu5@NxgCE6+F4KuhhUcD>pF z;CFi)BxMjmUurb`_j(faWiAcU@mQaToM2|F)hN{7tD3xn?Kf){<9p5wkAlHlf1#pC z2K?;uE}6mN4maniW(8XseA83yT%Aq>%6r~iKE4rH8+z=p+!H1w(kG4yv;VVx|KRWc 
z>oI4sD&zA$8mOgi%6v3SfupuAETem55IP~Fcfqh7#0}W1PgduEPT#9Ldz1ksG%x+E z`q7CzEFbADJ7GNTgv5UDvshiPsIuyNgz>fv-1al~P#r2@CSc8|8C1dy zH7`ELy9QeLZ@!#&#OlqpnX>afE#M?_ZFuuc3lz-kqfM`B#_rE>*X``?M&P+%H6=b> z2RJor!q!9Okdhm}UHe8f@`>OooqUS<{8xCM(-eA;QF@NjIjp}}{;J8~qznbI_CAy% z{i2|n`~8N528^%lGH26&hUGO4R!5JnYDOswyyxq*Qo(KcS=G}`&G2DfqVQx(C+LeU z-M+@x2kslQHih3ILY5V+*osU56;l$UO{gC_r7RVh8_2M>^8SAw|7 zYC#+y=GW5-+j(VQC&JfVie~TcLG|Mk61s_9XpZQ0^Jr*2rbkD7+0y8W*dj!P?J=Kw zhvnw2hFD%Z&7-vCbsDz5H>_d1YuN#^&6j>J3>86(_Tw2gv)h=Dtz*rPx=7%e&DkvR z6(H_8dpRSn8~DGDmhfY7R%`X!-wIc$kQQoxmv=WA0*;){+BDt?PPgUX-HB`f)AY}~ z`I_q>=&e9rgC^!{ITLB!hxy!(ZBpZyQl&uX_@@A$re2`5Nfwrvwt&iXVXXCwMsV0^ z;j+)P6XIS+8P*u}gIn(*i8Y*9U-kU`X`AbDAaruuFIBMuL~+~_Yqlj1`5a-2b3+Pf zi2qE$v$a8ZO}ApV#{lr_Z&Af?Ofcq7 zX?l8T0*n`r6w@RBHGlVPA!S(OCSg~b+UZW#IEchv@VYZR0Uq1KKX|+RbN+wD@Bcji z_xSJP#Gmv2tLOhd@6Y_nfn7oMw}zqOSn)vS%OR+pf6>7nO#&lhC60xJ7R(Q9I3uqT z4?bDiwQ-%*NcOm|+xrHr|J?p!QKl)@hjM98vU3jwiCuqFlG8>*`#4t`UCXdMde_S5 zv`jiuf3av`nM6auD*uhQH;;z;egDS^C6S1TLS;{~L{hqy6d{F@Qc@C%OkX zGA$=tj`_gf?PZQd4I(vZt`=5?ZWMR6TB-O_8&YggIihnO=Yx)3460i(-h+R9?M2B@ zWT3T5wIMwYnzA&qA?YLJZ{iJq_2Cm}m0wvi&;-CzxA4Y)u^c>8G=FeJU_P*qjx4uY z4UC>rbzz;u`&y9+#)}b+uv984DZ8f`OsfcECype7;+@A~(O$KviRQhN*Mx}tMk9J` zP7k2|`RU2Y!a=me^Ii7V_I@m0cpPk5)`K3atMFR@jaCL{litTRu7QvpmBE^>M5yc4_hIN9f*FM~e65LCH%$Ecn_U?974$wk-XowD)X007t+Z2s z;mo#t1-AkeRxfzw%&r;~meTFJoQmrSW08`oUb!f8sr{O=NG>c#Tt9UopcA5-ghr2+ zU_LtSm0!ok5t!f0roS}{>wcZ)TiJ4~53-uiE;%J*T%&TN$6cpvyl;|mVO#qFh`TQO zw!SUF>sF>LqedI-ku_UuHbw#lJPUZhI8Y=byjvW{U|SmBuJg)N7(F_0{5Za@`@Lqc zXWsyDHBBDd9!Unv^IYCNU#OsQO4@p~Xc)Mb_>xbK^nlFqHB+J~ZD21kzqoo^D^OF? zvIRDD!k#eo)5Bk|u8)^@hu`IXNRm}4^k>KW45oumBhAWztu5Zdn714_`Qomqy=#UE z#-C0>zP&K1`mUf(sUO$|vqHR6x*@ueuT5;Y4?@MIZ~55{L0aDv85fUXm}K35D9UvR zsAg9v(NR<=)nJNXW*vv4H^zTG#yD2Tklp+nJSV}o;pWT3F_;I^$Maz9It_|{Ycg60 zO~Z%zp@x2u85q^KX{%|R0Y(V*_$4w8Sqf?YCEF zP595k>n|r(y?*qk|4+aEe|!8s+^>^-fCOmgq)31U*8R$QU-exc<7A4qvdMcBz?8Ij zyYx^z<~f8ICEu+^VXle&k%x$ADSGuC&!2$}qu8Tswt{FQ{))fBz4+egfrF@DHRWBhu>PKTWw%qq z7z8}wd|bL4*8%?5zdyQ!^y7x=a)~A1A+Y}~^Kgh%0V?~(J-Fga(Odxc2KVcoNKe~l zi<0C3I==Bs=8xU~+Q*zGzY8FtpmhQ9M=N`g-eoDK7l#N)_@!Xf7T}gwi{=Evu z)47X#kCNfG`(Blea`^u}c;3jEZxWcSTNxs@jo?0S)mRIzqwuB1D~deBJW`qX)O!05 zAVt09+g6*6M2EP`s&-XiJ|4yMiyt0y?wvW+lZWReC8c*)BSG}ls!99SI(SijYKQ9# zt}n4X+J2g06zd$lvOJ9IwS6nu+w}&!L8bG2;tF#;Xf3ck>9jA#JaxmU`+jetuKvpD9tnze0T8$bpg(%$;ybNyghC1hra@%38Iv?^bGsRwi63VoieX7Feq z{4%rH1$@$Bk9uE`VZ2}4lEs(;S6llOvZP6%Hd~-9uFwr6QbAUZ{f{}8*?$;J=Rcn`+A;)Nw=MZb`wm0>Bq`?c@e#1;F!IX6`9bZ*;$gQd zBe1N=q4jxs9D?-)x&x-C;K)G}9+7=C*tlkk(0n=#wpQHypy+{q(5KncHSG2{0wZ1*(n|$^=G|5&X9L0Sa=pRO%A!S6l0yBxexC9 zZE0{NioIL<;0%aXQ+6=A&ft2^?Ae+NfA$A|zd!wR{vQ9n@9$&!{ePdQAD4N-mRDUnM`V*y3LHae~KtMVhpZ;(9pgg2u!`69W-&K~3>^qSA# zii{riGgMD#4Wos(YxV4BsYv*9$}_3ExL&>{?YaB_6@{VV=pEaKv3^cSQ=cjsg@tgd zb7piSHbRuw+unMV_UIbZ+C~q=;LvS`ZdO6%kNo;Km`@$qZg-s0PK7Wzrz!XLKmC6C z-|e?|_DEOH46sgwJ$^qq1uAIoyN`RPz>!PXqG21x{YbmmAC{N^KkvmCKMvz{GqgUp zRUEHBFAvg=K8itZH5S*79%?~)aUW9Bxq4B@ueR@hr4oWVEvw;b+=0fz$=0{vU>~ePgaxLZDS|k zxD#*bi=Sh_sCVzhCag2(9@1iPUz!51f*XZ~w-CXg%JyrEax<=vsq%S7Re)u8h#e71 zfpAutVFyme<=j?g^$35bT5(hpVS03q)BeUeQDupDrMO0Ra9UE00VLxO_&I(^xX z>M-x)fBm4d(8Yf6P}$vCD3sHpfO?W)NMQ}QGzu$e_}9i4z}s+Shl z1XDo%!Nry29bGWt;lQ}}3g*SDr%4I>wt?n_rcch+c%SfgDdwxxFwBnS@pyh62F1F2 z{DN3l^n%afUtZUHq4LH+Wb_gd&L^m}s;c(GB**)hOG|iv&)>SCYs(m@Ol)S`l`#hW z{E`J$toBuil z`5an5SS&{%b=%y?Rjk(+d5gy-&U74BdxRNn=$?S?4y)^i2c}{5>+=B#7PD|5M#gjF z#u+%Td)nvC&1vvUF7^E7It{^1#f)a3r$PVzQ`wbB}fAZJQw8|E?lwCrAW=W`6^eWc~RKcx)+6I)Ql_FkkQ2F#yx>RD^M6Zg-jbY`Q!rJ-7~kbw#2Luu=&J07_;08x8SJN%Fu2ZOWW zDFbd3pj9xR_9%K3{JbuO#|Mvq!%u%^8!gORWGMC66gC3A=O%Y6H;+KL-qGzVXUU)} 
zT6^gT)>}+-?$PgdCqhU=f%z5o0cdJ}Z==dXg^-kXu!JF4I%!~tR4$9tXwVD~qO!3*qPcx1CmHJiI;0=w_HX)ky;u5k zNq=AZNMFDH9_iQrX{6u(PuKrX&l^mYFSZ<=fQ@?&O|DZP0wd+PBVL4dFl`W`u5HYQ zCOzvHSNe+3+)p!n4HXeHj=ef!G=f_6630&8 zA3+OOsksimR3v|X>%NaxL&!fV8w*BA$thUhOQjwdxrIGk6%BWUV+zz>0Ii^g<;?)u5n4q83i@P+_)!rJ)$0a z_)$8O2BFSRS1t5RVm#-W#a{uq@1ve;$dB{AJvm-3-sedWqLZO z>$b`->}o@{kwKKGlwM>kXDDd?2=^~#V(ohV75Be)KmKly=LHL=BfbtDNKZCH$Tp=E zNiptG+qAzJGK`bOwnp~B>rl5@K9dP}IC5{~m)k77;#;>Tsbc~r#V>t6J39h?*YY`f z*zh{Y?Z>#fZveEiR-SLZ+YYixjnMBB)S_HaNGs0E`cQ_|6!Daqd~_ z4n^#3a6Vw$_v;D$;G#0satP~7Czi<^Wr&`E_6^2K2hwN3BJEn)-MLBdIh_9K&7%pd zU$^kIUh5B^gg!6QpP%Eao4F0J{v^_tOQq$GL3DPhS(g(TJdS8+yM_?Kh{0J?)2ahZ z%NEu*QoF$P*h6zo*8w1zhQH;Crhx2=&>0CvypB6%h7I!fK>HK>fwDo2dx|@;U#hbo zhP01!OgtW>><3|Hx4%{qU66gOhDJC=2h93Cm>E+ zPjYwDB+RZ8YCR;3f2WkX*Flm0J`bRc+KoJgvry!xSJsd)0~7T#eKfN_>+}tuyOia; zS&;EHPcptT1BH#R>$V*IbN#=K#~J6N3{J0Sptw3zvRrHi)MvJtY}4A)m!xLFvY*)w;P`#5djh`^&cCVC4j6Sx%*3I1)^-#)|_?fKt2Vw@%cfRbn&ae9{^YUdXa@x$`68K~UnH_alv#e_Br-E$xPygUltbeImy=B8lDv}!S2h-!@{teIxhoC+jOw#S`|nY@Sru;+4(I5|4EtLIs^K z%nZZv2+A{?uQL!j(Ce4=a0cQw3D|vpk9CH(z(}av2>iGaETo2?YbJLeli+#+uy9^4 zuS>@K)%IuJRabG}U6gt{?pG`{(PlY^qO*Z5;>@eDgBYJr$V^ia>V~+rr^Sc0he1zs zm9=I4zw*!Cd$VR!5JBa23?|?__ngG>O=WoBn{&fT!eI#B-KF)K8&ToVEZe;zg^fPhCxu-bgk$-2{ujz?0&06f?1lY)L_RD@U5Djn8)>$ z+hX%O&Ua&6OsOl+v9JUB-^4dZp6>(0unndr zYbfxx=ZNZtSHqCoz?-}Lf(qGJUNr8;JhW`7*g4T{190D6J0~Hl9}M$Nf7zU%z#f#U zwK@{(YtLIOFJnF8hlVdN&ihkA`abv1rtRavshM*v?ZhNpRn87RF*^yGxtrxj%cmjz zNr0SI{4BH;RBwGIPs4qIaO)gAPncJ~(ofhk0oAHhmOiW>ZR@>NBQp=@>Ae)KeCKIM ze5b;-Eqw;OF#Z^M+d=x8(F3p|7+;g``Z=Qd7Bg)>J_n0oq~M6<*_6% z75y+sC@aVP`?uk0xPL8r`&p4~(*Pox$gVQN`hE$=qn2GxP*LjL+z9oXBk1Xur5@p1 zBPe6~{ocOQRFwPZGf(#BA*3d%DbVROfWo4L7@v)IV4lFNHGd4&pDedpaeK^xt0a9!0?Ub(PBM-{{vKJF= zsDG#RK=WJ7kI~d`44fZCgxl&mB5Wk2_;%B2Kb1ZdtLSLB{#HBcUr1>miY!GIBCKb>bA2AAb`6)>UD6Hf%{p%# zP9($*Lh6v`T6abwXj~$k^Lo+&xlRmgqG&`o^hPq}0|OcN?*?dI?N~2RJV*Gf-Y{IS z1IB{AWAG&KCPQ8DpWh>a94@Yi>;LIl0nVBCNl=wDD5Krl1yQqEhn}H!sA4oMd%TSC zPUckYFv~s=lnk&;HpV_4!FN_n{BZuH{_NlfMG}xwvK0(j7B()%Yn zp!>I+F8a|2N?S%NSFrAaAg{zun#Tm(Nbp-EKF2;74P0gtTzG%O^l>@+Eyj;|m9aF6 zPD1|A_i-1sGOxO}L5j0+dCr?gFkT8_<=jL7k|uxkE1rIE{r)Mi zR)GqS^~Y$ez9ZnePTfX@lM0-HT^*;EhalE{&LE{}01S66C5J~60huhd%;pfGAjBl0 z@8@*AUZ(d(g51R%3D#@;;k{P_7wOCtX zeG2;L{-2J2$Nhi%KL7N(>59+nud%O9gll5D+`3C*{`6DVDl4X8(V%$u6JYn+-jPnsw$ls3jFnd37N%Lc#+R`ee9S(;ve#VJ? 
zJqPB?$s}LS;K1{Qxnd@`e8#%}1EdczzU_!1Y@$!@dNxwqza=SrO9O=3+8W+{PXfxX zhX)=OO#<<7XDn5A3e?PF(!yi@JoohP%c69n>&0H2$I0CFd}}fS3DM=!>K((7`rTy9 z@9YtbTg+Wkk9|H?q!}mXo(;f)`E1*yXCW9=>}r`0sYEnA4bxksP9z+uDW}ze|E{!- zR5L0GX)r%$P*lcwz=ki^8-sh1jD+Cbi&xuFn&an^+Zt&ovQt)-#i0>wk#l^|W&RYf=+lzW-b;a8;uhPL??WIy(fXF*Jr!CQb9yI3 zsBqqYW7n7k1tJ^<=Q41A-P@RTkCOKwoK*~H=D>UvyK{lkV>;u|acW({B22-lb>aHf z(c{2Gh~$iXJpw+Qsv9TnjsV+48u&MiLUQiJrk8^hsC)Kx{L~F1a8`7uY#(Zd$O96e zy@KkYUSd_!nW|P`v9Tht4|GHJAy!T?ClcsKzWIDKauE8rc7OJlAVU$4hnLH>Avjzq z>EdyA7%cm^o~de4z^Si@`4-08Rj=mq^msiCM%f~14xgyN{E?g!VowFKdj3JhE2E&g zYY)dc%RhKK`tu)k)?jdeWd^jEJFaVNp8)wrr^M~JF8K73)bHb!lW^OO?YEJ|G{jOD zx*P{5fiu9O)`dC>kK<0ax*ouI07>iJu5noBJI(OknWNbM$FsL2tr+(Q>&Y_Bcpazz zzth)u`sdO|`gQt9KTjX&=jr1=ou^;_r}3X&_xJgY5Xv96ANx~;JEb38z&wc|k+{!G zwNPPlQFVjMOYpg5?-Z!$dOPswQ>9Y-m=9)4!$d66KGVVb zHjnU;>znrvLL9&2mEXnKAE@I&-r1!AxG4L0Q-Kl%nCsK#FSAjhKz`mjJy}U|$|-1Y&oc`t-zQ3IyDf zk1}ObA#THlU41*TKO0NtttSfu5WmM&AY}~W#r=_9*7ykMlb7EW-lW2MCh2b#4Fh1l zuQkF5?+1=gQXgF)41!nn-Q`uMMxd4<|Hr$EF<7XVu+ym;0=;{Ng;lsuAQN7FO_)Xo z1!bFk**PRICRq*gxMH8TtmosY<$X}`@tEEvO$zWm*|3o2G6r={n-Bk7`cwA^-ap|c zhxg-LMlmlwss8i28{8|(NZLOQA%_cYBwoTwC}YPTWMF@@Qrf+ELFWlz_5J)&_ueG1 zn}zU=7va1|%E`uLY8*DHuPLVGiXy?+&9dtU z5CwF%=4H&aVmzdCf<;7o4J3rU*7=+hkIZkd?|A6lh!*q@P)-G59*fAmq>A-;opgCx zd|}lPnoGTt;-fi?<~Ms-+S*|rUDO1z;NcKzO0!x%=toAycI?v(LVZXpb5bg}z8(1) zEa;!ptwIJ5^XzU^VH_A6fAKkicc?;hX=Qpc1}VlrcWVw0K>0#6qC&p{5QpV=u}fYC zS`MGFdCN0!>-V>oW{d#Snh|XQjY#lW`FZjjF#^xOk))6P=HMf2eXX`M84+)2zm?1? zz&Pbog%x)zkead+m!y0R8ojf>@~J`tk}BFZR$EL&=S+^cmz>17KdtfN*nuJBLRe*{ zoPzrrS1&xIV7>o`UP&hdtH?;pJ@3wwnm(klI!fa~GOmXdt^4tNS3P1#zaGZwnT_nF zw%Z6gq=3Et)8vt<0-$Ys-1FvrA{uRLQ65Rby4EL%m)#5qNX;krZP;`NDxO!o^f9>| z350bi?fB4$gymO7hUe!aH))$|o0b#6Aj{f2jHM1(q~@;%Zfu9*=%}3EsTDvB4Y%bW zKY|4B5d&NE3?zK|@cm6Kn1?HDV!<94hZyX1BY8+UkR;(j&NFL+S9d;KFRSPWqlG3n zS=Rt{D*cLk1R$Jr zAiU4U_{yLq7EwQdNV$$r*HAGCv-oBj590O0nc_pW>H@#bY}X%i5h2zruvqIvJy3p^ z-u`wUz*tM;V8r$^V4G1|88U4GJFcUw-^aSZ;_WY~FPnNH^59Tq(3c)~K}c+RG}w>( z8eaRs0rR!af8soeaf|9S@q-1ATVa9kk*q@)0S<6?X`(CL(B^4!Mf_JEm~&ODuEq1C z6Z3hpg2*U181fH?)!;nR=}67m!WnQ{nK9>Zn1R^?QP+uEXkd5Gm`d)Sg0{rAMrUgp zB$me8d*ZrCVnP$Q^T9Eg@9FLeV;X_67ST?%yHr?UAZ}N|^;dOnSw4?<*hfm9Z8F7n z0%W-}<+?K_;d@Khcaw0O4{Q?dXJYshpMS?O{g|E~(8vE2r~m2t|LOaT{&p2>8zzC6 zY>D>w(xlaOXOY3#16G;=8b~8WD7_o~XWxajNHD+Xqkd zp~r>C1Gid}F;DZO4`(O^30u@_GX+pk#$k8=D~HIKzn!JPXWfUg9wh0{8?~d_gBp9@ ze<(%vWEK@C!7^}JyC+Kf2@ff&nM~*30d{RiLl}-IS&d5hJU6gew@Ji&|QSmE}W;H zy)N-}z8!dSr_>)zd!yXVTBR4++Ys@%>@zE$ev~gJw62Z`>wU*HCNBIMMuqd|__Q=J zZlPAXAhHGHneE<}3AEw9h579fztbIv?S9Cu6E_Qy)N$+5{iEgJ;ZP@cb1M;6S2qoA zxHAAOG52;F8{+)_cKDrRa&h33=>KwWO%tkhkWE~=+K(pg-->;lhUb^y)t+2k7$+(B z1(fG7zi2^VbM6MLKftl^5`4fqD*MjuBp$_nLvn(i&(@})td+$H-PQ&$YDw%o6@c~S z3JtvubY+8Ov(N5RR+)$)WIU0du?!Kp1f#y(EkMFzlhMo`ZX+#xmcH9l`t)3 zm5TXR{PSTOJ|JRk6m!q_k7%LNMKfdi6`1p>*2>+(`+@Tp&0Uy?5T_eDa6c6H8x>mQ z2CiY=ou#i&qeBNMnw~nY$X*Lt1Lj4e7{|;R#F#Q4UI{D?4rXgM;O9g1nSO^>I|Nf3K8h;a?iX-8SLasM>2 zVb=58*9j<`6F9NnbprZ!^{n_`8iPrHW8o)Bcz>}_p?f!x46*Wq{f)C^h<4d^$WI;P zkiw<Np+ly0WZ;ynhT#Ti3Jb`He%xhS+ykVn|8)JI&eLBZ;c#YUfqriI8?_bC&A!wWG^Wb>uO3> z6sqnT250MH+deAJf8Wl=h#6xX2nWS*WlnHL? 
zKLgm)YfeLKR&us{-xGSm$doS46TK zRkVD19ZIc6TF&gNV}EC0orx($Z(YpqZJ>TR{uY1U_KU*4^&=p*>-F~A8z@j~7}BOz zQ3?6-4xg@hRU@D2AgYK^KT;_?d1UviVPto6SAf~!QIv8*<))|IDB|0ttn+Ip70t=p zyPms>b-u*pKdJLzUy+NeiK)S+sF%(2!w=mKupHgqY4E)V(jLWHZIP}3hN9rxMN+XS z=(=Ogy1EjS5XRkQ(p!Sm6yi4u-j7E<8^d4a>;v2vQPwdMBZ8G#ptiXZ#v^|lb03N) zfSS)a!;@|$U?A(fN`deegaa&2#m8lVRDy>5QO`PvTWksa*Sb8>aL?q`5fVJ6uWXGQCD1Zdd%hUn&?!Js;;dZh+(0 zwc#7_^B8CerJByZ#QqV}mG`P82O)4}ljP6ceXwZ{%Z=c?K`5*oF}!sT*HgauNWWLY zeEDZs)~aa?L}xaoH0e!30u0@#Q^vkKwc($HaNTzG`4@eP*0@fvc8lyjTwl1k8iOpnL^cBHTO|Ly(%6#rr&Y<+4=eem?c^U6rf`w6s}Yi#!?g7F5`(cQv0f4lXe zF(2!4#YP+q6uwo8h->ttRes?0)1uAr^wDmVRhe_*#f|}FD%);N^22rWH#e21-;z+j z-?3Br&e+%Cd9rWGOgB>Z>GFR~#Cjp88Y))5DMgktazU5NKS4Kh%>di>UU>CRL4VPB z4Cvp7{`VSY4t1CGkAWKdB=3$r!yvs^CdAd10^8rWZe%@AfeePh;}pseNY#EfDpDQ= zGtOfLTUv14u=RVirOhOG&JMlQIEDMh=j+c+?Zi6uCq4_9lvZJ0d?RnwSt3dcsFj~8 zB_XQ#t{TBPtW)@$Y4JYu5SrgTJI|;@MxsvpNzv2Y$W6ceH_fyW8L%=WPyP%#$e2;BhXXYshFu~mi+&k! zVl8!j3V~8uiSjrRnmj`mER0oeY=p)9IL^u)%9q> zr9gYFt~d5$>b_sblg4(CP;DIH81i>@k>nn?a zlX=*)FSritj5Fr0>A|{OITasCF%96C$Ri+pum!eegx_v$B7$S!7O!hMxSz>EVfcD| z6a)sHO_Y7c;N7)5VO9ELkoQoen89}pnoh55ev~u@o?=XdFHc5+ur14(1J`${%4IFQ z+sI%#`Kh&KZ~%PYaXanLB?GPblG;1xVQ{QoEXa2rfk4B#g#8HLd?kEk}BC!i+&)>qt|Rd`=-dm($RHdKr# z>JKDVf$7jpsWT(S9c_@^7}wYj2bsdon&N(BXG!|TWpN7jk)AhHJBsymhXwpB(y))U zerkNw#UZFGh?shEe;6_j{@%`T1pC5kF0!s4!Sl$>LEh)*CV}6_nA|>r=Z8D0O8!_c zi)Y>9i)#*!C8K57yv73x6#HywhWH3)jWZiDYUi^K8y+cQN zh{(3aUU;UY8U=;OPWUE&0L$8!W%q`=;On~v>jmsj9VHu9SAzG4=@MB&0?TBeRBJE^ zKCJ?^y$vg23)mmz*Cc6;V=r=3c`CmrYY3H&&6V6dF@lsU{KaLiVcew35lik>!-!J0 z|D{->9Y3PBvFY8vo7P@rS;PNlCtBw!yEDb$fC!@{JJmuMXYhSwar zraCf=P2-oIGL~WpndW>EnMJxBqnie>zXU zP9LA1JgpTPF$y1leo~I(rGk9+6}H;rxG!AnvQ-QF(wlmZ3OsfF1o^up@6N_!-S2H7 zO;19bk@>9tEXPk=2YC9ZibJUn8OWO@woeYArdQn|@kWDaG+<-t+_ip`b|uhLdS^E> z)w%e&B_~3eHA(7tf`Tb zqp;&^Pu|&`RB)9S`0cj^`!4X4lwE#SgWAqPJE1d$C_(bm+5nyolz)E54<{|`!?!=v z%!dW{Gpc*ubzGpJ{NHO%@n9drEEm5GAJV(Azwqxu^rI2co?Z;G33-L=HJfV&OB-S1 za!lUgDl+WNly5Y;jCB;0b{|aj#C4)!jq6=+u|I~6RT@h!UYE0aTBrT-eCey_bI1_u zBHf7K2(zT3Z4I1*@!zl>nA($rf;_{B!TZ_1>YoFMLwv8Rlv5ir)sH{7%Jd$dk8&PU zIPiMSceP}tco6Dao%MAtU_6uB$boBY84!8wR4;GE8!?qm>-ASZFy|1> zh-}4q$WaBs!BQgBrb-Q_&62=g_jaQgGuA2CW9r0{Q4LL9X^(R8KFqRdd#B2^a$s6# zaz?4C70!C^d{S254@~SaN-w{Wf%B`lME#Er2ud#2K7;xC3lds8&+M&**pG#g-UO^i zINWh=`AH@?1tmQ)m2E&v@8s>i<#i)liRruhab82X_}n651NMQ>XAXP)qZKXK7}Vz# zyhm=7p(9SbnLxX0F0sOdeVoNCqSo-^eE%t^TUSYXQ!e54u)~y+dMw5 z`wjClj`*+E}Ed=2|l{>-`D-WD|qujY^Y4RB4s0g8j_051*d zkCd|)kIjH#k#r&F{|k5UcbwDX@xLAazrLRSIqnDa9eY1Hcb;DrI1xmD6S0StHg!X$sZ6wA$_IbY!i}Nl!_I4iTzy4=4sN)m=CZxo^zjw z_W{H;q*NP>qnTZBoXYJ-6!xF0X7&HtKXbOUrVtQC&Ngp-P8EuLxk>l2s6Lv@B57zRRLoY|j1HAdbbIfa?!>w)!wZMcgw+ngELZ=!t`k(KMgW(tzu5zcdCF9lK73R*2-f4YAC5v$elU5H0Cc=fw` zRY)shTgmyT<=q}@9#>D97Zj<2UXIOFis*{(sOqw&J%3? 
zh3?igAmQC?g=!P2V7>lR;NEduU)O_EZ(ossFNvmT|Dpqm_g|}m++UpTmpEYau&$S0G z;=XGwSy?O+@AtPA@p&9+0MlHtw=(C7A*kST^YGm&WS&I6{+_oBEhwzwmO{PAe3)~a z?H$bjC>MO0?cRu_+_l@1`~1;D&6n<{!`Wb(ec5x%P&Oo(-Idt4rxJ+K>vkH3*Ff4a zj3BDd#yYURCFJ;VV@~x9uIumrkT{xyaZI@q@{v4CQ*cEck(DuCOxq-??&9W2Pc>izj z?`B;*Pu-3A)XwGeD{Yuxd)RfW!4wG=1eVJOj0unyw~IoMD1iK|r87?yGO>S)u#81Z z6B53+uaY$x>u6ZJjkc`AdYG|$!d@*7AU9Lry|qb$$nTZ4!B#GO{Lw?UUabd7`5SA` zZfHkt_urLIO@0E9I=9zzcr?wzs@ws9){x z$ka=MYhC;w)>V?hb$+kzzkJ_yORX-NhhhKa1A&{|jIlrci`S=*nGeCo((_rjvCfMN zW2UM(c^DG%IHFWi2EZ~%V`Q=&`%-tLyqgaJhKNO@K@#W_GN~Pe%N_qb%Yyfh-qZj)}M!@4y z0;~ViUKmx)yH)$M4i+jo&tDzIzO2%g+pi{Ng4DHntNm5wkj7pu^5iM@q1~myd*lTL zY>LW#`*6Q}kAW}mEzGNM=H=$tYt{`*VtfH7@>?OS_nT^@FP;xF`iI)FP7ulO#%A}( zeu$KmByG{}2hJbsqS&#nZtb}*%&dOgmrj- z#9RzX0tOpO#a4k*pa>7dy{p8!-HH#IOQ-XJ?ZRyqIWepgEv(GFa+_uIgZ z=MW#ws}m-eZWbsJh|nY*aP!sJAbi}fT>S*&qONXA3us@5^|)-wp5{Iyp!ag3Te)EZ z0-fUy?cFy8T@Nz&m1HI%v&vHcE$;7&$crEgnQ`F1tW&Z-U=-#|w2TgWW1W!s+tWKP zj=)tu&4V>!6R>|aiJx18hUZ=LiUOx;m~~Ll8Z4LrZSvXb=Q^{1nA_#VvCe1ud5eY< zeKSCdCPT{dzt;!;DenJopU*-K?PCU*&l&03AAsu|+M8pl&V3q$0D+h4ydy*yrR=J- z8^tKKkP|ih^{WT?|Gn6%z%r&C6{i^AFxSMoHb-gCtzu$OSXHFrugOxdC48M$p|t>` z+e688Bb>K=Gb-OHhIQR^(`CJrD9~=_@{98)1zzs_a{D}q0_xi|_PI0RIL|)o?2dV( zQLGZoikRoUE!ZjGD8}0v6Ghh=nNNa7U1(I-Rs5S5R{m-ossV;(a{5495wd)^ml)KC zeGC1LOn$>$`a4<*5ZA&*s=PsU=rAg$=3RM7{ z>9+a=DUx4I>@eRZsMTc48_XlF^W@&S_HYlxuC3|4?1Afmy%%{^+;c(EB&KtYAr%;Q zxu@@QD}a>mLNx^sn;^LHR<6qKe$dk?9}~syU_1~yK6u(rqb~O z)?Zml`*P-EcO7u{wWv?t#(qTS*O0G#E=OVZl(IZL&r+r(-Cx@Ep#F;t@7H2~lG+QL zr~7RiQIpW7Lp&0%vF?A$>e<4p;4>v3&c|qqg48y=P)p2#gtok2Jh*?!Q!E+%tiBk0 zboSL=qGdzv;0>bCo(eF&SBci$Ylqbvn&cFfdf}>vsgE{)Kir?wub|$h0NcmKW0zlz z0tmbp_q;a&@lH)ww>06ty@f>M&+c*X+8lT*JY*bDk*wMwoL@$4>a0n`I3LSji!WDd z$HC&&2JHiQU;o1L^D)!Q_;+*sEHkz|3jFh)?zxuZAlaSc8WJ!G%-%tDnwW3w$C|gXni@tO4B<$pRZmU~my zV%?Usbti^X1u!2U1v0lX^n)$&IGN>GDX?X%U1SWdKtTir&as>>L{!(FJ47L&kj;mF zt}z)x$|hHGS_?5>`MAzJw*mzj3s-RS8ucN0^8#aH3daAM?(v_nxJV~ z)t9f244G@9??*?C!7}3=pOqbBU^)BR#7Y|b&F{KYtMB;%c!X%q+R9Bxv0be0t#BVo zN@lu#lSD=?Pl^WLyu&_t<^tg*%OoT``zUKss2k1kn~RvI*WiBSuN zO6NAzm*Xr zPGLOe{Ifhe8f+2N>`NY@L9j}m?H(S?C(%rD5dAR_0X0 zF~1;i)}CoC#!ai=@X9O0_{%4AUq^_c6QKBWo3qo$Daf|yxx+xf{Gx~CYG2bCP~g#X zB^cp+pjt^j5c3Ho+h;g}I%gq&aL)PBI$AvP9#+7c-SIB z^VsbN*k3Z?P=dFA9Raa1I$Sas#`}6sZ1mkbfY>~wx`#EeJ}$4SAf#BTqteL{2sJeIohU*h%HvCKBw{7Wy$)?0R6!u9PiZ3m4LMde5>iv9FK^Dg8$ zYxJ=34GFP`Iy!dab^obyNU`=C>;rb2g__8ZeN5&z>Ll*OI{F%hCbU$nr<*Z$?;`ab zB7D5hwMVo8lA5ZUxy;Dm*}r6ZnKT7u*L91{dM075ni1~|#;>Klak?9;oedgI7Ijw$ zb;y>bz)$&D4+>iAxJz>c`=M}a;%zL(8FY)V6zB~i;cXeO^Iv2CF2$l8pUrsvuRh$2 z;`3q8{_VD8jB}cHeth5OIOf}sODi8-z&O~$;#RbUCNN<~m%@I>q}bY+(?hua z#`s}IJG}!u19rIN7F2?8YN+;XWeF;dF`C{`OF&^S_B(mEb|KRYKek!+4wS`^xjZOW zjZ9w=n4U{S01-PdM_w%ih8OG#A4nyj_bplKK3fCC>YOhopK1Yu)cz- zSO7dzAAc1|)BvHYo2B^`-tXV2v(zf3K$p)FgEQ`LWS+RvSE)J%1RL(Fuke09p-G_6 z?-SPVB%iI+a=|<$wW0iMCd>!i;NoPiFbSqQvGu`)8 zI^QY4hy9#<8SST=cZ_0vrAARH%L%ZZBwb3!xabH4iUm(F4W1nO=|zc}1~1e6`6sw8 z{dZi_kLe>lzUiM!KmT|9($CY!zpww(@qfDC;Kc_$;R-4UZ2Yv=chfkC@;1vjB~yXn zA=@u8Xa_Z8u}HN-?297Vxp4h44Q2Hvzv527z7i96mun;j(c)RY<-t@k zGAgOwc+xEP-i&JoibT$?p7W8@|$x4SU=g)i`TWIyBoEfJt`9^*or7B&rT~yB%}T(Ekah8 zn_$z~HWwp*GGx6nwz#}^3^uZFeO{nB1WvymWq%>S$v&j9a;+}c`Nz4zy}Kf`ss-xFlZ zK~4AOM|+bdXd67eqT*F6Y%!VTb3KX;yixm}Mx=DY(s)_ZNStpAQ!;a@yoKN6Nvg5m zBD>+`sK9mO`U z{ND2%_|B7g@EYp>#)$qn*f6ydmhmjhm;BI)KELVHXF7L7zsIGsu4QBIBmG|fQ$75j zZbxtbe_LLh9=bpY`^Q16%=l+0p&q(vyh!`9VM1F!g!7TOe2Sv3k=C)AnueE94BLc2uZc;&mhQytnAY@#h~3a$=mgQ zDHzT=eI*ZG3^z)PJ!?s?`t#3UcQqq@Wq`WugZNaV>vUozndaeC#HUb2)x zURE8&Y2RRa#H)!?->zIRO|6;Y&aBWA#{7nz5oM8Umef-&`{cSAW$5=;Py#2`zoBZ3 
zK1jcMf%9Zfn~R2VT&y|E!qE!9%O9N*Wkvw~9GS0Xnt;RAKbyj0cK`w%o1mDTEu zltPpO?B?NLWby!w{wDC(z)4ZA!-U&She?28vJwm`(u ziSGjMFu`XuXE9eF^*eU8?hn8`2Q^nb5C29M|%r5G)hfg)Mcn+zxv~EJ@9Jeb zpTl;uCm8?h`_m^!q!&bgK9K8{9oz5Ua^iQ#|DUesSPMT@$mGC!B~$8!3J1D5$6}24 zw!rgTr;EP?nt;)}JGNi24i;CNX(p%RbLeoAir@)>n!`aCdmXZ<820mtyFD?kx7s^e zWKtt#c#`je#pz}$zpQh1Z5oRT%ol6&>%_bOsr!wo>ZliR_f4XwGZW+0^&GCetfyiQ z9pd`dqhEcJUf}xrV)WPHdh*w&Q(VjaA2oH|snFmog}0qaV0HAmQ0*O@hhSLEq}q}p zM(w5-J2W1on4$-CroRM3eR1KKr4b+&y4U^N+3qayK@Tx-A%9ka^<+Ws`dGM&sr(N;c=$s}B82ig%@o zwDYW}^`tEXZ70{UM~;$(loe$Vj85k@qno?kXCjsG=x?~(2mJzN0S^ezn@M1MM7 z&guG@HQu>%gPN4NT}iK4W;9Of4Lo!9MOTTLCNGw^w`_Er;3t>O(W7F`jR@ zPru{}69#uZocPcl^LLjtZ%mlq4wF|bl?^s)g_Hb;@9e|)76$j}U*+r;=%3>Sf&*;W zw0cfl0OnPvb!tX;g?B*5lB)HM^E#l_=<6Uio&)lht5mZ*QJ+t1o37qr)R8$}eyj2# z`o=$1^yP^L7>a&vvHN@;$f_#b<;S>en}tCtFYeXBRyAo`?Q_*IF-vmAWsMr}%2X?y zyT1vdmShPep>FHoXKqDfSu?ba@Q5xLXo3dOZx^mwp$>Uwa!8pN2kdTV_>^AhguJ=x zr=C8*_Px`L*Gr6gpnu|Q-w6-K>es)mALHxcY8zk`N;Od#-A&ZJ>+e@^u>EgjB7S&+FZzIFPx{DNhy7jZ(#fin21+sg zN3f$@HMJ&5XH?;MF?G#XI@#0oJ$3E-{v{v7!@5KM#5=-^0zq^3k>H*#2(4XLEWP0~C+kYw$geakH5vY~kV* zSmVezRp<~5QvEpwD?f*U=a2nEyB+{Axl3czY)ZjwUFo^B=?pli?_ia#*$#(2OgDXM z>wt>`IdZa9YzRB^>8~p-ZSbgO)7&|gED+x0@MuUJ^ZAAT)SM9C2quZv>qIP>pnLoD zaOidxWE_f&*@3z{3*(mjGQs}y!(nNGpTVt==Ceh0A%6#0TpDz|@V)21`^9uWl`f^+ z*lIqG-N5wR{zDey>zsKane&7>5UoA@AOPDbvhPa;D{o<5&-=BS;TG7Bw`|*-FI5P) zho&UXuPT5XlyJ(pjd{mC8k3eUsRb4@T`6^P1L#YXl)4C^9+CV{`GsaUf7iAmT={V; zG|kyBG=%eb0zugxj5Tdg!%TlHZ`=V4htZqKpSe&J{*iq_bnJf9*J<}4Fz0E19~@3P zqqR$=7iuGN0?cdveLbb0BV7;2FK;x@xMt8>UoquL;b*$O4I`kOi$W# zwwCf&Sj;jNY^20=_GE1hWl*PN^lKvLF{la2`IWa0*HgSBeMi>pK>xZn=NGe83aN_$ z7yUgPUQqrSHl04%nV`a%$)AMrKfE)$=Goq;f}$16g{ph1z$`tsYUk%_jN29V6FE}{ zXV?_OR-6fLvrJFk%|*Qn-^Qyh67BF(ZFt$smUhVAApG+W1Jt+9);VUorv<*g5bOMK zg$2z^#f24Pn?dC;{UXBwN?-+d3AX zbkuMDGdZ7LI5lYVK>1uJ*s`O3RGdeheWy$p9!2!2h?5F`x0?xyVxDy(&D9XxvSVax zOc~Uls<3nR#d5-h^1DgZa9&t)TN3K~yfb_m5{l2~^wH(n!81|!TFln?63&NySs3tf z`fSwM?unAOsc(YBiUot7sN)_!HI0)~gn3cn)>gS+aQv!cudvRQ2GH=AUyXxH_r?c`- zM07g@Tu~O@T*iihrS(M~SI|dtzK|cohz%D52W}~29LDn4IT{Li4ZwfT>!re-YPc7` z7B|(chQ2$oe<~Q&LPDX2expnycz4u8%68rGQ9?9CRB5bzK-cTs#>LNdU3=j# zj7#nN$b~>-2OXDeF8T$y-F((LrteSJvn)AQ5Eb7Gze*wPef-~#M<18op1%L%OZs=Z zq<^QE#~+7YPjB}>EyEPQi`YNLe2AS+%KZ3!)h{vdz4;#X+Bh?E!orh*%L)>d)P6|~ z#NLs6n4U}3AIEqAJCBX> zr)wzxh{YjBt1yr4MvA58!yKySjBD?CK0iumUiz2w&vSvJ{nEuIt`4Tub#|W8cwhr^KgE$95Or%a}H!4$QNxUaWip^DU)nOh>Go zxnQ+OcwSaaFYKT0-=-HZE}xSN0ZX^|1?oSme;FDY~d41N^#`LdVw&?f0c^*xm71%r~HEB&Ee^&Ys+e~%t8O?3s3BVF&^&K zIR}Xp2h^F2+`{@R1LMB@C34p>JHW-L%F5FN$DJ%igsLUmA?K`(!I1>i?JW5^Ytmcn zmmU;<`O~%oq8tWJE22M)bd}%PTkUvV!+t||P(RPxT6(fHvk@XEa@|E0@jLJH<>=9* zX4Idm+a`|nS*-7HskkTV9f^17>e_a~!|iriYglYJYwK#aaxn|i_12#5L*J`V%Z!N+ zF#a)L#m2#DZaZu-od4m{dW=u}IlE^5s~*spdr)l-=BwN5ii_LxVt(4BmirBvWAje* zdsIEuNch{)ZbJHx)I5*^tEY z@)NBPyMsrs26eiZiL^eGT3QQ%_p;mHilIJmUW{~}4C;nY^52xoL0!+%@2)7x53?gg zM5`vEUTVyj77>IUBDs?K1y0aq3n2D4bPs58>BI(;a&q!lDj z9XlqD<8xxsdG6A?I>9_`;m8|4%=-^F4%?aD1wL^~Db=qrZ?u``mQ3Z}$K~kyM7I;< zcqiH|?dt^*9d$O-uMY&A3vQgM>4VrFY3+!meXxel&DJQh7w%a(6nB=6-7mTx(&f;9 z{utx!`|l+Ty^%iHyUTuui_6%4^!e%i=w-U3p96h;^!oA3^!~pq>EqBPz1`CLNe51@ zYJpImd;Xs%qK>>pPTexbN67jy`K$EzC!nr-{_&P~sg$hz)`BSG0?Kgm=V=jFD=5pS zCFc`ss;SWFmu+`EtEC!;@7Efd)l%h0V;5>?;dg$KbI6QksINDwc*&qAixMl|ZCs{% zm{JsWuY8d80Zy&*Tr+85Bdl2Oe0WP>3nVMqKDnpS0)|t$-R_C4P-r}D#@9ABT-Avj z+`J3*SghY}3~cL!6(MOWr>S6^=9@~79+OU}=NG!uiaISr8|*S>HKHH5jyZ2vO*`y7 z=!jZDZO~)xE67{If_OE`>xV4^?Dss{YbJuaopl1Ke5D^ypKet5$ea{N5j-Js(k~no zyNu&Y&WEA@*?hqooM*NivQl{w@(~`^RUH*gtpe}L=Am11m=E6X-?dMl4PtIrms-Vi zz;)H8K?~JxaAR5S`n8G!W*!#H#N=AQRp3qi?NTO46sv}ClrZo1a_76V9WAia^+k;E 
zFU&WY8{&Pbz8xfkix+=8*#d4A5kHHLHNkb=l@mNBVji^G1{pUECP?!bJ{s|B0=v6| zp`0lUn6Jo<3Zl?G|eIA68UBk7Iy*mo;yp3j^hWsGO3KePYwB^>8ARMBg4>3}wc zy#_6)ul0J}?dqw2;W*LvP`AI?rk@#uf12us_(=27e?UXi|TVddK054=u5% z0L!#vJ&umW;430gA&Y)xtu2WU=Xs<4*F{J7d1@?}R59C4->n^1ESzA*{(|FCd%FFn zV?QnHlK+(puNg3Uj?bvBiUB)6nT6@1u9;1Db+r&L2Rt}$6B@O;;q2(QHB+|!eZFn{ z`%W*9FU4bJR>+w3K$wd$GYRzo@@BtRy8ak-$1RN}w3c>4q?gZd^&)I1HOW})aOeY} z!Ne%>{9Z6o@V?&Y(F?1;^_H^|$NC-g@#*_bFaO)}|8&29dwhDkWye)wJ;@J`};JE3^Y1X;?6TcUpRg8q zdrET-1NE#9js(jcPepw!XV0Bwvs%DRc68wAG|acM@4Z;_2z|A#$GrNJVqtt@{@f-x z2JB+yuaeVf0F(avgKIE;=}1H8)lO?B%&9ZmJe0u#>pt_pPMWp=OV%k~Ri+iZH;cvk zKW>9P62IgtP_JgwBZ(KCPuig2fz6^u7rY*wxVNSl@0!Rq@YRj(hCg}hw-#glzQp?@ zF>mTvKiIWIpvca$56lkxan(%F_wLuMGY-C}vk$cawY9x^yzVu|f2T{9z+lR*=031{ zw)XT@wO;rGO^+>Fxiir1$%$?fz=;FRa7wh0wMHk3=c- zDP>gb8pRmpw6hBSYCp@Ms6ou?$L>_P79O24ck&Zzt6675wR8q`yVc!sYwkyk?^%}{ z%qYZuz;xzlMj@qgee>y|T^}ivfOv%k_uf-68|zh}F__}_Vlf#%k{~9gW|e1lAq0Fa zH=a7N9vb!4H3#;!f>znZcI7plu*H3?f=qB1+`JIN`jXHNea7>TsLHd!PRO9;rCTc) z@0GlsaRJ}^rORzy7ZkGhnXkoupl`_KfSM_& z&p&BGAp_@qg&mZfd@pdppyg^of_4WOyGs>|cbGXmv0A$nLn?;%SHXD8bZ zGzrJ&pEsTmy>R6B`;9-&_+|gxvR}SyP?WoiQ~H8XO=LSEzsdO`@Y0p4m95PleGKBfq|H#o8P8m zUd}=19ZCTm826~RtT3|^jAO)9!%t%TzLbvV74-ADePi3eNd8zmhW@{}p6n-iExlmb zFE3%|(F3iMyBT~}#;(_R_2!M|KI8b{wye8b?*4s$`Z#pyZpvDBSf&qF2d|a*^9~mr zrP-O&`@29|W;k}%KsV$_H`+gQ`{(1#YrUgmAlL`3KTj8Z8|i@<)Da$Z8@n$0e7`II zsh_KU5#+bu*Pxw5W;{DM_QOX2vKwm?TmI(}F|%^dC|aI{ZYp z&rd=f5j9DJmO>~zm?b)bdK{i3Mg!p*90>bkvtc2Qi%#k}DfOcRbrs+2t1MP#14uoN zl)?V1!&@r_E7a4OWYT9T_<{*q)5@BSaIu7RZn0)Nc2Z2&RJL-GM$EErfOzugLbe1zQ#4AmxaKvDEv{Yq{0 zb&uY=J6gB{E+kdN1decF{QVyI&x$@6{NLqdO@~)$JY1OaL3dXQvmF#?xr;v5Q;LA9L%hpk4icwFB>M`@O45 z&>!<`@1!mq$9rpcj#2G5*3P2eGy4BnoS?B=1=|CK2M#=!pM>*X&k8o&%0qvXl?f~3 z(H{V8YX@a-pueWGtIC!Iy%2x3$hSVS2Nd^sZk*%P1*uWjO0!S5gKyv3sud3HP}x_w ziy!Olo{wL3%TRA;7yq}7ipx3R&{P}OgYyoSZ+0yf24&qpf#gyJ#?~DZcv4-|I1bf22Ax z4_u$t2L`q;=6~7Cg)EN}f1h7m=zZlJ9*jO<^mg=l>E(nx-rYhn==--bSgZe553s74 zT3)Z@2s1{?WB6PnK=qz+wl!OZwvYeh3mEPZ`UVd+{yZ&tGYM+usyI)zwa2fJ6FGbb4L}& zWy|*m@*Ohng_~NfCuGpiWQMY1$+JtC*KOJy+g92Eff43Y_3AKQ*yUaFLY&|9zp3r@ zYddlxkNsp{WF5HOxTo}!RSh=_49nUU)Iy(l>Z7NN8K^@*#kSR>pT;kSS&mRUWK1kz ziE=RhYsR4|66GCWry%Xg_2_~#J>A*PSGpm0(emi~M>^q1YWuG?^eIwrEG$WDX2Wa; z&D_mcy2s^xnY$Y~lwDPop3?>gm7eS^Lf^2rrH$uSJnMx&W+t1Dm|H?jEUqxjx zUU2LD2h&IGdZEmGLFjqIzt`vS$D_|jbBbQ3%ipc1e<$UC-GRUR{gV4a?hAP@NWCB& zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ rARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARPGrodf?1V9rMp literal 0 HcmV?d00001 diff --git a/sample2_2.npy b/sample2_2.npy new file mode 100644 index 0000000000000000000000000000000000000000..187d061a432d6ce6548ad403cc932d13279611a8 GIT binary patch literal 131200 zcmeF(c{G&q-!OiaEfGnP6ct&r6p=`uB#~%Qku51iq@<#xlt{89N?Agw?E5kiZ;XB4 zXE2tTVT?VJCDe00_qn^j=kz__b3ga}JkRgjf?2b&Aq@9iVzz|L{+N2C&s|MLLy4h+|!;ruU-&;*c4e+jat&f2eA$D7MuMSu<(-M0>X&!YnS;od2cEB$cHOqd+VPXRNT6+^uO 
z-4Hf99wDXQ18Jk1o_Ia&2aCQ<90e`?aDRulgsVLjz6NEzR5(M2o)1~Q)H{Qa|KRYe z_?1lX;dm|deETRgyl;N-d1(}itMlG}Od0|9r-55K@`qt3V`GOM_ByG@By8Le!GyTn z!!D2JM`2stz2m>iMxfZ{Zq@f>8YpeEym}>!0s$+(zmi|*0#@pUm#G20Fm>1dWr#8r zx{jF&W*wu!Rh`w!YwBolvb%IH*M1N*X;Jnu<+7>cb{+&Y>$ z1m2;`O7Yj2kf7Mewl#SKCN^KQaq5JztFH;y0^Jrzc^xhr_TS{F$T-y%A{kDe7 zv#p>y`F6>fvje*7f7P94c7w$o z7<)$Gj>4l0Nu)tIB{#XGPoRMw-ADIeB^6Zah{6pYslZ!QzIac84&S$O-M2f%0LG$U z@&%qz_&bjN>@WZO@%U%|@9n>e!@uwM_x|6%H}gaID)%88oZIFELKflce)(a? zL8oZ)ZTWua;PZ-2F6{$*;_I2a2YbPi$2^VssT&@~ypFaB>;&V;+;1YYZD2rm;<6E| z18J|-j%=^;AudL2k3#%IAV}F*)+ME&vCCYS4%g)(!pp6(HqF_{HhJy%g)?!8D!W&) zejouf0`%(=m}L-cy))MQMIE@O?-p)3)(nPD4#kIf$pGKStfcv95ciaNcdyU@U>v^1YxCj&qyz$&{(c%5pIRt=Yu5+cocotwJnx0F zmq#P>H&ekmJhh-W;NYXcUKP5Kj@4Iut{(`~WY8u0qH zb(=1G69_a+o7x$*LyEPY^MhOpkbN0vQ85jct+F^;#ps~3DT-&1(ho}sF+O_IeV{*A zFCG)q16PdmyFbL~ho_+F}0jn7C)gr|MpsFuj*c37d zo7o*_FA5DoLdLyQYM}#A(Hu9e8AXGhtqs?`FVjFS-NEm)Hx0C@ZQO1SG$^@ul>c3D zKj=$!1Y{oVh3E%eYVWr6gF@p{sP6|F*!zJ1H^$+9zJ`9)^Kqc|C#GH7&4g+DpmW?_gTQ&9cT?M41_)T3 z7UCUdfc+DgniCrWYl~QKi6;zL^Rg?1!*u{wY3A$h8lc0Y@^3|*Od5E&9Js~pLxWnr z%SDe}=uq`A!rgl%1FTd({VLC8!ahf(NS3R^;C%2T|3~f-7&u{+aDSW$7U!;BlP?+s z#=`H>ZucL@$%=tiQB2W=K@I#D!Tv}Y4%2eO*#dhNTZ z1&K39xht2e5w$$Q-)#~Q^+L@0FoSo9<>cFnp@-?9A$)44U%MJs$D}14eMg4K71>+K zUk1RPWj9~L>rq&j-Q>~MH4JQpyadaBCfNHg=cZ0GL1b-zR`plx_<56jto;XeJS+Y< zUto*HH{Vs0ZdD5C$q(_@P@AD@4cCzGcroZM`aR{@9tT8=qblpKc|od$iJy;n98^8f z(>S`h7_@J9T}fZl3=RF^lX_<b(1c7AgOG8o6{95jj}!@9QX#<@{Fu=%X8xaV6sI6Dgc6m%X2nUc!S=MRs8FY~Uy^(q^odyb5R!QFhEG|_p1TcCBfydZ!X}Dkrgw!&R za9a$6CtvZ+mBZM&=WI}hhxpXKsVYewfZf#E*LvcTks@<4WE{A1(gDQ{6}}Ou+l+u z`@Qch_GYl>2Lca`CM`4~;u7r&?wZLdU%-* zH%E_XaA4~vFD82$W6K~s4DsJ4*fIz^_fglBObo!M*{56jO$LE2P)(mm$Gm8wFy$>q zz~6YzOS)qO$eZPfn$e>WS3EfJjb{uF32Yd;SvCs3tRHTD+&>Dp%o;zvR%U`}RIY;F zD+aU`I)y?26AB8ATbakl|7E=YSL69#`SADcfA;@u|NNb4>DL-Xwb<+3Z8EGW)DJpa z=BlJ+`eAsqkMriEK8Q98W9+!v2TxA5Txccs!TPsganuMU z2n5T?30EVwR^2emZ|#V;C62l!ss}L&rFd2=Q_-qt*lB;1h6?E4GEB?ph;Twr!@Zi0 zJj|YLu#lo7{sG(FpOt7R`e1f@r&&L8zC=B)zDz;7jp5Sg&ykVs>cG9n_}h>`#`v`E zU@b}=f5dMYR*bl!wS6LL-Xh&!?PcpFa>37EZ-G`;1(bT@G^O{=pf_s3|9-Izx;9_5 zJavr>Ytj4teA~LgusFK4Otce7K66usPg}9L`&}p^)(BiLOjwVvt%bN>`H~sIA3(V1 z0DG2sIpV7GsVhopMYazOqzH0k%u4?-JZwq#lAHo&o}LYTR1w<9fzut4IS-BT&#SUOTPtW{rDD0 zRj5Qn^?|)kqOmA6K(z+d62WcyGkse@l|#u8r#RpLQokHL-8Uq%U`bPJ^+n=8r}04}z^V zVrB26!ASA5w^l|pxb#&wDT+jgfd^Axb{o>+S=LkX%8S^~4SwE;+)e{^vyHtrSl(7~ zd40TT1PPv~TBbfcLjw7lGj2bgw}RH{Tkmf=Hp6$>J1a-j+W=Vwh9qW@VeFH>>gaeU zgzTt`89LexC0>K_>#lV}L%v-@=$#&5dQ5HJi8&W%q`9A5#(=Q5PlXb`XI*7)Az#knc2fPR1by%l^3Euz^%0$*MM+abWz4ex|HY(iw$mD5X z-G|-YF4tJy55d&s7KbG&L~91T6YZdaorv}R>h*L8)Zg7SK^%m-2srJn!vyQ=zj*e# zGr{fhR)OM^OmGn#>kvFW46R8fMq|nUdVT*V{{C-o|Bm;+|6cf+bTKqL1YJYjgkvxc za_=}qAJt5N(~E}J!qE({j__|b;==OglSwi?>)XIvO{auatPpgsk2M`uibV;%jJA>F zY7{rwIP5mnj#_Rz%dS=IK_>}sYuETuk)ssX>?L_R8diQ?yXGeyX_u3mLb3QC{+ajN z)t!bK*dB_ZwIFKj8~6PD2V}6RWoctYJqow@;&3L6jG~V(#FcUM zBR0Dgu0dC6XgCp)TJ7jacN@=OOa~2ZyLBd1>{UNn6upxW`K%L}EBBd+T&Y94a((I( z#9$y=wy15FY=*p}nv1`pNMKrL@+DZJ6{NdN+sv+hghllkt6<*-WUFm%thk$koa8np zK24$_RR^(O{lSAMi{`EzH^4yZ%=;&~To`E2;pB3Q-2+JEmFAS*s(xg(La(tPuN8@| z8J)T@lZ4FG*0-sTQ~>dXd6($#VxZoC5~3NMf>e33nhA{+hl(A_%&~<5_5B3$rOQ2NJhdAgi8tUICw)(V_L1C_Zh?NR^i=Z0d%sA0g$)wCx4<{|*gIR)s-oWKhvUb}%jKLzUM0&Yk@KTY2^uv1`uc-yD@sU74*(Ne*CKrdz|2Jcb+QsL*lAWdE2nM zgv=J!0}4u1Fhf^;#4P*ZBhxct>39$5md_`)UFrj=52Ec?9@F5Z49UH?e-Q3_5rt}w z4MV;J?+Q)j|8whrKXFaa-{#fNI|=f!=DSulVfn==N!9d>F>qW_AZFY!4w?ScuJ4f( zVECfbM@fGY+PK%}*M<*6oLNb5xiNYIYxG%=^A%lmOBXbK>l#n9m 
zt=dFGej7fCE8L|a6~6*s2@Z)6|l5?=>UpjuTNayaZHr;G4(ul=a@0@OHxLR$OgYLw%``6{51@w@)>=A3i^^gQXa`ACd8xwoXm+*i6Ol!5Ab?v9!rVjvbZ;`JXa1BkHtr0@ZQJ~X>_{A5L43nJW^X3>fa zLUBoz3PXY&phejcRp(6yan`K;kJ70yRcyPitN^QLr0z&sLnub_7EOCj9PB}ENv@2M zg+Zir!0$@@;0W>~(TrG?$B}@@hfwC!7@C^6TXsTX1Vulyar^#l05S3;e!P(ELKg@N zv~E6WR*Agd@q{ePXCfuPM<0T0OOg1fc5q&E2_l{o@@a~Bi&VLfZFuvo5`x5M>E2)4 zp*r?RM`{}h45m2uT;9|R%^Ityw@JOwW;A;H>{u5Vi*krB&UOJ8=lC~u&sHD?Tk4Rl zs$sF5$&A#CY%A(jUU6mNT#e4+`cBlXkL zb1jfHu#@o1qZ8;t9vzKw-Jss9PxR|*hd9ldR2Khc2o>sS5IWQfzIsgK6P8_ID)F$g zs+kJHJ9*TSvf`P9Ez7J2Xgeo>BAL4X%dvmd7yf?S z`{KGAe-8a)o${y7|Hl)wvuSSS5(DrfVCF4#TZB= zSWLwH%n-8t_Sv`d5d#HG*#&&1(vhkukLuIPUQ}qLp*fA!Idtdf8B=jd$oAXhxnqrW zkUI3RLM5{Ys{HMlPb6qi&&l#NIldUGCSt|t4_c7$DH{trWjeBtJP`8z_b}39je4GX zcmmmeI>Az&2B|Tvue9+Hhkky*<%X|8CCTw#6VS;TxF0zaCmvw)fpEY5>pcw%QjOTEOqi zd+wuHo+lD<_C+*%8whA+CMF$g0k432FI{q4fV3{Jv#X#I7Fmvxeqpx%?5JvAVF?!hO!)JCXhyRa;>p&6Eg)CCMenqWe;{FBPocJRLvxc;yY z1w!g#@*EmF!Gm|}@W@;fm=`!ouav5X@9*^4&R%W>>o+&=il6I&qXR<+hOj!IU-%3= zrDqt*8{4H@cqaa%PW&e>|E?SVxt;RaAet+D5>`~oTE$HNBcA`n>7V=k+5cbNGN@@z zBPK&|blZN`Ro+9u2r%}sxXl3nRZ+oa(==#2_=acaOgG4P>MhIvtOo(7$41SMp8%V{xbIpH!`|;r7W7K2f5{b2e_uWUdILgMdCpA8 z=-;p7vyuefmKPQpUPi(;mE8$WV@=5W)yhWIhrKASC@|n-M`HRwu)jjHLH(KU!VYi99V=lGkgsBVl*Hb%mQN5XoqBc0hL~c-$`bm=eL( zE3Iai_Xk%(_&Vj9V{2m&!HGvPnhQJMzPw$Qr&NNxPg@XN&J`^eTmbbHC$8 zJDPz|gZx@k^1&Pq7(JcwN1`A3I_|E-;#Od`YyIU!AeKv9zj!bdOjp)sAE|9Z?jhYV ztXUL9y0SHD;t7^N$tJp*yy!*5nSt{`+3kqb=A#$nPeiFVSyPlHiqT@sypQ6Y5+snm z{d|mBB2uz^#CmA45E4cG%UQb?Sln@JjOSH5ob7PoAci)B zczq?$!!ylr_Qdxa%=Bi^C>p3Y-&_f{A3Z(=MwdW1i&f`X1Arl)OE5*}12`A5zhIB^ z2BLHQ)s-b#V6}Sox_xv76mslRN|>&Pa_)0?wm}&;y*!4Y63@{TP#dgEbkQG-_>}61oqO=qOtpWK=ipv_X1Xz4cxw^k??{EvcBho zM|J->p8p)*|F^dpZzY~?vBC2H3t5Zj9I(D=!E~|@RwswAFIvdTqu{kRK5&ydR`;Fe z>^*JK0=nyBT9YH51NF?u#~PJo$S{!U<&@Kgs&<@Ac+QU1^KS+Y#@_5l#oeoZ@TXCc z--C);uUsn1x;PVk_H93EDG+II8m1tI6{p4e;dT_gUhIL(LNy}nlRSE5S0>Wk>N_$o z4N$n`D7Eu&3w)9h%ig8k1Jhbp5ABVh!RiMgF1e-*7#O<7ediDZOyllK@!c4NPgjcd z+6)FEZR4`~%vlC3AK&mIR)Gm)yYoLCa2)~Fxx6nS>xMw3VeG!@U@M3&n6IeIjzYpe z^oc&Bt?1K7@5Q}Z{m3w>qRam101{3s*H78aK-Pt1KJ;x6*{{9ovFZgKX{Fn2-Voo5 zj4KC7hq$mh#Y2?|y4`!^$-AkgeZCpur`mt!1ybSs88t<}$R03#eUv@e4}0B?+uf7T zD?v)jJb}(!{itSKV)rW-CNeH53aO)y|3}>aall+=KK)YB1Y!jL*td&g1cfQ2I`QqN zBV)TEn{o9v6z+T@A$=ko7RyZL)GAAX<%q(@@|VTP&uQq^u3c@&@S!D(2(c3_N)prL z0*EPsO~pv}di0%5)jPiKviWNYg8!7#RwMoV`m6M$(@jI#r&l05*HgD(!VGz)~W?^}Sz6*68e{_9;zC1Oc$+`ZtQjp7bG+JqViU{(Ke z)1q%VOubc)8uY4!opQ<#>I|EqhO;CuP7&*)W|o+SO*8}B1V^>k>o$nA`DAT>q!mI9 z!+1%x6|nwnhuPA|7p1XyGsea{2Fb7G3&B{`x1*|D4DFZ2!LfU+w>W``^F+ z=jVTK6V}e;S6h$3(#DdwmRrLh=k%7GxDo5)EIRKm$NDSLCWr1k5yAR?pR6RivMPZX ztub}?-AAOgzsG8IXf-0q&#t#u=|D>IPIq6CD9G6+(2wNTk36!cT|@1ui1Yox$@aBW zL}cH4+@-Y_jr}fq{YE=qc9QoLY+}rjE4=W@haV3X>4Ln3RU>PabAlD0#z6F&l zga<(ARNHLNNjj7?b!+rzVSOs=n439fgRuA`MK8>X25$ z==Vv*QV=z%B-pDK0!iRy#C3Bba$Hk#{6|z9a{Rg@gm+6P^0Pl8*4jZrmanGRGzm?J zSUR_Ejz0q#AJlrcGq)PLj(*hnfX%(={v1{hSisilmF4}?x3dvLGD?iVSAqyBzc22w z&qdU&>sDq(B?A}fhVhY9>^#>b!MUTM34$^@3ig}kfJP0=w(UiiVfOxn-uw6n(5;N| zxR(3{{EFNIe%ld|fv_y0CB730uccVWNKjGo!Lgz!3Ke;4zdRin+l#6U{U81gYekH! z>Ft!x5~R9D*>(HrGGv}SOWd0Th!{}nB(>oY))#R3eNLbp4BAwG9Y!Uf6rk}ov8oi< z)UrAcm^1*zOZH2wWg`$vc3opTSq;($qDwAy)`9L1Zwvaz2H=%+_@#Kg0$7fB6>WAP zg0q(wr)=71pvv@oY})YwktWM-gwa9~m*$n8-mh5@UTx;6{hSB_-9C?apJR0b{%)sj zk~P5lD7K4YQv+iU*KF%GYJy>2*Ho9pCg@V|d9xsm^^<5tT0588;mP9{U*FD173Y8?N$|DUbnH6F7dGzq%uynd#! 
zB=Zt2OzZB#V--BfQi zr5Evr@@yJX=|qiEhH6=wO^9nc*?j+OCUVZVdMy3&3;bSR(QA8!499<2CG%o)85xn> zellwR_<#RAzdzf5_5c6*`Jb)(ntj8@5124OT=C&GcAoI=UUg2dBOA?%l(6qrBO&!m z`K>`ESbak2>$QTQK_tz;J?*#l5OO>ro9f@gK%rc|#?JNwNS8eEbZIyC{9d`2doQ)3 z@QCmoZy#qO-e}8fH?B1R!7VhZ`UDNa#%u(CoRhTTGik;m1sN1-GD3~Ij*~W znuZ3#8#S&mhf%I<_wU0I{~Z4bPZXMT&BjsOHTJ#AtW4xI;&o6pp&v=_`^hH6T89Lj zc=J~e7XwSmm?ii2Vo>6Cesi+66wR9bs>=*$LslPH7tU&QBBg*^gk)YaN?u1hTFlps z3>70CLf&Sfs?F2@F40O5xV4Zm49yVUxpO4q9@YnB(Qy9qF&`zQ+o|2xDMR7Q3Wk;O zg-B^V-$ezVS1@+**(uk3SiLYucl#(-KQrVcvHmdg0fB6R31NxTMIX8-S!EZ>=?zvO`=aJaowivVf4YqsD7T0Yr<3f0&xZ>P0lKEW!2I z<7&N{7!|Ao(zQ!xtoPKynEq#v&o`<-IJ$s!K(Ya9wm{clX%4}N^y7Cry(w^UYPp*e#fuO5I4GOSXhFkFGVC(v>3b~)J zN1&T9xuJSw;$OaAe?0iJCHELwaSx6|V`fM3Aqo}PIz=0g&zE7x!O#ny52eV@9Ij;d9&BN*bCNsv1_XW1#u?PA%58Or*Y+a`jcp5DJZFv3PTF5Y@PP@?UqNV)e>Q zwn17)Am1X*{up|A>Fb36{_cNt38c!cbWF)flo)*)cDx@3*cy{`d*6N^V~Uww>h>)-n)5yhisP2Ii{ zlu8nT%X~%1Hg3+f{#`7ZdcPruL$4Z%3Mm^8f9gg%EzjIs+d@TNKBbGo8Z<=h-7+lR zhUHVtaCa*ate^R0E`jxQ4ibHS;@Wn;_lT=Co#Uc@Fc=n^JPzMOgu=;-jqBO#K;FQ~ zvDb$Pi;p)dtn)4d=QCcaOL2{0ezr^Ks#r6`{XD?3U$7AhD+i1#4mAVG?{@ivRh7V1 zIdy8dHxGgyCw+}ykp*MT0TGlnUMQ8oXMWH-2hEx`t|v5V=gh~J_T9D zHG|-R$d~h2f2Q1&hhLrC3mQj{>KzUr0K>K0Wp^nJgQj-`l6w)Vwx}bNxMs*U)gLNNPDojF^)9%-V`U&hf-G7vRbppaNcb`-o zz~;54SO@L5jDo@5)vm7=hhd2j@;y3@4rWRZwon_fc?{+q>4!UsFnezQLD$%9#Ot-@ zB9=@e3fH*t7FBFM$NJ$aUf*8C!Y@;9f~cq}p3g{SH4OqlJk zsh%?%yHV&jtHbxHEy&%n((LW?ViaV!$5>b7Js9kt&FZskg{>0(R5fgFTH%TPN_j3O z@NnMC2q_r`3)@41?>)vqw|i3N*t$uqkH;=?+jA1iryV^T!pC8eTISRB!g3azBT54(dg`av2Gu?kx3EI*{EucNz-8cC0&mc= zi5Jydx(zfJTU@oRhaf`NdWDhDAQb*yLp` zDBz09<15A!$aDMI86tTSRq3nG-YTC!XPu6oKcO;;tj0wS?bn#W7qMN|^L(4_G3%L+9E-D{)CKmlp$($b=qdDBf4qj!sbsd#*xY9V$1r&JVt>c?rs`&F zou8`0$^DhB9yBg=9rG}40Hs=W@Ap`Jaky}g?AV27a6FXz%=JqvgwIu?T52mO`Hf>` z6cQ+UojUjMYCq)aWHsF<41mHn_uAy0ObD@h-rtD z31ExJ9Vh9JfyI1In2q8%a0zooWnyvjpFLiUL)P-$PZMzM>UPQ^HkX}RCTGTbq6(zn z@`c#fWgwDY^24h?u|978b~QO|GLoO>zE2Op@_R3jvM+nFx?g*ik-{KW_uKN~;5Mud zz!=p!D0;gO$?vwBQTln2KMJsL04ZvCMR+8r@h}f3XclK7JE1 zJxzstpI=E{gdqsZd?~_zcm!P3Gb@8#{y85Ad{RIfcbNd8%Ny+swvK`C!in3DxJJQ( zQ+6YvaTvaQubUoi>c`$oA30Yk0B6C+-~7`nk@U09j|**_P*8(f3#TKAKYK$U92#UG9?+^&2=vAjcG%i*>}CK(a*BGh(OrJ>Y| zwsVULSf0B<->fRy9eFFaiB(!)eT5SxBKD`!;~@+AFe_0MWS zai65eSD(*C-sS}khP{E5~l0CIKaHyvk_D(c^fBD)u5ZR?)W9wH;ZEblG#SpCoNO0|ASY&y~ix_8jE zwGh_1Sbwvkc0tV0mG?h>4Z?L@#k2~iVc4=p{%7u?Vc^>5Yupz#3~5n&L(@|*n{%Aw zx>t-qs?o{4XJ{i3HurU>>)J73x9AG~@OvCcLbX{Je~&=@{mUFI4|{>De%9vm!Cc5P zp`J-tu0f6!TRA>9Q&6(VQ~#nzbkw%P<~G5Sfy(ZX#PT_1^Z_j zQd)URFY$IaA`IG#XuPULRQ|VXI~(HQYv6F0awG}vx!E3N!LBn|>eKm^hRus=SO_lm zaOQz?fXCod(RP&Ep-ld2K7hiQD=TE0N07U1Vx5}P1j@bTnc{YH5(#oqt51)QBVGw_ zlEVBj^6G4$s=dOzaF2Jm;nIPKTO5?1dfn~YtvCfowE34U(;=!CX{L^tt zK6tZkOKcBmfR+u`EmW+I7ca;-Y`%vK12v>Ruxp2^SKKem4zzjy>Q0Y|MAku=BC83N{}==n4o?4jALu}cP3!m`+W{;IFXVXI zz5t6-qy43=#i)3_hw$v8jTNJSK*}J5#eB4dQZCzyws#bNKI53KGImN%2WAQI7@!H>c$0YJh>tN+}8b`*~&H3iG!^pk*K|>C< z?g;8wW4EHS9h<{Bv)b&^OXO&9_2Hly7PoeD4iS%o!BcyeOV>&)U!V2ZCsT{nB|k*% z7@KZGS|!Pr4;-6NoYGHu310#-Y`vygSWyBJH)ytR{IPjC&#t-(u_~Aq5_hVJ@j$lu zET=zJ=OYb2neL&}`G`wWID|}homxEN+Jn=eyvI%JkZqw?MYeS(&((g)1 zV3Fq@lDIDrP|)4{))Y1Zgg2CGbC{GuxTVpH_-lm_eRSjZ8#^kXAZe>z%0cWrthJj} zAf*{>t%P2kKZ4~y)yGcdKEnF`uGCC@!)|zI4i-x@{gCFuBC&B0%lBNpf2Qr}flwdL zjTN`C`Sq`T)hG92^E~?&fk4LUo+ns+ldn`m{jVFI@3PB)l4CTm+Xl<$TX{K}XNo8$T4jobfIzb{LF7IC?d0U16S$CVYZ z_?Ha#GYp{vw|Z*Ou_0{TKgpYQxU?0hk{4DC#%F^0XitSxQ7O_eD0zPz%O}IroTg$N zyRbP=QPIIGeQ28p|Di3v`Vs%T3xNWwsVJ4cq+K=IhYYP{OpE=yk-&pTVjef!5f^*P zOwZ+NWH?)_Y<}b;((SvqVd+o-h+LRV?e=VkCE3n--*eddd7<*iJ2C9K+>Codr2W|S z-Yu+-)=cs#U{~xsiebe20Oh_+!N5YfK2&5Wnx#F#t5jm@2*_G#IK7$sQP@ 
zg2%GP`Nkb>KsDcLlXdkNxnGTXG}8J}SYd2DxnuyDvNt;!r8Cg%k2h6s zQW&V_OPUWOZvau&Tia+%^rIkW|ME;-?0TQA0b9Z}OA)dEL6FD&B1j&+@l0F|J1&x5 zr}c$0fvhPpI$BEx!+4R08}_CkmXKuiZ6es*?gv?}G2ub9vz%`q+B1S$zP_ha630;1 zLv|L^xiJ((xalm{J%R@Gnd8EDvHv5`h(9c-4>@+fx^R4{4%s?>(>ojb6=qlIRn3OB z0qJ>+C+U0=XvtcCS!Q7G%UqsO#YiL4uo!qJcOAYNS=RTn=ZVyoELA}@& zSbl6hkYsP1)C;=zzRyQJ!1DR1b^hmwiQwK5&_QlWL@ZTJw;P?4k@><-xnpNyfN+0~ zFC_}AZ{KogGj1w}@Y~OPUSz&T(vV0n?Jh+Rid9 zz_|B_Ka121t14R)zHTSM`VVeYw+m;LQJb z*8P9{2zl|uk9~RsV(+EZO1>GxKA(VePufC|hIT$1lI%cL=eEq8SfrpmXC6EY^X^CCM*AZp&tdC-z8rIp60A=qKQXws zxd)BC30(NhMM8wcmLogcYS5Vd?3`#GHosEk{y1U?TR%NKf3Fn#yorS5)?s})CUg(a zm(%*j!Rb_9Us>EJ=;f%znhXA8y>v_GyCZM)1jxu=m*~d&DA7OJgEZ*ab=+MFDdH~~ zaQ2>;u|p&c98LQugE95MLMu3%Ao>A$Z9A6T8jH=D49d=oEKra+d=R}5s$Wc>fvxLL|O4WOID(^hj&wwe>vlqLNQs$mrBAXh~*rpHl?CPON_<-5t z*`|81Og_vNiOrWsJ520rIMj|^f5S#(?JNbuF0$*6H#sQi^5lAh?QMwR_}-FDrXNwt z_8J{Oi_IBr?tC5=z(7=SiuiHN9g^HeyY)!}$os8qjPgC~I-g@-OP@)#A(GZF*?mK) z$c=AdJckFHGqs4ay_eYvTx3qCBbduU>?OB(w-L}Ct*(yT?GAg?SSxg6X2Nz@&@&xvn) z{5uw$H_vVMup&ZMP>87>OFOI@T9(!5#^zsadlNpVmBB3GX^pkaXK;^mIVGnU1GDd( zHYMyY0)b1a9HzuZP&)a|-RXNKq|ybxf55g>Q@64t}KTKq;4>cbP83t-Uq4f1si&|Q?cthhw8k8X^>H4^cz{{^SPJA*i@t zI(`>B->h1QYCqdQ1lsSs1adFv&mgQ0 zIDSOtDHCD_#*PcJ{^R)l6Mz42Z_^e^lfLYsgL3?aJ(eq_A!g!Zx+1sSNsMbpl-AyGSJ2ZuoH{JG}MT4Hbv2(YYr z%6L}@j{BIqXx7Cr<-Kai5K3T*mJyZaS`PKRw_eTJVEyKbWa&BgdaxC`-oJ6G9Vs zx`}BD=dtUHWv}V?j!RL|@a{(v-M^@){6UuvsT#{yb{Mf}J;&BH)uzXet*AqB^V}CI z)#Jf&>3Qk14R!EoPAvOWeG6!bEV^Ci=>(_sDI0?Bk|D*Eo@YuSfs#Qtqfwv>S|m*5 znd>O9I{!q)ye=7dKm3q!H7Ws7{gwO#9&BDuJ#hQgJuQe>c{z-=s~uI1j)@(NX+^W- zJGYJnRAKe#;IEr%91u&ekf0Sd=iY3xakCD#uASsk*z_g=JHFSgV!3D14AOO%f6w>j zLj8d!Q!IvV*c@B)svnghz%`S$QS2K)}3sDZ#TBzW6%PTZ$fp-b-Tc^+9xd)TxHzGmc z)Rtd+MY0gpW>bXWrVQkMlqt#yaiA*tGn&ZK2tnm-@@>vNz^k=MC0VTxGTokTF5&Ni z`v*>hKJ)GYd!Lzd>#6Sl!P}chQ{BCP;}j*4h!jPGImw)qwvr;GP*O@tGNq`Hp(s=+ znUXn~OXevd^Qj*{PJZqij{;lqHuh0E`e(U-5&tC80oa3C{*WTB@ zuGawa{mW$+9`^y~mF;}LnaN=K_*2C9D6IP+f7st45Z8yCTUA?hFoAmQzD=&nQ?Q@x zt8w7UB(y2&howeLf~kW0fZo{&nE!p;I5L6?4!qx6l`wDl*wD!7bay;o9Pd)$=O2ZZ z(t*p#zThjMLCoJA)EZMz*-nBFpsWY#R# zTbJNGdiV0jlvFBG4E$ke6g!N3lWVnE|!CPiuw=sP|OGR(vsp*se#cm%1$hEcwf6?g7I=_BP^8)iAl*fgK-sM?9{OY zkh}LZILf0IHPJlx@E8%1&uD0m)!6~mKR-P=Su}{2xW7x?-qnxhKD)2q@Us)SGlj{f znl&K*jn{YUUCl%Bswc-T&%}c{kIt_|i5dvpT^XS6OoX~#T`z{tA()Xp$J?5Kb;Crg z-|oe@FaP&Bac+LCphA{svd~NchI2a$WL*kTa6SLIb9-x0a7wrLaw@JTjD?FSd*q?; zrS=<&JMv&T)cVXNzfOp15*R)H3G>lu5k4K8MqqwFtM2w}towDAcV*j&KFDr9zvPgF zagB=MZucE>@V-gXiFMrvAnv{F-J1LfuUlDCjB0HlFJ-dMWQ+t1couMjaiEBWdbZpd zgPm!-doLiH_V=2>o&y8G*)(}#S2!8WFLHYJe4&Ec841hL;$h%i;!Qd| z(gTtw*G>tgw1J)I{NkFOtw2pl%i-JD3G%_JXNSLFT^|q64xg+2kSL{8u_X*0wk^L!NTr;Elbe@~#;s7=JqWd-uYm^1H%1g??Zi%ntNS>4vCA-ZtUkJ_r(# zxb0&z1ZjQGB%R!bVUp$Gkx1tupqgBxL`71eRBd%A6U#Upzd8QvDaNth3Ea13qx&Ry zH{1$48jX1%ecX@6tZ7j4TbZKm4>k1fn1NAUtG1fP8DNAUw_iJ^A-iGych*H3 z)b-RCFrgWER$%^)_;eaJ-M{ME-9iINnsYb@#_bl@uj+UFNCV-*t&F~wQ_z0HFOVr} z3gUQ=3YXeU|8HK;KgA{eyxp$q>i^TP|KA?J5BF;&9U=kRGr7Z0 z4eNeozpwf(gK;v&+gW8i31CcGyjyxC4)YuW4U+CxqhRNR{_rD2v=p`Gp8L;1^!WS> z){K-PBtuHAE}3CQ#HxPS^72tmpbBotWQsbpMgz1;^8r{4B%tIa_|LtJH5dn+&>kK+Et2amZA&YkJW$Mcec z!n^C?AQZ7`(yp}*UYDQQ?L33)OUzGpon;usI!6)aM{&KjZzX4!&R{nvbzV$ZVX6m> z1=eSsb|sjnt{?euD?o}C%j+3p19aTGs;%GL15D+^%097F@Zna~Ax`1G$nk)K8AnGT z)Y#n5Z>AsOHCzcj+q*z9_))2!cqdr#Ir%e*V4kZ<+pcwfWZ)+4uTQ$r59U<@CgvDl zukli&^7WT`FcqxO<<4#fxAws=GmBloD-rypH-Ze~{hH>?h7`Eo+9#VWK?0T8LPZhT zZXl5gv!nPs;NbOv(@*bog2$y3BDd0dfsuGrIl!(T&X_O?Z|Nd|^?v=*1IKWFdj7DK zcmFW7mG|21D93t+?6OPmUkm|p!{OWyb5vaaV~Sz&9R}n1&!-Kx4Z-%EOWsl5!%#m- zihg=>1gts?JhE|qP`jyQ*yY*?EUU9?e4ZYM0A0Rrzv(GBcG!q}#{n8_TDwhPKAi^J 
zD{g&sU7Ussht*x%Ud+PHbNdi49z66A6yUG(BN7mTermF84#+b>}GVH!S$Tk^EH?L+8_M+{`AlJ zbNutZKac76|9PH%T)KnFlcYKb<6nPe?wIL=nTr7|&a(s%-*H7@%&!<&N2fJ@Rc2uP z2FXo6q#32j6iYXD_8i5(CZlC?L$GUoEfMp{1>HEnkP(u6PW$d4VJDh@M4LdRJ zN5aYOsOSXvcrL#FaTKqcLG^j9B6$4?J4`$NBpSKYnB6#jqy_23en?5@>_r{F+PcfQ zF)sClbe_Xz3aZbl60jB`BdP2wom|GfXlI^ngHjge%_?g2g?MBjK3?X>dvj}GgS`Lx zyPpQYBa}F@ZyWAUR+HOpVkY3E15fGepJTwN^WgPntTX2t*rNAPf&vi%jRM2lh@e+x z^R-2>8P~^@dEFx`z`Q%qmI$RlIIqaCo3jRLBV+1)p03 zd_$1cEad3hzkRke0>=q8x0kTqsmGw#2JdGV@g;)337pnj?8qoXC> zCnPULe-$5w*|B_X_piesSNC8GKh_n!v~KL$HU>%)TUqyJjzRwxu|kV-DrlLe^2IIVI)GN`nlX*20eNo&;1?vwvi`weUe;tAXc8wp*<|B~0b8h51*6Rzu&FvIxJPvEzf(Jp zoLZgba-ez&)bErrTbTTX+oQ*sO9k6Xu}P zM@mo|<}rJ<>|1s3b2R4VZ9>Zu&1k`KQPA767X@e5j4RfV(cGo5dbXfp6h0&qaceWy zv4}nqJ#`P)w>L|7Xs1(A;80o2{)QoBF8iC$=@|(bGpUHq#}bk8tNaj`H(2L|zd?5Q zw#VQj|7>-aQw^vE-kE!}xeth|96yBIAA;w0YD3xFW1z3zZ2pRC66Sq0XD;F2k^bD$ zagu)?=Oj1>&NS2D;JYZMn-&9LoO1Kd{M|}mNZkMJ;Xo1M6FoL$fcbC>QER`u#Pp+* z>9ee5)nqim^|Q_43h85MV}2&?6%g*9o7n zJ>+L=hcvAe3uytY9bH=Mi|hP1BHMPQ0qgC)CtP+l zAk|*~VxG_q`-k3KyMpz`u5TK()xJLpv#K%|PD+f!%(qVI29;4@yuVoc<}S|ncSL0F z!oOdzrpLv_5X|%B?aZCVeOJ5u#eIh@$DqR3UUT;II9RRx*kG7G22x`Oowo|(^H~W$ zczh<{%Bv8s)82orxAb@;dn~`J)W>-79p;w5alFquBs8*j(m)?6T@|6xpcx)T=3sq9 zQ@c(MGS&gKPe01#+w^z6SNd~Fe_#4YU%&nw>DT{hq~HHf*Z)t?>rIs}wj7^;P4Y)3 z*Q*YJfnw}24?;T_HwaMIHReE*j^*oXeZ^?*I7x%&GR}Xq?-O1=>_iinvfB90l1pcb8kv9k|H(86_Up1n5}$z0riAj4t^IY^Y({FK5x?(KeyU#hYBIhOS0Wcs{!axvcwLyzqh>4& z!xrLNr^MV*P?5`veTLT~>WRl05?M3|a(upOp=T1~InOQr^22=})ja(zINy`c^>FgM zNCJP&!zT>#8^A6t=Y*J421>2lF0-(=4cUbIQzBD(k)gCczv&a)zm$xz?fFmK|K9iX zyB(ev%p8t++jk%xsZ0Tzlu{(lD6g{lUk0h1zEKA)c*0iPQM>^-b_9pv(1T+=rI8rdtaw%%_Csq0<0xl*cuvbA+YZCx=` zC`IXASW^gwhd+F048%D1Y*qVWwl=uvxAVJod_OoTO|=}s`qBwylE)dMW}tneVdA0m zSujhxQFecB61$u-SfdRTuFX-cQk^2Q)4r_`j& zfedcP)HIy~iD1CssIG3&0mfwu8ycxyV0_}Ssk-w3kc>l;Iin~b^*U%qlo7Av4q3s2 zTY8}VncYCyAjUn#o;oPr*$+dSC)p>T3;~&%wxt=@DZgAhwfBx95mYi>D2qnoIwfiG zH@9XB5HGwBmL6>ZmGg@hXI^!}y0RCZD;0zAaAl3{H*_o9#`S~lnwCd41gu{w zvzALuuoq|_j;YvV9ZfTTqt^>?mZ|JL~g1E^?;q3P#AUoJY2sBnas&dUhD&-|^eckKu>_@#eZu4NT%e%7@(j zC948awri@-I&>hf!jkvHjDyJhip)E2HeBb|ewlUkDit|wWpH}&Yy_Dcw_h|589~9f ziZ?Vqr=mNl9+93_LrBL}Q*-6=AZi+NSC-t-g>|fUsyy$iL|(pM*{n||0^gnA?s_L1 z!8LD~bon3&l;CF8?V(Yu>p`{^$)5s?SH>S$UQWS_r@NZFF@93r72aK0GX>-wvY+CF zaNkmP>($e^p7TOX;i_K63|JZ!4}KlSc<0o5%P-bckShP_8<2|iFO{peZ5&BO;^Tch zYIoZZ-^F_(FGG3}t<|EIRg{EGvo21VhGBgFK}JCi$$rFP^xHSN5%cXEx%NHVP>Cv@ zv9;xJr@}Um93JL4tOF6m?|5lu7*2*#URr&ffuMn2pX|po5WAVr_VatJGrSE(f}}^_ z$ISo%75rSUcJ(p}s3!n3hjn>f66UYAzx1rSj{ELH)U&a_VxWmO%P|y{1FWIvBE}A5 zd_Ey7O-Y~|V%MD&8P*&I9raa~=Jo%{KYJg{noL0`mFG?X0q40FL{Dxm!~5Rcn--$> zL-6iCt=H6m3P)yn=ZYVV1OINPxs}J$@OAodqgyT3du#vg#1=XX{EEiwgyu=GY07V3 zvH}TaY0lz<9YesoYIn_G|*kzo)lDCxtj`qz@`iChH-lkopqTLH~A2LgOG2hb8!mzNd595Xoz1S1E)C|E2 zUHNy4I-vh;TyyxvKF|-|Xl%5W0?9qcls85ULtX<<-tucIQfigmw)UWk$=VA^n-3v_{-4v=vrweqpyoZdc37$9Z}$MWeuR8WP?qaW1)H9<8=e>ZSo2R1RIb^L+U4`vCfUOCRZRM?a?L z2RqHKzSZrTfwCq4m<_jQp!B(andgVU;{VV4>Bsc(&-bMt|J%`?-|uF&F~vAwrLtbL?fwH23X%O{$%8` z()W<4Z9Rqmjq zW@{Hj&SoEZiQ1uxQNQfzGR8ZZQZ<9k`+#4}&pgQx`*;N0TQTy%`IG9)!yn{GKuXE6 zUs&4(ElwO~*AfZfMypBhpX`9{-_qLXM;|C`8?9Wyx(oa~qI+m=6L2%$XOZ{{`(QM1 znuv1Z{mtr(<(y=UAM+?ONzz<3f$b@GRcPUDn9D?%W0`j)vvAZyUjI zDUgL@GXY5ITdE_t`@#A9#}~D-RCua8Mq}|F0q6CqR!SUH;CRv1ab|f4VqE9+Qkn)p zfA3OKNH`IY(NfE7E)fa?jpF-$4gmMs9wQ-)vu3i;dlrrR`d4Zc)eJLnozL}!%=^1o zf7{1@O+r2m*aa&1Q{Uq{zf?T&Bd&AM6&n0Z*7_W5I0O7<7lwu%W+CqF zn=7ipv!Gig*0uC$2Cru=mgZQWg8sSxr{kY-|8L*tpI$dz@tN%n_O%IhPDqztn1G;1 z+i(1Oj_bP)s;NpjpP

[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/sample3_2.npy b/sample3_2.npy
new file mode 100644
index 0000000000000000000000000000000000000000..17be947158892fb118caf96daef5c986102e51ab
GIT binary patch
literal 131200
[... base85-encoded binary patch data omitted ...]
l0S+gre0ewAo0wFMM9F0Vd!g$SEwBMw<%d8tjg|G`!L z*m}{Q6-w?y%@C7cp{j7D4m=%R3@%uff_u#L6jucSP2I8G{X?!7iBvI^0=5jJ+;y8D za>Qc!zRvJ(hp|y)uHUwFOL+_>5nl56CNogB9dr1W8Wm+cQ~w%ro`RA%$Ng4+Y)8xw zM-oPNeL8_Lh$GzeB$ zJnI*3*m#eQD3>i;BB*1iZRcgA_?C%&%y4|JWsD-RVA)H~Kd30{_V?X6b6CBxseaFu z3as7|v_;kaQaUo6O_#`jo&~&F?Uqxmz9{0>Cu4<~TC~{Q5>9VwLtMu%uR1e;)h9+{ z6<@fuqABrjMb@-RWLRiiG@kMTvd9Uq6K{Nlh&UpT1WzUStGS@OEgryO^KcjU80KT$ z&_Y;C9unCpx?Sk0C+y%`&Gvx30t}9&Uaq}a2OI;+ya5+p0tb)eMVXF5MCjW1mGfLF zDx7^M{C(#~#O$^A=YRPIn?LyF9WTg0%&=cm12UD!V87My{t+y1>RohA9I8ZSv9x%B z?+J)`_3Rym`2vvsHJvznAqaREIC5X;KF1t+TPr-51S&=+vZFyd)M*%o-+Y&Pf%V^=-LBX;qr?E^TPMp;hzx_B_NjHvgdQk4Y9#5A*8~bc z-MFTv0%H1~ocjE<1Uwbu59Fs}{UN%K#o3u3z@O8+(C;eNr>1>6a`Ji;5Nex8zV~4L zW-~g!Of|ZIza%^$@mM#^G+bi0cOZg76JgE%h#HuB$P5fTSp!j*tnWrwW7q%doafnl z`$5#IL}YE%2vpwS;2jV6?|5IH2Q0_o^6~#PF30KeD553goKZ3f3Zyt&4Qw4x=3c!e z>C8Xzz5F;sr==Hqtdk(4G@4j_VDdlxQOo^E_i~+do;1*4mU~YbOE4L1#UHtnMw`Km z%X9nJ(=I4Y_d&f_YYhslC~9PX-i281i9M~u>R@v$0;lD#3?n7uJc3Ft7473TanVSk zp{bFOnLUPegJ~^oL1QTs~c(}A4|Qz)CKNhdg<}z-LPN!+TMfM`arUO z-9uh6GTi;Mm{65F2>S-!S(W+*P;x%?K(Ss}^iN zMM0#_qOs@c9f(le#PcAz0CAj8Tz4p?4h+;I88WA;fWT_=?3!sR_!rI6n<|nJ^G%eA zWK22IpcxoOK__C#TPLfjKZpznzSsPwsc7cfPN~U*SSzV5J?r^;2FjNa&Z#t}BHwZ7 zq-#@9{Cu{l>9tm57W#9NseTTeemPahjS|5{W6PGk8;CHcOH2L6nGXJ@A5)LH6{1ic z6Ze~Zm8gc>uK4iUa^zIB$hF@u3i(gC|9WUs4N8_8?--g6F=u`~{(krzX6)&cQLNddFX zSRZ1yS}v=3A(*ka-aH|X)fG;bYuc`Cf(GrJA{UOYU^X<6c;7P$V!ogCEi%qUl&Mgj z?=_e!mCt>ie7FNKKi3}O)~W!V_nyaC?lyzLXvt$VS`7_>VX2gTH4u8)-f_nO7H4#x z5P6P%NSfVUxckK*=&9YE-F15iL>2_ad|aros^$LG)jBjF+uz;j1;apB?Xz69aR?p= zuXeo6K>`2Gb=o;v*glSk5NQ@`tgp4G=1yID8ze+4a}^9X!`Z;b4~@}8FneSwdZz~K zFHzpky%l?2_!SY~SF*#ft0iu1VFeA+%r}?t{}}eOW}7Kb2iuJ)w)MBjP~uVgJNyW?zC&_1 zcKM!)D4gq_iv>5LzxI9)L$cc&QZ_bM zL2;}Z+2ynD|qAmHf}0JxUgOV?DH=FJjb=PyfdfKdOYM-1&R$cg`hN@8vYaypk6@ka%# zN3}(6jKw}*eChUK5FbJ$4=E>!Lli^_pE+j4-ixGVlh3UjYeLLpDU4F{G_)g4@3hAv z0W|zh%Xvh0f|P%edaD@LFJINZt*~|&tN++3UhSkpZDth1B)%7%oM_?e9G)SO?k!6z zYFiP9FyOY9zwT8vY$TS#qudijJrHvH_9C_J})9(jT9Cg zctlSW0%NoLp>mU6=&*^k8Bres_OMg^;q#rq{N=VKwz(YqTeAr}BI3Y0gm2S>VL!0; zl5+n<&cpW0u)O4+YXJ4D9B!*gB%sU_OjTEofXW|1>)o>bFykI2nVm}14rKitGw}W)0)CEn& zR_JR_F>)8^1YbLvzL6oe4$3daW%Roj5`_kfgfYK!yR~dqo3{hs2URhT@ea_Gv7!k~ zcSF|x`6oZ7`r*ue(kbPeB!E9RBSb^HpzBiFW9F$Ia9=8o8PKOdidMoQ6JZ(%wp=tI z#bNuC%)hh$u~=_rJf+vD`Hd3DW$$fGjPCSd(FpB1G-6HozX?iUqd`vg4q zQ0k;;AeK^Im#g$Y@6)j-ED@&ej>79l4O;Tg=g(~u;|th+iMcPO(oWd8sgNcr{EP-0@;5sBWBZg`zPQC48XE(H1If1?r~j#AEuXjl zZ^!?uc>nwLYu!F>T>LQ!F$YIn&nhrs?%=jd&e%SQN~V%-q9`4VDd!H^VD(hb8)9vq zx?jQbLJvVm9<$<(>t+sfWm2wr@RA*lxlz4ZdC_%y_)Uy&n!WO zamRFr4ZpyRF5S1I64<(Su4Nbl+c)@ni`#noeky2s_1+W4>iEQ%-FcsGGN4fTi12q6 z?EBPdNl2KR0RC4;Zg2R_fLzHG^}NGWh+4bEr+#+`#09P8XwoEz;rjkX1}Z`0vcaW@ zeFey3Az3ZPnurX3md1y)_8A}5V2 z=HX(t5d1>LYwc(&#GFdz=<~<+jTHZSrSJxO-*zsI%Fkn9yYpMJsT6iTvfbJDWyJ_Q zjC))DXIBqcQ>U&4h?juY(~#0#xmf+)wD_|2R4ZbOI4$d_(Szc5zh}G^>qWMsb~>G9 z9Y{;d$W>#m4mo{ayRGoP|G)N6X3X3qLh7|S7d74i2zVN#z5OB?+&SxGg!5X#dfi0r z1L-QrI(9UcRj(9$SLtT<$6|f7hrXp)6t+Td(d_89%w$Jmz-D~qjmP1g& zrKIJhqO2 zl<>}r~X=z5;NVhY4EJ+vMI$7+n;k?Wxk;dtB>uu z?{Oq#0#+t9r^|~n;aR@@dxLZuB=PsV?vBIyM}A?e4rj6Tq59X8hG+yFoVJTBkAJpO(E z^6}sC{rBL#qG`^&6GVC7cN_oPGE`k1HF@Ad=OKGq^H zYF%+Y7{EnW3w6U#v8Mu9HGQ6*ZkCa(%9 z40AnAYhMPD zp({-BqrW%&Z-R$rIiHeLt0GLLuk@RA{7DeDb+i~*w;s&=cY(O{dv>R)x(RATni z+->R3K`_&?R5GQufpywP{^HBoz;Szqxi0lHvdyFzt}3cSzQOvck!cM`rFS!Lu{5^6 zPn|sBu(JRuI3;!)5HhiSZ9xm;!p*S0+%wf|D;bI>jx5f6AB5z)nVt?Nolx^JB|`Rl zH4qAWt6n@T2LI%{Qvy++fwWpqL_4b%X691lGY|Z0|IM?v9CiKBwZ=gP9uB}J(O$nF zyuFZCU0jf_-3mKCDeO|LZvu{6;%*XrL{N#i?^5B{4F^jb!nuqGfu2hiamMm04u?hQ z$c*U=4OEcv_5b$g;|Lh1 
zg`Zy|jMekE2yHYO>W7-1C_(dve%OQx#P?(MbuPUXt~pO=zzDRnc#f@(a=9(8%Z(j} z3HzRu-c#eSlJmj!d8`jEtXAQJtlt>qu>BUxh#Uirr5E;CuZ3(_Rn{;8^)tsW>69`-Gn{PWoP+IC{zSMSEHe&j zd1>YGNB(&p|D@^K_-m5@4_>`5K_hjeGlyv>Py{Z1Hq^=m zf&2|&B428d`NU-wK_C4_+#Zb}AL z^)iqI?eM6~Pdf6wQ9QO{ii#8x*0fGqV0A)ouP>}}>pMAEKCq55OdXt7$m zf<>qabob0B=1pSzNk>lV+WyAYAxPdR_UJemTKi1}?#1?}85@-h}nUd61iltqMXg#F-ZEHXBRc@TNT{nQTgzPTuiSI=vH(0;V?`=c=nj_U0 zk74`u?2HZzHURt@V-aT&>j$AxP+;mYL5aSo+uS<}OmBq^)P`YD7Bm*wM#uX9VgiC` zb0o+O@?wylcR-eo$Y7!US8#vrq7b264iVo7+_PnsP!j)e#{#nz{0(;5g}=r6t^a-d z2FvxC<>TewV~5NswUr%MU%R*FQxB}aI5f^SY5%5b&?}J^%jW$8o$G(yxF3S`Q;+6$ zCmAx3Ud3<+6+Q+ zuyBFU3NLlbtA)c{FtZYF>HviiY=qCmX-E$*Nu$ z`96>lWj}zePgyw)tM&nvCo)<*rX6-nOJs-+U_MB`ad539g8x0;y{Stb(DNnLK;&IN ze2Fae-t0_)o~_D?WIGD9s@R4c;2DBV3b}zkx3Rju-j+L@*t!&9*8nFEVFZ|(sXp&- z(x9;Hp@HAVF~}8YDXz9+fZ)sA`D@tUFIfGdto8u5pLx8QzwZbct7khIn)qyDVEdR% z;@tAaKqCE{$+`OskS8uWTz@_U{yi~AhQujw*Gni+jt}e4o4q`=VR0B7qk-WrY*fOf9NR6Y)@bXk&3({`b|wm zhLH4=!Xp!kBvf*G#RTFYqK3~7Vjrwxkk;&1tNZWkp|B^Nn%_#s;=t2TagqwmT1@?L#*c7BQ1lJ8yW%lG#;liu*hwm`Amw(<` z(!!zgDhgO+sZpPd)PacbhS1|4xrpOS%I)CwMC3k|G5NK(2U%R-TpN=&fQSWK0^RnJ z5r@cH;iJ5LC@sZaI_6Xd;*z7MPPSGfmfJg@5~Rbx^hIgY_PZ^xukYR26DKIJn0FxW z(D)Ek@Ox#fmK}l@Cv+d#Tpfal+Yldv{T&tJ`az4E7y4m1MthFazZF6H?Vt+X0o*5O2 z6SHJjTqc9*=>RKV`!*nH@ow8F@)g+1Ju(AsVz0X)EOdnc5%`Ku6|8^M2lek;Qfp+W zaQWguM(#@*)DDYSe6|{fR^8Z=om;Va-_L=d`(gvot@_-(4C|NN5xekVZw|JvEj%Uq z2eyCC{Ayvk&w~-LV2df+@D1Dl%YDVd1VV^_$Q0Eg2v0b%UDF@r-zwf+X zS7U##Bjw=3I?;Ed@Pjb9YkJ!lP}0_F^9eAa_gYd#uQ1JN=b>W zq)>Oq-}QOcI^Vvl_xh~Q?|FXD zAHTo$x<~hY+WRt_TAz5^LLo%F|TKy|M&jCk2BBvPy7Gf@rs^SgZmX-ke$Ke zwUdb&TIpK0H`*0pqFMMcid?PN7DBqq-(1o#n;Bq5JS^lBoxS6Jl~Rbz$8hH|qdfR{cY!LEtp(zvSYHlx z^x*e0PTf0<$RKW6AsTXBh>Ie1`TeJ=hCr0)8Jw z9B5$Dx{k(=`Je@Jt2UHG_5h<_fn|wtIp&Yo*xZ~+Ax-RpK~8D4#6e}Bn7SS2Gwsh_ zZ5{0*j`=owuL*RM&%tmUX*gJ6XegFwSQKC{8dx0g+-~Qz!fZ@smxvi8wC<;en?k?!~=_d z<-N%3^WAK4U$F;7C(OU!fqr0jD-D;c>ILQ5>(^3FwSab=t-a%1Eu8e)#+F=F3mx&3 z2iSy~;X`!g1#91zoSrhL)5`X%Dx9u@9J|`MI%qtdPw7H9P;i; zdK~1!dZF(7qKL57J+LD?<4Q_NC%8Jf{TS?Ng@P0HPRDH9Am>#}9=~e`@^OpbWUZ%w z!{&fd`{;l2Q<&d7v!$lbUY$}OhD*FO%EQGzTH{Xjek3L= zhd{Ke<@AQaksj>1(dVs&2=@rh}> z%^Pcp^(n4tt*mz9_HcveF&)IqM2$(L6w`?AR5s6d!2v>fTiMXDX^@*hw zD(^ViNhdDzs(O=d(@4e!p7j2x9>o1MTZ>UzNtS;Z%j~XdqHUl5mD?^5Y!dB_B(bhV z{VH`;R~qN}Zbg-~P6Kc@K%q*qml%RC?0Ru#dYNM8zDr&Zn7Gb zKMx<*{ZtKA72gAYBhJ=NPj+Qrc?U?`N;1z1>x1A5ttdO~KXGg*cwatrc?3A+Ura1R z+==%3+=Iu@3vr`(o{@bhhf|ahPD%9hcVuw_{~%mz6iF-|><4;%rylPT^_BMvie1DnZ==uAJgUov^NFBHFEt2K*geXQj>!L*RjLH$pEX4><5T^@Bkl@L5R@SnBiu z@0H{9gQne}CjDTA>~I(8<{s2e5~aYf=ibfdkM&~y?eWLHW9`V63ug>H&4px}9jk4x zWN9m;encR#r1k!gzbynYv0~ZqC(kYXqVYaBz@FA@*r=Gt;hbp z@ZMY^u4iYwZD~5JF}~DTm(T=0--js{Y<;lm_2|jNnKan?GHKENd3~^?OYl9L0u{ux z9m=XjdSU07&f6fIPpu!GJTrB&1O(p(MI3sP1m-5bIxi>uAtN|PF7a3d$UMLN>gCQ) zP;g<){^*@*$PH2ZIf?ms*{49=IYk4HTdfAF%|lROX0TKLO&>%|2Ks5GbwgzBS%=8k z4rm`(aIy;Xks`M%uTyz@LHJIht&4Hd2Rg9nxWAspxVdu0KyRuu0}Q_0nfy?)7PoVN9f?&Kh(>fPK8eb7ke(Fevm#*!ii-u1{ExUU_mN|j0}yH&X}?-v`2XZbnEj3!w>Mi94YKaXb$HHH^=j1h`~9nJUmE87 z*8jToNQ#E@y1aLP;cwInifa?qLVdgIcOx2k>HXk3{Q49R&X2RZ_l)P+4uH#RU*T2D z7$8z#_GR_`5tzqz`HM!wAN|z*Qu>9k%$I-~2^mZ!xVbfg%lg;jWQnw!xYsm|{#{0{NoqMC^>37J_)NrqwSV$p0 z+vP5LS5Qg4iK(D9;`~^iE#;hUrW3BIY)ROQ@xCmP7l}e%3y;I`$#LYb@84_t!VvK_ zoCjAakZz*wJ$m8$&PF1%H-tU(LkV$E6Zl$l>or4xJ#Latd~q$n_poM3FxFRW z^X9GYD{Fv)MXGWQFR&i&xgbJ7paxF9@86q2uZDybdzP}CZiL>{R|j$|yJ38r=pZ>n zgAqxNOI29cO!-#+@ZMlMtlzUL*R!w*PV9jYTdhqr$Ltn7uoN0wb@@N3utLLp-B0`6>AIYDVJ;D2mMchwU>wy)gdQGKJZ&ZF>tb8Z_zuc|t zQ-^p_*Rya+unh4DrJcD+Tg|%Q?Bel9%TRYmcJzDmi})4@>ELv^ySEJvRme9SMPAJQ zaFOx0sUDDXjbE^s(FZK2MP(E7Ffae4GU#^RAm~yY3%<1C`kzN>!HLN~b$e!iWA+1& 
z$(*#yPe$N{vZ0gCt6@lpjEe2tI1J+Nr%i-b4MWNmXBG1a24wYnbyP_)fJ^$D)^6n4 zvUv!`IpR8jR-rQGSWv2?`8e@|X}(i3%}WQ49hXh|aculEnDdDal;7n(2zfr|;!O`H(1^_jB#^ zPUG0yTbWky2RHTi@9_8Uq^Gfc|AtRPkQF<-VUYy|%2u`XONb+%-O@f&Ehr8O%whv% zXEI5}tsir)h9xBA>)P$-oxc!04pyOkA;{BtVW)hq33)6#eY{E>GT@foKr4As2MfOP z34Jf^fYV8vAM@6B!@OO*Sy6et;QMli_eoxyx3W7A{0nyyJSOh{;m!a^UgMpt*^d3= zeZcDg)LHYsJioDZbtRZ6+^T1dD}tzDCyqd)R)Q@95g@|h?v|BP*h;=T8M%BeH zprKtk|9xmZ#8y@=^20iv2~T{|2S3cW-FC@cyBG5wH~5`>_}hR(nm>p4P&3@}=svR4 zwG~!!Fe-oH^Gyox*}Y^0^ZXeKk^8TpE}d<_5`z8n)a8)$58ho6@3q5psI(oLyr!tN zsMi-Y6k8yT^^&6>&7SLH++9^5eE!7of1k%Z zpShpeGWY-8{-@)ApU>Q8Ugw-2_+b2c9Z=FQ@x6#CLH>wmuz}|rFmIbDyRJQ($h#H3 z^meKyB~M3u{d1a0(r(Ut~dG`;xAgkb=G3(s3qliOgaJ!eW&U z=ePE!rSGZ*^8C{tHRN^7zbC<7kGu`L{jXzk@OvEy45mF!r$PJWsjO$}bXcc-w`*H| zKl~1P>k{#s2CqA^LY~lRz%~)&r^VI}LGzl~ENpRpyF5Rfqq7w#9p)Zk3Z)RWP@54{ z6$97aB-y`Cj36N$ZLjl#DV<2KB}>uIst0S0it)-HWxX7?LlT})uFid2n!p{vIf#J*gVV&F87=7+P&!03Yd-5gLrE{YN*GHpn<Xv?r*1XzPD6&p4it9Z;JJ;^H2AI>qxM^bvfcHQrsKkK2agUGCtLFJry=C zJ#Up8*ar?}ueKOU(?QMS-B7^oe$cv=@LSul4}NZ{ypnep>&|Y^nr4GAZu_m}7=b#} zR+U}cbJ7EFsapJVhvoq2HF+mb=F{PICQr66;`@Tdq$1T_2SH%Zirux$W#5_iKN9}*L+16&wp}l6 zmOXG3oZYGPE&rEZC-b;aw)*Ef&Qb8HU38Ae{omh*dHz$0$SwPlMxo4p-G)^wM&OcF zigpwIpX0`{!eWQ$1(qfVaR_z<-2=TR|*v5 z9;%9d^qz#NSvQGoEF&f#$48ZGvA+fc?vAo-BQ&||e&;WDlk-a)ulkHph}wml3l3iG zA-Zb%*~9r1B5r;@vv^GxiMhsVE6~?Ol6U?*W~X0D1iP>Du*`b{&f7-2Dx#5hRVlEX zC8rER0=e8uC(D5v7hHH<1a+BqB@RbPo<^Xk z{JgsOHrD&=?>*By_yueZufvj50aPfcKMUNI36odz?lc_E0OuaPM}a2`z;dOWz|uE0 z5G3otam=^_+^B19tloD6UB4yx6zVnHYZw)n-%9}=`}?}p_xfOJMrwg=_yANrINoys zb#L{l>6^xpe^7Th>$-GgCwR_o99)Sw#^BYXp^{B)u>ZW{@B0qjkiAsTkHc^`I|o~44@%>`X@eSL6EUapO?dH|Xg zRy}NE8vv`1_jLOe`ynKK`>9r}BMOB*U;H(a3Qju=9oj5VMDBdg9X z8i7Ww8=9G%BcM$C;Bay6C@}wC_V0V&wxCX(ti;2{{jJ0BA(?S}ENlqO7CN&3xHky* z?tcBi>p2K@R>z_ru3&(6bqXOXM_}RbUjKocqhNbF_xaiHf8qpY|6#Vw{=czaia&g;FJvHHNlH%a7}xpMO?Yym#U7?m$eXBb=YJzUz9ev&kn~h1 zF{fF5Ov3fE%JrTw9?2S#;2zd@S2~9@9nw(V|1lD1`tLqvN|qpw|8WAVd_BmPU#QO* zYy_1f+run3q2BNlbH3Ns4RCAY>)WS78sSlN#)%(qnjvqK7=!atJ6JRX#a`6u2Bpn` zEf4SYK(d0fgx36C2$;enXbw= zcA;+ku!Zn``i?!Ig_bKlG7T5+bAC@_G>+R+Wn0 zt1+R2UcmzXYkV}A_cb#2^jI%cFOCv;cnSR-0=?1_rl_#|?G+B^I4U>@aIa@IreQv! z{H*gB9d)WF=8IY5d|y4G{LQZ)UZ1u1-uRpj)XkiVoPGFr6g|syC{!pedDMEZqZdy0 zj+)taQ{j!IjJ_G-Y|JXBGQK+xfc11vPI57>8%&l{B2)W;+d%egNK+puAMTTJ#pk-d zCdY2=A}Y*p|1!1iI0Y8UXYR9{-vfsCpCoVy@!vvxoEjti!-WUPTe-n6Y<6=9dWBn}R~;P$!LFR$l0OE(p^~B`)Q>!Qy{@;Z zZX>YMO~7&egHbqfgf^^$alyCR4;$L)18`-_?;oo1}vm5b3jLwXKixD`wWOV&ac7xf#sgi z<(#Fcr?vDGX^BCeZR29aU#cTe)z32U_0BMWr~`jt$_R*m|0O(&IXF%IBt{~~qKjSrX{17>* zp7sjkt=Ky*qPtMHm_>Q|t=D0kC!gPJ6T}Ai z(W%9CYk{w#mLB$(M01k#?tKT0|5S0AK)va8uY|KtWMiFAxsui7DDuq=oK@M!DezDs z`u^T(e7d9~vT4zVEMf}~3{O8F}8{s_5UelPC4p^HM(k$|b z3KdzegQMlJZjHhwx~q_{@#5*!zIljeGn(A)v~>V9<>tDlI|pFuoP06+GR%7&v{3%W z_pk5H*NLt;0BZ#npN^YC9_pjyf{hrDcN=7h2dN;QE$Yy>jJZGa74thun^WF)HW+cN z8fy-Jhat$_uJSI2iuk*bO`F0up)ba%)~JE(K1i2{cUz9Siiwv$_bq>h^@IBKDK+m*EW0RvgdI=-U)? 
z^uWl~l==0@@0;63J#sDsaX~&JqRkfg+=sK4YscezNut3%LvZ3m{7gFL3Gyym-n#i~7%a-dC0{sVKJk`9 zd=>KN61H!cUj2jtxh^~fQC>3wrg_wOOVmG}g02}S-c*UnNB*;GLeVS)Enc-vG^;4{wO7v5pqe__LNx0xQI%=g$mPy_Wc_vkI&@TiB}qWB;0>+L2q?q5n)vF#*g z?_+^IddRbDj0EMdwRasfycN;{A0wk zdo*O#I37YC!VTBTrG9l#wxot7Eua=;%s$1pt*%9VwaKEL6^J{3zm;c!F!Iy_`y*=A zQ3pFnQpA6{0eGzklQq0sK)z3y_tkhURCL;HD9I=Uo7?Qq9+ZSbLd|ubADiBx&c%X< zgV!>McgM>$`$9hw?MRN$Bb$)7pXbc|b?hsnZj$E_?;hEk~WocCV+;S|l1k^Zvf-lI4ic ziA&G7srv+iSLDJD{rm(KH5FaF{Y7BYTYa~*tr+h9=zgP3K7)CBR&S%!7f`;>wf9I3 z>RldDo_em@2@y}I9ixu6t5c`XL@wHe$K@K`F~VHtJmu-2bvxcD@M(7Gwpe zo$5w?^|VSK2VX z(NU-D+`hmYbf~@h`24NY{VYRL1v6Ul*YjTop(_p*Wphbz{MMPfCK9(TnVlTJ(%w?aGal4iujR;(Y}7aRMdl6 z8x`b2jfG_Hjq;KAQQtRj_CX0-J+XWKY5BvmZNx$7F!#-(ZjwO%c!V0#LumCmZ{m*j zl3QXEWbdM0Ql>W%{Wy+7CLj0a96}zQ-Zs6}p1Dm#@<(8Nf%+HH6n1>4d{O{WQLx@s z_o)c>$IfTJ@Vo^E^gjjFr!;|cr}J^M2;|AQUXrcuuY2X89oiv6(b0=!F6YCDGs(jsg zqW5r}?!u8gV$&V|TCE@*G#~Y;H+a^9RJRA;ElM-$5Q-^i&vt`~1;>=uhZYD@%@W(Z zy9fkdjj;8U1QIr@&sU4yBme)Nh*t59GIDoiKp-!plxVxJQA=8sONfp`poQWifI^b6s^$bRS`{dV+$zxk9pth$eYe9c0 zu$@?Z)nZ{0#GF=l?Y7B>Yv6q3CFV(IzORq5r`H2{c>Pd$RX50IU7(-i>w}FmL;XjT zdg1z=>33_KdZ4;ZMPhJaFT{VlB4EMjg<#H2B0EdacVXWr)hl{E;2E^^_OT)gXnU+4 zQFKMzfs)qS)n^d@$noOdk7VyvzR!PpKjwM=-8N_)^^W$oQINc1cU`UT&v+A@ zOptmaH=Q z+D?r8mZ# zsgg7;*=b8B@-7+A2K;Hn=auSsxIizVv{a1D_h~0=ht!JiHB^y=r+k`fZqG=``WMm% z6Dq-X#YV2t_1)m0vHZvq6U66l@9mxDtOlE?B(3s=NrbWE{JRqL^U=F7Ry42>m&<7e8vSNzR85Uy4)6Cuc(0EG|5_- z)YLlQz9iBYpts@$b|wWMI}|_7nv0cI-K!CKhuew zZLBLnkqr_MiNtlZ#=UP_)6hTC z?(Cf|5UM(&fde>VybcA3_VFwm##L{qEjm0`&3Xv$;#K?m-@rIeH_ zRCs&+;EhTP%&%UTmD(qXxTMY%?(ADTpmdo^zD{&AFcjp+KBL}%%i`YeNAo-3jgHj3 zLR_}!DRNT zkHN_yXr$fIdspy>H_YtE%<+r4&1{+b_gm))#37D^x&QxbyRf$5&Q0Va<;E!TN4-Sf zx9R;KlphU%r~IxFQH*nHXRaxpKHUy{*(YyEi6TBtM1CenEsEIO64g_jsw6T`7y%k3 zO~iZ6#WBagPI7JAMDC6K6cTgrJ*OM$S;}0Oy>k-l0W9ggYZP}QKiEFV$bSps>Lt(G zF6pZy?Iilr@0IyPeDOy1n#1oQtL>ohLW^pMAH1OOFTY>a7Z?4SGlqcq{>=APeH;1X z#CgQiIv5}BJ%hM_&r;*_pJP7R@%8;d{_TKkqS! 
zx1?zR-?>4uc7+cAf+!k^>QBAMxPm$$TP!t8R-mrtTfg(Vo*zg7@56$W7uDdiK0v@A zrWZDbCT`ViMIH@XVD*|KB@omgx?}+8-5p1|JzN)&71mzcB%bl$u*WL*)E0}#x#I-g-v0@3(LJyws&uRn~GEd^y?JQ`T zuw1uJ$a**VGZXxSw-?;~_LflM4rp$%N1Z>7(CPs87-I8?|GevlG^iaLslgr(r;?2+ zi{5p@)8sOCDaB3*x%)m`D5L|-I;h5J!Kf?MX1(1Fc^%1BmYKOExkXBsZAM)$k>YHfQuOnzxy<>pYXolV z71#c7#rgH>)6H}9hM-naMK~x8>-V7(_Fqv4xBbB~`jq7Wypg8XS=G|uUDcC$pSL1k zY-8;*ng#m6$aPq>3f4l5v@l(GVFftff3`R#tp<1xTU}V!*#atu?B&b?Iw7a6jl(7a z`GK8Wb7rU~`9!(0u9}SkgX{X#))gbZ`NgteGb_wHJB+j&`}9Bu|EgvAKdHc~YW8j& z>grN-Zi$Uf{d4~R`+H|@Gh1dq@D;Q1-smw3-T_xf&D#Imhk4$=Ym+x0vl1Laf5cmv zac|~^fHQ5Ga|ZgEc60iuCgb?4R&jSycN6Bt%lFSDE1-_4ykgP57xC!x)A3ON@vhg_ zHjn6juO%^~scU!tY9Suc+fSOtbdj*dLC2P&FV*>Buj02|6k>TMYIVg%)UAn*Rq(3m zBox-_ZC<|-@6(ukIr1>-xw#ATdC?BLE3 z(sldy?TN+1L}I`5<>!5<_ebdxrPHXSeVf#bLP0y36m|~OUsys|R<3Zhsmp{1YKcAZ z!7Xs}PDcOdzFs&xk5kYQeRgFoaOn$t^d!2}9QK zmhU%h@%yBUm6^fokZY=g-f z0Sf^X~v zpOI&@v|scX&Z9{Wi;mlxp?_WgJ*6%d$C2e<`=dBc=Ez#Tdfb9}#EpUL zZXmvv83!}J<9}B-WA+E;`OIzRb${=#Nq8k6FE9e8#~0?mx!MnV)m&Crz3730Zbq#k z)|bYQP6qFns)nZH8ochd6(I7|iezEFNj8>?vV3(jSYH1i_ih;VkLW{7)?%Jn#4lq` zf>*_4u>K)HE6hF8S5aP)f%-L%uTO z*ANIeSM^2&jsUMqEW}2P!qGiXPqf;O!oz`MrM#>E%=gUqVZPtr?WA#!jpNBt7!z(c zUsdzxx|lZa8AHUOj@77otJPyYw)UR3lPVn?>z2g?SJL35c}bW&_Wzum-J^yVdq8J^@{Gj;tLe+Rz$0{7MPXF6SCGuf5s~f2xtZ$oYg1Ryr-({RGf7}5n931s|KU!eUqX(=Lz)UES@&v(LDVK|Wv?ft@T54QAfm7Z#&-Iw z7F^c^S=V%}#(Zlbjk`YoY)l(GJ9LQwqGI7SdwZ~6_Fo<15AL4X&;Q=f>^J=B86MKbqd+&D z3_i195N69dUDz<6m=TmFR`RI_R#C35WR>a!(O7R%R7GgP%D`15_BJ$Lfqxqp@EAx`k+^vH|UdKAFMr{>AZrw4-}4l593+a z58L(n)trwH!oZ`|1?vNcL3vO(dc)u_h+ZzH@gh%<`P|Isbkz!HJ-rff3Dy-=5&xGi zC-eCJz-M1fgGM26F|Dcq?-Lc*wry9$FlZOZ?O1c>Prv_rd@yI!rGLZxWBb!{Ur+T8 zLSDwGxZODV9DiP;Q6WwThX=Y|WR?z*t^+kUjSyd!P!udBhB!IK>feR|Pl(;K$YUU+#Ci@k_h&Nv{2)vhD{mz^Hf}Vv?XjfgV_8qXV}&hKw|U)&-PVa@FGQm zI$eu86dguW=Mk5rmhT?jy2clTLT)sk_RJ?SZu{b7#LLKxMT6I+h$7BhFMt;vS0`=#+rW9PX^A0Q8`udIcO=Sw2CjEI@663S2l0VpZ&uz3AS}9ujidFE zprks+br{!u$zR!DwWzj$a)&~2r~uYmx?XtM%Y6hTM?=+|rJ<<%F0lQmb|f^<#jX zkB&$u*6qE+Lt>XBUTbKx)4WR<$8Wmx`rRpxPM{q)bS}6Ybt;Nkl% z5I(luPlACy6c=RKpP&w?ap{dlkGpgbGGZZ38K?tzp3`npUXd`G_Q%E zt}o*qzYtqE>W(kiU(q+*3vD@5np-eW)AaRP(y&ky2r8yKS1IQ}RA+dgXTkMsL761RMh)@oO{pvFzVxYI*vWZ{GK+ifP;4og|OCY9}|$KkS1fUA1gN^4p8&i zh}>tK?`o1(A4lGB$(btAlI%Rp100Ay)bJjv;uHD%F~9A&Z*cyxEnQF;QcNKhf9ko+ z@sas{%x(Tg3+{`E2Yn=+K0G?o3n$Ef*4(S3K(wdptHhhA|FfLRHy(ldyYjb+Bjr95 z78A#t+Rg1mm-6i@A02(WL}#yVo*X1Xr_JkSxJQUd3lIA{j!_a+UJ<*LXM_|zSs0vx zdf!UHZ{^S1p#RsgbVFlW7dg3B%UXPQHOZh#8n1tfb&c}(Pxad38u!+=O`@s2RUjr$4 z^k}!de>n+B^(^0h^(|rO8qL)l!11>uQt|HmZpgbhVI>#eW%I1?Bv$JdmM2NAU!n+;?=qe+3*HJx*=_dxSt;P7InLSTKeUNp(K5t1uc ziWxnu110KR6}W~v*xPUqBM-_t!5H;pdR4a zg<9J$yLEvbWs_W@H{vC&q!eY6s9=!boj#r43%4x8Q}(*xJX-eT^}@5r1F$M@VN1mG zqT&O0jbnXJ{C2-ec0cG$uve6~4}zJJ=H}JPP_vQ$x^vid(gVlWquN6s*Py<^u&CsFj=nbJMeFjg zu{Hsydi<_+Y=VWmk}Q^B9>bE$H=rk!3Nmk-4*$Ts#QDwpk4|CTZKKjV@5*_&ZVf)- zH@FFPQBA4Fj9)YuEL?Zy9o897De1I78ybZ8mc@L=xc+CxE6nF-ZZlive&%~ITV{V? 
z9%r`9{meGq?Y_3+y-^7Jxn|NH>nL1CigT~?F>crrx`oltfU^;nfu)jNFio#&RQZm0 z6N^uG`2F%gslRSxs8ST+dTOjAdbxxssrB^uMc1P*A1{CA(sm-jo1PJ}x0}qJFsZjf z{NMTTlEgWd9#T*LQ1k=)ztP;H;*^L^k|Ewn^YU*dnr6Zo4DD*-uas_h>q7>yUcASp zRU;S9WUk&Tj_az}V~yF_`1^-(Db>%+XTY9`;q7UDqfm1`rD+4=w>^aC_fs8_x4uiu zZ*VgeT#w~TEKu(O+XpA~$B$7US&$aL>^%iIEM3^=6}CeL&&t`vgk;dZQ6Q4PtBPDR ztSfjz?;^UM&g!qKkf*f!kAv+K)9LZs0<1)F5JHn6g}@JbzA(XG{2&uoWtS;RL5 z@w?}jD94W=e=n;0lU`Oy@P`e=GDeoKCVCedCeu8uC)8oz~u=SMqOuZ*2^FhJZw6?gj4%SmMB zc+)`x_Y$Jg-S(Qpw45-O>^RuhR!GFvmhdEwhLD7dm1XTd#i)lE|DZU$4ekyRx#U+h zV17GH%{e_8`31M1oviXF8NW_GnPhb#f=bE`9PA&VL~ibLbU+=LzG?J7zk~uR<=0j& zHb7kh(2;oi4C|^RQkg^fbzmg@d+w573$PS5@?M$kf+z7j@my@((6nMEQI)R^x)hv5 zIlG#mD#nSjcdP{*7jS&q>emf%iSDlZP{+0Dcs8H<+d)|S%*D!}WC$KS@E6^bj_Xs~ zBLM=(uK=LbB&`n>bKP!7id5IQ!!^G3uU-XHUOXSSj1cSO6h4THBD z^?5qh-FnT`bCbA-AWg=0a>18=m=@C)QfWf|C`Xz8v$TGwy!f=R8S$YzFH}qS)S`Zj zjjzSmb1jhUSi};OSO;DAju@nFZa|#++~s1ob|^1ATjzm#MpY)<2IW)8Gu05)ShlVY zRx~>a?AuQR`JYzX*fKFrU3_}enN~VXxtru>UqF4L59b&5*rG1rn}7u;Gm&RxaQbb* z$q^7yn!Un6+%$c2NtsjXe?LB8zW0A>L(a;OJs*dmkllf1@OBu~2GuULst>|h*-H=h zWOjpg()t^n0qEy2x^IU7XDCs7RpII^QAU&^cPNQX;djfcPF;AigN!uY3oL2sCTb=v z5&S~f?+1-RYkLnlzhm*|`-&7|GFPoVfPStFR*86p!6xDy81<3UyOQ_|f9O?OkW3`s zji2C`E(D&(?oE~G8*dz8wDj%S9-wN~Un{ji{D-xx1a+bZE||$oYohMky4{N{R-mqz z|K3ZLZ-aVKx9+0&8aw2(FC5$I>DUV;V*07a*{GnuTk9CNH2P7~EgQvL--ASiyFimZ z;+L(0L+_%Fmm|B$E%ix$GVy)DwkASEUb=)|ZD)+2S~m)oFh4m>Yu(EKjUm zfqG^By>D_hSoV?vdI`sx;U;2}&b>wcQ96kjVO9Ns{N&wL^zH*(SjUbOYgsCU`Rg>V z&*k~(N6%s&nYO14C=b3ZJ0qGz?(e>N>eAzCl3=Lt^7s+t7gxs)$~rOXw&H-@XV`V?X|jzpH`Ej_-{P^7E+^Gls$F8(NXBU7V5f0cL`_MP44y zB{B_*@s1Bfu==*oGQ&g?b@&zE;lM~R&(f8;CSMLLFF!f#RA>e7 zEUw~L38(|?#Xczr`M`B3Ki;e-2gE0%h6as_p-VA6eO*KYybynE{JMw&^{GTf_-dh9Kb#j2pRm|Ifpzp3zjmEFK!dc9iI9vC)Dygu&7l^C z`T-w`lUk^M>buPE`or3ygO8~HT<1;~Zx#B9`kEcC60KzJpU0WkGuuK{A+rauST7Wg zoS0vPK3r!`xhvuK^N_M@0UyTsD*SYgc-ab&mwCT!i{EG9yToSY7*L76KbB{d##Rm)v+4R7St!~`u!+Frw{dCqN%$E5+f49uzf44?61~GAIfA|h%hSL`wRO5WraB%)= z{wAPdIhVfyb#eNp1-=(0k!$gcXN9jSNC@jho`7Z(@wmNl`@| ziILm=O}1fO#3p;q;SDY{BJ=8y`ht={qQzM)A1*db%G|fk-a0o-JXFRW)zl7=vI8Ac z=WftZ@B5^V%>G_-cm1N3c5W>s?Agavp@oI0b1-w8Gc*U5&)PMoyle&SgOl1}KByDD z{hp?dOqI2a(KCye>E<09_K0L2kIbOw}->;PQ=-7fblKgqzlgh)5 zB!p7zlI>hdnidV!lxRK#%l7wYT_wS-szS z3&`}Zv)gJlstM~z?2Jk#`eq%`OO8KUjQ)a8u7}9IAjxK|{hISjA&{|TBM0hw3F$6) zkUN5WV~ryU;kk~4C6c{~mXnPB-?kljOVM}rymoDYd=kd_+6^MIHQ-41A98+;c%m&Y z+)5)0z&s{qL;5T9;oI6CEBfFK>Xm)63PRsM!I95GpAiS8Kb|D=S)vo>>nGgXbEg}G zZyC(E#CAeMWDITAtsC{)o=a}JiQ~DmF~t=12;4gw5BuLiUSgo)()GdUFDF))D07(# zt$SE*t-VeI=U4fHjNPa&lJs`h0pxu?I(9Aa;C|G7)e919@%S_UGr#kH*AJAvHr%%B zVn9bzzWXMO1K2icJb0lt1k0ng7_FP3!*5VU9~YJNXUjlI7G(RIjK8qbApD z&F@P=xg}v-sI?mG?R&o5x!($nJ`FDGG<#rWRMHGo(n)I9op9Q1O(8Qj)ZTF09^#=xX;(1mA=clw6bp-DeSFw)LZ7vh*zD9$w>^v3 z#Vl-IlTbmlQ`QM?@%ccs?>f}~Tv-6Ri9Qadoz37ik8Y_kLIK)&x+ewKg>xTy&b*Gt zI{LmsgRO<=+Z5P27gdM-n;d)*vwIM-c%R)}J~<4`_q!H+jMB(~dC_`lO^>8@U|C%_ z@@`cmXde+hJ)T0+93H|TRzMVj_FD^};W64gtxM^^H86At-G8^=Xci6etKfn45I1CHR zcDjX<1fKtgwl|Na^6mb|357CbNJ(j+(0~Y$E=7?cg`!fD(4aCUB|@o)NTduILP%tu z(l$iMJkK1%F;j^|`0nSk*7@~YJ?p)m=lffqU;pfNU*~qteNOjvU;EnoHSk<6qbnr` zMVh^D2-Fb4ZK>>w^HsQyXMV`>qJTJ*onJRvkG{33rt#~nYC)e_A|9X-3e9hnhzDW6 z&xhkfD1S2XyUr0~cO#FeF6lj+z5ZxecJw6-fWHO z;xz6fQ-5Cmb#ojtihOUgqi$EZj}wzQ@?*SOxAihK4g%K`;p>B_5A{L#!j4wWCNN(8 zZgdp;Qi`(gd2KB}Lsf%akF-b)oU?k!`0+$LL{@*f@*bZTn}4N9Bs@aC(r4?sIMhLH z{k48moD<^cewCda-HAQ~cWW8b@&}DOcV=KRgbPz0R@1)?N@F>ne$*V&9|Zde`Oj9IP|- z{$|L`B2{O$z(Phf@l%BA-8s#ulXGCWM+Nn;b>t_1Ud2AYAK#1LnmvSaV7-|3O2q$u zQ`L)#?j}qduiE#$$G+d3iqmrc22vYvmuaD)ga|hLewE_+k}$a~-{0X;24ZxDM?{_4 zq3mp`?AP1q&t#%9uHuM1gojM=`w@3t<$Zs2&A<3+b9pH(>riKwdX1+Xd2-urQ$rT5 
zhoFO_nfnyhTX%ZBziGA)`F-9RY#fMhPv8iUcg*l3-&V!c=Pj!v>c3;>`)_s=opRRY zzfpfo;iI)^?1dr1R&-QGvU!*|d^HN+aB!G7muhaa{WL%-5<2JZ-svTok0NCTjarGC zr{8I;my?9F<~$#22841u*iUn{fPij7{H3UV7#QH%=E#V=KE~mMMTbIMKVB*EI+fyn@4a+;OP^4|}D=GRAU9(B7chE#X6iHWJenlZ7O3hn-z+ za4B)#&#ap0P)I69H>6i@^CPkEFW(v?pTTX z!%t2=#IUKGDY_&X5}wKm&3jitu0vJ9O0G_@Ca*%wr_q0VLWKA6Qp_)R80|ij+5^4Y zpA~v8_QTne3qQ`vqn?xN*N3hzQD@e~fT@y-Ix1D$k0|2v{|Rg4sba1HxG%E7=A$6; zDyemMKH83=&&#);6MbV4YOdKQ?Spz`7ud|0IY(f4wW`m3bM&t|R=nmc;{=>(o!GaK zF#$?3!IIaIZ?`rm{k}^17)WXKs~&d1{u^)giT6s#uk+YgzTI&E%DdjmHpq8_V9;zy z%UCstwg#P#I+O>QVO9edBtJmyK)#sAr7~!pT`#fq8uAtfoNOL?_JXy&wc>Mj+`ljd zA8Qz)!V4jlx2sGCpf&Nt6O~JYAgZ(M`)DodEh%nutPuEEDCVt^w+e%P z)DY2FO56Ni^vBk7ysf6)Mdt7KWv3rSzVD6spc-!E|3_1fPkcli+xO+Rg@~J{rdhkb z^G4oZ$fe&KdFzQoa{FPvyiY`j=f%{ycYeSYZr~e?es!taq8;A^Bk!=WQFRyoKhmCi zEMlc33Uw9KIsD@}Oo!nRhrIJ)Rjk`w8jXlt)em2?TtZKI^#YUB>>7E@8w0dLirh7k zKOZeQs1?`>tQ9X+d_tl@=}g@1@b%Ti?DY5a!l#|&-ofja_Vo3U)}03OEC&XOjOQ<- z(&s}&DdzC<`I<-4BL$r>;d z+5b^`PY*b1?h?yv?E?-Im1U#IN9LJX7AaKm0XTkckpA6TN+SIYKT7bSZm`WWk1VEM z5?GX|O|RcicOo!p~Sc}i&@>S}GD zT-^+mAV*3~*e7u7%ui~&iN4r=OYPToRT1NjIdo?)))T)QDaPeeb%f)_M$d_#CB$vp z8Md4fPZD|HVi|oD;{0EH5XsM}1}mnASqmBuK~9#%u390VC^r}yY_%4={VhqRfAW9Y{r>knr;XFrd)hHAub~~&{*HD`i?nfL znZ7$I_7mW+naS2d!uU+ zBrG?OldVfYe_zILmtrc22lKCo2jrT_oRCU^#B2w-aG=bgzrLGHMu$9BOGmzMTi+UE z?Eg32I)9{)1=lkth9yEb+K6)85kZ!-^~9T9ZqO$f^|YF{T64U0BOFUBd{Qx=t<4M8 zrPJo25s`|L)Io9)D=Q^f1h ziaw&0LE;{S4iLkn2YsT_L&RZ>=OxR>LBgh@$D?qXN(v?KP4P-2-Y2?o&Yi27SX34{ z$#>-w!&V!HI$u44(rVPMS8G(Q<0U0_`!>VtXx;JNN(ZTwtGQ;`q4mQa|zu`$~KaZkDS ztWo1wH&BY-rDyvRq~3|#&#fy1DBFHQ(Y_h-*mttCv1NXw!&mc z4t2)92Rz$WO#aYA{dViiNx!Un;5((#uLkS9g=wmrg2kz@v0%N+xs*Y8s>h_*r7;NC zUpzc8#Eklhvn)A#4~~G%cj0q0H6yT5vdr(n?lD+dw2}3s-kyW(y+jQ+so_Bpa~ROpR*ci5aCeL)p%l?Ks&Z0Cc1 zAGulNUp)}*eH>W`bP2yM3j|idWo4IF+r---S}D3{N(k$JZyw*htKJW5kBA_vj|Mk3CP@BYsujYvr!NgA|f`vUwjVN8HbN*2sy& zZG=g1a&qWsJ<$==&w3MGOh#R}qEE275i6lAp~tr6AT#tl&E2;X60?-5qeQ8I26o1s zH~;)U{-^K#$mS5^bJR)D(=3P?+%OFN&yHt(G@yW=bV0bp=6rD5cz&u=;xlpBsPOFm zSS#t%kXkS9+DmvAZn!2N93Z|^YHyM_hDeS<%cK6rK{DC7VS?==mFR?dT&oA#H^R|Wy^^yAyrV3q(7$99~Zg;Ny z>>;toj=ot@iFlyapa&z^*YLTZMKqRRu&lTynua{x)ogBl7ZG3KVM;$0GmmcMp%!5Gs zUZ%rmo)Nl_C2T5-?}>xj%aMm(zJw>R`Rg9Zk8p4wozqzdd|tFY`_1nD2`Y{(l`&>V zU*>1;77dM&$B?*V+g-6-68KV6sWjez=nSV9Gz^x4aePy4N+{~Hh+3DL-fjnm^A^XF zgxf&=-tY$B<=v2?KEud_cut4g^PAb;B40)`O=5*(4|ucdRZMS2-F6wxKzYOk4fDgy zk?2AAzIXlNt>!^6S@Y}g#UF#fGO6TLw-$9RbOZBNTcgeo({uB?QpgwBAKF2AiTs)q zW3TxQ#^Io5lcxyAT~ms2Zsz0QomkMlN(_(p9(AuoefIG64!f?uMZJ^fsL(u!J`rDG zAmmC1SjgH+7HjTrM9((;9Ps>F2_fdS^~5~VK`5L99eYANf1iUQu6^m!`uy_pM07yDDM`g3{@KwnYvxGnODY3KbN|DGqb*JuCwD6A^Z{{R#CN>?^k&^p^kZy)vacR}F&$<%&mOy2NvO}09Q4~8iQG+!sqDTE zA|zv^Lb-steupdEBo0x?!N>h`8k`hTAuu~MYlyg?uZlrRjje>^nsQ=o9qQ(8>u0@i ztB9zDryUfHHUvSViy!Z&R)T%(v$Kr>{(|0QIP|zOO96Ct&(@PZSRR@XML}${YCCCFj!s+;PoI<4dlCsV2 zA@5_JUahO&vyY2Nb7 zr%~73$;X7@bsuqjY5we1;1Kc$=-=4Pj$pnYlQa5dgcRME>5tewL;^G_ojbMrh;e#> zh1*ao@%v)tdX*LT@uh{2b-PMIXsg^_wZo_f=+!?R>6t{nUHg&9rcp`!-nT3{YhOp? 
zRw^}DcAy_?rK|&$6Y+m1_J?iw76LY;)kZt17Q%zi9T4kjfXF?a?7v=Qg5va7|Dk75 zq)FG7TPrt}M1Ek2HKlt=Mq@*s7^Hm!y3{&BsprU>R(p3U`!;~XW4LI2Kb0f|3f!DV zJe8eJCX-mwCo-z7_v?Lc9BEpeIk%yx0OB^ho{FexhDzSNx0Xw>FCvz1*x!q~$hKWR z`{KI5(Lq))7W?ECEl<~{;Qv>``=}n{7yTf8C}@k;7W8WhMFZGFgU}VaZPi)y>r&<| zS^H}UaX=RzrftRg&UMTBYU_|e@Dp}8%ZU75al^LDwWznV`sRe!4Cgqk+WE4+8ufk; zHa}!7t{#J=CF=JQ?~VfFPYuq7Sk&|I_unubIRKK**TPQ&6N{WJ#s z4;nQ(H!+|;{Jm1a9m_sKr1faD-^(0O+BJ3sXNp$@V26Cfp#`s4`<_Wi#Fg+`+eGJCbt-uF(ZOH(bf zFKD0zMA;2I*WX3`37Lv-Ud69TXuvJ*O%tU=%XK7UKn;1RFQ-RKR<#rFh>yY3tzBg3 zr>HeoS2uCIY3nzg+)bu0scodBbP_*h8+&J^R-&w+${=fmzCM*%Ws@t42pyf{P~$#* zA|=JS@Zd!`ES_PPk4x(S$pveBiRJz9Z02BAVB-ix1w;um%OUQEN1SyD^1<|TkIoO% zA-~6BnXiuf00=H7oabCkg~J2TzYp(^y*C5>zaj5;|D)9WK+iTX8=mt{)64=prpL}D zS3eW#(VfgS-&)9{y1L~e@mR0S+r_X{fl4k28W~r#4-kHV@ScZ`10>WzMzmkHpEycf za2ge-kc8yoEq+H3uj^wJ+vJ~19P+fu_pb$@`{HQHCiF*pxwrI-0rFSq)bhq9H`Icb z(9pq?kdJ-&mYnL0c49%V*#5GvpLl00IRxYmlkL|fAMRxyBl_buwqkjsM3nx^J8{eh z=FMv(9TxhCLvC{ArAf@2ii530?iP~F+AfbV?-F1Q$}n+t`3zQ}ecJ7+i6l@u`1%$X z?8|Oz*j#$7jwEp3ID8rV2ca$o64xljq(XC@JpUnG(yrQN?U-Bv+aosw*pAiWy0nJ= z;Jq}U6zQiw7f2$4LMoIhWsVr4Ny-Alv(fn0(2H%PKG{>1D@k% zo%JN22)-7Pd_w&~0@ps@-x6O+5{^q)<|d>Qv+HT`+6o`Qf?JBu4t)(9J!Cmtr&~c7 zI<9{}J@J5usr(3+ZU|}kp+A7USpi9QmsFE}{2gm9HSOz#rRn!_F*wb0d<8dy_M%j zAtQ>}G0|}h9Bp$=s1jo!BrIo>Jvt1bjZSWNcn6^Caki=i=HqpfG55@zQ6Ke~*-qhu zJ-}RGjJ@qn(0g+G$IjwL2;GzDC2w5`)Q^2Uo4vlk^x-Z~g{&GdmSB0nR@eZ;0zb@G zP9YxnoQBwy;0{Reo~_+-s0|9QhQD$7-UR3N73m}#Lfr7&*;7Le6p%l5cX~V z_3!Kd6KV64HvaeV-{XJ3PCKSW+WEBj_c-mC7F}k4t*fxaehW$aSt5Y>O1N5f?C(mL zR=YE}H`j&)ZW3Sj@x^DtA1s>Ua;2UKx;%_1JKjc|S6_X>w4saSF14#KLR`GJ=$!0D z&2F+Y`CUJAO$X68<-NK=u7$MU(Gq(tfPF3R6c&}*e4^tz5?e$U27)7Wr`WKcVtlDT zz5N~fKghOb$vM2A9rIkPgqu=f+As9X2W7g0$C>m4Um z3-m9$+xa4YV;?!ysA3hf9P1cn5e4@#fALP6J(en+Oax~eF7uozgPVi3URR>JkvI2o zHC+$|62?Q=T|ZPqrh53?mbgM9x_7Ss$piE?OTFXoxtmH(3HypgZyX_X)REX2+c9FN zdBm}9?--e^6W!$*F-!uZ#YGni`-z!Xkk^OB4x-$@=Tze)>XCcgSgZXm9|ENpyxS9t zpzy$%K>j1BN3N1iKTJg)c1km^*jm&j5!!x5l@;@>L4`HNi&+2Dpo_^7enuz_AN6FR z9Ml9lSKmo)03BZUAyxY{XkxP3yvHn^C?}99e#8%_4B9`?JCcw3M{ZZjD_;_dB^R@O zMFFSH$3vsV6b3rhor2}%NjN#9`^3*ko)fu2Vwts zz1_Wf5VI=W<7eIk^yTI6`32CwDdMh`@A5XdZ&=;zqK5kE$GPbSpCQi~LDrr%-H^9j z^_d9;eM7%*WuZ?%-pwlJ?eeAnoZl+W%5j>z{@MT1<|%DH)8gObv}0QQ`@F@ir`Pu3 z`X97ynZZ-6fAVPFY1QNyhV->>z8vJKhKh!c!Pgu`;Bac~3biwlU^pwh`)X_(PueUm(Lp>I ziz+Mvn@PfD)#m*UHH0-n)yqu_@oL-4#7`ZHg`8(wI$GT8KU`)%UZEgQxabv7-YM0P%xiRO zt;U{{$=kUZ!t>Rz@4&90>khr3Yi?mCgM3b%oaJ_mXX`+ay2s*UbslkB+af%x*G_We z^0o5f`U&e< zm|c1)YF=1K=yb@af=mhI&TORa&?^Q9JFeSah@U8#<<^cAt0Gnx#&xq4>q+Kr)!#38 z>qzYJr09v)B_uM;ZmZSIJ>vbc=kxEjN|38M`MJ;;asSu7dgcP7ffALreMNaLsgTll zGcqeC9xMmz=FjDmz)MrFcPd+Bot7`B9rej{K69#H>&{01Tcg%}3h9J8n$~nmyO_i# z9+AKL>J!O4QuOxf)3?MSrbq77Vm{Du4Hp`Tpmas4m6g+DtV6|N2L3f}96cuq-{bI%|2 zz^a^EZQ+riCELd)P|jfA)Ise4_9gG` zhs;nQ_*9td>_)_`xVBwyX+fX5a}P&fwf}S9pLYHKTaorV&|(Su;riT_sB13wTxbpU z*IM;F8*WE;fEB%Z;YF5iNUF*I&2ps{GHvf}{xltpIOdm&`OW!c%f2EL8QfngGy9}@ z^R^I=4~E_~YdVO-^U8brHAFZL8%_A183tNVJ_+GhsbHr6&hCe|q`0 zCDs)X9fQQyWz!d7G_`L3=2D<>Xu_vv-w+JO?OAF#`sX=G zUA^<|!(S8d^L4d}n!q@86+ZfT)@c-WzS2#L|2YE1e8Ez3x})HpoONVdb}vM}@L4T_ z>vZ5zMQ+p7Y~mn5bvMjvAg!ldf{OCHiG$vFO4`{z!ZstfFlJ_3fSM zzivo=)d*^=sTS#)nIu>9hU?0!xc_gk=s1$!M<)3$-1d1rL`L5<-d@=@LN47obmN%Q z2;q;lJfEIENW3ndxp^TH{g9259v11f5W1(ni)D9m3G3jlBlY*ofxkfc$D^yCAtBjg zJ!@hzp>nEoYfK@YQs8L%)bDy?%;t4{;s`zm_FP`I=ul2FHM`l{rQ?akCN}AhrpWWV z5XOG7xf09{y>ITyw-nCSYZb*{B(3VE<8T~dV*rg zsqazO$7J8ii|0mR%VDQ26Ab91!bTC4z10gFM);QXsrVU~c5GUyin;(LGU5$o=wD*+Xs6Le?Ix&b z3fNMi-vmV9@mcpf=nwIB?e2o6E_m?4f1*tR^<}3D?*t4{f$gnAY5?L5qieE?6gOet z!iPb>8TI*mb>~c6zx=z80qwf~j!!wnNI$qn#rR3r@0b;We_U(xKtJ^Bx 
z)6+5zdwKW2lVruZ=YmRlz-$|^F*F*}yP!VYU?6v}ats(UF1w^plqQNFlNMM$!f7$4k`n?%=16!lVH)|#~u){`54TT#-!*ZvIfxfFU^obyhq=!O~r@z{X5?K@9*W+7J9o7#^73l zkj6XA_l{&b#11)=NgOtc)0r=*0Y`_x+Vd z#6O5|%=9vbkWj6Ph0k@EFYWzuxW5tcKH3o>9DV4UJGvyrg0%*y(u^h?ZF!{RtSCo9 zE<(}Qz7}XAePp!qU7b(dAkpeSsWbLqn2fGZir7LKBHAU5@s`L(;xXmXmAsDq(=9F9 zvMTkY&~&JICG{myn0Pf)@U#{JAN3saNiGDdM9UWsRMQB*na|)!^(vxoygNy(tB&xr zex02?S4+MH6)rs)T1*7bJbT(G_|_k8p0r&$*=|*w(VjNvYV`khq|(xZ@vAa25x^eP3cLVmfu zw0%qzFPAQeN}nSRyM~@cyJbVbrJ5`3{k33yIq$6FB;sgFN=}rfBA?dcj$Pzn7wi`e zEZe#ibwKm#oSh^3;N7v=BpdW?aFDlXTrGln>|vEUy!=D(O+nW5H1fRoiVxc~kMx0b zxsu*m^a;5{LPAOz`$5(S&v`V>6HlseT zkAuD?)&ZDPyIgkN8w9uBpK~Lq+oh?;puC$G@yI_O7OJEFUAey3_{4eKzsNAxoxWEK z9@>h&UI%NSL@{APEWRGB~)EwtzsLr#8a%3ZD=xaa?Qwyb;ru9;evGROW|_i5jU7Bh_4mMXqMe8(?T zEX01UvcWEqTknTqL)!E00*JFPc7G7OupIe7%U{ogF_%D~*v}OW4L}UpyCqo*$4}M)gKxUXCm>>01N&1`3rJ6UD!2VFIpiDiXl1Z}led66g;!xUn*1mIqXhnRkS^V5by#041oKM5L>Ap;D zu^&~$n`>+;uR9tlS;aQ%H8q3Ze&tmz1=S!p|73Hu>jPq;`m5rmMJW+{ENyALzLp4? z27LVHhV_4r_^>hCV&cFvJul=dNE{X}d|h+50(CnAZ!Vrd9XF@rn<=Xj3AM~5U0~@K zGP!*vTi&q^26ONf6LoEzN=L;n4* zwb5TuNn1Hs6#W!`FI&g|M7tYeE_fZjJw<`Gwzt=oEa?UQWAk)oR(;@g+*|aAXg_!! zV1Dz|eGu$#?|o*Aykln3gW?GRn13qBJ2FVR1>+84o~xr zCZexCW#Lb0BOaVpGjiOWM0ADzlqY>BDctkBBl}q!;%ZKI{X)LbBZjyejt-bVJERE6 zi+&`TXYVa5n~Oz%(Z`#1gw%r9NR3F{wjQYcICOQu><{n%@9W;H$t7OTj<^X4x`j6( znD^HmIqh-iPu`Stz)gv*b{w{Ql>8jzp@5xyr-e<^CuqOEVS=|Yk9+G6~lnp-p#5t`%f5dfwsIl&1{$M#khU^Qzwb}F&yEslwqj(CjyAqVA^05i| zLd)v-1q+C`*D1+L!#wCyV+2m64xqcH!QOUp7@W5H2&tj|2+vn-{tbKH106?z*pe*F zdrXz3Qi?k9Ik5Q%|KUC&18W;&yr^V*fX2C4c`A|lwD&okZ7*RmdizK?qMf)gY>{>= zDko;gYc|J-ya$#$pM=(~Z2_KV(rN1JQD^+*>o`8qI6sVY~I z`2zfu=rfF!Utr_V)Rk%bdU#kc+#|fD8`AmmeqOoX3stUfGF6Ny5V=O~)~1kdxHRP3 zu7`E@DBGIPKd{fkSAUi4Fv5E8;jl-5IKG+^E`Qb!V{o|6_0}r)F~}6?R@;U5!w=r} z-E-T9;OXU3AC@ZQ(KbvPmA^!P-l-wjiux0h;Fa1pDoBQ+blz%}F0CQfkF75|8Lj`zMSSg?^|88xR9mYtx zb!yn}9DaWCdggzT-~N65f9C`LH1GfEJlgrR7^@T}mZtgVcT>7o&*x71VOO-LzdH8e zYYUiiD9=#$yS}VD{Vn?AdCy()Ysn=R>r;n53E?^sHKB3s7UBR+AB?nRw-E~opWIUU zPNJipy>9g#^aWaQ?qIaSeF4*q;_XMxq&CPltXR30@GKj=Z!=m*SewEEI1d~~eT#;y zvunP9)MAm|i#)`ITD&Z@R_}$4CbP>g?;1uOz{TS)OwhNp~7-gxF{t4_|8em!~G<>>rnhEod202Ty6c)h{MURDG#abCp)_(olc;R zsl(&lP8+jO?}V(F@vldGJpGoN0TLgAv^Z#X zy%ZF)YvfM_V81XukLy`e6EXhY^Rz3NLb^Weq=Y@|Bi~Lwa~W&wBWg2`Zk>-ry;RBI zdJ7TMM-~h_ULuV1SC^ysGvg~LYF#-v;Z_H=h1OrZH3Gp`_YeZ`PQoK9k@^AuG7seYJ~j{DqICn4^N?I-!{Zmp)Y2Pw5~7mGxlWJ zWC$Z}uULEZ7wT>B?AlzpYWFy(^TlsnV=@YnY{UF{o`b+)R5*Vh{r^^-x^8%-dI;3d zYG{qLjKV73BVXA@5!e5m`&-GEQMj5SK=;iZd3;N@sJihYp4F8(I$iD0{rva*p&dK! 
zejc1#I{{wzeW>Ed4;FQ_z3J&?DOow&!Nf{YF0%n~i&^y7hzq_PnCaOWJk*{}p@Vb_6nGzwGb#KfTduUFb^m zL0Z0|YjSxHXt}?CEG>)A`O&@lDHExPd)RBOc{zi`3Nwu9HCGT0LEeC$;f;hEH?&n< z0`a{nt!ce>oUno$DICOT*Ci?)Np!2?V3)BAvFjZb=5F#6c2e{0(DbO^*>uCuc2N1Tt4 zbW=J9>NgynUPzVwgP)*1_eK?Ze~2C*gE7{~xCE>l54}laFUEX%RJ{G{=)-uTJR3CW zkyl5ID)Tg@Bf5z%EW5|Uhq@jP`{gWOU>J;DgmNaZrs2^SLZ`C6FXmhnT%d02xEgLEOIB=+SPZ(I&@Mttwg-GEl{UZNE!RKI(ymk1e9eM@p18FU);$6l)B+LRWNgl&}_<|lnYpMLgu!=75mSkhJ=kksK@u;ST|7m(K@VoIS~CUO3zD&Qb)loujOV{-Ut|!X!(v^9|4&))%uIb z2idTYpl`cn6ru!fct)F#!Bw?FuC;!n5TwHR_Vuz6sMs)@oO65_{Y@rq?x8+E|BZS> zNzOm@b=v%*-6yo;ielM)Cr^&U%z?FA3F-@WyK3It)P}yKTE$JxI;h`Q6TUJP^)Yvh zzwi0-g$nLxl7_W4(IPmY1p-uY@m zomtZ6Wz$6hQy*@beBMV`r&Z^~><5TL(=~Nff_(pvt{)v&43I~$Cl1}1?2EkuTok^{ISGxGG8n_gum1$Xp{gahzwy9#>aWqQ7?CGUOFk=^u zVb+ZVKi*?*$JHuGq%i$N6ZTE{)Jv2kPE$zPmf`2IXL^WlsXwRAPt;FjxqH9G4*5U% z;S4q3QMa?}ve@$|XW(x&tL|~FgQTA(Y2t`Glj2<-vD7pPN>qMY=|p}aa;pcmL*CUA zk0wFh$!!fpaJtTyjlPx`{RTR15aPO4EETIel1NzZ+P6Gk z_(J3^I|NPh))J|P4LXlCkw3WHpuf2*pLjdhlul(N!p0339aaX^!{^Yk8&Tt^lQ|r| z#$FV42lIq`B2+@}#ocBG>15bE;$8{u$QK7Op=vTJnAaI|5^OU{~c?q)3%4PcpVegc%k(BKq7*2`Y zoRJ>}zj&6sF!cHGyEXetcmVl|5^woyb-hH_O&!55XxOqDfYC{2Ylo!h19&abqlk^R>8(YY9(la%U=ZH_! zAL+c|@tKI0S2=Fa4<(_+3w%#BN}&BytM=i@b`bokIX8g&q)G0FPi#>q&0xECQGqV{ zH63DC*|mBER)@8&{Jd-oo?iSk;1@Fvp~pDF>h9WczEwQXhd70OS-eLF2(xU1 z$OZL5l9bSLsr1+Yaa5k|Rd(wmnR!~AeD{#gaPg3cQGP9PIA8Vs$>%uYk!`;m!dCIHTTic9Mb-1%pnx<2YwZz8hI5R zq|nvKXHElsleQl$@7L`l{QD(Xb}k~mdVTJ~^<%|^wRAmo5pjAd7FuRDp{;N;r_8hl z>kTPt)o0R~^TEo^nCAxW%bW#c_>!+94=A%l#|iVS63JGIp;tATyCeL>e`P+QY`Ilk zqmTrAnsE=(8f!o>rg-mcW8xrwsOD0W>EuYpX6_Q%I!pSDyd}1d5{6|Sy5K&(K z!?5La5ghS7^X%KFHmLCZbSfG7Rmr@(JGUpHe*EFOZ`bZ2k5u69*|MG3mksw_5wH*Y zc3#fE*_BcMPD9eZ%norq*Q2xI`w;J2(UrU>nhKZgCUdM720+c~o~1wXZ^~}Ttb2z# zUF)vYAGMFfK7Cex{5f^xr5G4A&LBU}Yl3Q(lz{z#s9kIHU57zb=FrL(z8`{>yb=%ZJ-{Xu33YEGmyJ~)ZEBa;Yy z`}X|F`4+>3KkVo%X)8+h5 zr!1G;Y9)f1pL!;Q+KH&nn+3-J)U(h_jZz+JCPLZVx}M{8MEU@syM3f1u$xLOmVRe>v89hF_OJ4dXh0p4 z7fn`FPQ)?Ve}}s_qDZv(aCaShHE1@5az>%9<)UsyhRy->r@SGs_v$(dNG+MDy~=<* za@&w5!TV7p#gy&qgIzVGaJTQGReQj>#X2}0{JoHdQ zsF(!Oj}K-3j)CnAZ{!(<>VZwhPFP=}14PAkXGmI?fKKW`mBJHoL{T&TepYNLp$i!} zB)anD+YrRt)^jb2|xj%Y@vkKB%*x&SECx;zS-P8RjicjV`9lR%v3Z`VUj1u0Zmr|V=p z5!4S>KG(9Q(y5@G=SDTS-3f+I3aWjBb3kfnP(w|*7-4Z^xTWjq?_i%^nq z*YPOoaEc1qk4qpw-SXq}om>wgGV@DG3hJq6E-`mAx`aHf@J23aS@ao@W=Ixl&Lx?5 zrIqUUB!jo@?$aF-jj)mB$9*Qu_w44($4<+3!9~a3(5Ch_xcw&V<-$JnPbix`y>n$Z z>O##4KAP=@Gb*b;{x;}^x6#TgEu(uu?qEho?Fz&_Flo5oOsBx)B##umP!9;C>MeCa z9cE3w=EWEC12EXN@Fw%!5L`$dlIigi;;Aa*8UcH59#ap*`L><1SG?VUKH8bW z^9C#vfcVsCYwiEMp3>&g-}8nxPK&g8CgquhTq@KJS5&XOjXD_)gO>7j&7*KiSAc6# zYZR82?w0dGp25>S!Eq#i9HcHfGrbRcyPp5{{Qs}#ncQOi zRkaiS*zP!M*PUyJT`i@5`R;1FVRP?Ehd$i+RTNo(` ztub72z7X?I6=9SHBBJcS+QL!ihdTYj`S?lH`&E?Tq>MKc{+q|%xtvD5%T4(#k%@K0 zgT9lS?PWO;To`2e%#FNr=S|%=`NH7auSb(I*7?9@JkFc23-Knm?gjH-Zvgsa>vJEw zT4DGbFOzsjCy1ZCY;|0p0$ZXy3uBO1apX4K@NGrhV&0tb$=P<0#E1F#*9OoMY;j(N z`L|lgZ40T?co?mnE3NcTCLVhZ$r@b#Line7qLn0?35SMT;-ebmFIeuot=-s5su=bC zme2GNv&r|ztYrE~;8a@e6L$)s`rdeceqSreZDr+-(XSw8(G|bAQ;(CT>t!d;WLJW% z@h#2;hYrXslO9m{-UbYZl2^7`pwF)A9;&J}?z_z{7bS&lMxU@CtzXu;#E6}rzdKpL|8mgjj8rAuql7UOZbjd&kl@og z_<1E|-9_(L=R?AN&nr8Rp)Z>fN5~4FBBJHSP%&#-PI909(%bR_b(^EE%5BL=CyL8= z?hjPUhN@quKN;_Cf`eCee%#;P1L=Aa5~Iga$5uU#`Yx~s*!3+wt+_%)Ki;h3aqkhJ zSlcibqo2&*_havNx}_856R<^MR?4Uw*UyOgA5wfnu>H*oPn+FCkiLsD(f@D|rb8R- zGA|6l4+F;ii=Y3j^FROR4gbvBr&~O-bnc9R!db4B>Z^y)&skPOtaA`D#e+-Py$~nh zy#HO|s!=FjFynT;JOM9GZrSgqGY**-jpv41N5Io(|I$?DQ7GNB=8{y>zh5tHUjIE$ zM%Bmm(7zamohO>_WboWCZj*U%in)TJO6(xQjUfTmC1C&j?)^R zp<5}alU_ftbAK;HZ`<$RF-wKtDL=a%dvG77aOK*%DAc>*Qg7$mR}F?0xVI#J%UVcn&&*5lLHAVrl%WJ8$r5Ir(SmDg&*pK+B 
zXW8zCztx7J%guqq9{m*-$E3_%+s1$;p+Jcziwb%&;qQByDBzW&(w9)s4)M>2k}vH- zy+*O=H3uFwLv*rqq@QR5RLES|zoa9QNIlr{+5>s|{JXdFWe0Q->NP=DhqNBTmd*7@ z<5e&5j@@;eA*hE)nVtMR`k{-2zH*88xq^IPOP$5@#h*!}gdl&~-4KW@itovftc6u6 zF|8?Hh;yYr6o^Yiy*k^P{r9l0Dmj_9?D&sH5RwucxKvpV#=#O>U%e;={SUowBv|r* zay#~p+Q}!FCnTElb|;a9HH42zJAzo5N9Vn3Ndx0Uwbx}d>L795Z-J3bn16?>x;5DJ zgHC}_h+RM(@KoO52-q46l)THYPp0D9^`kP!uc(kvo5%S!pFzGpsET#Q2aw2bmxc#U z6#!r9-hkBxjWD-L7-SA~Lhu}a`MP!Jqm=C9;pNu@7tghE`WX(uqM>UiL+KC{2XVfM z)5bbk&~$oH9?nNu*_q(7VbHnemYKHm4-Supzx$_o5%8+jGS6=es0_KWhkXViE>L&w zSQ{0>gIJe;*fj{VRo5%x*CP&eL#@$4Z{+z=&Zv6TVO@ub(mFZ#XWvKr99pE!6WY9> zef~d1+P;zYciorI$tpjafMKdt-Ic|05Kn&s{=MU%P_8aEx_JTuMq_iS6XT$EhGSt> z6+U-Tcd~k{{WDK#pG&(Q8XtxB`LuD`F)h-@X@5@}rybL-k2X$=v~k*bwD?cswAcTR zeP5<74NM|Gi|--Rr`}GWoS2uioTh;H%7(2q$g90zXT)QOItdx4+n%iz=>z4nT(9ja z5a(vn*OT)-A2LPCe;7E&5uwXr?VdU%#QC>Js`SBHV)a{GCBGKyUG!%~R{_=udKtg= z$~BPq^EV{C4NynZ=hShV%1@-JZIS8zuY1JL+4a$(9|gb^Q?l1kss#l1DW(d)?S)`1 z!zXH=M&R&B{ly0AFvKh>&st7V;bAg+?>1?I!$z=2`k`4>jNbiE` zyT8`Glt=!>+Jx<~H``(K!TGcUs5hAK_IQG>?^EKPBJX)z@-tzzUKQ5hfc>+rEh%Fi ztz=rOPb@m6lUVrfmE)@IBK#E}UMHd65s&Z#V`fS-3FmG$tq!avhL81kZvB)*LItj7 zCbSlTi(tv^XNp}g8`Cr&FHHrOA0~qS?5HCjeAH&kN95f;NYvJ4L7(1IcKXd?Z}2+h zRu0m+5>eaoGzL!e&2*1?{AeC|O4b*2KU94nlmWt}M@=Gy^S$T9Uxov#Mz@_o3hMY% zq|Th#(ggb*#RHOZS|PVEn6u+xD_nE3y<~W!7K9ET{URq=i26-6CBF}60MGgDUkV=p z2%7%N>3m%UiO$A$YyXAITjKdCaAXj6h{XtT;r+b-_qjGT^*^}Fs?f`Mn-|7G_q~T@ ziNXlzJ8ZIz***xdVfB8Gg$JN!OLMpn_Pet`ryJ18;Bhj2;!Zl$OU_nC&F!((?>6XJNfHZ6NlbZ`*DeOjkvI0ybuZPy+R<+{d6sa=sG)291W z_C&UFuk?}Z#Fh+giWGJimqeuGUXamoEMGX^tLHpQln z^PIERx2!d-PV207&R@@ZzwdoN-+bQZy*LWRXeZVp59yP!X`6FJFyi}a{ljM> z2;FUE8+QH6afED7TJrtH%jETM*{`)bD955t{?-Y;NqQ^WP9P3W*i> z<>P)ZF4ODpeki#!w=Elaxz`6(iWaJ2TrKOGi`w{~(4B0lwJ9Kxm>5Y;ti4)>{Z&Kq z)GxI}M5<0yq9W=9GAD%(OfV1N^Wnf&b&O9bn`#(#)Dn7vGVMv^oy(wHx$3hpnUL6i zrBzodAKH#?I^l4r0a}usk240kL2&uH&*CuD`O7;}BOBiZ(bJ3lW^!6V2M%0`&1ph@ z^@&s`oX-Pk-gG@lHoShwE=wJ$2mecK{Y={ih*Ng)$gXIHSz+_S>2?iJ$1c&1c0oR; zF2^O&_BSDVewFU^>9>gtU4H)`kx@Y7B&^a{g8YY1dREH#za%8&JaaV{RuFmfM9ojA zEFxp>gZoa5e8M{|*m~ZkCqTJwGvS;02J_HQzhrFefbyK#$~|(O5Yrc_o#NXD3d%9z z<<(6fvE*j?cJ*2?KCDj39u*${4wO$eVzq338vXgpX!=1XWq*q*^Mb;Elo1^ZT%dw14M_WL# zdGn22`4+I8Lz6KUw1L&dOug!%R!~pf-1vDD?w>A8+N@4Mz1^MfuhcQ|yl1Vh#Hcjl zS}4P5Pr`d4QPn&!q8a1BjZLC6d$`beEcz~2gz*?})2=bcDSu;L;uf#HCIZojnLw5z zgnhS%U$`M3dv|t$PCbqrW*oSyi8}M-d?nsL4bNPcU)dyrS)O++M>h+gZscsP3gSA? 
z{=@1+Pz}Zvjwv*28sqaSEPJ1SK>fTKsj^z<`XP1jmM!Bm;z0yyqj%F#*W*CA`o4xQ z{YU)ok>#)Xfwv;q>@mdoKa78DXW`U1NVfZyrJcMYXO;gB_nB_qpHwifjvg_u9M1*5 z)$YH&|F!S=t>=jUZn9h@udpoE2z53yeCwVgU-4@GP^Km3N7>1g-y3(t_pjtX=ixbN zjQ`dmw?sS_l(lsGwX_ZdZ_OBzcaSHhSnA_<3duy1-OQ_6FPMZT&upeN^4>+n=&dnm zs3#;WCcD}OFkUE8RH`wXP2?~SZRx{&T0P-~&0ecmM3a%dQ`5d@gg`sgx#L12NDOP!%7 z>xKulQOD?U^lGokb})OUW%fr$2LwK5#J_yl4lzXBe7-ju6r}&UM4=&WouxI}85lvB zx~%X#^=lpxVR_<&!A;zMKWJ;obH{bb&fMR&1XmDrH)Wu@?$Qr0nbaBXKw zY)c#R3fj(&S{&L1#_e{cjd2{<(N3*iw*>XTX1y^zpwbAz#4>0PGzW3K+e#@p`_Yg8XbbZ?iaeAI3!c9*>ePm{*k@y& z-&k<)C0`3Z%yzz%G`^1ql+q2s{y+1e)j<@db`|5n5|ZpnDi;ijvfkPx^I#~$UUQ6( zdJobc?pI*m9NCY>`#*V|^cT{WNfpjD$9#ZGqoxMS5ohdI<>INIC4#zA-Fxl0-?(z$ z;DgC>#1+1mR-2hm*uSH{9N)ciB;a* z#D3cN5XKC~YJVrx z(aIlAxae3-G}%7P^jKGg`{<~xJ?ELIs~!E+VOBQLWc9vvd=zzZPt|MQbT5Y^GAj=y zR3Tsd<_%9K_x8Zkfs_{PPUAM$3?gGxNFR3DJ{ ztJLX+CyH|uDv*Ei!m{?K(OisoSe;zx|zQ4;U_D7O1b!Rl20$(-{k-UYkD9c`mo2J7O4B^dohxy$AJ+xZ^zg$@@_MA`Pb(4 zL+`fUbU#HN{2}s?FFb}kXV!CE4T3qaL2IDmv>g}NCPBK3jrw52cGs)U^gg(J;%H-N zA`cKwu6A~)4_-^@w@SMq9_Zg4Z%Vhf^krJmIwcXLq^5d!As?sV{9RsrBRpqVP-Y=b zMO@9BZrSg-Jm_{%?}W#p{t}@;+%x+Vx}@xW6FF|8E;8Ei47GWF_#mYw19G@%S01?trN zI>LZZ)j0XXj;9FvyzI=Ybi|YBNKp8PFz&pc^C_dEh{%|@-(#SXPiR_El`fP%LVi7- z#XtC|;3T)iSXWpM){n;(-)pr(l~qE%JRkX3gk#U#DFWDUSs5RLdHqKieT_bd>vfW! zRT_>wh#nIs!_81P%_A=0h;<3>Uulx6d zh>Ti_xT0q)L5mLZ3kmPXmM`eXdD`OYFZEmTJnoHIh05YexS6L|PO(RQ8+o6CDmEc5yj#HsbC)fc^)!l97=PqJT-mb`xP)b|Nv zQG*a1&*Il2pMBiVUa=3=kpJR^yPoa=#9c)_KOc6d6Vl5geL`v7P%K|rdmeRgj5STH zugju-%Xmz4Yc`_l)Z#8Gv*N{Rp`s{2eVHg7R}^T3VO)K#Rfww> z51wvbqxw3Y3pLbH${?Eu^!+jSwswD+4<`Oy$?q%fCx4on8@SFA!bsNmiqX9SFxA+z zguuLU@p|HA@_D2s@Bg^TZ=2?;DZdoIPu3H+WW7gj{f^r(j*@;OXxG;NuwNnTlUDqB I|LgKU0qT%Z00000 literal 0 HcmV?d00001 diff --git a/train_alae.py b/train_alae.py index 9f1f30d2..2866479b 100644 --- a/train_alae.py +++ b/train_alae.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
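The six committed sample*.npy blobs are each a 131,200-byte literal, which matches a 128x128 float64 array plus the 128-byte .npy header. A quick way to inspect one (a sketch; the shape and dtype are inferred from the byte count, not stated anywhere in the patch):

    import numpy as np

    # Hypothetical inspection of one committed sample. The expected
    # (128, 128) float64 shape is an inference from the 131,200-byte
    # literal (128*128*8 bytes of data + 128-byte npy header).
    sample = np.load('sample1_1.npy')
    print(sample.shape, sample.dtype)
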
diff --git a/train_alae.py b/train_alae.py
index 9f1f30d2..2866479b 100644
--- a/train_alae.py
+++ b/train_alae.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-
+import json
 import torch.utils.data
 from torchvision.utils import save_image
 from net import *
@@ -21,7 +21,7 @@
 from checkpointer import Checkpointer
 from scheduler import ComboMultiStepLR
 from custom_adam import LREQAdam
-from dataloader import *
+from dataloader_ecog import *
 from tqdm import tqdm
 from dlutils.pytorch import count_parameters
 import dlutils.pytorch.count_parameters as count_param_override
@@ -32,10 +32,8 @@
 import lod_driver
 from PIL import Image
 
-
 def save_sample(lod2batch, tracker, sample, samplez, x, logger, model, cfg, encoder_optimizer, decoder_optimizer):
     os.makedirs('results', exist_ok=True)
-
     logger.info('\n[%d/%d] - ptime: %.2f, %s, blend: %.3f, lr: %.12f, %.12f, max mem: %f",' % (
         (lod2batch.current_epoch + 1), cfg.TRAIN.TRAIN_EPOCHS, lod2batch.per_epoch_ptime, str(tracker),
         lod2batch.get_blend_factor(),
@@ -44,8 +42,8 @@
 
     with torch.no_grad():
         model.eval()
-        sample = sample[:lod2batch.get_per_GPU_batch_size()]
-        samplez = samplez[:lod2batch.get_per_GPU_batch_size()]
+        # sample = sample[:lod2batch.get_per_GPU_batch_size()]
+        # samplez = samplez[:lod2batch.get_per_GPU_batch_size()]
 
         needed_resolution = model.decoder.layer_to_resolution[lod2batch.lod]
         sample_in = sample
@@ -65,7 +63,10 @@
             if cfg.MODEL.Z_REGRESSION:
                 Z = model.mapping_fl(Z[:, 0])
             else:
-                Z = Z.repeat(1, model.mapping_fl.num_layers, 1)
+                if cfg.MODEL.TEMPORAL_W:
+                    Z = Z.repeat(1, model.mapping_fl.num_layers, 1, 1)
+                else:
+                    Z = Z.repeat(1, model.mapping_fl.num_layers, 1)
 
             rec1 = model.decoder(Z, lod2batch.lod, blend_factor, noise=False)
             rec2 = model.decoder(Z, lod2batch.lod, blend_factor, noise=True)
@@ -77,13 +78,12 @@
             Z = model.mapping_fl(samplez)
             g_rec = model.decoder(Z, lod2batch.lod, blend_factor, noise=True)
             # g_rec = F.interpolate(g_rec, sample.shape[2])
-
             resultsample = torch.cat([sample_in, rec1, rec2, g_rec], dim=0)
 
         @utils.async_func
         def save_pic(x_rec):
             tracker.register_means(lod2batch.current_epoch + lod2batch.iteration * 1.0 / lod2batch.get_dataset_size())
-            tracker.plot()
+            # tracker.plot()
 
             result_sample = x_rec * 0.5 + 0.5
             result_sample = result_sample.cpu()
@@ -93,7 +93,8 @@
                                  lod2batch.iteration // 1000)
                 )
             print("Saved to %s" % f)
-            save_image(result_sample, f, nrow=min(32, lod2batch.get_per_GPU_batch_size()))
+            # save_image(result_sample, f, nrow=min(32, lod2batch.get_per_GPU_batch_size()))
+            save_image(result_sample, f, nrow=x_rec.shape[0]//4)
 
         save_pic(resultsample)
 
@@ -111,7 +112,14 @@
             channels=cfg.MODEL.CHANNELS,
             generator=cfg.MODEL.GENERATOR,
             encoder=cfg.MODEL.ENCODER,
-            z_regression=cfg.MODEL.Z_REGRESSION
+            z_regression=cfg.MODEL.Z_REGRESSION,
+            average_w = cfg.MODEL.AVERAGE_W,
+            temporal_w = cfg.MODEL.TEMPORAL_W,
+            init_zeros = cfg.MODEL.TEMPORAL_W,
+            residual = cfg.MODEL.RESIDUAL,
+            w_classifier = cfg.MODEL.W_CLASSIFIER,
+            uniq_words = cfg.MODEL.UNIQ_WORDS,
+            attention = cfg.MODEL.ATTENTION,
     )
     model.cuda(local_rank)
     model.train()
@@ -128,11 +136,20 @@
             channels=cfg.MODEL.CHANNELS,
             generator=cfg.MODEL.GENERATOR,
             encoder=cfg.MODEL.ENCODER,
-            z_regression=cfg.MODEL.Z_REGRESSION)
+            z_regression=cfg.MODEL.Z_REGRESSION,
+            average_w = cfg.MODEL.AVERAGE_W,
+            spec_chans = cfg.DATASET.SPEC_CHANS,
+            temporal_w = cfg.MODEL.TEMPORAL_W,
+            init_zeros = cfg.MODEL.TEMPORAL_W,
+            residual = cfg.MODEL.RESIDUAL,
+            w_classifier = cfg.MODEL.W_CLASSIFIER,
+            uniq_words = cfg.MODEL.UNIQ_WORDS,
+            attention = cfg.MODEL.ATTENTION,
+    )
     model_s.cuda(local_rank)
     model_s.eval()
     model_s.requires_grad_(False)
-
+    print(model)
     if distributed:
         model = nn.parallel.DistributedDataParallel(
             model,
@@ -147,12 +164,16 @@
         mapping_tl = model.module.mapping_tl
         mapping_fl = model.module.mapping_fl
         dlatent_avg = model.module.dlatent_avg
+        if cfg.MODEL.W_CLASSIFIER:
+            mapping_tw = model.module.mapping_tw
     else:
         decoder = model.decoder
         encoder = model.encoder
         mapping_tl = model.mapping_tl
         mapping_fl = model.mapping_fl
         dlatent_avg = model.dlatent_avg
+        if cfg.MODEL.W_CLASSIFIER:
+            mapping_tw = model.mapping_tw
 
     count_param_override.print = lambda a: logger.info(a)
 
@@ -170,10 +191,17 @@
         {'params': mapping_fl.parameters()}
     ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0)
 
-    encoder_optimizer = LREQAdam([
-        {'params': encoder.parameters()},
-        {'params': mapping_tl.parameters()},
-    ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0)
+    if cfg.MODEL.W_CLASSIFIER:
+        encoder_optimizer = LREQAdam([
+            {'params': encoder.parameters()},
+            {'params': mapping_tl.parameters()},
+            {'params': mapping_tw.parameters()},
+        ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0)
+    else:
+        encoder_optimizer = LREQAdam([
+            {'params': encoder.parameters()},
+            {'params': mapping_tl.parameters()},
+        ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0)
 
     scheduler = ComboMultiStepLR(optimizers=
                                  {
@@ -210,17 +238,20 @@
                                 logger=logger,
                                 save=local_rank == 0)
 
-    extra_checkpoint_data = checkpointer.load()
+    extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True)
     logger.info("Starting from epoch: %d" % (scheduler.start_epoch()))
 
     arguments.update(extra_checkpoint_data)
 
     layer_to_resolution = decoder.layer_to_resolution
-
-    dataset = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS)
-
-    rnd = np.random.RandomState(3456)
-    latents = rnd.randn(32, cfg.MODEL.LATENT_SPACE_SIZE)
+    with open('train_param.json','r') as rfile:
+        param = json.load(rfile)
+    data_param, train_param, test_param = param['Data'], param['Train'], param['Test']
+    dataset = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS,param=param)
+    dataset_test = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS,train=False,param=param)
+
+    rnd = np.random.RandomState(1234)
+    latents = rnd.randn(len(dataset_test.dataset), cfg.MODEL.LATENT_SPACE_SIZE)
     samplez = torch.tensor(latents).float().cuda()
 
     lod2batch = lod_driver.LODDriver(cfg, logger, world_size, dataset_size=len(dataset) * world_size)
@@ -240,9 +271,11 @@
             src.append(x)
         sample = torch.stack(src)
     else:
-        dataset.reset(cfg.DATASET.MAX_RESOLUTION_LEVEL, 32)
-        sample = next(make_dataloader(cfg, logger, dataset, 32, local_rank))
-        sample = (sample / 127.5 - 1.)
+        dataset_test.reset(cfg.DATASET.MAX_RESOLUTION_LEVEL, len(dataset_test.dataset))
+        sample_dict_test = next(iter(dataset_test.iterator))
+        sample_spec_test = sample_dict_test['spkr_re_batch_all'].to('cuda').float()
+        # sample = next(make_dataloader(cfg, logger, dataset, 32, local_rank))
+        # sample = (sample / 127.5 - 1.)
 
     lod2batch.set_epoch(scheduler.start_epoch(), [encoder_optimizer, decoder_optimizer])
 
@@ -260,7 +293,8 @@
                                                                len(dataset) * world_size))
 
         dataset.reset(lod2batch.get_lod_power2(), lod2batch.get_per_GPU_batch_size())
-        batches = make_dataloader(cfg, logger, dataset, lod2batch.get_per_GPU_batch_size(), local_rank)
+
+        # batches = make_dataloader(cfg, logger, dataset, lod2batch.get_per_GPU_batch_size(), local_rank)
 
         scheduler.set_batch_size(lod2batch.get_batch_size(), lod2batch.lod)
 
@@ -270,15 +304,19 @@
         epoch_start_time = time.time()
 
         i = 0
-        for x_orig in tqdm(batches):
+        for sample_dict_train in tqdm(iter(dataset.iterator)):
             i += 1
+            x_orig = sample_dict_train['spkr_re_batch_all'].to('cuda').float()
+            words = sample_dict_train['word_batch_all'].to('cuda').long()
+            words = words.view(words.shape[0]*words.shape[1])
             with torch.no_grad():
-                if x_orig.shape[0] != lod2batch.get_per_GPU_batch_size():
-                    continue
-                if need_permute:
-                    x_orig = x_orig.permute(0, 3, 1, 2)
-                x_orig = (x_orig / 127.5 - 1.)
-
+                # if x_orig.shape[0] != lod2batch.get_per_GPU_batch_size():
+                #     continue
+                # if need_permute:
+                #     x_orig = x_orig.permute(0, 3, 1, 2)
+                # x_orig = (x_orig / 127.5 - 1.)
+                x_orig = F.avg_pool2d(x_orig,x_orig.shape[-1]//2**lod2batch.get_lod_power2(),x_orig.shape[-1]//2**lod2batch.get_lod_power2())
+                # x_orig = F.interpolate(x_orig, [x_orig.shape[-1]//2**lod2batch.get_lod_power2(),x_orig.shape[-1]//2**lod2batch.get_lod_power2()],mode='bilinear',align_corners=False)
                 blend_factor = lod2batch.get_blend_factor()
 
                 needed_resolution = layer_to_resolution[lod2batch.lod]
@@ -288,14 +326,20 @@
                     needed_resolution_prev = layer_to_resolution[lod2batch.lod - 1]
                     x_prev = F.avg_pool2d(x_orig, 2, 2)
                     x_prev_2x = F.interpolate(x_prev, needed_resolution)
+                    # x_prev_2x = F.interpolate(x_prev, needed_resolution,mode='bilinear',align_corners=False)
                     x = x * blend_factor + x_prev_2x * (1.0 - blend_factor)
 
             x.requires_grad = True
 
            encoder_optimizer.zero_grad()
-            loss_d = model(x, lod2batch.lod, blend_factor, d_train=True, ae=False)
-            tracker.update(dict(loss_d=loss_d))
-            loss_d.backward()
+            if cfg.MODEL.W_CLASSIFIER:
+                loss_d,loss_word = model(x, lod2batch.lod, blend_factor, d_train=True, ae=False,words=words)
+                tracker.update(dict(loss_d=loss_d,loss_word=loss_word))
+                (loss_d+loss_word).backward()
+            else:
+                loss_d = model(x, lod2batch.lod, blend_factor, d_train=True, ae=False)
+                tracker.update(dict(loss_d=loss_d))
+                loss_d.backward()
             encoder_optimizer.step()
 
             decoder_optimizer.zero_grad()
@@ -314,7 +358,7 @@
 
             if local_rank == 0:
                 betta = 0.5 ** (lod2batch.get_batch_size() / (10 * 1000.0))
-                model_s.lerp(model, betta)
+                model_s.lerp(model, betta,w_classifier = cfg.MODEL.W_CLASSIFIER)
 
         epoch_end_time = time.time()
         per_epoch_ptime = epoch_end_time - epoch_start_time
@@ -325,14 +369,14 @@
 
         if lod2batch.is_time_to_save():
             checkpointer.save("model_tmp_intermediate_lod%d" % lod_for_saving_model)
         if lod2batch.is_time_to_report():
-            save_sample(lod2batch, tracker, sample, samplez, x, logger, model_s, cfg, encoder_optimizer,
+            save_sample(lod2batch, tracker, sample_spec_test, samplez, x, logger, model_s, cfg, encoder_optimizer,
                         decoder_optimizer)
 
     scheduler.step()
 
     if local_rank == 0:
         checkpointer.save("model_tmp_lod%d" % lod_for_saving_model)
-        save_sample(lod2batch, tracker, sample, samplez, x, logger, model_s, cfg, encoder_optimizer, decoder_optimizer)
+        save_sample(lod2batch, tracker, sample_spec_test, samplez, x, logger, model_s, cfg, encoder_optimizer, decoder_optimizer)
 
     logger.info("Training finish!... save training results")
     if local_rank == 0:
@@ -341,5 +385,5 @@
 
 if __name__ == "__main__":
     gpu_count = torch.cuda.device_count()
-    run(train, get_cfg_defaults(), description='StyleGAN', default_config='configs/ffhq.yaml',
+    run(train, get_cfg_defaults(), description='StyleGAN', default_config='configs/ecog.yaml',
         world_size=gpu_count)
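On the TEMPORAL_W branch added to save_sample above: it only changes how the single encoded style is broadcast across the mapping layers, because a temporal w carries one extra axis. A minimal sketch of the two broadcasts (all shapes here are illustrative assumptions, not taken from net.py):

    import torch

    num_layers = 12                          # assumed mapping depth
    z_static   = torch.randn(2, 1, 128)      # (batch, 1, latent)
    z_temporal = torch.randn(2, 1, 128, 32)  # (batch, 1, latent, time), assumed layout

    w_static   = z_static.repeat(1, num_layers, 1)       # -> (2, 12, 128)
    w_temporal = z_temporal.repeat(1, num_layers, 1, 1)  # -> (2, 12, 128, 32)
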
diff --git a/train_param.json b/train_param.json
new file mode 100644
index 00000000..b0b44aad
--- /dev/null
+++ b/train_param.json
@@ -0,0 +1,50 @@
+{
+    "Prod":true,
+    "SpecBands":128,
+    "SelectRegion":["AUDITORY","BROCA","MOTO","SENSORY"],
+    "BlockRegion":[],
+    "UseGridOnly":true,
+    "ReshapeAsGrid":true,
+    "SeqLen":128,
+    "DOWN_TF_FS": 125,
+    "DOWN_ECOG_FS": 125,
+    "Subj":{
+        "NY742":{
+            "Crop": null,
+            "Task": ["VisRead","SenComp","PicN","AudN","AudRep"],
+            "TestNum":[50,15,50,15,50]
+        }
+    },
+    "Data":{
+        "Subj":"NY742",
+        "T": 100
+    },
+    "Train":{
+        "lr": 0.001,
+        "gamma": 0.8,
+        "no_cuda": false,
+        "batch_size": 10,
+        "num_epochs": 1000,
+        "save_model": true,
+        "save_interval": 50,
+        "save_dir": "/scratch/rw1691/connectivity/ECoG/Connectivity/CKpts/",
+        "log_interval": 100,
+        "ahead_onset": 32,
+        "loss": "L2",
+        "lam_reg": 0.01
+    },
+    "Test":{
+        "test_interval": 5,
+        "batch_size": 10,
+        "ahead_onset": 32
+    },
+    "Analyze":{
+        "epoch": 899,
+        "batch_size":2,
+        "SeqLen": 400,
+        "ahead_onset": 200,
+        "save_path": "/scratch/rw1691/connectivity/ECoG/Connectivity/AnalyzeResult"
+
+    }
+
+}
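For reference, this is how train_alae.py consumes the new file (the key accesses mirror the diff above; the trailing comments are interpretations of the fields, not documented semantics):

    import json

    with open('train_param.json', 'r') as rfile:
        param = json.load(rfile)
    data_param, train_param, test_param = param['Data'], param['Train'], param['Test']

    subj = data_param['Subj']                     # "NY742"
    seq_len = param['SeqLen']                     # 128 temporal samples per example
    test_counts = param['Subj'][subj]['TestNum']  # presumably held-out trials per task
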
From 08d7532ce33dddac36cd5ee5f120ccf36a4e397a Mon Sep 17 00:00:00 2001
From: Ran Wang
Date: Fri, 19 Jun 2020 15:19:23 -0400
Subject: [PATCH 02/14] attention

---
 ECoGDataSet.py                          |  21 +--
 checkpointer.py                         |   1 -
 configs/ecog.yaml                       |  34 +++--
 dataloader_ecog.py                      |   9 ++
 defaults.py                             |   7 +
 lreq.py                                 |   8 +-
 .../make_recon_figure_interpolation.py  |  67 ++++++---
 model.py                                |  45 +++++--
 net.py                                  | 127 +++++++++++++-----
 scheduler.py                            |   4 +-
 train_alae.py                           | 115 ++++++++++------
 train_param.json                        |   2 +-
 12 files changed, 306 insertions(+), 134 deletions(-)

diff --git a/ECoGDataSet.py b/ECoGDataSet.py
index add52832..f9803ef8 100644
--- a/ECoGDataSet.py
+++ b/ECoGDataSet.py
@@ -8,7 +8,10 @@
 import random
 import pandas
 from torch.utils.data import Dataset
-
+from defaults import get_cfg_defaults
+cfg = get_cfg_defaults()
+cfg.merge_from_file('configs/ecog.yaml')
+BCTS = cfg.DATASET.BCTS
 
 class ECoGDataset(Dataset):
     """docstring for ECoGDataset"""
@@ -102,12 +105,14 @@ def select_block(self,ecog,regions,mask,mni_coord,select,block):
             region_ind = np.delete(np.arange(regions.shape[0]),region_ind)
             region_ind = region_ind.astype(np.int64)
             return ecog[:,region_ind],regions[region_ind],mask[region_ind],mni_coord[region_ind]
-    def __init__(self, ReqSubjDict, mode = 'train', train_param = None):
+    def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None):
         """ ReqSubjDict can be a list of multiple subjects"""
         super(ECoGDataset, self).__init__()
         self.current_lod=2
         self.ReqSubjDict = ReqSubjDict
         self.mode = mode
+        self.BCTS = BCTS
+        self.SpecBands = cfg.DATASET.SPEC_CHANS
         with open('AllSubjectInfo.json','r') as rfile:
             allsubj_param = json.load(rfile)
         if (train_param == None):
             with open('train_param.json','r') as rfile:
@@ -129,8 +134,7 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None):
         [self.SelectRegion.extend(self.cortex[area]) for area in train_param["SelectRegion"]]
         self.BlockRegion = []
         [self.BlockRegion.extend(self.cortex[area]) for area in train_param["BlockRegion"]]
-        self.Prod,self.SpecBands,self.UseGridOnly,self.ReshapeAsGrid,self.SeqLen = train_param['Prod'],\
-                                                            train_param['SpecBands'],\
+        self.Prod,self.UseGridOnly,self.ReshapeAsGrid,self.SeqLen = train_param['Prod'],\
                                                             train_param['UseGridOnly'],\
                                                             train_param['ReshapeAsGrid'],\
                                                             train_param['SeqLen'],
@@ -737,11 +741,12 @@ def __getitem__(self, idx):
             mni_batch = self.meta_data['mni_coordinate_alldateset'][i]
 
             # ecog_batch = ecog_batch[np.newaxis,:,:]
-            spkr_batch = np.transpose(spkr_batch,[1,0])
             # mni_batch = np.transpose(mni_batch,[1,0])
-            if self.Prod:
-                # ecog_batch_re = ecog_batch_re[np.newaxis,:,:]
-                spkr_batch_re = np.transpose(spkr_batch_re,[1,0])
+            if not BCTS:
+                spkr_batch = np.transpose(spkr_batch,[1,0])
+                if self.Prod:
+                    # ecog_batch_re = ecog_batch_re[np.newaxis,:,:]
+                    spkr_batch_re = np.transpose(spkr_batch_re,[1,0])
 
             ecog_batch_all += [ecog_batch]
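The dataset module now resolves its configuration once, at import time. The same pattern in isolation (get_cfg_defaults and merge_from_file are exactly the calls used in the diff; the printed values are the ones set by configs/ecog.yaml as of this commit):

    from defaults import get_cfg_defaults

    cfg = get_cfg_defaults()                  # schema and defaults from defaults.py
    cfg.merge_from_file('configs/ecog.yaml')  # project overrides
    print(cfg.DATASET.BCTS, cfg.DATASET.SPEC_CHANS)  # True 64
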
diff --git a/checkpointer.py b/checkpointer.py
index 76f71685..7a363a98 100644
--- a/checkpointer.py
+++ b/checkpointer.py
@@ -82,7 +82,6 @@ def load(self, ignore_last_checkpoint=False, file_name=None):
             return {}
         if file_name is not None:
             f = file_name
-        import pdb; pdb.set_trace()
         self.logger.info("Loading checkpoint from {}".format(f))
         checkpoint = torch.load(f, map_location=torch.device("cpu"))
         for name, model in self.models.items():
diff --git a/configs/ecog.yaml b/configs/ecog.yaml
index 3dc1f0c1..aa2dbb64 100644
--- a/configs/ecog.yaml
+++ b/configs/ecog.yaml
@@ -14,7 +14,9 @@ DATASET:
   SAMPLES_PATH: ''
   STYLE_MIX_PATH: style_mixing/test_images/set_ecog
-  SPEC_CHANS: 128
+  SPEC_CHANS: 64
+  TEMPORAL_SAMPLES: 128
+  BCTS: True
   MAX_RESOLUTION_LEVEL: 7
 MODEL:
   LATENT_SPACE_SIZE: 128
@@ -25,33 +27,39 @@ MODEL:
   MAPPING_LAYERS: 8
   TRUNCATIOM_CUTOFF: 5
   CHANNELS: 1
-  UNIQ_WORDS: 50
-  #####PRE-TAKE OFF CHECKLIST!!!!!########
+  #####TAKE OFF CHECKLIST!!!########
   AVERAGE_W: False
   TEMPORAL_W: False
   RESIDUAL: True
   W_CLASSIFIER: False
-  #            4      8      16     32    64    128
-  # ATTENTION: [False, False, False, True, True, False]
-  ATTENTION: []
-OUTPUT_DIR: training_artifacts/ecog_residual_lgbs
+  CYCLE: True
+  ATTENTIONAL_STYLE: True
+  #T           4      8      16     32     64    128
+  ATTENTION: [False, False, False, False, True, True]
+  HEADS: 4
+  # ATTENTION: []
+OUTPUT_DIR: training_artifacts/ecog_residual_cycle_attention3264wStyleIN_specchan64_more_attentfeatures_heads4
 #####################################
 TRAIN:
+  W_WEIGHT: 1
+  CYCLE_WEIGHT: 1
   BASE_LEARNING_RATE: 0.002
   EPOCHS_PER_LOD: 16
   LEARNING_DECAY_RATE: 0.1
-  LEARNING_DECAY_STEPS: []
-  TRAIN_EPOCHS: 250
+  LEARNING_DECAY_STEPS: [96]
+  TRAIN_EPOCHS: 112
 #                     4    8    16   32  64  128  256
   LOD_2_BATCH_8GPU: [512, 256, 128, 64, 32, 32] # If GPU memory ~16GB reduce last number from 32 to 24
-  LOD_2_BATCH_4GPU: [512, 256, 128, 64, 32, 32]
-  LOD_2_BATCH_2GPU: [512, 256, 128, 64, 32, 32]
+  LOD_2_BATCH_4GPU: [64, 64, 64, 64, 32, 16]
+  LOD_2_BATCH_2GPU: [64, 64, 64, 64, 32, 8]
 
 #  LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 16]
 #  LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 32]
-  LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 16]
-  # LOD_2_BATCH_1GPU: [64, 64, 64, 64, 32, 16]
+  # LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 16]
+  # LOD_2_BATCH_1GPU: [128, 128, 128, 128, 64, 32]
+  # LOD_2_BATCH_1GPU: [512, 256, 256, 128, 64, 16]
+  LOD_2_BATCH_1GPU: [64, 64, 64, 64, 32, 16]
 
   LEARNING_RATES: [0.0015, 0.0015, 0.0015, 0.002, 0.003, 0.003]
 #  LEARNING_RATES: [0.0015, 0.0015, 0.0005, 0.0003, 0.0003, 0.0002]
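On the ATTENTION list above: following the "#T 4 8 16 32 64 128" comment, index i of the list appears to gate attention at temporal resolution 2**(i+2). A small sketch of that reading (an interpretation of the comment, not verified against net.py):

    attention = [False, False, False, False, True, True]  # from configs/ecog.yaml
    for i, enabled in enumerate(attention):
        print('resolution %3d: attention %s' % (2 ** (i + 2), enabled))
    # -> enabled only at resolutions 64 and 128
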
subjects""" super(ECoGDataset, self).__init__() self.current_lod=2 self.ReqSubjDict = ReqSubjDict self.mode = mode + self.BCTS = BCTS + self.SpecBands = cfg.DATASET.SPEC_CHANS with open('AllSubjectInfo.json','r') as rfile: allsubj_param = json.load(rfile) if (train_param == None): @@ -129,8 +134,7 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None): [self.SelectRegion.extend(self.cortex[area]) for area in train_param["SelectRegion"]] self.BlockRegion = [] [self.BlockRegion.extend(self.cortex[area]) for area in train_param["BlockRegion"]] - self.Prod,self.SpecBands,self.UseGridOnly,self.ReshapeAsGrid,self.SeqLen = train_param['Prod'],\ - train_param['SpecBands'],\ + self.Prod,self.UseGridOnly,self.ReshapeAsGrid,self.SeqLen = train_param['Prod'],\ train_param['UseGridOnly'],\ train_param['ReshapeAsGrid'],\ train_param['SeqLen'], @@ -737,11 +741,12 @@ def __getitem__(self, idx): mni_batch = self.meta_data['mni_coordinate_alldateset'][i] # ecog_batch = ecog_batch[np.newaxis,:,:] - spkr_batch = np.transpose(spkr_batch,[1,0]) # mni_batch = np.transpose(mni_batch,[1,0]) - if self.Prod: - # ecog_batch_re = ecog_batch_re[np.newaxis,:,:] - spkr_batch_re = np.transpose(spkr_batch_re,[1,0]) + if not BCTS: + spkr_batch = np.transpose(spkr_batch,[1,0]) + if self.Prod: + # ecog_batch_re = ecog_batch_re[np.newaxis,:,:] + spkr_batch_re = np.transpose(spkr_batch_re,[1,0]) ecog_batch_all += [ecog_batch] diff --git a/checkpointer.py b/checkpointer.py index 76f71685..7a363a98 100644 --- a/checkpointer.py +++ b/checkpointer.py @@ -82,7 +82,6 @@ def load(self, ignore_last_checkpoint=False, file_name=None): return {} if file_name is not None: f = file_name - import pdb; pdb.set_trace() self.logger.info("Loading checkpoint from {}".format(f)) checkpoint = torch.load(f, map_location=torch.device("cpu")) for name, model in self.models.items(): diff --git a/configs/ecog.yaml b/configs/ecog.yaml index 3dc1f0c1..aa2dbb64 100644 --- a/configs/ecog.yaml +++ b/configs/ecog.yaml @@ -14,7 +14,9 @@ DATASET: SAMPLES_PATH: '' STYLE_MIX_PATH: style_mixing/test_images/set_ecog - SPEC_CHANS: 128 + SPEC_CHANS: 64 + TEMPORAL_SAMPLES: 128 + BCTS: True MAX_RESOLUTION_LEVEL: 7 MODEL: LATENT_SPACE_SIZE: 128 @@ -25,33 +27,39 @@ MODEL: MAPPING_LAYERS: 8 TRUNCATIOM_CUTOFF: 5 CHANNELS: 1 - UNIQ_WORDS: 50 - #####PRE-TAKE OFF CHECKLIST!!!!!######## + #####TAKE OFF CHECKLIST!!!######## AVERAGE_W: False TEMPORAL_W: False RESIDUAL: True W_CLASSIFIER: False - # 4 8 16 32 64 128 - # ATTENTION: [False, False, False, True, True, False] - ATTENTION: [] -OUTPUT_DIR: training_artifacts/ecog_residual_lgbs + CYCLE: True + ATTENTIONAL_STYLE: True + #T 4 8 16 32 64 128 + ATTENTION: [False, False, False, False, True, True] + HEADS: 4 + # ATTENTION: [] +OUTPUT_DIR: training_artifacts/ecog_residual_cycle_attention3264wStyleIN_specchan64_more_attentfeatures_heads4 ##################################### TRAIN: + W_WEIGHT: 1 + CYCLE_WEIGHT: 1 BASE_LEARNING_RATE: 0.002 EPOCHS_PER_LOD: 16 LEARNING_DECAY_RATE: 0.1 - LEARNING_DECAY_STEPS: [] - TRAIN_EPOCHS: 250 + LEARNING_DECAY_STEPS: [96] + TRAIN_EPOCHS: 112 # 4 8 16 32 64 128 256 LOD_2_BATCH_8GPU: [512, 256, 128, 64, 32, 32] # If GPU memory ~16GB reduce last number from 32 to 24 - LOD_2_BATCH_4GPU: [512, 256, 128, 64, 32, 32] - LOD_2_BATCH_2GPU: [512, 256, 128, 64, 32, 32] + LOD_2_BATCH_4GPU: [64, 64, 64, 64, 32, 16] + LOD_2_BATCH_2GPU: [64, 64, 64, 64, 32, 8] # LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 16] # LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 32] - LOD_2_BATCH_1GPU: [512, 256, 128, 64, 
32, 16] - # LOD_2_BATCH_1GPU: [64, 64, 64, 64, 32, 16] + # LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 16] + # LOD_2_BATCH_1GPU: [128, 128, 128, 128, 64, 32] + # LOD_2_BATCH_1GPU: [512, 256, 256, 128, 64, 16] + LOD_2_BATCH_1GPU: [64, 64, 64, 64, 32, 16] LEARNING_RATES: [0.0015, 0.0015, 0.0015, 0.002, 0.003, 0.003] # LEARNING_RATES: [0.0015, 0.0015, 0.0005, 0.0003, 0.0003, 0.0002] diff --git a/dataloader_ecog.py b/dataloader_ecog.py index c8a91e8b..c3db1f21 100644 --- a/dataloader_ecog.py +++ b/dataloader_ecog.py @@ -26,6 +26,10 @@ from ECoGDataSet import ECoGDataset cpu = torch.device('cpu') +# class myDataLoader(torch.utils.data.DataLoader): +# def __init__(self,dataset,batch_size, shuffle,drop_last): +# super(myDataLoader).__init__() + class TFRecordsDataset: def __init__(self, cfg, logger, rank=0, world_size=1, buffer_size_mb=200, channels=3, seed=None, train=True, needs_labels=False,param=None): @@ -78,6 +82,11 @@ def __init__(self, cfg, logger, rank=0, world_size=1, buffer_size_mb=200, channe def reset(self, lod, batch_size): assert lod in self.filenames.keys() self.current_filenames = self.filenames[lod] + if batch_size!=self.batch_size: + self.iterator = torch.utils.data.DataLoader(self.dataset, + batch_size=batch_size, + shuffle=True if self.train else False, + drop_last=True if self.train else False) self.batch_size = batch_size self.dataset.current_lod=lod diff --git a/defaults.py b/defaults.py index 870f4c9d..298985b3 100644 --- a/defaults.py +++ b/defaults.py @@ -38,6 +38,8 @@ _C.DATASET.MAX_RESOLUTION_LEVEL = 10 _C.DATASET.SPEC_CHANS=128 +_C.DATASET.TEMPORAL_SAMPLES=128 +_C.DATASET.BCTS = True _C.MODEL = CN() @@ -62,6 +64,9 @@ _C.MODEL.W_CLASSIFIER = False _C.MODEL.UNIQ_WORDS =50 _C.MODEL.ATTENTION = [] +_C.MODEL.CYCLE = False +_C.MODEL.ATTENTIONAL_STYLE = False +_C.MODEL.HEADS = 1 _C.TRAIN = CN() _C.TRAIN.EPOCHS_PER_LOD = 15 @@ -72,6 +77,8 @@ _C.TRAIN.LEARNING_DECAY_RATE = 0.1 _C.TRAIN.LEARNING_DECAY_STEPS = [] _C.TRAIN.TRAIN_EPOCHS = 110 +_C.TRAIN.W_WEIGHT = 5 +_C.TRAIN.CYCLE_WEIGHT = 5 _C.TRAIN.LOD_2_BATCH_8GPU = [512, 256, 128, 64, 32, 32] _C.TRAIN.LOD_2_BATCH_4GPU = [512, 256, 128, 64, 32, 16] diff --git a/lreq.py b/lreq.py index a2d55c7f..8b66f91a 100644 --- a/lreq.py +++ b/lreq.py @@ -89,7 +89,7 @@ def forward(self, input): class Conv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, dilation=1, groups=1, bias=True, gain=np.sqrt(2.0), transpose=False, transform_kernel=False, lrmul=1.0, - implicit_lreq=use_implicit_lreq): + implicit_lreq=use_implicit_lreq,initial_weight=None): super(Conv2d, self).__init__() if in_channels % groups != 0: raise ValueError('in_channels must be divisible by groups') @@ -108,6 +108,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, self.transpose = transpose self.fan_in = np.prod(self.kernel_size) * in_channels // groups self.transform_kernel = transform_kernel + self.initial_weight = initial_weight if transpose: self.weight = Parameter(torch.Tensor(in_channels, out_channels // groups, *self.kernel_size)) else: @@ -125,7 +126,10 @@ def reset_parameters(self): if not self.implicit_lreq: init.normal_(self.weight, mean=0, std=1.0 / self.lrmul) else: - init.normal_(self.weight, mean=0, std=self.std / self.lrmul) + if self.initial_weight: + self.weight = self.initial_weight + else: + init.normal_(self.weight, mean=0, std=self.std / self.lrmul) setattr(self.weight, 'lr_equalization_coef', self.std) if self.bias is not None: setattr(self.bias, 
'lr_equalization_coef', self.lrmul) diff --git a/make_figures/make_recon_figure_interpolation.py b/make_figures/make_recon_figure_interpolation.py index f42b4c40..036e90e3 100644 --- a/make_figures/make_recon_figure_interpolation.py +++ b/make_figures/make_recon_figure_interpolation.py @@ -22,6 +22,7 @@ from dlutils.pytorch import count_parameters from defaults import get_cfg_defaults import lreq +import os from PIL import Image @@ -58,12 +59,25 @@ def sample(cfg, logger): layer_count=cfg.MODEL.LAYER_COUNT, maxf=cfg.MODEL.MAX_CHANNEL_COUNT, latent_size=cfg.MODEL.LATENT_SPACE_SIZE, - truncation_psi=cfg.MODEL.TRUNCATIOM_PSI, - truncation_cutoff=cfg.MODEL.TRUNCATIOM_CUTOFF, + dlatent_avg_beta=cfg.MODEL.DLATENT_AVG_BETA, + style_mixing_prob=cfg.MODEL.STYLE_MIXING_PROB, mapping_layers=cfg.MODEL.MAPPING_LAYERS, channels=cfg.MODEL.CHANNELS, generator=cfg.MODEL.GENERATOR, - encoder=cfg.MODEL.ENCODER) + encoder=cfg.MODEL.ENCODER, + z_regression=cfg.MODEL.Z_REGRESSION, + average_w = cfg.MODEL.AVERAGE_W, + temporal_w = cfg.MODEL.TEMPORAL_W, + spec_chans = cfg.DATASET.SPEC_CHANS, + temporal_samples = cfg.DATASET.TEMPORAL_SAMPLES, + init_zeros = cfg.MODEL.TEMPORAL_W, + residual = cfg.MODEL.RESIDUAL, + w_classifier = cfg.MODEL.W_CLASSIFIER, + uniq_words = cfg.MODEL.UNIQ_WORDS, + attention = cfg.MODEL.ATTENTION, + cycle = cfg.MODEL.CYCLE, + w_weight = cfg.TRAIN.W_WEIGHT, + cycle_weight=cfg.TRAIN.CYCLE_WEIGHT,) model.cuda(0) model.eval() model.requires_grad_(False) @@ -73,7 +87,6 @@ def sample(cfg, logger): mapping_tl = model.mapping_tl mapping_fl = model.mapping_fl dlatent_avg = model.dlatent_avg - logger.info("Trainable parameters generator:") count_parameters(decoder) @@ -97,12 +110,12 @@ def sample(cfg, logger): logger=logger, save=False) - extra_checkpoint_data = checkpointer.load() + extra_checkpoint_data = checkpointer.load(file_name='./training_artifacts/ecog_residual_cycle_attention3264wStyleIN_specchan64_more_attentfeatures/model_tmp_lod5.pth') + # extra_checkpoint_data = checkpointer.load(file_name='./training_artifacts/ecog_residual_cycle_attention3264wIN_specchan64_more_attentfeatures/model_tmp_lod4.pth') model.eval() layer_count = cfg.MODEL.LAYER_COUNT - def encode(x): Z, _ = model.encode(x, layer_count - 1, 1) Z = Z.repeat(1, model.mapping_fl.num_layers, 1) @@ -114,26 +127,36 @@ def decode(x): coefs = torch.where(layer_idx < model.truncation_cutoff, ones, ones) # x = torch.lerp(model.dlatent_avg.buff.data, x, coefs) return model.decoder(x, layer_count - 1, 1, noise=True) - + import pdb; pdb.set_trace() rnd = np.random.RandomState(4) latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE) path = cfg.DATASET.SAMPLES_PATH im_size = 2 ** (cfg.MODEL.LAYER_COUNT + 1) - pathA = '00001.png' - pathB = '00022.png' - pathC = '00077.png' - pathD = '00016.png' - + pathA = 'kite.npy' + pathB = 'cat.npy' + pathC = 'hat.npy' + pathD = 'cake.npy' + + # def open_image(filename): + # img = np.asarray(Image.open(path + '/' + filename)) + # if img.shape[2] == 4: + # img = img[:, :, :3] + # im = img.transpose((2, 0, 1)) + # x = torch.tensor(np.asarray(im, dtype=np.float32), device='cpu', requires_grad=True).cuda() / 127.5 - 1. 
+ # if x.shape[0] == 4: + # x = x[:3] + # factor = x.shape[2] // im_size + # if factor != 1: + # x = torch.nn.functional.avg_pool2d(x[None, ...], factor, factor)[0] + # assert x.shape[2] == im_size + # _latents = encode(x[None, ...].cuda()) + # latents = _latents[0, 0] + # return latents def open_image(filename): - img = np.asarray(Image.open(path + '/' + filename)) - if img.shape[2] == 4: - img = img[:, :, :3] - im = img.transpose((2, 0, 1)) - x = torch.tensor(np.asarray(im, dtype=np.float32), device='cpu', requires_grad=True).cuda() / 127.5 - 1. - if x.shape[0] == 4: - x = x[:3] + im = np.load(os.path.join(path, filename)) + x = torch.tensor(np.asarray(im, dtype=np.float32), device='cpu', requires_grad=True).cuda() factor = x.shape[2] // im_size if factor != 1: x = torch.nn.functional.avg_pool2d(x[None, ...], factor, factor)[0] @@ -152,7 +175,7 @@ def make(w): wb = open_image(pathB) wc = open_image(pathC) wd = open_image(pathD) - + import pdb; pdb.set_trace() height = 7 width = 7 @@ -174,12 +197,12 @@ def make(w): images.append(interpolated) images = torch.cat(images) - + os.makedirs('make_figures/output/%s' % cfg.NAME, exist_ok=True) save_image(images * 0.5 + 0.5, 'make_figures/output/%s/interpolations.png' % cfg.NAME, nrow=width) save_image(images * 0.5 + 0.5, 'make_figures/output/%s/interpolations.jpg' % cfg.NAME, nrow=width) if __name__ == "__main__": gpu_count = 1 - run(sample, get_cfg_defaults(), description='ALAE-interpolations', default_config='configs/ffhq.yaml', + run(sample, get_cfg_defaults(), description='ALAE-interpolations', default_config='configs/ecog.yaml', world_size=gpu_count, write_log=False) diff --git a/model.py b/model.py index 8f191e41..837c1b51 100644 --- a/model.py +++ b/model.py @@ -20,10 +20,10 @@ class DLatent(nn.Module): - def __init__(self, dlatent_size, layer_count,temporal_w=False): + def __init__(self, dlatent_size, layer_count,temporal_w=False,temporal_samples=128): super(DLatent, self).__init__() if temporal_w: - buffer = torch.zeros(layer_count, dlatent_size, 128, dtype=torch.float32) + buffer = torch.zeros(layer_count, dlatent_size, temporal_samples, dtype=torch.float32) else: buffer = torch.zeros(layer_count, dlatent_size, dtype=torch.float32) self.register_buffer('buff', buffer) @@ -31,14 +31,18 @@ def __init__(self, dlatent_size, layer_count,temporal_w=False): class Model(nn.Module): def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, uniq_words=50, mapping_layers=5, dlatent_avg_beta=None, - truncation_psi=None, truncation_cutoff=None, style_mixing_prob=None, channels=3, generator="", - encoder="", z_regression=False,average_w=False,temporal_w=False,init_zeros=False,spec_chans=128,residual=False,w_classifier=False,attention=None): + truncation_psi=None, truncation_cutoff=None, style_mixing_prob=None, channels=3, generator="", encoder="", + z_regression=False,average_w=False,spec_chans = 128,temporal_samples=128,temporal_w=False, init_zeros=False, + residual=False,w_classifier=False,attention=None,cycle=None,w_weight=1.0,cycle_weight=1.0, attentional_style=False,heads=1): super(Model, self).__init__() self.layer_count = layer_count self.z_regression = z_regression self.temporal_w = temporal_w self.w_classifier = w_classifier + self.cycle = cycle + self.w_weight=w_weight + self.cycle_weight=cycle_weight self.mapping_tl = MAPPINGS["MappingToLatent"]( latent_size=latent_size, @@ -67,11 +71,15 @@ def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, uniq_wor layer_count=layer_count, maxf=maxf, 
latent_size=latent_size, - channels=channels,spec_chans=spec_chans, + channels=channels, + spec_chans=spec_chans, temporal_samples = temporal_samples, temporal_w = temporal_w, init_zeros = init_zeros, residual = residual, - attention=attention,) + attention=attention, + attentional_style=attentional_style, + heads = heads, + ) self.encoder = ENCODERS[encoder]( startf=startf, @@ -79,10 +87,14 @@ def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, uniq_wor maxf=maxf, latent_size=latent_size, channels=channels, + spec_chans=spec_chans, temporal_samples = temporal_samples, average_w=average_w, temporal_w = temporal_w, residual = residual, - attention=attention,) + attention=attention, + attentional_style=attentional_style, + heads = heads, + ) self.dlatent_avg = DLatent(latent_size, self.mapping_fl.num_layers,temporal_w=temporal_w) self.latent_size = latent_size @@ -162,16 +174,25 @@ def forward(self, x, lod, blend_factor, d_train, ae, words=None): z = torch.randn(x.shape[0], self.latent_size) s, rec = self.generate(lod, blend_factor, z=z, mixing=False, noise=True, return_styles=True) - Z, d_result_real = self.encode(rec, lod, blend_factor) + Z, _ = self.encode(rec, lod, blend_factor) + if self.cycle: + Z_real, _ = self.encode(x, lod, blend_factor) + Z_real = Z_real.repeat(1, self.mapping_fl.num_layers, 1) + rec = self.decoder.forward(Z_real, lod, blend_factor, noise=True) + Lcycle = self.cycle_weight*torch.mean((rec.detach() - x.detach()).abs()) + assert Z.shape == s.shape if self.z_regression: - Lae = torch.mean(((Z[:, 0] - z)**2)) + Lae = self.w_weight*torch.mean(((Z[:, 0] - z)**2)) else: - Lae = torch.mean(((Z - s.detach())**2)) + Lae = self.w_weight*torch.mean(((Z - s.detach())**2)) - return Lae + if self.cycle: + return Lae,Lcycle + else: + return Lae elif d_train: with torch.no_grad(): @@ -183,7 +204,7 @@ def forward(self, x, lod, blend_factor, d_train, ae, words=None): _, d_result_real, word_logits = self.encode(x, lod, blend_factor,word_classify=True) else: _, d_result_real = self.encode(x, lod, blend_factor) - _, d_result_fake = self.encode(Xp.detach(), lod, blend_factor) + _, d_result_fake = self.encode(Xp.detach(), lod, blend_factor) loss_d = losses.discriminator_logistic_simple_gp(d_result_fake, d_result_real, x) if self.w_classifier: diff --git a/net.py b/net.py index 2438e3fb..735190bb 100644 --- a/net.py +++ b/net.py @@ -38,12 +38,15 @@ def pixel_norm(x, epsilon=1e-8): return x * torch.rsqrt(torch.mean(x.pow(2.0), dim=1, keepdim=True) + epsilon) -def style_mod(x, style): +def style_mod(x, style, bias = True): if style.dim()==2: style = style.view(style.shape[0], 2, x.shape[1], 1, 1) elif style.dim()==3: style = style.view(style.shape[0], 2, x.shape[1], style.shape[2], 1) - return torch.addcmul(style[:, 1], value=1.0, tensor1=x, tensor2=style[:, 0] + 1) + if bias: + return torch.addcmul(style[:, 1], value=1.0, tensor1=x, tensor2=style[:, 0] + 1) + else: + return x*(style[:,0]+1) def upscale2d(x, factor=2): @@ -77,38 +80,93 @@ def __init__(self, channels): def forward(self, x): return F.conv2d(x, weight=self.weight, groups=self.groups, padding=1) +class AdaIN(nn.Module): + def __init__(self, latent_size,outputs,temporal_w=False): + super(AdaIN, self).__init__() + self.instance_norm = nn.InstanceNorm2d(outputs,affine=False, eps=1e-8) + self.style = sn(ln.Conv1d(latent_size, 2 * outputs,1,1,0,gain=1)) if temporal_w else sn(ln.Linear(latent_size, 2 * outputs, gain=1)) + def forward(self,x,w): + x = self.instance_norm(x) + x = style_mod(x,self.style(w)) + return x 
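For intuition, the AdaIN block above is just instance normalization followed by a learned per-channel scale and shift derived from w. A minimal self-contained sketch (plain nn.Linear stands in for the lr-equalized, spectrally normalized sn/ln layers used above, and a global w is assumed, i.e. temporal_w=False):

import torch
import torch.nn as nn

B, C, latent = 2, 8, 16                  # toy batch size, channels, latent width
x = torch.randn(B, C, 4, 4)              # feature map, B x C x H x W
w = torch.randn(B, latent)               # global style vector

norm = nn.InstanceNorm2d(C, affine=False, eps=1e-8)
style = nn.Linear(latent, 2 * C)         # predicts per-channel (scale, shift)

s = style(w).view(B, 2, C, 1, 1)         # the reshape style_mod uses for 2-D styles
out = norm(x) * (s[:, 0] + 1) + s[:, 1]  # normalize, then scale and shift
assert out.shape == x.shape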
+ +class INencoder(nn.Module): + def __init__(self, inputs,latent_size,temporal_w=False): + super(INencoder, self).__init__() + self.temporal_w = temporal_w + self.instance_norm = nn.InstanceNorm2d(inputs,affine=False) + self.style = sn(ln.Conv1d(2 * inputs, latent_size,1,1,0)) if temporal_w else sn(ln.Linear(2 * inputs, latent_size)) + def forward(self,x): + m = torch.mean(x, dim=[3] if self.temporal_w else [2,3], keepdim=True) + std = torch.sqrt(torch.mean((x - m) ** 2, dim=[3] if self.temporal_w else [2,3], keepdim=True)) + style = torch.cat((m,std),dim=1) + x = self.instance_norm(x) + if self.temporal_w: + w = self.style(style.view(style.shape[0], style.shape[1],style.shape[2])) + else: + w = self.style(style.view(style.shape[0], style.shape[1])) + return x,w + class Attention(nn.Module): - def __init__(self, inputs,temporal_w=False,attentional_style=False): + def __init__(self, inputs,temporal_w=False,attentional_style=False,decoding=True,latent_size=None,heads=1): super(Attention, self).__init__() # Channel multiplier self.inputs = inputs self.temporal_w = temporal_w + self.decoding = decoding self.attentional_style = attentional_style - self.theta = sn(ln.Conv2d(inputs, inputs // 8, 1,1,0, bias=False)) - self.phi = sn(ln.Conv2d(inputs, inputs // 8, 1,1,0, bias=False)) + self.att_denorm = 1 + self.heads = heads + self.theta = sn(ln.Conv2d(inputs, inputs // self.att_denorm, 1,1,0, bias=False)) + self.phi = sn(ln.Conv2d(inputs, inputs // self.att_denorm, 1,1,0, bias=False)) self.g = sn(ln.Conv2d(inputs, inputs // 2, 1,1,0, bias=False)) self.o = sn(ln.Conv2d(inputs // 2, inputs, 1,1,0, bias=False)) + if not attentional_style: + self.norm_theta = nn.InstanceNorm2d(inputs // self.att_denorm,affine=True) + self.norm_phi = nn.InstanceNorm2d(inputs // self.att_denorm,affine=True) + self.norm_g = nn.InstanceNorm2d(inputs // 2,affine=True) + else: + if decoding: + self.norm_theta = AdaIN(latent_size,inputs//self.att_denorm,temporal_w=temporal_w) + self.norm_phi = AdaIN(latent_size,inputs//self.att_denorm,temporal_w=temporal_w) + self.norm_g = AdaIN(latent_size,inputs//2,temporal_w=temporal_w) + else: + self.norm_theta = INencoder(inputs//self.att_denorm,latent_size,temporal_w=temporal_w) + self.norm_phi = INencoder(inputs//self.att_denorm,latent_size,temporal_w=temporal_w) + self.norm_g = INencoder(inputs//2,latent_size,temporal_w=temporal_w) + # Learnable gain parameter self.gamma = P(torch.tensor(0.), requires_grad=True) - def forward(self, x, y=None): + def forward(self, x, w=None): # Apply convs x = x.contiguous() theta = self.theta(x) + theta = self.norm_theta(theta,w) if (self.attentional_style and self.decoding) else self.norm_theta(theta) phi = F.max_pool2d(self.phi(x), [2,2]) + phi = self.norm_phi(phi,w) if (self.attentional_style and self.decoding) else self.norm_phi(phi) g = F.max_pool2d(self.g(x), [2,2]) + g = self.norm_g(g,w) if (self.attentional_style and self.decoding) else self.norm_g(g) + if self.attentional_style and not self.decoding: + theta,w_theta = theta + phi,w_phi = phi + g,w_g = g + w = w_theta+w_phi+w_g + # Perform reshapes - theta = theta.view(-1, self.inputs // 8, x.shape[2] * x.shape[3]) - phi = phi.view(-1, self.inputs // 8, x.shape[2] * x.shape[3] // 4) - g = g.view(-1, self.inputs // 2, x.shape[2] * x.shape[3] // 4) + self.theta_ = theta.reshape(-1, self.inputs // self.att_denorm//self.heads, self.heads ,x.shape[2] * x.shape[3]) + self.phi_ = phi.reshape(-1, self.inputs // self.att_denorm//self.heads, self.heads, x.shape[2] * x.shape[3] // 4) + g = 
g.reshape(-1, self.inputs // 2//self.heads, self.heads, x.shape[2] * x.shape[3] // 4) # Matmul and softmax to get attention maps - beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1) + self.beta = F.softmax(torch.einsum('bchi,bchj->bhij',self.theta_, self.phi_), -1) + # self.beta = F.softmax(torch.bmm(self.theta_, self.phi_), -1) # Attention map times g path - o = self.o(torch.bmm(g, beta.transpose(1,2)).view(-1, self.inputs // 2, x.shape[2], x.shape[3])) - return self.gamma * o + x + o = self.o(torch.einsum('bchj,bhij->bchi',g, self.beta).reshape(-1, self.inputs // 2, x.shape[2], x.shape[3])) + # o = self.o(torch.bmm(g, self.beta.transpose(1,2)).view(-1, self.inputs // 2, x.shape[2], x.shape[3])) + return (self.gamma * o + x, w) if (self.attentional_style and (not self.decoding)) else self.gamma * o + x class EncodeBlock(nn.Module): - def __init__(self, inputs, outputs, latent_size, last=False,islast=False, fused_scale=True,temporal_w=False,residual=False,resample=False): + def __init__(self, inputs, outputs, latent_size, last=False,islast=False, fused_scale=True,temporal_w=False,residual=False,resample=False,temporal_samples=None,spec_chans=None): super(EncodeBlock, self).__init__() self.conv_1 = sn(ln.Conv2d(inputs, inputs, 3, 1, 1, bias=False)) # self.conv_1 = ln.Conv2d(inputs + (1 if last else 0), inputs, 3, 1, 1, bias=False) @@ -123,9 +181,9 @@ def __init__(self, inputs, outputs, latent_size, last=False,islast=False, fused_ self.temporal_w = temporal_w if last: if self.temporal_w: - self.dense = sn(ln.Linear(inputs * 4 * 4, outputs)) + self.conv_2 = sn(ln.Conv2d(inputs * spec_chans, outputs, 3, 1, 1, bias=False)) else: - self.conv_2 = sn(ln.Conv2d(inputs * 4, outputs, 3, 1, 1, bias=False)) + self.dense = sn(ln.Linear(inputs * temporal_samples * spec_chans, outputs)) else: if resample and fused_scale: self.conv_2 = sn(ln.Conv2d(inputs, outputs, 3, 2, 1, bias=False, transform_kernel=True)) @@ -418,7 +476,7 @@ def forward(self, x): @ENCODERS.register("EncoderDefault") class Encoder_old(nn.Module): - def __init__(self, startf, maxf, layer_count, latent_size, channels=3,average_w = False,temporal_w=False,residual=False,attention=None): + def __init__(self, startf, maxf, layer_count, latent_size, channels=3,average_w = False,temporal_w=False,residual=False,attention=None,temporal_samples=None,spec_chans=None,attentional_style=False,heads=1): super(Encoder_old, self).__init__() self.maxf = maxf self.startf = startf @@ -428,6 +486,7 @@ def __init__(self, startf, maxf, layer_count, latent_size, channels=3,average_w self.latent_size = latent_size self.average_w = average_w self.temporal_w = temporal_w + self.attentional_style = attentional_style mul = 2 inputs = startf self.encode_block: nn.ModuleList[EncodeBlock] = nn.ModuleList() @@ -439,12 +498,13 @@ def __init__(self, startf, maxf, layer_count, latent_size, channels=3,average_w self.from_rgb.append(FromRGB(channels, inputs,residual=residual)) apply_attention = attention and attention[self.layer_count-i-1] - non_local = Attention(inputs,temporal_w=None,attentional_style=None) if apply_attention else None + non_local = Attention(inputs,temporal_w=temporal_w,attentional_style=attentional_style,decoding=False,latent_size=latent_size,heads=heads) if apply_attention else None self.attention_block.append(non_local) fused_scale = resolution >= 128 - + current_spec_chans = spec_chans // 2**i + current_temporal_samples = temporal_samples // 2**i islast = i==(self.layer_count-1) - block = EncodeBlock(inputs, outputs, latent_size, False, 
islast, fused_scale=fused_scale,temporal_w=temporal_w,residual=residual,resample=True) + block = EncodeBlock(inputs, outputs, latent_size, False, islast, fused_scale=fused_scale,temporal_w=temporal_w,residual=residual,resample=True,temporal_samples=current_temporal_samples,spec_chans=current_spec_chans) resolution //= 2 @@ -466,11 +526,13 @@ def encode(self, x, lod): for i in range(self.layer_count - lod - 1, self.layer_count): if self.attention_block[i]: x = self.attention_block[i](x) + if self.attentional_style: + x,s = x x, s1, s2 = self.encode_block[i](x) if self.temporal_w and i!=0: s1 = F.interpolate(s1,scale_factor=2**i) s2 = F.interpolate(s2,scale_factor=2**i) - styles[:, 0] += s1 + s2 + styles[:, 0] += s1 + s2 + (s if (self.attention_block[i] and self.attentional_style) else 0) if self.average_w: styles /= (lod+1) return styles @@ -481,16 +543,17 @@ def encode2(self, x, lod, blend): styles = torch.zeros(x.shape[0], 1, self.latent_size,128) else: styles = torch.zeros(x.shape[0], 1, self.latent_size) - x = self.from_rgb[self.layer_count - lod - 1](x) x = F.leaky_relu(x, 0.2) if self.attention_block[self.layer_count - lod - 1]: x = self.attention_block[self.layer_count - lod - 1](x) + if self.attentional_style: + x,s = x x, s1, s2 = self.encode_block[self.layer_count - lod - 1](x) if self.temporal_w and i!=0: s1 = F.interpolate(s1,scale_factor=2**(layer_count - lod - 1)) s2 = F.interpolate(s2,scale_factor=2**(layer_count - lod - 1)) - styles[:, 0] += s1 * blend + s2 * blend + styles[:, 0] += s1 * blend + s2 * blend + (s*blend if (self.attention_block[self.layer_count - lod - 1] and self.attentional_style) else 0) x_prev = F.avg_pool2d(x_orig, 2, 2) @@ -502,11 +565,13 @@ def encode2(self, x, lod, blend): for i in range(self.layer_count - (lod - 1) - 1, self.layer_count): if self.attention_block[i]: x = self.attention_block[i](x) + if self.attentional_style: + x,s = x x, s1, s2 = self.encode_block[i](x) if self.temporal_w and i!=0: s1 = F.interpolate(s1,scale_factor=2**i) s2 = F.interpolate(s2,scale_factor=2**i) - styles[:, 0] += s1 + s2 + styles[:, 0] += s1 + s2 + (s if (self.attention_block[i] and self.attentional_style) else 0) if self.average_w: styles /= (lod+1) return styles @@ -847,7 +912,7 @@ def forward(self, x, lod, blend): @GENERATORS.register("GeneratorDefault") class Generator(nn.Module): - def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels=3, spec_chans=128,temporal_w=False,init_zeros=False,residual=False,attention=None): + def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels=3, temporal_samples=128,spec_chans=128,temporal_w=False,init_zeros=False,residual=False,attention=None,attentional_style=False,heads=1): super(Generator, self).__init__() self.maxf = maxf self.startf = startf @@ -858,11 +923,13 @@ def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels self.temporal_w = temporal_w self.init_zeros = init_zeros self.attention = attention + self.attentional_style = attentional_style mul = 2 ** (self.layer_count - 1) inputs = min(self.maxf, startf * mul) init_specchans = spec_chans//2**(self.layer_count-1) - self.const = Parameter(torch.Tensor(1, inputs, 4, init_specchans)) + init_temporalsamples = temporal_samples//2**(self.layer_count-1) + self.const = Parameter(torch.Tensor(1, inputs, init_temporalsamples, init_specchans)) if init_zeros: init.zeros_(self.const) else: @@ -894,7 +961,7 @@ def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels 
#print("decode_block%d %s styles in: %dl out resolution: %d" % ( # (i + 1), millify(count_parameters(block)), outputs, resolution)) apply_attention = attention and attention[i] - non_local = Attention(outputs,temporal_w=None,attentional_style=None) if apply_attention else None + non_local = Attention(outputs,temporal_w=temporal_w,attentional_style=attentional_style,decoding=True,latent_size=latent_size,heads=heads) if apply_attention else None self.decode_block.append(block) self.attention_block.append(non_local) inputs = outputs @@ -914,7 +981,7 @@ def decode(self, styles, lod, noise): w2 = styles[:, 2 * i + 1] x = self.decode_block[i](x, w1, w2, noise) if self.attention_block[i]: - x = self.attention_block[i](x) + x = self.attention_block[i](x,w1) if self.attentional_style else self.attention_block[i](x) x = self.to_rgb[lod](x) return x @@ -931,7 +998,7 @@ def decode2(self, styles, lod, blend, noise): w2 = styles[:, 2 * i + 1] x = self.decode_block[i](x, w1, w2, noise) if self.attention_block[i]: - x = self.attention_block[i](x) + x = self.attention_block[i](x,w1) if self.attentional_style else self.attention_block[i](x) x_prev = self.to_rgb[lod - 1](x) if self.temporal_w and lod!=self.layer_count-1: @@ -942,12 +1009,12 @@ def decode2(self, styles, lod, blend, noise): w2 = styles[:, 2 * lod + 1] x = self.decode_block[lod](x, w1, w2, noise) if self.attention_block[lod]: - x = self.attention_block[lod](x) + x = self.attention_block[lod](x,w1) if self.attentional_style else self.attention_block[lod](x) x = self.to_rgb[lod](x) needed_resolution = self.layer_to_resolution[lod] - x_prev = F.interpolate(x_prev, size=needed_resolution) + x_prev = F.interpolate(x_prev, scale_factor = 2.0) x = torch.lerp(x_prev, x, blend) return x diff --git a/scheduler.py b/scheduler.py index d92284a7..4bebc582 100644 --- a/scheduler.py +++ b/scheduler.py @@ -63,9 +63,9 @@ def get_lr(self): alpha = float(self.last_epoch) / self.warmup_iters warmup_factor = self.warmup_factor * (1 - alpha) + alpha return [ - base_lr[self.lod] + np.maximum(base_lr[self.lod] * warmup_factor - * self.gamma ** bisect_right(self.milestones, self.last_epoch) + * self.gamma ** bisect_right(self.milestones, self.last_epoch),1e-4) # * float(self.batch_size) # / float(self.reference_batch_size) for base_lr in self.base_lrs diff --git a/train_alae.py b/train_alae.py index 2866479b..f07dada7 100644 --- a/train_alae.py +++ b/train_alae.py @@ -31,6 +31,7 @@ from defaults import get_cfg_defaults import lod_driver from PIL import Image +import numpy as np def save_sample(lod2batch, tracker, sample, samplez, x, logger, model, cfg, encoder_optimizer, decoder_optimizer): os.makedirs('results', exist_ok=True) @@ -39,46 +40,59 @@ def save_sample(lod2batch, tracker, sample, samplez, x, logger, model, cfg, enco lod2batch.get_blend_factor(), encoder_optimizer.param_groups[0]['lr'], decoder_optimizer.param_groups[0]['lr'], torch.cuda.max_memory_allocated() / 1024.0 / 1024.0)) - + # sample = sample.transpose(-2,-1) with torch.no_grad(): model.eval() # sample = sample[:lod2batch.get_per_GPU_batch_size()] # samplez = samplez[:lod2batch.get_per_GPU_batch_size()] needed_resolution = model.decoder.layer_to_resolution[lod2batch.lod] - sample_in = sample - while sample_in.shape[2] > needed_resolution: - sample_in = F.avg_pool2d(sample_in, 2, 2) - assert sample_in.shape[2] == needed_resolution - - blend_factor = lod2batch.get_blend_factor() - if lod2batch.in_transition: - needed_resolution_prev = model.decoder.layer_to_resolution[lod2batch.lod - 1] - 
sample_in_prev = F.avg_pool2d(sample_in, 2, 2) - sample_in_prev_2x = F.interpolate(sample_in_prev, needed_resolution) - sample_in = sample_in * blend_factor + sample_in_prev_2x * (1.0 - blend_factor) - - Z, _ = model.encode(sample_in, lod2batch.lod, blend_factor) - - if cfg.MODEL.Z_REGRESSION: - Z = model.mapping_fl(Z[:, 0]) - else: - if cfg.MODEL.TEMPORAL_W: - Z = Z.repeat(1, model.mapping_fl.num_layers, 1,1) + sample_in_all = torch.tensor([]) + rec1_all = torch.tensor([]) + rec2_all = torch.tensor([]) + g_rec_all = torch.tensor([]) + for i in range(0,sample.shape[0],9): + sample_in = sample[i:np.minimum(i+9,sample.shape[0])] + x_in = x[i:np.minimum(i+9,sample.shape[0])] + samplez_in = samplez[i:np.minimum(i+9,sample.shape[0])] + while sample_in.shape[2] > needed_resolution: + sample_in = F.avg_pool2d(sample_in, 2, 2) + assert sample_in.shape[2] == needed_resolution + + blend_factor = lod2batch.get_blend_factor() + if lod2batch.in_transition: + needed_resolution_prev = model.decoder.layer_to_resolution[lod2batch.lod - 1] + sample_in_prev = F.avg_pool2d(sample_in, 2, 2) + sample_in_prev_2x = F.interpolate(sample_in_prev, scale_factor=2) + sample_in = sample_in * blend_factor + sample_in_prev_2x * (1.0 - blend_factor) + + Z, _ = model.encode(sample_in, lod2batch.lod, blend_factor) + + if cfg.MODEL.Z_REGRESSION: + Z = model.mapping_fl(Z[:, 0]) else: - Z = Z.repeat(1, model.mapping_fl.num_layers, 1) - - rec1 = model.decoder(Z, lod2batch.lod, blend_factor, noise=False) - rec2 = model.decoder(Z, lod2batch.lod, blend_factor, noise=True) - - # rec1 = F.interpolate(rec1, sample.shape[2]) - # rec2 = F.interpolate(rec2, sample.shape[2]) - # sample_in = F.interpolate(sample_in, sample.shape[2]) - - Z = model.mapping_fl(samplez) - g_rec = model.decoder(Z, lod2batch.lod, blend_factor, noise=True) - # g_rec = F.interpolate(g_rec, sample.shape[2]) - resultsample = torch.cat([sample_in, rec1, rec2, g_rec], dim=0) + if cfg.MODEL.TEMPORAL_W: + Z = Z.repeat(1, model.mapping_fl.num_layers, 1,1) + else: + Z = Z.repeat(1, model.mapping_fl.num_layers, 1) + + rec1 = model.decoder(Z, lod2batch.lod, blend_factor, noise=False) + rec2 = model.decoder(Z, lod2batch.lod, blend_factor, noise=True) + + # rec1 = F.interpolate(rec1, sample.shape[2]) + # rec2 = F.interpolate(rec2, sample.shape[2]) + # sample_in = F.interpolate(sample_in, sample.shape[2]) + + Z = model.mapping_fl(samplez_in) + g_rec = model.decoder(Z, lod2batch.lod, blend_factor, noise=True) + # g_rec = F.interpolate(g_rec, sample.shape[2]) + sample_in_all = torch.cat([sample_in_all,sample_in],dim=0) + rec1_all = torch.cat([rec1_all,rec1],dim=0) + rec2_all = torch.cat([rec2_all,rec2],dim=0) + g_rec_all = torch.cat([g_rec_all,g_rec],dim=0) + resultsample = torch.cat([sample_in_all, rec1_all, rec2_all, g_rec_all], dim=0) + if cfg.DATASET.BCTS: + resultsample = resultsample.transpose(-2,-1) @utils.async_func def save_pic(x_rec): @@ -115,11 +129,18 @@ def train(cfg, logger, local_rank, world_size, distributed): z_regression=cfg.MODEL.Z_REGRESSION, average_w = cfg.MODEL.AVERAGE_W, temporal_w = cfg.MODEL.TEMPORAL_W, + spec_chans = cfg.DATASET.SPEC_CHANS, + temporal_samples = cfg.DATASET.TEMPORAL_SAMPLES, init_zeros = cfg.MODEL.TEMPORAL_W, residual = cfg.MODEL.RESIDUAL, w_classifier = cfg.MODEL.W_CLASSIFIER, uniq_words = cfg.MODEL.UNIQ_WORDS, attention = cfg.MODEL.ATTENTION, + cycle = cfg.MODEL.CYCLE, + w_weight = cfg.TRAIN.W_WEIGHT, + cycle_weight=cfg.TRAIN.CYCLE_WEIGHT, + attentional_style=cfg.MODEL.ATTENTIONAL_STYLE, + heads = cfg.MODEL.HEADS, ) 
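The cycle and cycle_weight arguments threaded into the model here drive the extra term this patch adds to Model.forward: a real batch is encoded back to latent space, decoded, and penalized with an L1 distance to itself. A rough sketch with stand-in names (the patch additionally calls .detach() on both operands of the difference):

# Sketch of the MODEL.CYCLE term, mirroring the model.py hunk in this patch.
Z_real, _ = model.encode(x, lod, blend_factor)             # w inferred from a real batch
Z_real = Z_real.repeat(1, model.mapping_fl.num_layers, 1)  # broadcast across layers
x_rec = model.decoder(Z_real, lod, blend_factor, noise=True)
Lcycle = model.cycle_weight * (x_rec - x).abs().mean()     # L1 cycle penalty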
model.cuda(local_rank) model.train() @@ -139,12 +160,18 @@ def train(cfg, logger, local_rank, world_size, distributed): z_regression=cfg.MODEL.Z_REGRESSION, average_w = cfg.MODEL.AVERAGE_W, spec_chans = cfg.DATASET.SPEC_CHANS, + temporal_samples = cfg.DATASET.TEMPORAL_SAMPLES, temporal_w = cfg.MODEL.TEMPORAL_W, init_zeros = cfg.MODEL.TEMPORAL_W, residual = cfg.MODEL.RESIDUAL, w_classifier = cfg.MODEL.W_CLASSIFIER, uniq_words = cfg.MODEL.UNIQ_WORDS, attention = cfg.MODEL.ATTENTION, + cycle = cfg.MODEL.CYCLE, + w_weight = cfg.TRAIN.W_WEIGHT, + cycle_weight=cfg.TRAIN.CYCLE_WEIGHT, + attentional_style=cfg.MODEL.ATTENTIONAL_STYLE, + heads = cfg.MODEL.HEADS, ) model_s.cuda(local_rank) model_s.eval() @@ -211,7 +238,6 @@ def train(cfg, logger, local_rank, world_size, distributed): milestones=cfg.TRAIN.LEARNING_DECAY_STEPS, gamma=cfg.TRAIN.LEARNING_DECAY_RATE, reference_batch_size=32, base_lr=cfg.TRAIN.LEARNING_RATES) - model_dict = { 'discriminator': encoder, 'generator': decoder, @@ -238,7 +264,7 @@ def train(cfg, logger, local_rank, world_size, distributed): logger=logger, save=local_rank == 0) - extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True) + extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,file_name='./training_artifacts/ecog_residual_cycle_attention3264wIN_specchan64_more_attentfeatures_fixINencoderwithaffineture/model_tmp_lod3.pth') logger.info("Starting from epoch: %d" % (scheduler.start_epoch())) arguments.update(extra_checkpoint_data) @@ -250,7 +276,7 @@ def train(cfg, logger, local_rank, world_size, distributed): dataset = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS,param=param) dataset_test = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS,train=False,param=param) - rnd = np.random.RandomState(1234) + rnd = np.random.RandomState(3456) latents = rnd.randn(len(dataset_test.dataset), cfg.MODEL.LATENT_SPACE_SIZE) samplez = torch.tensor(latents).float().cuda() @@ -315,17 +341,15 @@ def train(cfg, logger, local_rank, world_size, distributed): # if need_permute: # x_orig = x_orig.permute(0, 3, 1, 2) # x_orig = (x_orig / 127.5 - 1.) 
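For orientation: the hunk just below re-keys the pooling kernel to the time axis (shape[-2]). With SPEC_CHANS at 64 and TEMPORAL_SAMPLES at 128 the spectrogram batches are no longer square, so deriving the kernel from shape[-1] would pool to the wrong resolution. Schematically, using the loop's own names (a sketch, not the verbatim code):

# LOD pipeline in this loop: pool the batch down to the current level, then,
# while in transition, blend with a 2x-upsampled copy of the previous level.
k = x_orig.shape[-2] // 2 ** lod2batch.get_lod_power2()
x = F.avg_pool2d(x_orig, k, k)
if lod2batch.in_transition:
    x_prev_2x = F.interpolate(F.avg_pool2d(x, 2, 2), scale_factor=2)
    x = x * blend_factor + x_prev_2x * (1.0 - blend_factor)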
- x_orig = F.avg_pool2d(x_orig,x_orig.shape[-1]//2**lod2batch.get_lod_power2(),x_orig.shape[-1]//2**lod2batch.get_lod_power2()) + x_orig = F.avg_pool2d(x_orig,x_orig.shape[-2]//2**lod2batch.get_lod_power2(),x_orig.shape[-2]//2**lod2batch.get_lod_power2()) # x_orig = F.interpolate(x_orig, [x_orig.shape[-1]//2**lod2batch.get_lod_power2(),x_orig.shape[-1]//2**lod2batch.get_lod_power2()],mode='bilinear',align_corners=False) blend_factor = lod2batch.get_blend_factor() - needed_resolution = layer_to_resolution[lod2batch.lod] x = x_orig - if lod2batch.in_transition: needed_resolution_prev = layer_to_resolution[lod2batch.lod - 1] x_prev = F.avg_pool2d(x_orig, 2, 2) - x_prev_2x = F.interpolate(x_prev, needed_resolution) + x_prev_2x = F.interpolate(x_prev, scale_factor=2) # x_prev_2x = F.interpolate(x_prev, needed_resolution,mode='bilinear',align_corners=False) x = x * blend_factor + x_prev_2x * (1.0 - blend_factor) @@ -350,9 +374,14 @@ def train(cfg, logger, local_rank, world_size, distributed): encoder_optimizer.zero_grad() decoder_optimizer.zero_grad() - lae = model(x, lod2batch.lod, blend_factor, d_train=True, ae=True) - tracker.update(dict(lae=lae)) - (lae).backward() + if cfg.MODEL.CYCLE: + lae,lcycle = model(x, lod2batch.lod, blend_factor, d_train=True, ae=True) + tracker.update(dict(lae=lae,lcycle=lcycle)) + (lae+lcycle).backward() + else: + lae = model(x, lod2batch.lod, blend_factor, d_train=True, ae=True) + tracker.update(dict(lae=lae)) + (lae).backward() encoder_optimizer.step() decoder_optimizer.step() diff --git a/train_param.json b/train_param.json index b0b44aad..5a94da99 100644 --- a/train_param.json +++ b/train_param.json @@ -1,6 +1,6 @@ { "Prod":true, - "SpecBands":128, + "SpecBands":64, "SelectRegion":["AUDITORY","BROCA","MOTO","SENSORY"], "BlockRegion":[], "UseGridOnly":true, From 12b42bba49db6579e25bef5560230bb5239f3d83 Mon Sep 17 00:00:00 2001 From: Ran Wang Date: Sat, 20 Jun 2020 09:34:19 -0400 Subject: [PATCH 03/14] attention --- configs/ecog.yaml | 7 ++++--- make_figures/make_recon_figure_interpolation.py | 2 +- net.py | 2 +- run.s | 17 +++++++++++++++++ train_alae.py | 13 ++++++++----- 5 files changed, 31 insertions(+), 10 deletions(-) create mode 100644 run.s diff --git a/configs/ecog.yaml b/configs/ecog.yaml index aa2dbb64..07bcb587 100644 --- a/configs/ecog.yaml +++ b/configs/ecog.yaml @@ -34,12 +34,13 @@ MODEL: RESIDUAL: True W_CLASSIFIER: False CYCLE: True - ATTENTIONAL_STYLE: True + ATTENTIONAL_STYLE: False #T 4 8 16 32 64 128 ATTENTION: [False, False, False, False, True, True] - HEADS: 4 + HEADS: 1 # ATTENTION: [] -OUTPUT_DIR: training_artifacts/ecog_residual_cycle_attention3264wStyleIN_specchan64_more_attentfeatures_heads4 +OUTPUT_DIR: training_artifacts/vis +# OUTPUT_DIR: training_artifacts/ecog_residual_cycle_attention3264wStyleIN_specchan64_more_attentfeatures_heads4 ##################################### TRAIN: diff --git a/make_figures/make_recon_figure_interpolation.py b/make_figures/make_recon_figure_interpolation.py index 036e90e3..0c40e5ed 100644 --- a/make_figures/make_recon_figure_interpolation.py +++ b/make_figures/make_recon_figure_interpolation.py @@ -110,7 +110,7 @@ def sample(cfg, logger): logger=logger, save=False) - extra_checkpoint_data = checkpointer.load(file_name='./training_artifacts/ecog_residual_cycle_attention3264wStyleIN_specchan64_more_attentfeatures/model_tmp_lod5.pth') + extra_checkpoint_data = checkpointer.load(file_name='./training_artifacts/ecog_residual_cycle_attention3264wIN_specchan64_cont/model_tmp_lod5.pth') # 
extra_checkpoint_data = checkpointer.load(file_name='./training_artifacts/ecog_residual_cycle_attention3264wIN_specchan64_more_attentfeatures/model_tmp_lod4.pth') model.eval() diff --git a/net.py b/net.py index 735190bb..6e87e922 100644 --- a/net.py +++ b/net.py @@ -115,7 +115,7 @@ def __init__(self, inputs,temporal_w=False,attentional_style=False,decoding=True self.temporal_w = temporal_w self.decoding = decoding self.attentional_style = attentional_style - self.att_denorm = 1 + self.att_denorm = 8 self.heads = heads self.theta = sn(ln.Conv2d(inputs, inputs // self.att_denorm, 1,1,0, bias=False)) self.phi = sn(ln.Conv2d(inputs, inputs // self.att_denorm, 1,1,0, bias=False)) diff --git a/run.s b/run.s new file mode 100644 index 00000000..5f3fff46 --- /dev/null +++ b/run.s @@ -0,0 +1,17 @@ +#!/bin/bash +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=1 +#SBATCH --cpus-per-task=2 +#SBATCH --gres=gpu:p40:2 +#SBATCH --time=60:00:00 +#SBATCH --mem=64GB +#SBATCH --job-name=myTest +#SBATCH --output=slurm_%j.out + +cd $SCRATCH/neural_decoding/code/cnn/ALAE/ + +module purge +module load cudnn/10.0v7.6.2.24 +module load cuda/10.0.130 +source $HOME/python3.7/bin/activate +python train_alae.py diff --git a/train_alae.py b/train_alae.py index f07dada7..8094dc5b 100644 --- a/train_alae.py +++ b/train_alae.py @@ -33,7 +33,7 @@ from PIL import Image import numpy as np -def save_sample(lod2batch, tracker, sample, samplez, x, logger, model, cfg, encoder_optimizer, decoder_optimizer): +def save_sample(lod2batch, tracker, sample, samplez, x, logger, model, cfg, encoder_optimizer, decoder_optimizer,filename=None): os.makedirs('results', exist_ok=True) logger.info('\n[%d/%d] - ptime: %.2f, %s, blend: %.3f, lr: %.12f, %.12f, max mem: %f",' % ( (lod2batch.current_epoch + 1), cfg.TRAIN.TRAIN_EPOCHS, lod2batch.per_epoch_ptime, str(tracker), @@ -101,10 +101,13 @@ def save_pic(x_rec): result_sample = x_rec * 0.5 + 0.5 result_sample = result_sample.cpu() - f = os.path.join(cfg.OUTPUT_DIR, - 'sample_%d_%d.jpg' % ( - lod2batch.current_epoch + 1, - lod2batch.iteration // 1000) + if filename: + f =filename + else: + f = os.path.join(cfg.OUTPUT_DIR, + 'sample_%d_%d.jpg' % ( + lod2batch.current_epoch + 1, + lod2batch.iteration // 1000) ) print("Saved to %s" % f) # save_image(result_sample, f, nrow=min(32, lod2batch.get_per_GPU_batch_size())) From a07d9835d085ff0537f2c0aace3a1e834f615ba9 Mon Sep 17 00:00:00 2001 From: Ran Wang Date: Sat, 20 Jun 2020 09:35:36 -0400 Subject: [PATCH 04/14] attention --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3ce11a7c..bb80fb8d 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ > **Adversarial Latent Autoencoders**
> Stanislav Pidhorskyi, Donald Adjeroh, Gianfranco Doretto
> -> **Abstract:** *Autoencoder networks are unsupervised approaches aiming at combining generative and representational properties by learning simultaneously an encoder-generator map. Although studied extensively, the issues of whether they have the same generative power of GANs, or learn disentangled representations, have not been fully addressed. We introduce an autoencoder that tackles these issues jointly, which we call Adversarial Latent Autoencoder (ALAE). It is a general architecture that can leverage recent improvements on GAN training procedures. We designed two autoencoders: one based on a MLP encoder, and another based on a StyleGAN generator, which we call StyleALAE. We verify the disentanglement properties of both architectures. We show that StyleALAE can not only generate 1024x1024 face images with comparable quality of StyleGAN, but at the same resolution can also produce face reconstructions and manipulations based on real images. This makes ALAE the first autoencoder able to compare with, and go beyond the capabilities of a generator-only type of architecture.* +> **Abstract:** *Autoencoder networks are unsupervised approaches aiming at combining generative and representational properties by learning simultaneously an encoder-generator map. Although studied extensively, the issues of whether they have the same generative power of GANs, or learn disentangled representations, have not been fully addressed. We introduce an autoencoder that tackles these issues jointly, which we call Adversarial Latent Autoencoder (ALAE). It is a general architecture that can leverage recent improvements on GAN training procedures. We designed two autoencoders: one based on a MLP encoder, and another based on a StyleGAN generator, which we call StyleALAE. We verify the disentanglement properties of both architectures. We show that StyleALAE can not only generate 1024x1024 face images with comparable quality of StyleGAN, but at the same resolution can also produce face reconstructions and manipulations based on real images. This makes ALAE the first autoencoder able to compare with, and go beyond, the capabilities of a generator-only type of architecture.* ## Citation * Stanislav Pidhorskyi, Donald A. Adjeroh, and Gianfranco Doretto. Adversarial Latent Autoencoders. In *Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR)*, 2020. 
[to appear]

From 52988ee68a38179ddb536a2155f1cb0755b5f1ac Mon Sep 17 00:00:00 2001
From: Ran Wang
Date: Mon, 3 Aug 2020 16:22:10 -0400
Subject: [PATCH 05/14] formant synth

---
 .gitignore | 4 +-
 AllSubjectInfo.json | 1 +
 ECoGDataSet.py | 72 +-
 checkpointer.py | 5 +-
 dataloader_ecog.py | 2 +-
 defaults.py | 26 +-
 launcher.py | 8 +
 lod_driver.py | 29 +-
 losses.py | 35 +-
 lreq.py | 231 +++-
 .../make_recon_figure_interpolation.py | 76 +-
 model.py | 384 ++++--
 net.py | 1170 ++++++++++++++--
 registry.py | 1 +
 run.s | 4 +-
 tracker.py | 11 +-
 train_alae.py | 250 +++-
 train_param.json | 16 +-
 18 files changed, 1945 insertions(+), 380 deletions(-)

diff --git a/.gitignore b/.gitignore
index d0a10be2..42526be5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,7 +5,9 @@ __pycache__/
 .idea/
 *.pdf
-# *.png
+*.png
 *.eps
 *.txt
+*.jpg
 results*/
+
diff --git a/AllSubjectInfo.json b/AllSubjectInfo.json
index f8da3f3b..a90830d4 100644
--- a/AllSubjectInfo.json
+++ b/AllSubjectInfo.json
@@ -21,6 +21,7 @@
         "Task":["AudN","SenComp","VisRead","PicN","AudRep"]
     },
     "NY749":{
+        "Density":"HB",
         "Task":["AudN","SenComp","VisRead","PicN","AudRep"]
     },
     "HD06":{
diff --git a/ECoGDataSet.py b/ECoGDataSet.py
index f9803ef8..54a2b3f6 100644
--- a/ECoGDataSet.py
+++ b/ECoGDataSet.py
@@ -1,4 +1,5 @@
 import json
+import pdb
 import torch
 import os
 import numpy as np
@@ -10,7 +11,7 @@
 from torch.utils.data import Dataset
 from defaults import get_cfg_defaults
 cfg = get_cfg_defaults()
-cfg.merge_from_file('configs/ecog.yaml')
+cfg.merge_from_file('configs/ecog_style2.yaml')
 BCTS = cfg.DATASET.BCTS
 
 class ECoGDataset(Dataset):
@@ -427,7 +428,8 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None):
                 wave_+=[wavearray]
 
                 if self.Prod:
-                    spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_16k.mat'),'r')
+                    spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_denoise_16k.mat'),'r')
+                    # spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_16k.mat'),'r')
                     spkr_re = np.asarray(spkr_redata['TFlog'])
                     spkr_re = signal.resample(spkr_re,int(1.0*spkr_re.shape[0]/self.ORG_TF_FS*self.DOWN_TF_FS),axis=0)
                     if HD:
@@ -458,7 +460,8 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None):
                     spkr_re = spkr_re_trim
                     spkr_re_+=[spkr_re]
 
-                    wave_redata = h5py.File(os.path.join(datapath_task,'zoom_16k.mat'),'r')
+                    wave_redata = h5py.File(os.path.join(datapath_task,'zoom_denoise_16k.mat'),'r')
+                    # wave_redata = h5py.File(os.path.join(datapath_task,'zoom_16k.mat'),'r')
                     wave_rearray = np.asarray(wave_redata['zoom'])
                     wave_rearray = wave_rearray.T
                     wave_re_trim = np.zeros([int(ecog.shape[0]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS),wave_rearray.shape[1]])
@@ -495,7 +498,7 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None):
                 word_test+=[label_ind[-self.TestNum:]]
                 labels_test+=[label_subset[-self.TestNum:]]
 
-            ################ clean ##################8jn8jn8j8,n,kj8j8,kn,jk,knj8,nj,knjnjkn,knµ
+            ################ clean ##################
             if not HD:
                 # bad_samples_ = np.where(bad_samples_==1)[0]
                 bad_samples_ = np.where(np.logical_or(np.logical_or(bad_samples_==1, bad_samples_==2) , bad_samples_==4))[0]
@@ -635,9 +638,9 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None):
 
     def __len__(self):
         if self.mode == 'train':
             if self.Prod:
-                return self.meta_data['start_ind_re_alldataset'][0].shape[0]*128
+                return np.array([start_ind_re_alldataset.shape[0]*128 for start_ind_re_alldataset in
self.meta_data['start_ind_re_alldataset']]).sum() else: - return self.meta_data['start_ind_alldataset'][0].shape[0]*128 + return np.array([start_ind_alldataset.shape[0]*128 for start_ind_alldataset in self.meta_data['start_ind_alldataset']]).sum() else: return self.TestNum_cum[0] @@ -686,6 +689,8 @@ def __getitem__(self, idx): wave_re_batch_all = [] label_batch_all = [] word_batch_all = [] + on_stage_batch_all = [] + on_stage_re_batch_all = [] self.SeqLenSpkr = self.SeqLen*int(self.DOWN_TF_FS*1.0/self.DOWN_ECOG_FS) imagesize = 2**self.current_lod for i in range(num_dataset): @@ -694,20 +699,20 @@ def __getitem__(self, idx): rand_ind = np.random.choice(np.arange(start_ind_valid_alldataset[i].shape[0])[:-self.TestNum_cum[i]],1,replace=False)[0] elif self.mode =='test': if self.Prod: - rand_ind = idx+start_ind_valid_alldataset[i].shape[0]-self.TestNum_cum[i] - else: rand_ind = idx+start_ind_re_valid_alldataset[i].shape[0]-self.TestNum_cum[i] + else: + rand_ind = idx+start_ind_valid_alldataset[i].shape[0]-self.TestNum_cum[i] # label_valid = np.delete(label_alldataset[i],bad_samples_alldataset[i]) label = [label_alldataset[i][rand_ind]] word = word_alldataset[i][rand_ind] - indx = start_ind_valid_alldataset[i][rand_ind] + start_indx = start_ind_valid_alldataset[i][rand_ind] end_indx = end_ind_valid_alldataset[i][rand_ind] ecog_batch = np.zeros((self.SeqLen+n_delay_2-n_delay_1 ,ecog_alldataset[i].shape[-1])) # ecog_batch = np.zeros((self.SeqLen ,ecog_alldataset[i].shape[-1])) spkr_batch = np.zeros(( self.SeqLenSpkr,spkr_alldataset[i].shape[-1])) wave_batch = np.zeros(( (self.SeqLen*int(self.DOWN_WAVE_FS*1.0/self.DOWN_ECOG_FS)),wave_alldataset[i].shape[-1])) if self.Prod: - indx_re = start_ind_re_valid_alldataset[i][rand_ind] + start_indx_re = start_ind_re_valid_alldataset[i][rand_ind] end_indx_re = end_ind_re_valid_alldataset[i][rand_ind] ecog_batch_re = np.zeros((self.SeqLen+n_delay_2-n_delay_1 ,ecog_alldataset[i].shape[-1])) # ecog_batch_re = np.zeros((self.SeqLen ,ecog_alldataset[i].shape[-1])) @@ -716,24 +721,30 @@ def __getitem__(self, idx): if self.mode =='train': # indx = np.maximum(indx+np.random.choice(np.arange(np.minimum(-(self.SeqLenSpkr-(end_indx-indx)),-1),np.maximum(-(self.SeqLenSpkr-(end_indx-indx)),0)),1)[0],0) - indx = np.maximum(indx+np.random.choice(np.arange(-64,end_indx-indx-64),1)[0],0) + chosen_start = np.random.choice(np.arange(-64,end_indx-start_indx-64),1)[0] + indx = np.maximum(start_indx+chosen_start,0) # indx = indx - self.ahead_onset_test if self.Prod: - # indx_re = np.maximum(indx+np.random.choice(np.arange(np.minimum(-(self.SeqLenSpkr-(end_indx_re-indx_re)),-1),np.maximum(-(self.SeqLenSpkr-(end_indx_re-indx_re)),0)),1)[0],0) - indx_re = np.maximum(indx_re+np.random.choice(np.arange(-64,end_indx_re-indx_re-64),1)[0],0) + # indx_re = np.maximum(indx_re+np.random.choice(np.arange(np.minimum(-(self.SeqLenSpkr-(end_indx_re-indx_re)),-1),np.maximum(-(self.SeqLenSpkr-(end_indx_re-indx_re)),0)),1)[0],0) + chosen_start_re = np.random.choice(np.arange(-64,end_indx_re-start_indx_re-64),1)[0] + indx_re = np.maximum(start_indx_re+chosen_start_re,0) # indx_re = indx_re-self.ahead_onset_test elif self.mode =='test': - indx = indx - self.ahead_onset_test + indx = start_indx - self.ahead_onset_test if self.Prod: - indx_re = indx_re-self.ahead_onset_test + indx_re = start_indx_re-self.ahead_onset_test # indx = indx.item() ecog_batch = ecog_alldataset[i][indx+n_delay_1:indx+self.SeqLen+n_delay_2] # ecog_batch = ecog_alldataset[i][indx+n_delay_1:indx+self.SeqLen+n_delay_1] + 
on_stage_batch = np.zeros([1,self.SeqLenSpkr]) + on_stage_batch[:,np.maximum(start_indx-indx,0): np.minimum(end_indx-indx,self.SeqLenSpkr-1)] = 1.0 spkr_batch = spkr_alldataset[i][indx:indx+self.SeqLenSpkr] wave_batch = wave_alldataset[i][(indx*int(self.DOWN_WAVE_FS*1.0/self.DOWN_ECOG_FS)):((indx+self.SeqLen)*int(self.DOWN_WAVE_FS*1.0/self.DOWN_ECOG_FS))] if self.Prod: # indx_re = indx_re.item() + on_stage_re_batch = np.zeros([1,self.SeqLenSpkr]) + on_stage_re_batch[:,np.maximum(start_indx_re-indx_re,0): np.minimum(end_indx_re-indx_re,self.SeqLenSpkr-1)] = 1.0 ecog_batch_re = ecog_alldataset[i][indx_re+n_delay_1:indx_re+self.SeqLen+n_delay_2] # ecog_batch_re = ecog_alldataset[i][indx_re+n_delay_1:indx_re+self.SeqLen+n_delay_1] spkr_batch_re = spkr_re_alldataset[i][indx_re:indx_re+self.SeqLenSpkr] @@ -752,28 +763,42 @@ def __getitem__(self, idx): ecog_batch_all += [ecog_batch] spkr_batch_all += [spkr_batch[np.newaxis,...]] wave_batch_all += [wave_batch.swapaxes(-2,-1)] + on_stage_batch_all += [on_stage_batch] if self.Prod: ecog_re_batch_all += [ecog_batch_re] spkr_re_batch_all += [spkr_batch_re[np.newaxis,...]] wave_re_batch_all += [wave_batch_re.swapaxes(-2,-1)] + on_stage_re_batch_all += [on_stage_re_batch] label_batch_all +=[label] word_batch_all +=[word] mni_coordinate_all +=[mni_batch.swapaxes(-2,-1)] regions_all +=[self.meta_data['regions_alldataset'][i]] - mask_all +=[self.meta_data['mask_prior_alldataset'][i]] + mask_all +=[self.meta_data['mask_prior_alldataset'][i][np.newaxis,...]] + + # # spkr_batch_all = np.concatenate(spkr_batch_all,axis=0) + # # wave_batch_all = np.concatenate(wave_batch_all,axis=0) + # # if self.Prod: + # # spkr_re_batch_all = np.concatenate(spkr_re_batch_all,axis=0) + # # wave_re_batch_all = np.concatenate(wave_re_batch_all,axis=0) + # label_batch_all = np.concatenate(label_batch_all,axis=0).tolist() + # word_batch_all = np.array(word_batch_all) + # # baseline_batch_all = np.concatenate(self.meta_data['baseline_alldataset'],axis=0) + # # mni_coordinate_all = np.concatenate(mni_coordinate_all,axis=0) + # regions_all = np.concatenate(regions_all,axis=0).tolist() + spkr_batch_all = np.concatenate(spkr_batch_all,axis=0) wave_batch_all = np.concatenate(wave_batch_all,axis=0) + on_stage_batch_all = np.concatenate(on_stage_batch_all,axis=0) if self.Prod: spkr_re_batch_all = np.concatenate(spkr_re_batch_all,axis=0) wave_re_batch_all = np.concatenate(wave_re_batch_all,axis=0) + on_stage_re_batch_all = np.concatenate(on_stage_re_batch_all,axis=0) label_batch_all = np.concatenate(label_batch_all,axis=0).tolist() - # word_batch_all = np.concatenate(word_batch_all,axis=0) word_batch_all = np.array(word_batch_all) baseline_batch_all = np.concatenate(self.meta_data['baseline_alldataset'],axis=0) mni_coordinate_all = np.concatenate(mni_coordinate_all,axis=0) regions_all = np.concatenate(regions_all,axis=0).tolist() - mask_all = np.concatenate(mask_all,axis=0) return {'ecog_batch_all':ecog_batch_all, 'spkr_batch_all':spkr_batch_all, @@ -781,11 +806,20 @@ def __getitem__(self, idx): 'ecog_re_batch_all':ecog_re_batch_all, 'spkr_re_batch_all':spkr_re_batch_all, 'wave_re_batch_all':wave_re_batch_all, - 'baseline_batch_all':baseline_batch_all, + # 'baseline_batch_all':baseline_batch_all, 'label_batch_all':label_batch_all, 'dataset_names':dataset_names, 'mni_coordinate_all': mni_coordinate_all, 'regions_all':regions_all, 'mask_all': mask_all, 'word_batch_all':word_batch_all, + 'on_stage_batch_all':on_stage_batch_all, + 'on_stage_re_batch_all':on_stage_re_batch_all, } + + +def 
concate_batch(metabatch,expection_keys=['ecog_batch_all','mask_all','label_batch_all','dataset_names','regions_all','word_batch_all']): + for key in metabatch.keys(): + if key not in expection_keys: + metabatch[key] = torch.cat(metabatch[key],dim=0) + return metabatch \ No newline at end of file diff --git a/checkpointer.py b/checkpointer.py index 7a363a98..c8fc8d4f 100644 --- a/checkpointer.py +++ b/checkpointer.py @@ -66,7 +66,7 @@ def save_data(): return save_data() - def load(self, ignore_last_checkpoint=False, file_name=None): + def load(self, ignore_last_checkpoint=False, ignore_auxiliary=False,file_name=None): save_file = os.path.join(self.cfg.OUTPUT_DIR, "last_checkpoint") try: with open(save_file, "r") as last_checkpoint: @@ -98,7 +98,8 @@ def load(self, ignore_last_checkpoint=False, file_name=None): else: self.logger.warning("No state dict for model: %s" % name) checkpoint.pop('models') - if "auxiliary" in checkpoint and self.auxiliary: + + if "auxiliary" in checkpoint and self.auxiliary and not ignore_auxiliary: self.logger.info("Loading auxiliary from {}".format(f)) for name, item in self.auxiliary.items(): try: diff --git a/dataloader_ecog.py b/dataloader_ecog.py index c3db1f21..620f5f25 100644 --- a/dataloader_ecog.py +++ b/dataloader_ecog.py @@ -34,7 +34,7 @@ class TFRecordsDataset: def __init__(self, cfg, logger, rank=0, world_size=1, buffer_size_mb=200, channels=3, seed=None, train=True, needs_labels=False,param=None): self.param = param - self.dataset = ECoGDataset([param['Data']['Subj']],mode='train' if train else 'test') + self.dataset = ECoGDataset(cfg.DATASET.SUBJECT,mode='train' if train else 'test') self.cfg = cfg self.logger = logger self.rank = rank diff --git a/defaults.py b/defaults.py index 298985b3..30161cfd 100644 --- a/defaults.py +++ b/defaults.py @@ -40,9 +40,11 @@ _C.DATASET.SPEC_CHANS=128 _C.DATASET.TEMPORAL_SAMPLES=128 _C.DATASET.BCTS = True +_C.DATASET.SUBJECT = [] _C.MODEL = CN() +_C.MODEL.N_FORMANTS = 2 _C.MODEL.LAYER_COUNT = 6 _C.MODEL.START_CHANNEL_COUNT = 64 _C.MODEL.MAX_CHANNEL_COUNT = 512 @@ -57,9 +59,12 @@ _C.MODEL.ENCODER = "EncoderDefault" _C.MODEL.MAPPING_TO_LATENT = "MappingToLatent" _C.MODEL.MAPPING_FROM_LATENT = "MappingFromLatent" +_C.MODEL.MAPPING_FROM_ECOG = "ECoGMappingDefault" _C.MODEL.Z_REGRESSION = False _C.MODEL.AVERAGE_W = False _C.MODEL.TEMPORAL_W = False +_C.MODEL.GLOBAL_W = True +_C.MODEL.TEMPORAL_GLOBAL_CAT = False _C.MODEL.RESIDUAL = False _C.MODEL.W_CLASSIFIER = False _C.MODEL.UNIQ_WORDS =50 @@ -67,8 +72,27 @@ _C.MODEL.CYCLE = False _C.MODEL.ATTENTIONAL_STYLE = False _C.MODEL.HEADS = 1 -_C.TRAIN = CN() +_C.MODEL.ECOG=False +_C.MODEL.SUPLOSS_ON_ECOGF=False +_C.MODEL.W_SUP=False +_C.MODEL.APPLY_PPL = False +_C.MODEL.APPLY_PPL_D = False +_C.MODEL.LESS_TEMPORAL_FEATURE = False +_C.MODEL.PPL_WEIGHT = 100 +_C.MODEL.PPL_GLOBAL_WEIGHT = 100 +_C.MODEL.PPLD_WEIGHT = 1 +_C.MODEL.PPLD_GLOBAL_WEIGHT = 1 +_C.MODEL.COMMON_Z = True +_C.MODEL.GAN = True + +_C.FINETUNE = CN() +_C.FINETUNE.FINETUNE = False +_C.FINETUNE.ENCODER_GUIDE= False +_C.FINETUNE.FIX_GEN = False +_C.FINETUNE.SPECSUP = True +_C.TRAIN = CN() +_C.TRAIN.PROGRESSIVE = True _C.TRAIN.EPOCHS_PER_LOD = 15 _C.TRAIN.BASE_LEARNING_RATE = 0.0015 diff --git a/launcher.py b/launcher.py index c5a963c3..941593bb 100644 --- a/launcher.py +++ b/launcher.py @@ -47,6 +47,14 @@ def _run(rank, world_size, fn, defaults, write_log, no_cuda, args): config_file = os.path.join('configs', config_file) cfg.merge_from_file(config_file) cfg.merge_from_list(args.opts) + if cfg.FINETUNE.FINETUNE: + 
cfg.MODEL.ECOG = True + cfg.MODEL.SUPLOSS_ON_ECOGF = cfg.FINETUNE.FIX_GEN + cfg.MODEL.W_SUP = cfg.FINETUNE.ENCODER_GUIDE + cfg.TRAIN.LOD_2_BATCH_1GPU = [bs//len(cfg.DATASET.SUBJECT) for bs in cfg.TRAIN.LOD_2_BATCH_1GPU] + cfg.TRAIN.LOD_2_BATCH_2GPU = [bs//len(cfg.DATASET.SUBJECT) for bs in cfg.TRAIN.LOD_2_BATCH_2GPU] + cfg.TRAIN.LOD_2_BATCH_4GPU = [bs//len(cfg.DATASET.SUBJECT) for bs in cfg.TRAIN.LOD_2_BATCH_4GPU] + cfg.TRAIN.LOD_2_BATCH_8GPU = [bs//len(cfg.DATASET.SUBJECT) for bs in cfg.TRAIN.LOD_2_BATCH_8GPU] cfg.freeze() logger = logging.getLogger("logger") diff --git a/lod_driver.py b/lod_driver.py index dcd7fa08..cfc6957d 100644 --- a/lod_driver.py +++ b/lod_driver.py @@ -20,7 +20,7 @@ class LODDriver: - def __init__(self, cfg, logger, world_size, dataset_size): + def __init__(self, cfg, logger, world_size, dataset_size, progressive=True): if world_size == 8: self.lod_2_batch = cfg.TRAIN.LOD_2_BATCH_8GPU if world_size == 4: @@ -29,13 +29,13 @@ def __init__(self, cfg, logger, world_size, dataset_size): self.lod_2_batch = cfg.TRAIN.LOD_2_BATCH_2GPU if world_size == 1: self.lod_2_batch = cfg.TRAIN.LOD_2_BATCH_1GPU - + self.progressive = progressive self.world_size = world_size self.minibatch_base = 16 self.cfg = cfg self.dataset_size = dataset_size self.current_epoch = 0 - self.lod = -1 + self.lod = -1 if progressive else 5 self.in_transition = False self.logger = logger self.iteration = 0 @@ -99,23 +99,24 @@ def set_epoch(self, epoch, optimizers): self.lod = self.cfg.MODEL.LAYER_COUNT - 1 return - new_lod = min(self.cfg.MODEL.LAYER_COUNT - 1, epoch // self.cfg.TRAIN.EPOCHS_PER_LOD) - if new_lod != self.lod: - self.lod = new_lod - self.logger.info("#" * 80) - self.logger.info("# Switching LOD to %d" % self.lod) - self.logger.info("# Starting transition") - self.logger.info("#" * 80) - self.in_transition = True - for opt in optimizers: - opt.state = defaultdict(dict) + if self.progressive: + new_lod = min(self.cfg.MODEL.LAYER_COUNT - 1, epoch // self.cfg.TRAIN.EPOCHS_PER_LOD) + if new_lod != self.lod: + self.lod = new_lod + self.logger.info("#" * 80) + self.logger.info("# Switching LOD to %d" % self.lod) + self.logger.info("# Starting transition") + self.logger.info("#" * 80) + self.in_transition = True + for opt in optimizers: + opt.state = defaultdict(dict) is_in_first_half_of_cycle = (epoch % self.cfg.TRAIN.EPOCHS_PER_LOD) < (self.cfg.TRAIN.EPOCHS_PER_LOD // 2) is_growing = epoch // self.cfg.TRAIN.EPOCHS_PER_LOD == self.lod > 0 new_in_transition = is_in_first_half_of_cycle and is_growing if new_in_transition != self.in_transition: - self.in_transition = new_in_transition + self.in_transition = new_in_transition if self.progressive else False self.logger.info("#" * 80) self.logger.info("# Transition ended") self.logger.info("#" * 80) diff --git a/losses.py b/losses.py index 402ced3a..2f357770 100644 --- a/losses.py +++ b/losses.py @@ -14,6 +14,7 @@ # ============================================================================== import torch +import math import torch.nn.functional as F @@ -28,14 +29,16 @@ def kl(mu, log_var): def reconstruction(recon_x, x, lod=None): return torch.mean((recon_x - x)**2) +def critic_loss(d_result_fake,d_result_real): + loss = (F.softplus(d_result_fake) + F.softplus(-d_result_real)).mean() + return loss -def discriminator_logistic_simple_gp(d_result_fake, d_result_real, reals, r1_gamma=10.0): - loss = (F.softplus(d_result_fake) + F.softplus(-d_result_real)) +def discriminator_logistic_simple_gp(d_result_real, reals, r1_gamma=10.0): if r1_gamma != 0.0: 
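        # R1 gradient penalty: with the softplus critic terms split out into
        # critic_loss above, this function now returns only the regularizer on
        # real samples: the squared norm of dD(x)/dx, summed over the sample
        # dimensions and scaled by r1_gamma / 2 in the lines that follow.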
real_loss = d_result_real.sum() real_grads = torch.autograd.grad(real_loss, reals, create_graph=True, retain_graph=True)[0] r1_penalty = torch.sum(real_grads.pow(2.0), dim=[1, 2, 3]) - loss = loss + r1_penalty * (r1_gamma * 0.5) + loss = r1_penalty * (r1_gamma * 0.5) return loss.mean() @@ -49,3 +52,29 @@ def discriminator_gradient_penalty(d_result_real, reals, r1_gamma=10.0): def generator_logistic_non_saturating(d_result_fake): return F.softplus(-d_result_fake).mean() + + +def pl_lengths_reg(inputs, outputs, mean_path_length, reg_on_gen, temporal_w=False,decay=0.01): + # e.g. for generator, inputs = w (B x 1 x channel x T(optianal)), outputs=images (B x 1 x T x F) + if reg_on_gen: + num_pixels = outputs[0,0,0].numel() if temporal_w else outputs[0,0].numel() # freqbands if temporal else specsize + else: + num_pixels = outputs.shape[2] # latent space size per temporal sample + pl_noise = torch.randn(outputs.shape).cuda() / math.sqrt(num_pixels) + outputs = (outputs * pl_noise).sum() + # if reg_on_gen: + # outputs = (outputs * pl_noise).sum(dim=[0,1,3]) if temporal_w else (outputs * pl_noise).sum() + # else: + # outputs = (outputs * pl_noise).sum(dim=[0,1,2]) if temporal_w else (outputs * pl_noise).sum() + + pl_grads = torch.autograd.grad(outputs=outputs, inputs=inputs, + grad_outputs=torch.ones(outputs.shape).cuda(), + create_graph=True,retain_graph=True)[0] + if reg_on_gen: + path_lengths = ((pl_grads ** 2).sum(dim=2).mean(dim=1)+1e-8).sqrt() #sum over feature, mean over repeated styles for each gen layers + else: + path_lengths = ((pl_grads ** 2).sum(dim=1)+1e-8).sqrt() + path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length) + path_penalty = (path_lengths - path_mean).pow(2).mean() + path_lengths = path_lengths.mean() + return path_penalty,path_mean.detach(),path_lengths \ No newline at end of file diff --git a/lreq.py b/lreq.py index 8b66f91a..43462fd7 100644 --- a/lreq.py +++ b/lreq.py @@ -48,6 +48,26 @@ def make_tuple(x, n): return x return tuple([x for _ in range(n)]) +def upscale2d(x, factor=2): + s = x.shape + x = torch.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) + x = x.repeat(1, 1, 1, factor, 1, factor) + x = torch.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) + return x + +class Blur(nn.Module): + def __init__(self, channels): + super(Blur, self).__init__() + f = np.array([1, 2, 1], dtype=np.float32) + f = f[:, np.newaxis] * f[np.newaxis, :] + f /= np.sum(f) + kernel = torch.Tensor(f).view(1, 1, 3, 3).repeat(channels, 1, 1, 1) + self.register_buffer('weight', kernel) + self.groups = channels + + def forward(self, x): + return F.conv2d(x, weight=self.weight, groups=self.groups, padding=1) + class Linear(nn.Module): def __init__(self, in_features, out_features, bias=True, gain=np.sqrt(2.0), lrmul=1.0, implicit_lreq=use_implicit_lreq): super(Linear, self).__init__() @@ -170,6 +190,213 @@ def forward(self, x): return F.conv2d(x, w, self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) +class Conv3d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, dilation=1, + groups=1, bias=True, gain=np.sqrt(2.0), transpose=False, transform_kernel=False, lrmul=1.0, + implicit_lreq=use_implicit_lreq,initial_weight=None): + super(Conv3d, self).__init__() + if in_channels % groups != 0: + raise ValueError('in_channels must be divisible by groups') + if out_channels % groups != 0: + raise ValueError('out_channels must be divisible by groups') + self.in_channels = 
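# pl_lengths_reg above is the StyleGAN2 path-length regularizer, applied either to
# the generator (w -> spectrogram) or to the encoder (spectrogram -> w). A minimal
# sketch of the recipe with hypothetical shapes; the stand-in generator is arbitrary:
import math
import torch

def path_length_penalty(inputs, outputs, mean_path_length, decay=0.01):
    # probe the Jacobian along a random direction of the output space
    noise = torch.randn_like(outputs) / math.sqrt(outputs[0].numel())
    grads, = torch.autograd.grad((outputs * noise).sum(), inputs, create_graph=True)
    lengths = (grads.pow(2).sum(dim=-1) + 1e-8).sqrt()
    # running mean of path lengths; penalize deviation from it
    mean_path_length = mean_path_length + decay * (lengths.mean() - mean_path_length)
    return (lengths - mean_path_length).pow(2).mean(), mean_path_length.detach()

w = torch.randn(4, 128, requires_grad=True)
fake = (w ** 2).sum(dim=1, keepdim=True).expand(4, 256)  # stand-in generator
penalty, running_mean = path_length_penalty(w, fake, torch.zeros(1))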
in_channels + self.out_channels = out_channels + self.kernel_size = make_tuple(kernel_size, 3) if isinstance(kernel_size,int) else kernel_size + self.stride = make_tuple(stride, 3) if isinstance(stride,int) else stride + self.padding = make_tuple(padding, 3) if isinstance(padding,int) else padding + self.output_padding = make_tuple(output_padding, 3) if isinstance(output_padding,int) else output_padding + self.dilation = make_tuple(dilation, 3) if isinstance(dilation,int) else dilation + self.groups = groups + self.gain = gain + self.lrmul = lrmul + self.transpose = transpose + self.fan_in = np.prod(self.kernel_size) * in_channels // groups + self.transform_kernel = transform_kernel + self.initial_weight = initial_weight + if transpose: + self.weight = Parameter(torch.Tensor(in_channels, out_channels // groups, *self.kernel_size)) + else: + self.weight = Parameter(torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)) + if bias: + self.bias = Parameter(torch.Tensor(out_channels)) + else: + self.register_parameter('bias', None) + self.std = 0 + self.implicit_lreq = implicit_lreq + self.reset_parameters() + + def reset_parameters(self): + self.std = self.gain / np.sqrt(self.fan_in) *self.lrmul + if not self.implicit_lreq: + init.normal_(self.weight, mean=0, std=1.0 / self.lrmul) + else: + if self.initial_weight: + self.weight = self.initial_weight + else: + init.normal_(self.weight, mean=0, std=self.std / self.lrmul) + setattr(self.weight, 'lr_equalization_coef', self.std) + if self.bias is not None: + setattr(self.bias, 'lr_equalization_coef', self.lrmul) + + if self.bias is not None: + with torch.no_grad(): + self.bias.zero_() + + def forward(self, x): + if self.transpose: + w = self.weight + if self.transform_kernel: + w = F.pad(w, (1, 1, 1, 1, 1, 1), mode='constant') + w = w[:, :, 1:, 1:, 1:] + w[:, :, :-1, 1:, 1:] + w[:, :, 1:, :-1, 1:] + w[:, :, :-1, :-1, 1:] + w[:, :, 1:, 1:, :-1] + w[:, :, :-1, 1:, :-1] + w[:, :, 1:, :-1, :-1] + w[:, :, :-1, :-1, :-1] + if not self.implicit_lreq: + bias = self.bias + if bias is not None: + bias = bias * self.lrmul + return F.conv_transpose3d(x, w * self.std, bias, stride=self.stride, + padding=self.padding, output_padding=self.output_padding, + dilation=self.dilation, groups=self.groups) + else: + return F.conv_transpose3d(x, w, self.bias, stride=self.stride, padding=self.padding, + output_padding=self.output_padding, dilation=self.dilation, + groups=self.groups) + else: + w = self.weight + if self.transform_kernel: + w = F.pad(w, (1, 1, 1, 1), mode='constant') + w = (w[:, :, 1:, 1:, 1:] + w[:, :, :-1, 1:, 1:] + w[:, :, 1:, :-1, 1:] + w[:, :, :-1, :-1, 1:] + w[:, :, 1:, 1:, :-1] + w[:, :, :-1, 1:, :-1] + w[:, :, 1:, :-1, :-1] + w[:, :, :-1, :-1, :-1]) * 0.125 + if not self.implicit_lreq: + bias = self.bias + if bias is not None: + bias = bias * self.lrmul + return F.conv3d(x, w * self.std, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups) + else: + return F.conv3d(x, w, self.bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups) + +class StyleConv2dtest(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, latent_size, stride=1, padding=0, output_padding=0, dilation=1, + groups=1, bias=True, gain=np.sqrt(2.0), transpose=False, transform_kernel=False, lrmul=1.0, + implicit_lreq=False,initial_weight=None,demod=True,upsample=False,temporal_w=False): + super(StyleConv2dtest,self).__init__() + self.demod = demod + self.conv = 
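# Conv3d above uses the same learning-rate equalization as the 1d/2d layers in this
# file: weights are stored near unit scale and multiplied at runtime by
# std = gain / sqrt(fan_in) * lrmul. Minimal sketch of the explicit variant:
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn

class EqualizedLinear(nn.Module):
    def __init__(self, in_features, out_features, gain=np.sqrt(2.0), lrmul=1.0):
        super().__init__()
        # He-style constant applied in forward(), so the stored weight stays ~N(0, 1/lrmul)
        self.std = gain / np.sqrt(in_features) * lrmul
        self.lrmul = lrmul
        self.weight = nn.Parameter(torch.randn(out_features, in_features) / lrmul)
        self.bias = nn.Parameter(torch.zeros(out_features))

    def forward(self, x):
        return F.linear(x, self.weight * self.std, self.bias * self.lrmul)

y = EqualizedLinear(128, 64)(torch.randn(2, 128))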
Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, + stride=stride, padding=padding, output_padding=output_padding, dilation=dilation, + groups=groups, bias=False, gain=gain, transpose=transpose, + transform_kernel=transform_kernel, lrmul=lrmul, + implicit_lreq=implicit_lreq,initial_weight=initial_weight) + self.style = Linear(latent_size, 2*in_channels, gain=1) + if demod: + self.norm = nn.InstanceNorm2d(out_channels, affine=False, eps=1e-8) + self.upsample = upsample + self.transpose = transpose + if bias: + self.bias = Parameter(torch.Tensor(1,out_channels,1,1)) + with torch.no_grad(): + self.bias.zero_() + if upsample: + self.blur = Blur(out_channels) + self.noise_weight = nn.Parameter(torch.zeros(1)) + + def forward(self, x, style,noise=None): + if self.upsample and not self.transpose: + x = upscale2d(x) + w = self.style(style) + w = w.view(w.shape[0], 2, x.shape[1], 1, 1) + x = w[:,1]+x*(w[:,0]+1) + x = F.leaky_relu(self.conv(x),0.2) + if self.demod: + x = self.norm(x) + x = self.bias+x + if self.upsample: + x = self.blur(x) + if noise: + x = torch.addcmul(x, value=1.0, tensor1=self.noise_weight, + tensor2=torch.randn([x.shape[0], 1, x.shape[2], x.shape[3]])) + + return x + + +class StyleConv2d(Conv2d): + def __init__(self, in_channels, out_channels, kernel_size, latent_size, stride=1, padding=0, output_padding=0, dilation=1, + groups=1, bias=True, gain=np.sqrt(2.0), transpose=False, transform_kernel=False, lrmul=1.0, + implicit_lreq=False,initial_weight=None,demod=True,upsample=False,temporal_w=False): + super(StyleConv2d,self).__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, + stride=stride, padding=padding, output_padding=output_padding, dilation=dilation, + groups=groups, bias=bias, gain=gain, transpose=upsample, + transform_kernel=transform_kernel, lrmul=lrmul, + implicit_lreq=implicit_lreq,initial_weight=initial_weight) + self.demod=demod + self.upsample = upsample + self.transpose = upsample + self.temporal_w = temporal_w + if upsample: + self.blur = Blur(out_channels) + if temporal_w: + self.modulation = Conv1d(latent_size, in_channels,1,1,0, gain=1) + else: + self.modulation = Linear(latent_size, in_channels, gain=1) + self.noise_weight = nn.Parameter(torch.zeros(1)) + + def forward(self, x, style,noise=None): + batch, in_channels, height, width = x.shape + if not self.temporal_w: + assert style.dim()==2, "Style dimension not mach temporal_w condition" + else: + assert style.dim()==3, "Style dimension not mach temporal_w condition" + style = self.modulation(style).view(batch, 1, in_channels, 1, 1) + w = self.weight + w = w if self.implicit_lreq else (w * self.std) + if self.transpose: + w = w.transpose(0,1) # out, in, H, W + if not self.temporal_w: + w2 = w[None, :, :, :, :] # batch, out_chan, in_chan, H, w + w = w2 * (1 + style) + if self.demod: + d = torch.rsqrt((w ** 2).sum(dim=(2, 3, 4), keepdim=True) + 1e-8) + w = w * d + _, _, _, *ws = w.shape + if self.transpose: + w = w.transpose(1,2).reshape(batch* in_channels, self.out_channels, *ws) + else: + w = w.view( batch * self.out_channels, in_channels,*ws) + if self.transform_kernel: + w = F.pad(w, (1, 1, 1, 1), mode='constant') + w = w[..., 1:, 1:] + w[..., :-1, 1:] + w[..., 1:, :-1] + w[..., :-1, :-1] + if not self.transpose: + w =w*0.25 + x = x.view(1, batch * in_channels, height, width) + + bias = self.bias + if not self.implicit_lreq: + if bias is not None: + bias = bias * self.lrmul + if self.transpose: + out = F.conv_transpose2d(x, w, None, 
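# StyleConv2d above is StyleGAN2 weight modulation/demodulation: scale the kernel per
# input channel by (1 + style), renormalize so outputs keep roughly unit variance,
# then run the whole batch as one grouped convolution. Minimal sketch (square kernel,
# no upsampling; all names are illustrative):
import torch
import torch.nn.functional as F

def mod_demod_conv(x, weight, style, eps=1e-8):
    # x: (B, Cin, H, W); weight: (Cout, Cin, k, k); style: (B, Cin)
    B, Cin, H, W = x.shape
    Cout, k = weight.shape[0], weight.shape[-1]
    w = weight[None] * (1 + style)[:, None, :, None, None]                # modulate
    w = w * torch.rsqrt((w ** 2).sum(dim=(2, 3, 4), keepdim=True) + eps)  # demodulate
    out = F.conv2d(x.reshape(1, B * Cin, H, W), w.reshape(B * Cout, Cin, k, k),
                   padding=k // 2, groups=B)                              # one group per sample
    return out.reshape(B, Cout, H, W)

out = mod_demod_conv(torch.randn(2, 8, 16, 16), torch.randn(4, 8, 3, 3), torch.randn(2, 8))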
stride=self.stride, + padding=self.padding, output_padding=self.output_padding, + dilation=self.dilation, groups=batch) + else: + out = F.conv2d(x, w, None, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=batch) + + _, _, height, width = out.shape + out = out.view(batch, self.out_channels, height, width) + if bias is not None: + out = out + bias[None,:,None,None] + if self.upsample: + out = self.blur(out) + + + + else: + assert style.dim()==3, "Style dimension not mach temporal_w condition" + raise ValueError('temporal_w is not support yet') + + if noise: + out = torch.addcmul(out, value=1.0, tensor1=self.noise_weight, + tensor2=torch.randn([out.shape[0], 1, out.shape[2], out.shape[3]])) + return out + class Conv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, dilation=1, groups=1, bias=True, gain=np.sqrt(2.0), transpose=False, transform_kernel=False, lrmul=1.0, @@ -318,4 +545,6 @@ class SeparableConvTranspose1d(Conv1d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, dilation=1, bias=True, gain=np.sqrt(2.0)): super(SeparableConvTranspose1d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, - output_padding, dilation, bias, gain, True) \ No newline at end of file + output_padding, dilation, bias, gain, True) + + diff --git a/make_figures/make_recon_figure_interpolation.py b/make_figures/make_recon_figure_interpolation.py index 0c40e5ed..680bb1aa 100644 --- a/make_figures/make_recon_figure_interpolation.py +++ b/make_figures/make_recon_figure_interpolation.py @@ -65,9 +65,12 @@ def sample(cfg, logger): channels=cfg.MODEL.CHANNELS, generator=cfg.MODEL.GENERATOR, encoder=cfg.MODEL.ENCODER, + ecog_encoder=cfg.MODEL.MAPPING_FROM_ECOG, z_regression=cfg.MODEL.Z_REGRESSION, average_w = cfg.MODEL.AVERAGE_W, temporal_w = cfg.MODEL.TEMPORAL_W, + global_w = cfg.MODEL.GLOBAL_W, + temporal_global_cat = cfg.MODEL.TEMPORAL_GLOBAL_CAT, spec_chans = cfg.DATASET.SPEC_CHANS, temporal_samples = cfg.DATASET.TEMPORAL_SAMPLES, init_zeros = cfg.MODEL.TEMPORAL_W, @@ -77,7 +80,17 @@ def sample(cfg, logger): attention = cfg.MODEL.ATTENTION, cycle = cfg.MODEL.CYCLE, w_weight = cfg.TRAIN.W_WEIGHT, - cycle_weight=cfg.TRAIN.CYCLE_WEIGHT,) + cycle_weight=cfg.TRAIN.CYCLE_WEIGHT, + attentional_style=cfg.MODEL.ATTENTIONAL_STYLE, + heads = cfg.MODEL.HEADS, + suploss_on_ecog = cfg.MODEL.SUPLOSS_ON_ECOGF, + less_temporal_feature = cfg.MODEL.LESS_TEMPORAL_FEATURE, + ppl_weight=cfg.MODEL.PPL_WEIGHT, + ppl_global_weight=cfg.MODEL.PPL_GLOBAL_WEIGHT, + ppld_weight=cfg.MODEL.PPLD_WEIGHT, + ppld_global_weight=cfg.MODEL.PPLD_GLOBAL_WEIGHT, + common_z = cfg.MODEL.COMMON_Z, + ) model.cuda(0) model.eval() model.requires_grad_(False) @@ -110,7 +123,7 @@ def sample(cfg, logger): logger=logger, save=False) - extra_checkpoint_data = checkpointer.load(file_name='./training_artifacts/ecog_residual_cycle_attention3264wIN_specchan64_cont/model_tmp_lod5.pth') + extra_checkpoint_data = checkpointer.load(file_name='./training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_noprogressive_HBw_ppl_ppld_localreg_debug/model_tmp_lod6.pth') # extra_checkpoint_data = checkpointer.load(file_name='./training_artifacts/ecog_residual_cycle_attention3264wIN_specchan64_more_attentfeatures/model_tmp_lod4.pth') model.eval() @@ -118,27 +131,37 @@ def sample(cfg, logger): layer_count = cfg.MODEL.LAYER_COUNT def encode(x): Z, _ = model.encode(x, layer_count - 1, 1) - Z = Z.repeat(1, 
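# The addcmul calls above inject per-pixel noise with a single learned scalar gain
# (zero-initialized, so the layer starts deterministic), broadcasting one noise map
# over all channels. Equivalent minimal sketch:
import torch

def add_noise(x, noise_weight):
    # x: (B, C, H, W); noise_weight: learned scalar parameter
    noise = torch.randn(x.shape[0], 1, x.shape[2], x.shape[3])
    return torch.addcmul(x, noise_weight, noise)  # x + noise_weight * noise

x = torch.randn(2, 8, 16, 16)
assert torch.equal(add_noise(x, torch.zeros(1)), x)  # zero gain is a no-op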
model.mapping_fl.num_layers, 1) + if cfg.MODEL.TEMPORAL_W and cfg.MODEL.GLOBAL_W: + Z = (Z[0].repeat(1, model.mapping_fl.num_layers, 1, 1),Z[1].repeat(1, model.mapping_fl.num_layers, 1)) + else: + if cfg.MODEL.TEMPORAL_W: + Z = Z.repeat(1, model.mapping_fl.num_layers, 1, 1) + else: + Z = Z.repeat(1, model.mapping_fl.num_layers, 1) return Z def decode(x): - layer_idx = torch.arange(2 * cfg.MODEL.LAYER_COUNT)[np.newaxis, :, np.newaxis] - ones = torch.ones(layer_idx.shape, dtype=torch.float32) - coefs = torch.where(layer_idx < model.truncation_cutoff, ones, ones) + # layer_idx = torch.arange(2 * cfg.MODEL.LAYER_COUNT)[np.newaxis, :, np.newaxis] + # ones = torch.ones(layer_idx.shape, dtype=torch.float32) + # coefs = torch.where(layer_idx < model.truncation_cutoff, ones, ones) # x = torch.lerp(model.dlatent_avg.buff.data, x, coefs) return model.decoder(x, layer_count - 1, 1, noise=True) - import pdb; pdb.set_trace() rnd = np.random.RandomState(4) latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE) path = cfg.DATASET.SAMPLES_PATH im_size = 2 ** (cfg.MODEL.LAYER_COUNT + 1) - pathA = 'kite.npy' - pathB = 'cat.npy' + # pathA = 'kite.npy' + # pathB = 'cat.npy' + # pathC = 'hat.npy' + # pathD = 'cake.npy' + pathA = 'vase.npy' + pathB = 'cow.npy' pathC = 'hat.npy' pathD = 'cake.npy' + # def open_image(filename): # img = np.asarray(Image.open(path + '/' + filename)) # if img.shape[2] == 4: @@ -157,17 +180,26 @@ def decode(x): def open_image(filename): im = np.load(os.path.join(path, filename)) x = torch.tensor(np.asarray(im, dtype=np.float32), device='cpu', requires_grad=True).cuda() - factor = x.shape[2] // im_size + factor = x.shape[1] // im_size if factor != 1: x = torch.nn.functional.avg_pool2d(x[None, ...], factor, factor)[0] - assert x.shape[2] == im_size + assert x.shape[1] == im_size _latents = encode(x[None, ...].cuda()) - latents = _latents[0, 0] + if cfg.MODEL.TEMPORAL_W and cfg.MODEL.GLOBAL_W: + latents = (_latents[0][0,0],_latents[1][0,0]) + else: + latents = _latents[0, 0] return latents def make(w): with torch.no_grad(): - w = w[None, None, ...].repeat(1, model.mapping_fl.num_layers, 1) + if cfg.MODEL.TEMPORAL_W and cfg.MODEL.GLOBAL_W: + w = (w[0][None, None, ...].repeat(1, model.mapping_fl.num_layers, 1, 1),w[1][None, None, ...].repeat(1, model.mapping_fl.num_layers, 1)) + else: + if cfg.MODEL.TEMPORAL_W: + w = w[None, None, ...].repeat(1, model.mapping_fl.num_layers, 1, 1) + else: + w = w[None, None, ...].repeat(1, model.mapping_fl.num_layers, 1) x_rec = decode(w) return x_rec @@ -175,9 +207,9 @@ def make(w): wb = open_image(pathB) wc = open_image(pathC) wd = open_image(pathD) - import pdb; pdb.set_trace() - height = 7 - width = 7 + import pdb;pdb.set_trace() + height = 10 + width = 10 images = [] @@ -191,18 +223,22 @@ def make(w): kc = (1.0 - kh) * kv kd = kh * kv - w = ka * wa + kb * wb + kc * wc + kd * wd + if cfg.MODEL.TEMPORAL_W and cfg.MODEL.GLOBAL_W: + w = ((1-kh) * wa[0] + kh * wb[0] , (1-kv) * wa[1] + kv * wb[1]) + else: + w = ka * wa + kb * wb + kc * wc + kd * wd interpolated = make(w) images.append(interpolated) images = torch.cat(images) + images = images.permute(0,1,3,2) os.makedirs('make_figures/output/%s' % cfg.NAME, exist_ok=True) - save_image(images * 0.5 + 0.5, 'make_figures/output/%s/interpolations.png' % cfg.NAME, nrow=width) - save_image(images * 0.5 + 0.5, 'make_figures/output/%s/interpolations.jpg' % cfg.NAME, nrow=width) + save_image(images * 0.5 + 0.5, 'make_figures/output/%s/interpolations_vase_cow.png' % cfg.NAME, nrow=width) + save_image(images * 0.5 + 0.5, 
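# The figure script above blends four corner latents bilinearly over a grid (or, in
# the temporal+global case, interpolates the two codes along the two grid axes
# independently). Minimal sketch of the four-corner blend; the [0,1] normalization
# of kh/kv is an assumption:
import numpy as np

def bilinear_grid(wa, wb, wc, wd, height=10, width=10):
    grid = []
    for y in range(height):
        kv = y / (height - 1.0)
        for x in range(width):
            kh = x / (width - 1.0)
            ka, kb = (1 - kh) * (1 - kv), kh * (1 - kv)
            kc, kd = (1 - kh) * kv, kh * kv
            grid.append(ka * wa + kb * wb + kc * wc + kd * wd)
    return grid

corners = np.random.randn(4, 128)
cells = bilinear_grid(*corners)  # 100 interpolated latents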
'make_figures/output/%s/interpolations_vase_cow.jpg' % cfg.NAME, nrow=width) if __name__ == "__main__": gpu_count = 1 - run(sample, get_cfg_defaults(), description='ALAE-interpolations', default_config='configs/ecog.yaml', + run(sample, get_cfg_defaults(), description='ALAE-interpolations', default_config='configs/ecog_style2.yaml', world_size=gpu_count, write_log=False) diff --git a/model.py b/model.py index 837c1b51..e32b2f65 100644 --- a/model.py +++ b/model.py @@ -28,52 +28,73 @@ def __init__(self, dlatent_size, layer_count,temporal_w=False,temporal_samples=1 buffer = torch.zeros(layer_count, dlatent_size, dtype=torch.float32) self.register_buffer('buff', buffer) +class PPL_MEAN(nn.Module): + def __init__(self): + super(PPL_MEAN, self).__init__() + buffer = torch.zeros(1, dtype=torch.float32) + self.register_buffer('buff', buffer) + class Model(nn.Module): def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, uniq_words=50, mapping_layers=5, dlatent_avg_beta=None, truncation_psi=None, truncation_cutoff=None, style_mixing_prob=None, channels=3, generator="", encoder="", - z_regression=False,average_w=False,spec_chans = 128,temporal_samples=128,temporal_w=False, init_zeros=False, - residual=False,w_classifier=False,attention=None,cycle=None,w_weight=1.0,cycle_weight=1.0, attentional_style=False,heads=1): + z_regression=False,average_w=False,spec_chans = 128,temporal_samples=128,temporal_w=False, global_w=True,temporal_global_cat = False,init_zeros=False, + residual=False,w_classifier=False,attention=None,cycle=None,w_weight=1.0,cycle_weight=1.0, attentional_style=False,heads=1, + ppl_weight=100,ppl_global_weight=100,ppld_weight=1,ppld_global_weight=1,common_z = False, + with_ecog = False, ecog_encoder="",suploss_on_ecog=False,less_temporal_feature=False): super(Model, self).__init__() self.layer_count = layer_count self.z_regression = z_regression + self.common_z = common_z self.temporal_w = temporal_w + self.global_w = global_w + self.temporal_global_cat = temporal_global_cat self.w_classifier = w_classifier self.cycle = cycle self.w_weight=w_weight self.cycle_weight=cycle_weight - + self.ppl_weight = ppl_weight + self.ppl_global_weight = ppl_global_weight + self.ppld_weight = ppld_weight + self.ppld_global_weight = ppld_global_weight + self.suploss_on_ecog = suploss_on_ecog + self.with_ecog = with_ecog + latent_feature = latent_size//4 if (temporal_w and less_temporal_feature) else latent_size self.mapping_tl = MAPPINGS["MappingToLatent"]( - latent_size=latent_size, + latent_size=latent_feature, dlatent_size=latent_size, mapping_fmaps=latent_size, mapping_layers=5 if temporal_w else 3, - temporal_w = False) + temporal_w = temporal_w, + global_w = global_w) self.mapping_tw = MAPPINGS["MappingToWord"]( - latent_size=latent_size, + latent_size=latent_feature, uniq_words=uniq_words, mapping_fmaps=latent_size, mapping_layers=1, - temporal_w = False) + temporal_w = temporal_w) self.mapping_fl = MAPPINGS["MappingFromLatent"]( num_layers=2 * layer_count, - latent_size=latent_size, + latent_size=latent_feature, dlatent_size=latent_size, mapping_fmaps=latent_size, mapping_layers=mapping_layers, - temporal_w = temporal_w) + temporal_w = temporal_w, + global_w = global_w) self.decoder = GENERATORS[generator]( startf=startf, layer_count=layer_count, maxf=maxf, - latent_size=latent_size, + latent_size=latent_feature, channels=channels, spec_chans=spec_chans, temporal_samples = temporal_samples, temporal_w = temporal_w, + global_w = global_w, + temporal_global_cat = 
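# DLatent and PPL_MEAN above keep running statistics in register_buffer: the values
# travel with state_dict()/checkpoints and .to(device), but are invisible to the
# optimizer. Minimal sketch:
import torch
from torch import nn

class RunningScalar(nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer('buff', torch.zeros(1))  # state, not a parameter

m = RunningScalar()
m.buff.data += 3.0
assert 'buff' in m.state_dict() and len(list(m.parameters())) == 0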
temporal_global_cat, init_zeros = init_zeros, residual = residual, attention=attention, @@ -85,153 +106,348 @@ def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, uniq_wor startf=startf, layer_count=layer_count, maxf=maxf, - latent_size=latent_size, + latent_size=latent_feature, channels=channels, spec_chans=spec_chans, temporal_samples = temporal_samples, average_w=average_w, temporal_w = temporal_w, + global_w = global_w, + temporal_global_cat = temporal_global_cat, residual = residual, attention=attention, attentional_style=attentional_style, heads = heads, ) - self.dlatent_avg = DLatent(latent_size, self.mapping_fl.num_layers,temporal_w=temporal_w) + if with_ecog: + self.ecog_encoder = ECOG_ENCODER[ecog_encoder]( + latent_size = latent_feature, + average_w = average_w, + temporal_w=temporal_w, + global_w = global_w, + attention=attention, + temporal_samples=temporal_samples, + attentional_style=attentional_style, + heads=heads, + ) + + self.dlatent_avg = DLatent(latent_feature, self.mapping_fl.num_layers,temporal_w=temporal_w) + self.ppl_mean = PPL_MEAN() + self.ppl_d_mean = PPL_MEAN() + if temporal_w and global_w: + self.dlatent_avg_global = DLatent(latent_feature, self.mapping_fl.num_layers,temporal_w=False) + self.ppl_mean_global = PPL_MEAN() + self.ppl_d_mean_global = PPL_MEAN() self.latent_size = latent_size self.dlatent_avg_beta = dlatent_avg_beta self.truncation_psi = truncation_psi self.style_mixing_prob = style_mixing_prob self.truncation_cutoff = truncation_cutoff - def generate(self, lod, blend_factor, z=None, count=32, mixing=True, noise=True, return_styles=False, no_truncation=False): + def generate(self, lod, blend_factor, z=None, z_global=None, count=32, mixing=True, noise=True, return_styles=False, no_truncation=False,ecog_only=True,ecog=None,mask_prior=None): if z is None: z = torch.randn(count, self.latent_size) - styles = self.mapping_fl(z)[:, 0] - if False:#self.w_classifier: - Z__ = self.mapping_tw(styles) - # import pdb; pdb.set_trace() - if self.temporal_w: - s = styles.view(styles.shape[0], 1, styles.shape[1],styles.shape[2]) - styles = s.repeat(1, self.mapping_fl.num_layers, 1,1) - else: - s = styles.view(styles.shape[0], 1, styles.shape[1]) - styles = s.repeat(1, self.mapping_fl.num_layers, 1) - - if self.dlatent_avg_beta is not None: - with torch.no_grad(): - batch_avg = styles.mean(dim=0) - self.dlatent_avg.buff.data.lerp_(batch_avg.data, 1.0 - self.dlatent_avg_beta) + if z_global is None: + z_global = z if self.common_z else torch.randn(count, self.latent_size) + if ecog is not None: + styles_ecog = self.ecog_encoder(ecog,mask_prior) + if self.temporal_w and self.global_w: + styles_ecog, styles_ecog_global = styles_ecog + s_ecog = styles_ecog.view(styles_ecog.shape[0], 1, styles_ecog.shape[1],styles_ecog.shape[2]) + styles_ecog = s_ecog.repeat(1, self.mapping_fl.num_layers, 1,1) + s_ecog_global = styles_ecog_global.view(styles_ecog_global.shape[0], 1, styles_ecog_global.shape[1]) + styles_ecog_global = s_ecog_global.repeat(1, self.mapping_fl.num_layers, 1) + else: + if self.temporal_w: + s_ecog = styles_ecog.view(styles_ecog.shape[0], 1, styles_ecog.shape[1],styles_ecog.shape[2]) + styles_ecog = s_ecog.repeat(1, self.mapping_fl.num_layers, 1,1) + if self.global_w: + s_ecog = styles_ecog.view(styles_ecog.shape[0], 1, styles_ecog.shape[1]) + styles_ecog = s_ecog.repeat(1, self.mapping_fl.num_layers, 1) + if self.w_classifier: + Z__ = self.mapping_tw(styles_ecog, styles_ecog_global) + + if (ecog is None) or (not ecog_only): + if 
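# Throughout generate(), one style per sample is broadcast to a copy per generator
# layer by inserting a layer axis and repeating; mixing and truncation then act per
# layer. Minimal sketch for the global (B, C) and temporal (B, C, T) cases:
import torch

num_layers = 12
w_global = torch.randn(4, 128)
w_temporal = torch.randn(4, 128, 32)
styles_global = w_global[:, None].repeat(1, num_layers, 1)         # (B, layers, C)
styles_temporal = w_temporal[:, None].repeat(1, num_layers, 1, 1)  # (B, layers, C, T)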
(self.temporal_w and self.global_w): + styles = self.mapping_fl(z,z_global) + styles, styles_global = styles + styles = styles[:,0] + styles_global = styles_global[:,0] + else: + styles = self.mapping_fl(z)[:, 0] - if mixing and self.style_mixing_prob is not None: - if random.random() < self.style_mixing_prob: - z2 = torch.randn(count, self.latent_size) - styles2 = self.mapping_fl(z2)[:, 0] + if self.temporal_w and self.global_w: + s = styles.view(styles.shape[0], 1, styles.shape[1],styles.shape[2]) + styles = s.repeat(1, self.mapping_fl.num_layers, 1,1) + s_global = styles_global.view(styles_global.shape[0], 1, styles_global.shape[1]) + styles_global = s_global.repeat(1, self.mapping_fl.num_layers, 1) + else: if self.temporal_w: - styles2 = styles2.view(styles2.shape[0], 1, styles2.shape[1],styles2.shape[2]).repeat(1, self.mapping_fl.num_layers, 1,1) + s = styles.view(styles.shape[0], 1, styles.shape[1],styles.shape[2]) + styles = s.repeat(1, self.mapping_fl.num_layers, 1,1) + if self.global_w: + s = styles.view(styles.shape[0], 1, styles.shape[1]) + styles = s.repeat(1, self.mapping_fl.num_layers, 1) + + if self.dlatent_avg_beta is not None: + with torch.no_grad(): + batch_avg = styles.mean(dim=0) + self.dlatent_avg.buff.data.lerp_(batch_avg.data, 1.0 - self.dlatent_avg_beta) + if self.temporal_w and self.global_w: + batch_avg = styles_global.mean(dim=0) + self.dlatent_avg_global.buff.data.lerp_(batch_avg.data, 1.0 - self.dlatent_avg_beta) + + if mixing and self.style_mixing_prob is not None: + if random.random() < self.style_mixing_prob: + cur_layers = (lod + 1) * 2 + mixing_cutoff = random.randint(1, cur_layers) + layer_idx = torch.arange(self.mapping_fl.num_layers) + z2 = torch.randn(count, self.latent_size) + z2_global = z2 if self.common_z else torch.randn(count, self.latent_size) + if (self.temporal_w and self.global_w): + styles2 = self.mapping_fl(z2,z2_global) + styles2, styles2_global = styles2 + styles2 = styles2[:,0] + styles2_global = styles2_global[:,0] + else: + styles2 = self.mapping_fl(z2)[:, 0] + if self.temporal_w and self.global_w: + styles2 = styles2.view(styles2.shape[0], 1, styles2.shape[1],styles2.shape[2]).repeat(1, self.mapping_fl.num_layers, 1,1) + styles = torch.where(layer_idx[np.newaxis, :, np.newaxis,np.newaxis] < mixing_cutoff, styles, styles2) + styles2_global = styles2_global.view(styles2_global.shape[0], 1, styles2_global.shape[1]).repeat(1, self.mapping_fl.num_layers, 1) + styles_global = torch.where(layer_idx[np.newaxis, :, np.newaxis] < mixing_cutoff, styles_global, styles2_global) + else: + if self.temporal_w: + styles2 = styles2.view(styles2.shape[0], 1, styles2.shape[1],styles2.shape[2]).repeat(1, self.mapping_fl.num_layers, 1,1) + styles = torch.where(layer_idx[np.newaxis, :, np.newaxis,np.newaxis] < mixing_cutoff, styles, styles2) + if self.global_w: + styles2 = styles2.view(styles2.shape[0], 1, styles2.shape[1]).repeat(1, self.mapping_fl.num_layers, 1) + styles = torch.where(layer_idx[np.newaxis, :, np.newaxis] < mixing_cutoff, styles, styles2) + + if (self.truncation_psi is not None) and not no_truncation: + if self.temporal_w and self.global_w: layer_idx = torch.arange(self.mapping_fl.num_layers)[np.newaxis, :, np.newaxis,np.newaxis] - else: - styles2 = styles2.view(styles2.shape[0], 1, styles2.shape[1]).repeat(1, self.mapping_fl.num_layers, 1) + ones = torch.ones(layer_idx.shape, dtype=torch.float32) + coefs = torch.where(layer_idx < self.truncation_cutoff, self.truncation_psi * ones, ones) + styles = torch.lerp(self.dlatent_avg.buff.data, 
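# Style mixing above samples a second latent, draws a cutoff among the layers active
# at the current LOD, and swaps in the second code above the cutoff. Minimal sketch
# for the global case:
import random
import torch

def mix_styles(styles, styles2, lod):
    # styles, styles2: (B, num_layers, C)
    cutoff = random.randint(1, (lod + 1) * 2)
    layer_idx = torch.arange(styles.shape[1])[None, :, None]
    return torch.where(layer_idx < cutoff, styles, styles2)

mixed = mix_styles(torch.randn(4, 12, 128), torch.randn(4, 12, 128), lod=5)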
styles, coefs) layer_idx = torch.arange(self.mapping_fl.num_layers)[np.newaxis, :, np.newaxis] - cur_layers = (lod + 1) * 2 - mixing_cutoff = random.randint(1, cur_layers) - styles = torch.where(layer_idx < mixing_cutoff, styles, styles2) + ones = torch.ones(layer_idx.shape, dtype=torch.float32) + coefs = torch.where(layer_idx < self.truncation_cutoff, self.truncation_psi * ones, ones) + styles_global = torch.lerp(self.dlatent_avg_global.buff.data, styles_global, coefs) + else: + if self.temporal_w: + layer_idx = torch.arange(self.mapping_fl.num_layers)[np.newaxis, :, np.newaxis,np.newaxis] + ones = torch.ones(layer_idx.shape, dtype=torch.float32) + coefs = torch.where(layer_idx < self.truncation_cutoff, self.truncation_psi * ones, ones) + styles = torch.lerp(self.dlatent_avg.buff.data, styles, coefs) + if self.global_w: + layer_idx = torch.arange(self.mapping_fl.num_layers)[np.newaxis, :, np.newaxis] + ones = torch.ones(layer_idx.shape, dtype=torch.float32) + coefs = torch.where(layer_idx < self.truncation_cutoff, self.truncation_psi * ones, ones) + styles = torch.lerp(self.dlatent_avg_global.buff.data, styles, coefs) - if (self.truncation_psi is not None) and not no_truncation: - if self.temporal_w: - layer_idx = torch.arange(self.mapping_fl.num_layers)[np.newaxis, :, np.newaxis,np.newaxis] - else: - layer_idx = torch.arange(self.mapping_fl.num_layers)[np.newaxis, :, np.newaxis] - ones = torch.ones(layer_idx.shape, dtype=torch.float32) - coefs = torch.where(layer_idx < self.truncation_cutoff, self.truncation_psi * ones, ones) - styles = torch.lerp(self.dlatent_avg.buff.data, styles, coefs) # import pdb; pdb.set_trace() + if ecog is not None: + if (not ecog_only): + styles = torch.cat([styles_ecog,styles],dim=0) + s = torch.cat([s_ecog,s],dim=0) + if self.temporal_w and self.global_w: + styles_global = torch.cat([styles_ecog_global,styles_global],dim=0) + s_global = torch.cat([s_ecog_global,s_global],dim=0) + else: + styles = styles_ecog + s = s_ecog + if self.temporal_w and self.global_w: + styles_global = styles_ecog_global + s_global = s_ecog_global + + if self.temporal_w and self.global_w: + styles = (styles,styles_global) rec = self.decoder.forward(styles, lod, blend_factor, noise) - if False:#self.w_classifier: + # import pdb; pdb.set_trace() + if self.w_classifier: if return_styles: - return s, rec, Z__ + if self.temporal_w and self.global_w: + return (s, s_global), rec, Z__ + else: + return s, rec, Z__ else: return rec,Z__ else: if return_styles: - return s, rec + if self.temporal_w and self.global_w: + return (s, s_global), rec + else: + return s, rec else: return rec def encode(self, x, lod, blend_factor,word_classify=False): Z = self.encoder(x, lod, blend_factor) - # import pdb; pdb.set_trace() - Z_ = self.mapping_tl(Z[:,0]) + if self.temporal_w and self.global_w: + Z,Z_global = Z + + Z_ = self.mapping_tl(Z[:,0],Z_global[:,0]) if (self.temporal_w and self.global_w) else self.mapping_tl(Z[:,0]) if word_classify: - Z__ = self.mapping_tw(Z[:,0]) - return Z[:, :1], Z_[:, 1, 0], Z__ + Z__ = self.mapping_tw(Z[:,0],Z_global[:,0]) if (self.temporal_w and self.global_w) else self.mapping_tw(Z[:,0]) + if self.temporal_w and self.global_w: + return (Z[:, :1],Z_global[:,:1]), Z_[:, 1, 0], Z__ + else: + return Z[:, :1], Z_[:, 1, 0], Z__ else: - return Z[:, :1], Z_[:, 1, 0] + if self.temporal_w and self.global_w: + return (Z[:, :1],Z_global[:,:1]), Z_[:, 1, 0] + else: + return Z[:, :1], Z_[:, 1, 0] - def forward(self, x, lod, blend_factor, d_train, ae, words=None): + def forward(self, x, 
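# The truncation trick above interpolates early-layer styles toward the running
# average latent with factor psi, leaving layers past truncation_cutoff untouched.
# Minimal sketch:
import torch

def truncate(styles, w_avg, psi=0.7, cutoff=8):
    # styles: (B, num_layers, C); w_avg: (num_layers, C)
    layer_idx = torch.arange(styles.shape[1])[None, :, None]
    ones = torch.ones(layer_idx.shape, dtype=torch.float32)
    coefs = torch.where(layer_idx < cutoff, psi * ones, ones)
    return torch.lerp(w_avg, styles, coefs)  # w_avg + coefs * (styles - w_avg)

out = truncate(torch.randn(4, 12, 128), torch.zeros(12, 128))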
lod, blend_factor, d_train, ae, tracker,words=None,apply_encoder_guide=False,apply_w_classifier=False,apply_cycle=True,apply_gp=True,apply_ppl=True,apply_ppl_d=False,ecog=None,sup=True,mask_prior=None,gan=True): if ae: self.encoder.requires_grad_(True) z = torch.randn(x.shape[0], self.latent_size) - s, rec = self.generate(lod, blend_factor, z=z, mixing=False, noise=True, return_styles=True) + if self.temporal_w and self.global_w: + z_global = z if self.common_z else torch.randn(x.shape[0], self.latent_size) + else: + z_global = None + s, rec = self.generate(lod, blend_factor, z=z, z_global=z_global, mixing=False, noise=True, return_styles=True,ecog=ecog,mask_prior=mask_prior) Z, _ = self.encode(rec, lod, blend_factor) - - if self.cycle: + do_cycle = self.cycle and apply_cycle + if do_cycle: Z_real, _ = self.encode(x, lod, blend_factor) + if self.temporal_w and self.global_w: + Z_real,Z_real_global = Z_real + Z_real_global = Z_real_global.repeat(1, self.mapping_fl.num_layers, 1) Z_real = Z_real.repeat(1, self.mapping_fl.num_layers, 1) - rec = self.decoder.forward(Z_real, lod, blend_factor, noise=True) - Lcycle = self.cycle_weight*torch.mean((rec.detach() - x.detach()).abs()) + rec = self.decoder.forward((Z_real,Z_real_global) if (self.temporal_w and self.global_w) else Z_real, lod, blend_factor, noise=True) + Lcycle = self.cycle_weight*torch.mean((rec - x).abs()) + tracker.update(dict(Lcycle=Lcycle)) + else: + Lcycle=0 - assert Z.shape == s.shape + # assert Z.shape == s.shape if self.z_regression: Lae = self.w_weight*torch.mean(((Z[:, 0] - z)**2)) else: - Lae = self.w_weight*torch.mean(((Z - s.detach())**2)) - - if self.cycle: - return Lae,Lcycle - else: - return Lae + if self.temporal_w and self.global_w: + Z,Z_global = Z + s,s_global = s + Lae = self.w_weight*(torch.mean(((Z - s.detach())**2)) + torch.mean(((Z_global - s_global.detach())**2))) + else: + Lae = self.w_weight*torch.mean(((Z - s.detach())**2)) + tracker.update(dict(Lae=Lae)) + + return Lae+Lcycle elif d_train: with torch.no_grad(): - Xp = self.generate(lod, blend_factor, count=x.shape[0], noise=True) + Xp = self.generate(lod, blend_factor, count=x.shape[0], noise=True,ecog=ecog,mask_prior=mask_prior) self.encoder.requires_grad_(True) - if self.w_classifier: + if apply_w_classifier: _, d_result_real, word_logits = self.encode(x, lod, blend_factor,word_classify=True) else: - _, d_result_real = self.encode(x, lod, blend_factor) - _, d_result_fake = self.encode(Xp.detach(), lod, blend_factor) - - loss_d = losses.discriminator_logistic_simple_gp(d_result_fake, d_result_real, x) - if self.w_classifier: + xs = torch.cat([x,Xp.requires_grad_(True)],dim=0) + w, d_result = self.encode(xs, lod, blend_factor) + if self.temporal_w and self.global_w: + w, w_global = w + w_real_global = w_global[:w_global.shape[0]//2] + w_fake_global = w_global[w_global.shape[0]//2:] + w_real = w[:w.shape[0]//2] + w_fake = w[w.shape[0]//2:] + d_result_real = d_result[:d_result.shape[0]//2] + d_result_fake = d_result[d_result.shape[0]//2:] + # w_real, d_result_real = self.encode(x, lod, blend_factor) + # w_fake, d_result_fake = self.encode(Xp.requires_grad_(True), lod, blend_factor) + + loss_d = losses.critic_loss(d_result_fake, d_result_real) + tracker.update(dict(loss_d=loss_d)) + if apply_gp: + loss_gp = losses.discriminator_logistic_simple_gp(d_result_real, x) + loss_d += loss_gp + else: + loss_gp=0 + if apply_ppl_d: + path_loss_d, self.ppl_d_mean.buff.data, path_lengths_d = losses.pl_lengths_reg(xs, w, 
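# The d_train branch above now encodes real and fake spectrograms as one concatenated
# batch and splits the outputs in half, so a single forward pass serves both the
# critic loss and the latent regularizers. Minimal sketch with a stand-in encoder:
import torch

def encode_joint(encoder, x_real, x_fake):
    xs = torch.cat([x_real, x_fake.requires_grad_(True)], dim=0)
    w, d_score = encoder(xs)
    half = xs.shape[0] // 2
    return (w[:half], d_score[:half]), (w[half:], d_score[half:])

encoder = lambda x: (x.mean(dim=(2, 3)), x.sum(dim=(1, 2, 3)))  # stand-in
(real_w, d_real), (fake_w, d_fake) = encode_joint(
    encoder, torch.randn(4, 1, 8, 8), torch.randn(4, 1, 8, 8))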
self.ppl_d_mean.buff.data,reg_on_gen=False,temporal_w = self.temporal_w) + path_loss_d =path_loss_d*self.ppld_weight + tracker.update(dict(path_loss_d=path_loss_d,path_lengths_d=path_lengths_d)) + if self.temporal_w and self.global_w and self.ppld_global_weight != 0: + path_loss_d_global, self.ppl_d_mean_global.buff.data, path_lengths_d_global = losses.pl_lengths_reg(xs, w_global, self.ppl_d_mean_global.buff.data,reg_on_gen=False,temporal_w = False) + path_loss_d_global = path_loss_d_global*self.ppld_global_weight + tracker.update(dict(path_loss_d_global=path_loss_d_global,path_lengths_d_global=path_lengths_d_global)) + path_loss_d = path_loss_d+path_loss_d_global + # path_loss_d =path_loss_d*self.ppl_weight + # path_loss, self.ppl_mean.buff.data, path_lengths = losses.pl_lengths_reg(torch.cat([x,Xp],dim=0), torch.cat([w_real,w_fake],dim=0), self.ppl_mean.buff.data ) + else: + path_loss_d=0 + if apply_w_classifier: loss_word = F.cross_entropy(word_logits,words) - return loss_d,loss_word - else: - return loss_d + tracker.update(dict(loss_word=loss_word)) + else: + loss_word=0 + return loss_d+loss_word+path_loss_d + else: with torch.no_grad(): z = torch.randn(x.shape[0], self.latent_size) + if self.temporal_w and self.global_w: + z_global = z if self.common_z else torch.randn(x.shape[0], self.latent_size) + else: + z_global = None self.encoder.requires_grad_(False) + s, rec = self.generate(lod, blend_factor, count=x.shape[0], z=z.detach(), z_global=z_global, noise=True,return_styles=True,ecog=ecog,mask_prior=mask_prior) + if self.temporal_w and self.global_w: + s,s_global = s - rec = self.generate(lod, blend_factor, count=x.shape[0], z=z.detach(), noise=True) - - _, d_result_fake = self.encode(rec, lod, blend_factor) + if gan: + _, d_result_fake = self.encode(rec, lod, blend_factor) - loss_g = losses.generator_logistic_non_saturating(d_result_fake) + loss_g = losses.generator_logistic_non_saturating(d_result_fake) + tracker.update(dict(loss_g=loss_g)) + else: + loss_g = 0 - return loss_g + if apply_encoder_guide: + Z_real, _ = self.encode(x, lod, blend_factor) + if self.temporal_w and self.global_w: + Z_real,Z_real_global = Z_real + loss_w_sup = self.w_weight*(torch.mean(((Z_real - s)**2))+torch.mean(((Z_real_global - s_global)**2))) + else: + loss_w_sup = self.w_weight*torch.mean(((Z_real - s)**2)) + tracker.update(dict(loss_w_sup=loss_w_sup)) + else: + loss_w_sup=0 + + if apply_ppl: + path_loss, self.ppl_mean.buff.data, path_lengths = losses.pl_lengths_reg(s, rec, self.ppl_mean.buff.data,reg_on_gen=True,temporal_w = self.temporal_w) + path_loss =path_loss*self.ppl_weight + tracker.update(dict(path_loss=path_loss, path_lengths=path_lengths)) + if self.temporal_w and self.global_w: + path_loss_global, self.ppl_mean_global.buff.data, path_lengths_global = losses.pl_lengths_reg(s_global, rec, self.ppl_mean_global.buff.data,reg_on_gen=True,temporal_w = False) + path_loss_global =path_loss_global*self.ppl_global_weight + tracker.update(dict(path_loss_global=path_loss_global, path_lengths_global=path_lengths_global)) + path_loss = path_loss+path_loss_global + else: + path_loss = 0 + if ecog is not None and sup: + loss_sup = torch.mean((rec - x).abs()) + tracker.update(dict(loss_sup=loss_sup)) + else: + loss_sup = 0 + if ecog is not None and self.suploss_on_ecog: + return loss_g+ path_loss, loss_sup + loss_w_sup + else: + return loss_g+ path_loss+ loss_sup + loss_w_sup def lerp(self, other, betta,w_classifier=False): if hasattr(other, 'module'): other = other.module with torch.no_grad(): - params 
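# In the generator branch above, the objective is a sum of optional parts: the
# adversarial term, the path-length penalty, an L1 spectrogram supervision when ECoG
# is provided, and an encoder-guide term pulling predicted styles toward the
# encoder's latents for the real input. Simplified sketch with hypothetical tensors:
import torch

rec, x = torch.randn(4, 1, 32, 32), torch.randn(4, 1, 32, 32)
Z_real, s = torch.randn(4, 128), torch.randn(4, 128)
w_weight = 1.0

loss_sup = (rec - x).abs().mean()                    # L1 supervision on spectrograms
loss_w_sup = w_weight * ((Z_real - s) ** 2).mean()   # encoder guide on styles
total = loss_sup + loss_w_sup                        # plus loss_g and path_loss when enabled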
= list(self.mapping_tl.parameters())+ list(self.mapping_fl.parameters()) + list(self.decoder.parameters()) + list(self.encoder.parameters()) + list(self.dlatent_avg.parameters()) + (list(self.mapping_tw.parameters()) if w_classifier else []) - other_param = list(other.mapping_tl.parameters()) + list(other.mapping_fl.parameters()) + list(other.decoder.parameters()) + list(other.encoder.parameters()) + list(other.dlatent_avg.parameters()) + (list(other.mapping_tw.parameters()) if w_classifier else []) + params = list(self.mapping_tl.parameters())+ list(self.mapping_fl.parameters()) + list(self.decoder.parameters()) + list(self.encoder.parameters()) + list(self.dlatent_avg.parameters()) + (list(other.dlatent_avg_global.parameters()) if (self.temporal_w and self.global_w) else []) + (list(self.mapping_tw.parameters()) if w_classifier else []) + (list(self.ecog_encoder.parameters()) if self.with_ecog else []) + other_param = list(other.mapping_tl.parameters()) + list(other.mapping_fl.parameters()) + list(other.decoder.parameters()) + list(other.encoder.parameters()) + list(other.dlatent_avg.parameters()) + (list(other.dlatent_avg_global.parameters()) if (self.temporal_w and self.global_w) else []) + (list(other.mapping_tw.parameters()) if w_classifier else []) + (list(other.ecog_encoder.parameters()) if self.with_ecog else []) for p, p_other in zip(params, other_param): p.data.lerp_(p_other.data, 1.0 - betta) diff --git a/net.py b/net.py index 6e87e922..d262f4ce 100644 --- a/net.py +++ b/net.py @@ -38,15 +38,25 @@ def pixel_norm(x, epsilon=1e-8): return x * torch.rsqrt(torch.mean(x.pow(2.0), dim=1, keepdim=True) + epsilon) -def style_mod(x, style, bias = True): - if style.dim()==2: - style = style.view(style.shape[0], 2, x.shape[1], 1, 1) - elif style.dim()==3: - style = style.view(style.shape[0], 2, x.shape[1], style.shape[2], 1) - if bias: - return torch.addcmul(style[:, 1], value=1.0, tensor1=x, tensor2=style[:, 0] + 1) +def style_mod(x, style1, style2=None, bias = True): + if style1.dim()==2: + style1 = style1.view(style1.shape[0], 2, x.shape[1], 1, 1) + elif style1.dim()==3: + style1 = style1.view(style1.shape[0], 2, x.shape[1], style1.shape[2], 1) + if style2 is None: + if bias: + return torch.addcmul(style1[:, 1], value=1.0, tensor1=x, tensor2=style1[:, 0] + 1) + else: + return x*(style1[:,0]+1) else: - return x*(style[:,0]+1) + if style2.dim()==2: + style2 = style2.view(style2.shape[0], 2, x.shape[1], 1, 1) + elif style2.dim()==3: + style2 = style2.view(style2.shape[0], 2, x.shape[1], style2.shape[2], 1) + if bias: + return torch.addcmul(style1[:, 1]+style2[:, 1], value=1.0, tensor1=x, tensor2=(style1[:, 0] + 1)*(style2[:, 0] + 1)) + else: + return x*(style1[:,0]+1)*(style2[:,0]+1) def upscale2d(x, factor=2): @@ -81,42 +91,100 @@ def forward(self, x): return F.conv2d(x, weight=self.weight, groups=self.groups, padding=1) class AdaIN(nn.Module): - def __init__(self, latent_size,outputs,temporal_w=False): + def __init__(self, latent_size,outputs,temporal_w=False,global_w=True,temporal_global_cat = False): super(AdaIN, self).__init__() self.instance_norm = nn.InstanceNorm2d(outputs,affine=False, eps=1e-8) - self.style = sn(ln.Conv1d(latent_size, 2 * outputs,1,1,0,gain=1)) if temporal_w else sn(ln.Linear(latent_size, 2 * outputs, gain=1)) - def forward(self,x,w): + self.global_w = global_w + self.temporal_w = temporal_w + self.temporal_global_cat = temporal_global_cat and (temporal_w and global_w) + if temporal_w and global_w: + if self.temporal_global_cat: + self.style = 
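# Model.lerp above keeps an exponential moving average of every trainable piece (now
# including dlatent_avg_global and the ECoG encoder when present). The underlying
# update, as a minimal sketch:
import torch
from torch import nn

@torch.no_grad()
def ema_update(model_ema, model, betta=0.999):
    for p_ema, p in zip(model_ema.parameters(), model.parameters()):
        p_ema.data.lerp_(p.data, 1.0 - betta)  # p_ema <- betta*p_ema + (1-betta)*p

net, net_ema = nn.Linear(8, 8), nn.Linear(8, 8)
ema_update(net_ema, net)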
sn(ln.Conv1d(2*latent_size, 2 * outputs,1,1,0,gain=1)) + else: + self.style = sn(ln.Conv1d(latent_size, 2 * outputs,1,1,0,gain=1)) + self.style_global = sn(ln.Linear(latent_size, 2 * outputs, gain=1)) + else: + if temporal_w: + self.style = sn(ln.Conv1d(latent_size, 2 * outputs,1,1,0,gain=1)) + if global_w: + self.style = sn(ln.Linear(latent_size, 2 * outputs, gain=1)) + def forward(self,x,w=None,w_global=None): x = self.instance_norm(x) - x = style_mod(x,self.style(w)) + if self.temporal_w and self.global_w: + if self.temporal_global_cat: + w = torch.cat((w,w_global.unsqueeze(2).repeat(1,1,w.shape[2])),dim=1) + x = style_mod(x,self.style(w)) + else: + x = style_mod(x,self.style(w),self.style_global(w_global)) + else: + x = style_mod(x,self.style(w)) return x class INencoder(nn.Module): - def __init__(self, inputs,latent_size,temporal_w=False): + def __init__(self, inputs,latent_size,temporal_w=False,global_w=True,temporal_global_cat = False,use_statistic=True): super(INencoder, self).__init__() self.temporal_w = temporal_w + self.global_w = global_w + self.temporal_global_cat = temporal_global_cat and (temporal_w and global_w) + self.use_statistic = use_statistic self.instance_norm = nn.InstanceNorm2d(inputs,affine=False) - self.style = sn(ln.Conv1d(2 * inputs, latent_size,1,1,0)) if temporal_w else sn(ln.Linear(2 * inputs, latent_size)) + if global_w and not(temporal_w): + self.style = sn(ln.Linear((2 * inputs) if use_statistic else inputs , latent_size)) + if temporal_w and not(global_w): + self.style = sn(ln.Conv1d((2 * inputs) if use_statistic else inputs, latent_size,1,1,0)) + if temporal_w and global_w: + if self.temporal_global_cat: + self.style = sn(ln.Conv1d((4 * inputs) if use_statistic else inputs, 2*latent_size,1,1,0)) + else: + self.style_local = sn(ln.Conv1d((2 * inputs) if use_statistic else inputs, latent_size,1,1,0)) + self.style_global = sn(ln.Linear((2 * inputs) if use_statistic else inputs, latent_size)) + def forward(self,x): - m = torch.mean(x, dim=[3] if self.temporal_w else [2,3], keepdim=True) - std = torch.sqrt(torch.mean((x - m) ** 2, dim=[3] if self.temporal_w else [2,3], keepdim=True)) - style = torch.cat((m,std),dim=1) - x = self.instance_norm(x) - if self.temporal_w: - w = self.style(style.view(style.shape[0], style.shape[1],style.shape[2])) + m_local = torch.mean(x, dim=[3], keepdim=True) + std_local = torch.sqrt(torch.mean((x - m_local) ** 2, dim=[3], keepdim=True)+1e-8) + m_global = torch.mean(x, dim=[2,3], keepdim=True) + std_global = torch.sqrt(torch.mean((x - m_global) ** 2, dim=[2,3], keepdim=True)+1e-8) + if self.use_statistic: + style_local = torch.cat((m_local,std_local),dim=1) + style_global = torch.cat((m_global,std_global),dim=1) else: - w = self.style(style.view(style.shape[0], style.shape[1])) - return x,w + style_local = x + style_global = x + x = self.instance_norm(x) + if self.global_w and not(self.temporal_w): + w = self.style(style_global.view(style_global.shape[0], style_global.shape[1])) + return x,w + if self.temporal_w and not(self.global_w): + w = self.style(style_local.view(style_local.shape[0], style_local.shape[1],style_local.shape[2])) + return x,w + if self.temporal_w and self.global_w: + if self.temporal_global_cat: + if self.use_statistic: + style = torch.cat((style_local,style_global.repeat(1,1,style_local.shape[2],1)),dim=1) + else: + style = style_local + w = self.style(style.view(style.shape[0], style.shape[1],style.shape[2])) + w_local = w[:,:w.shape[1]//2] + w_global = torch.mean(w[:,w.shape[1]//2:],dim=[2]) + else: + 
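# AdaIN above instance-normalizes per channel and re-applies style as scale/shift;
# with both temporal and global codes, the two modulations are combined with
# multiplied gains and added biases (the two-style branch of style_mod). Minimal
# sketch of that combination:
import torch

def dual_style_mod(x, s_temporal, s_global):
    # x: (B, C, T, F) after instance norm; s_temporal: (B, 2C, T); s_global: (B, 2C)
    B, C = x.shape[:2]
    st = s_temporal.view(B, 2, C, -1, 1)
    sg = s_global.view(B, 2, C, 1, 1)
    return x * (st[:, 0] + 1) * (sg[:, 0] + 1) + (st[:, 1] + sg[:, 1])

y = dual_style_mod(torch.randn(2, 8, 16, 16), torch.randn(2, 16, 16), torch.randn(2, 16))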
w_local = self.style_local(style_local.view(style_local.shape[0], style_local.shape[1],style_local.shape[2])) + if not self.use_statistic: + style_global = style_global.mean(dim=[2]) + w_global = self.style_global(style_global.view(style_global.shape[0], style_global.shape[1])) + return x,w_local,w_global class Attention(nn.Module): - def __init__(self, inputs,temporal_w=False,attentional_style=False,decoding=True,latent_size=None,heads=1): + def __init__(self, inputs,temporal_w=False,global_w=True,temporal_global_cat = False,attentional_style=False,decoding=True,latent_size=None,heads=1,demod=False): super(Attention, self).__init__() # Channel multiplier self.inputs = inputs self.temporal_w = temporal_w + self.global_w = global_w self.decoding = decoding self.attentional_style = attentional_style self.att_denorm = 8 self.heads = heads + self.demod = demod self.theta = sn(ln.Conv2d(inputs, inputs // self.att_denorm, 1,1,0, bias=False)) self.phi = sn(ln.Conv2d(inputs, inputs // self.att_denorm, 1,1,0, bias=False)) self.g = sn(ln.Conv2d(inputs, inputs // 2, 1,1,0, bias=False)) @@ -127,31 +195,58 @@ def __init__(self, inputs,temporal_w=False,attentional_style=False,decoding=True self.norm_g = nn.InstanceNorm2d(inputs // 2,affine=True) else: if decoding: - self.norm_theta = AdaIN(latent_size,inputs//self.att_denorm,temporal_w=temporal_w) - self.norm_phi = AdaIN(latent_size,inputs//self.att_denorm,temporal_w=temporal_w) - self.norm_g = AdaIN(latent_size,inputs//2,temporal_w=temporal_w) + self.norm_theta = AdaIN(latent_size,inputs//self.att_denorm,temporal_w=temporal_w,global_w=global_w,temporal_global_cat=temporal_global_cat) + self.norm_phi = AdaIN(latent_size,inputs//self.att_denorm,temporal_w=temporal_w,global_w=global_w,temporal_global_cat=temporal_global_cat) + self.norm_g = AdaIN(latent_size,inputs//2,temporal_w=temporal_w,global_w=global_w,temporal_global_cat=temporal_global_cat) else: - self.norm_theta = INencoder(inputs//self.att_denorm,latent_size,temporal_w=temporal_w) - self.norm_phi = INencoder(inputs//self.att_denorm,latent_size,temporal_w=temporal_w) - self.norm_g = INencoder(inputs//2,latent_size,temporal_w=temporal_w) + self.norm_theta = INencoder(inputs//self.att_denorm,latent_size,temporal_w=temporal_w,global_w=global_w,temporal_global_cat=temporal_global_cat) + self.norm_phi = INencoder(inputs//self.att_denorm,latent_size,temporal_w=temporal_w,global_w=global_w,temporal_global_cat=temporal_global_cat) + self.norm_g = INencoder(inputs//2,latent_size,temporal_w=temporal_w,global_w=global_w,temporal_global_cat=temporal_global_cat) + + if demod and attentional_style: + self.theta = ln.StyleConv2d(inputs, inputs // self.att_denorm,kernel_size=1,latent_size=latent_size,stride=1,padding=0, + bias=False, upsample=False,temporal_w=temporal_w,transform_kernel=False) + self.phi = ln.StyleConv2d(inputs, inputs // self.att_denorm,kernel_size=1,latent_size=latent_size,stride=1,padding=0, + bias=False, upsample=False,temporal_w=temporal_w,transform_kernel=False) + self.g = ln.StyleConv2d(inputs, inputs // 2,kernel_size=1,latent_size=latent_size,stride=1,padding=0, + bias=True, upsample=False,temporal_w=temporal_w,transform_kernel=False) + self.o = ln.Conv2d(inputs // 2, inputs, 1,1,0, bias=True) + # Learnable gain parameter self.gamma = P(torch.tensor(0.), requires_grad=True) - def forward(self, x, w=None): + def forward(self, x, w_local=None,w_global=None): # Apply convs x = x.contiguous() theta = self.theta(x) - theta = self.norm_theta(theta,w) if (self.attentional_style and 
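# INencoder above is the encoder-side inverse of AdaIN: the instance statistics that
# normalization removes become the style, projected per timestep for the temporal
# code and pooled over time and frequency for the global one. Minimal sketch of the
# two-branch case, with stand-in projections:
import torch
from torch import nn

def instance_stats_to_style(x, proj_local, proj_global):
    # x: (B, C, T, F)
    m_l = x.mean(dim=[3], keepdim=True)
    s_l = ((x - m_l) ** 2).mean(dim=[3], keepdim=True).add(1e-8).sqrt()
    m_g = x.mean(dim=[2, 3], keepdim=True)
    s_g = ((x - m_g) ** 2).mean(dim=[2, 3], keepdim=True).add(1e-8).sqrt()
    w_local = proj_local(torch.cat([m_l, s_l], dim=1).squeeze(3))    # (B, latent, T)
    w_global = proj_global(torch.cat([m_g, s_g], dim=1).flatten(1))  # (B, latent)
    return w_local, w_global

w_l, w_g = instance_stats_to_style(torch.randn(2, 8, 32, 16),
                                   nn.Conv1d(16, 128, 1), nn.Linear(16, 128))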
self.decoding) else self.norm_theta(theta) phi = F.max_pool2d(self.phi(x), [2,2]) - phi = self.norm_phi(phi,w) if (self.attentional_style and self.decoding) else self.norm_phi(phi) g = F.max_pool2d(self.g(x), [2,2]) - g = self.norm_g(g,w) if (self.attentional_style and self.decoding) else self.norm_g(g) + if w_local is not None and w_local.dim()==3: + w_local_down = F.avg_pool1d(w_local, 2) + else: + w_local_down = w_local + if not self.demod: + theta = self.norm_theta(theta,w_local,w_global) if (self.attentional_style and self.decoding) else self.norm_theta(theta) + phi = self.norm_phi(phi,w_local_down,w_global) if (self.attentional_style and self.decoding) else self.norm_phi(phi) + g = self.norm_g(g,w_local_down,w_global) if (self.attentional_style and self.decoding) else self.norm_g(g) if self.attentional_style and not self.decoding: - theta,w_theta = theta - phi,w_phi = phi - g,w_g = g - w = w_theta+w_phi+w_g + if self.temporal_w and self.global_w: + theta,w_theta_local,w_theta_global = theta + phi,w_phi_local,w_phi_global = phi + g,w_g_local,w_g_global = g + w_phi_local = F.interpolate(w_phi_local,scale_factor=2,mode='linear') + w_g_local = F.interpolate(w_g_local,scale_factor=2,mode='linear') + w_local = w_theta_local+w_phi_local+w_g_local + w_global = w_theta_global+w_phi_global+w_g_global + else: + theta,w_theta = theta + phi,w_phi = phi + g,w_g = g + if w_phi.dim()==3: + w_phi = F.interpolate(w_phi,scale_factor=2,mode='linear') + w_g = F.interpolate(w_g,scale_factor=2,mode='linear') + w = w_theta+w_phi+w_g # Perform reshapes self.theta_ = theta.reshape(-1, self.inputs // self.att_denorm//self.heads, self.heads ,x.shape[2] * x.shape[3]) @@ -163,15 +258,147 @@ def forward(self, x, w=None): # Attention map times g path o = self.o(torch.einsum('bchj,bhij->bchi',g, self.beta).reshape(-1, self.inputs // 2, x.shape[2], x.shape[3])) # o = self.o(torch.bmm(g, self.beta.transpose(1,2)).view(-1, self.inputs // 2, x.shape[2], x.shape[3])) - return (self.gamma * o + x, w) if (self.attentional_style and (not self.decoding)) else self.gamma * o + x + if (not self.attentional_style) or self.decoding: + return self.gamma * o + x + else: + if self.temporal_w and self.global_w: + return self.gamma * o + x, w_local, w_global + else: + return self.gamma * o + x, w + +class ToWLatent(nn.Module): + def __init__(self,inputs,latent_size,temporal_w=False,from_input=False): + super(ToWLatent,self).__init__() + self.temporal_w = temporal_w + self.from_input = from_input + if temporal_w: + self.style = sn(ln.Conv1d(inputs, latent_size,1,1,0),use_sn=USE_SN) + else: + self.style = sn(ln.Linear(inputs, latent_size),use_sn=USE_SN) + + def forward(self, x): + if not self.from_input: + if self.temporal_w: + m = torch.mean(x, dim=[3], keepdim=True) + std = torch.sqrt(torch.mean((x - m) ** 2, dim=[3], keepdim=True)+1e-8) + else: + m = torch.mean(x, dim=[2, 3], keepdim=True) + std = torch.sqrt(torch.mean((x - m) ** 2, dim=[2, 3], keepdim=True)+1e-8) + else: + std = x + if self.temporal_w: + w = self.style(std.view(std.shape[0], std.shape[1],std.shape[2])) + else: + w = self.style(std.view(std.shape[0], std.shape[1])) + return w + +class ECoGMappingBlock(nn.Module): + def __init__(self, inputs, outputs, kernel_size,dilation=1,fused_scale=True,residual=False,resample=[]): + super(ECoGMappingBlock, self).__init__() + self.residual = residual + self.inputs_resample = resample + self.dim_missmatch = (inputs!=outputs) + self.resample = resample + if not self.resample: + self.resample=1 + self.padding = 
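# The Attention module above is a SAGAN-style non-local block: 1x1 theta/phi/g
# projections, phi and g max-pooled 2x, softmax attention, and a zero-initialized
# gamma so the block starts as an identity. Single-head minimal sketch (the heads
# and style-normalization details above are omitted):
import torch
import torch.nn.functional as F
from torch import nn

def non_local(x, theta, phi, g, o, gamma):
    B, C, H, W = x.shape
    t = theta(x).flatten(2)                    # (B, C//8, HW)
    p = F.max_pool2d(phi(x), 2).flatten(2)     # (B, C//8, HW/4)
    v = F.max_pool2d(g(x), 2).flatten(2)       # (B, C//2, HW/4)
    beta = F.softmax(torch.einsum('bci,bcj->bij', t, p), dim=-1)
    out = torch.einsum('bcj,bij->bci', v, beta).reshape(B, C // 2, H, W)
    return gamma * o(out) + x                  # residual with learned gain

C = 16
y = non_local(torch.randn(2, C, 8, 8),
              nn.Conv2d(C, C // 8, 1, bias=False), nn.Conv2d(C, C // 8, 1, bias=False),
              nn.Conv2d(C, C // 2, 1, bias=False), nn.Conv2d(C // 2, C, 1),
              torch.tensor(0.))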
list(np.array(dilation)*(np.array(kernel_size)-1)//2) + # self.padding = [dilation[i]*(kernel_size[i]-1)//2 for i in range(len(dilation))] + if residual: + self.norm1 = nn.GroupNorm(min(inputs,32),inputs) + else: + self.norm1 = nn.GroupNorm(min(outputs,32),outputs) + self.conv1 = sn(ln.Conv3d(inputs, outputs, kernel_size, self.resample, self.padding, dilation=dilation, bias=False)) + if self.inputs_resample or self.dim_missmatch: + self.convskip = sn(ln.Conv3d(inputs, outputs, kernel_size, self.resample, self.padding, dilation=dilation, bias=False)) + + self.conv2 = sn(ln.Conv3d(outputs, outputs, kernel_size, 1, self.padding, dilation=dilation, bias=False)) + self.norm2 = nn.GroupNorm(min(outputs,32),outputs) + + def forward(self,x): + if self.residual: + x = F.leaky_relu(self.norm1(x),0.2) + if self.inputs_resample or self.dim_missmatch: + # x_skip = F.avg_pool3d(x,self.resample,self.resample) + x_skip = self.convskip(x) + else: + x_skip = x + x = F.leaky_relu(self.norm2(self.conv1(x)),0.2) + x = self.conv2(x) + x = x_skip + x + else: + x = F.leaky_relu(self.norm1(self.conv1(x)),0.2) + x = F.leaky_relu(self.norm2(self.conv2(x)),0.2) + return x + + + +class DemodEncodeBlock(nn.Module): + def __init__(self, inputs, outputs, latent_size, last=False,fused_scale=True,temporal_w=False,temporal_samples=None,resample=False,spec_chans=None,attention=False,attentional_style=False,heads=1,channels=1): + super(DemodEncodeBlock, self).__init__() + self.last = last + self.temporal_w = temporal_w + self.attention = attention + self.resample = resample + self.fused_scale = False if temporal_w else fused_scale + self.attentional_style = attentional_style + self.fromrgb = FromRGB(channels, inputs,style = True,residual=False,temporal_w=temporal_w,latent_size=latent_size) + self.conv1 = sn(ln.Conv2d(inputs, inputs, 3, 1, 1, bias=True)) + self.style1 = ToWLatent(inputs,latent_size,temporal_w=temporal_w) + self.instance_norm_1 = nn.InstanceNorm2d(inputs, affine=False) + self.blur = Blur(inputs) + if attention: + self.non_local = Attention(inputs,temporal_w=temporal_w,attentional_style=attentional_style,decoding=False,latent_size=latent_size,heads=heads) + if last: + if self.temporal_w: + self.conv_2 = sn(ln.Conv2d(inputs * spec_chans, outputs, 3, 1, 1, bias=True)) + else: + self.dense = sn(ln.Linear(inputs * temporal_samples * spec_chans, outputs, bias = True)) + else: + if resample and fused_scale: + self.conv_2 = sn(ln.Conv2d(inputs, outputs, 3, 2, 1, bias=True, transform_kernel=True)) + else: + self.conv_2 = sn(ln.Conv2d(inputs, outputs, 3, 1, 1, bias=False)) + self.style2 = ToWLatent(outputs,latent_size,temporal_w=temporal_w,from_input=last) + self.instance_norm_2 = nn.InstanceNorm2d(outputs, affine=False) + + def forward(self, spec,x): + spec_feature,w0 = self.fromrgb(spec) + x = (x+spec_feature) if (x is not None) else spec_feature + if self.attention: + x = self.non_local(x) + if self.attentional_style: + x,w_attn = x + x = F.leaky_relu(self.conv1(x),0.2) + w1 = self.style1(x) + x = self.instance_norm_1(x) + + if self.last: + if self.temporal_w: + x = self.conv_2(x.view(x.shape[0], -1,x.shape[2])) + x = F.leaky_relu(x, 0.2) + w2 = self.style2(x.view(x.shape[0], x.shape[1],x.shape[2])) + else: + x = self.dense(x.view(x.shape[0], -1)) + x = F.leaky_relu(x, 0.2) + w2 = self.style2(x.view(x.shape[0], x.shape[1])) + + else: + x = F.leaky_relu(self.conv_2(self.blur(x))) + if not self.fused_scale: + x = downscale2d(x) + w2 = self.style2(x) + x = self.instance_norm_2(x) + + spec = F.avg_pool2d(spec,2,2) + w = 
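# ECoGMappingBlock above is a pre-activation residual 3D block over the electrode
# grid: GroupNorm + leaky ReLU, two dilated convolutions, and a convolutional skip
# whenever channels or resolution change. Minimal sketch without dilation:
import torch
import torch.nn.functional as F
from torch import nn

class Residual3d(nn.Module):
    def __init__(self, cin, cout, stride=1):
        super().__init__()
        self.norm1 = nn.GroupNorm(min(cin, 32), cin)
        self.conv1 = nn.Conv3d(cin, cout, 3, stride, 1, bias=False)
        self.norm2 = nn.GroupNorm(min(cout, 32), cout)
        self.conv2 = nn.Conv3d(cout, cout, 3, 1, 1, bias=False)
        self.skip = (nn.Conv3d(cin, cout, 3, stride, 1, bias=False)
                     if (cin != cout or stride != 1) else nn.Identity())

    def forward(self, x):
        x = F.leaky_relu(self.norm1(x), 0.2)  # pre-activation
        return self.skip(x) + self.conv2(F.leaky_relu(self.norm2(self.conv1(x)), 0.2))

y = Residual3d(16, 32, stride=2)(torch.randn(1, 16, 8, 8, 8))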
(w0+w1+w2+w_attn) if self.attentional_style else (w0+w1+w2) + return spec,x,w class EncodeBlock(nn.Module): - def __init__(self, inputs, outputs, latent_size, last=False,islast=False, fused_scale=True,temporal_w=False,residual=False,resample=False,temporal_samples=None,spec_chans=None): + def __init__(self, inputs, outputs, latent_size, last=False,islast=False, fused_scale=True,temporal_w=False,global_w=True,temporal_global_cat = False,residual=False,resample=False,temporal_samples=None,spec_chans=None): super(EncodeBlock, self).__init__() self.conv_1 = sn(ln.Conv2d(inputs, inputs, 3, 1, 1, bias=False)) # self.conv_1 = ln.Conv2d(inputs + (1 if last else 0), inputs, 3, 1, 1, bias=False) self.bias_1 = nn.Parameter(torch.Tensor(1, inputs, 1, 1)) - self.instance_norm_1 = nn.InstanceNorm2d(inputs, affine=False) self.blur = Blur(inputs) self.last = last self.islast = islast @@ -179,32 +406,36 @@ def __init__(self, inputs, outputs, latent_size, last=False,islast=False, fused_ self.residual = residual self.resample=resample self.temporal_w = temporal_w + self.global_w = global_w + self.temporal_global_cat = temporal_global_cat and (temporal_w and global_w) if last: if self.temporal_w: self.conv_2 = sn(ln.Conv2d(inputs * spec_chans, outputs, 3, 1, 1, bias=False)) else: self.dense = sn(ln.Linear(inputs * temporal_samples * spec_chans, outputs)) else: - if resample and fused_scale: + if resample and self.fused_scale: self.conv_2 = sn(ln.Conv2d(inputs, outputs, 3, 2, 1, bias=False, transform_kernel=True)) else: self.conv_2 = sn(ln.Conv2d(inputs, outputs, 3, 1, 1, bias=False)) self.bias_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1)) - self.instance_norm_2 = nn.InstanceNorm2d(outputs, affine=False) - - if self.temporal_w: - self.style_1 = sn(ln.Conv1d(2 * inputs, latent_size,1,1,0),use_sn=PARTIAL_SN) - if last: - self.style_2 = sn(ln.Conv1d(outputs, latent_size,1,1,0),use_sn=PARTIAL_SN) - else: - self.style_2 = sn(ln.Conv1d(2 * outputs, latent_size,1,1,0),use_sn=PARTIAL_SN) - else: - self.style_1 = sn(ln.Linear(2 * inputs, latent_size),use_sn=PARTIAL_SN) - if last: - self.style_2 = sn(ln.Linear(outputs, latent_size),use_sn=PARTIAL_SN) - else: - self.style_2 = sn(ln.Linear(2 * outputs, latent_size),use_sn=PARTIAL_SN) + self.style_1 = INencoder(inputs,latent_size,temporal_w=temporal_w,global_w=global_w,temporal_global_cat=temporal_global_cat,use_statistic=True) + self.style_2 = INencoder(outputs,latent_size,temporal_w=temporal_w,global_w=global_w,temporal_global_cat=temporal_global_cat,use_statistic=not(last)) + # self.instance_norm_2 = nn.InstanceNorm2d(outputs, affine=False) + + # if self.temporal_w: + # self.style_1 = sn(ln.Conv1d(2 * inputs, latent_size,1,1,0),use_sn=PARTIAL_SN) + # if last: + # self.style_2 = sn(ln.Conv1d(outputs, latent_size,1,1,0),use_sn=PARTIAL_SN) + # else: + # self.style_2 = sn(ln.Conv1d(2 * outputs, latent_size,1,1,0),use_sn=PARTIAL_SN) + # else: + # self.style_1 = sn(ln.Linear(2 * inputs, latent_size),use_sn=PARTIAL_SN) + # if last: + # self.style_2 = sn(ln.Linear(outputs, latent_size),use_sn=PARTIAL_SN) + # else: + # self.style_2 = sn(ln.Linear(2 * outputs, latent_size),use_sn=PARTIAL_SN) if residual and not islast: if inputs==outputs: @@ -230,20 +461,13 @@ def __init__(self, inputs, outputs, latent_size, last=False,islast=False, fused_ def forward(self, x): if self.residual: - x = F.leaky_relu(x) + x = F.leaky_relu(x,0.2) x_input = x x = self.conv_1(x) + self.bias_1 - - - if self.temporal_w: - m = torch.mean(x, dim=[3], keepdim=True) - std = torch.sqrt(torch.mean((x - m) 
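# DemodEncodeBlock above re-injects the spectrogram at every scale through FromRGB,
# pools it for the next stage, and sums the style codes gathered at each step (w0
# from FromRGB, w1/w2 from the convolutions, plus the attention code when present).
# Minimal sketch of the accumulation pattern with stand-in modules:
import torch
import torch.nn.functional as F

def encoder_stage(spec, x, fromrgb, body):
    feat, w0 = fromrgb(spec)
    x = feat if x is None else x + feat   # merge with features from the previous stage
    x, w_body = body(x)
    spec = F.avg_pool2d(spec, 2, 2)       # half-resolution input for the next stage
    return spec, x, w0 + w_body           # style codes are summed across sources

fromrgb = lambda s: (s.repeat(1, 8, 1, 1), torch.zeros(s.shape[0], 128))
body = lambda f: (F.avg_pool2d(f, 2, 2), torch.zeros(f.shape[0], 128))
spec, x, w = encoder_stage(torch.randn(2, 1, 32, 32), None, fromrgb, body)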
** 2, dim=[3], keepdim=True)) + if self.temporal_w and self.global_w: + x,w1_local,w1_global = self.style_1(x) else: - m = torch.mean(x, dim=[2, 3], keepdim=True) - std = torch.sqrt(torch.mean((x - m) ** 2, dim=[2, 3], keepdim=True)) - style_1 = torch.cat((m, std), dim=1) - - x = self.instance_norm_1(x) + x,w1 = self.style_1(x) x = F.leaky_relu(x, 0.2) if self.last: @@ -253,32 +477,19 @@ def forward(self, x): x = self.dense(x.view(x.shape[0], -1)) x = F.leaky_relu(x, 0.2) - if self.temporal_w: - w1 = self.style_1(style_1.view(style_1.shape[0], style_1.shape[1],style_1.shape[2])) - w2 = self.style_2(x.view(x.shape[0], x.shape[1],x.shape[2])) + if self.temporal_w and self.global_w: + x,w2_local,w2_global = self.style_2(x) else: - w1 = self.style_1(style_1.view(style_1.shape[0], style_1.shape[1])) - w2 = self.style_2(x.view(x.shape[0], x.shape[1])) + x,w2 = self.style_2(x) else: x = self.conv_2(self.blur(x)) x = x + self.bias_2 - - if self.temporal_w: - m = torch.mean(x, dim=[3], keepdim=True) - std = torch.sqrt(torch.mean((x - m) ** 2, dim=[3], keepdim=True)) - else: - m = torch.mean(x, dim=[2, 3], keepdim=True) - std = torch.sqrt(torch.mean((x - m) ** 2, dim=[2, 3], keepdim=True)) - style_2 = torch.cat((m, std), dim=1) - - x = self.instance_norm_2(x) - if self.temporal_w: - w1 = self.style_1(style_1.view(style_1.shape[0], style_1.shape[1],style_1.shape[2])) - w2 = self.style_2(style_2.view(style_2.shape[0], style_2.shape[1],style_2.shape[2])) + if self.temporal_w and self.global_w: + x,w2_local,w2_global = self.style_2(x) else: - w1 = self.style_1(style_1.view(style_1.shape[0], style_1.shape[1])) - w2 = self.style_2(style_2.view(style_2.shape[0], style_2.shape[1])) + x,w2 = self.style_2(x) + if not self.fused_scale: x = downscale2d(x) @@ -287,7 +498,11 @@ def forward(self, x): x = self.skip(x_input)+x else: x = F.leaky_relu(x, 0.2) - return x, w1, w2 + + if self.temporal_w and self.global_w: + return x, w1_local, w1_global, w2_local, w2_global + else: + return x, w1, w2 class DiscriminatorBlock(nn.Module): @@ -333,12 +548,14 @@ def forward(self, x): class DecodeBlock(nn.Module): - def __init__(self, inputs, outputs, latent_size, has_first_conv=True, fused_scale=True, layer=0,temporal_w=False,residual=False,resample = False): + def __init__(self, inputs, outputs, latent_size, has_first_conv=True, fused_scale=True, layer=0,temporal_w=False,global_w=True,temporal_global_cat = False,residual=False,resample = False): super(DecodeBlock, self).__init__() self.has_first_conv = has_first_conv self.inputs = inputs self.has_first_conv = has_first_conv self.temporal_w = temporal_w + self.global_w = global_w + self.temporal_global_cat = temporal_global_cat and (temporal_w and global_w) self.fused_scale = fused_scale self.residual =residual self.resample = resample @@ -352,19 +569,21 @@ def __init__(self, inputs, outputs, latent_size, has_first_conv=True, fused_scal self.noise_weight_1 = nn.Parameter(torch.Tensor(1, outputs, 1, 1)) self.noise_weight_1.data.zero_() self.bias_1 = nn.Parameter(torch.Tensor(1, outputs, 1, 1)) - self.instance_norm_1 = nn.InstanceNorm2d(outputs, affine=False, eps=1e-8) - if temporal_w: - self.style_1 = sn(ln.Conv1d(latent_size, 2 * outputs,1,1,0, gain=1),use_sn=PARTIAL_SN) - self.style_2 = sn(ln.Conv1d(latent_size, 2 * outputs,1,1,0, gain=1),use_sn=PARTIAL_SN) - else: - self.style_1 = sn(ln.Linear(latent_size, 2 * outputs, gain=1),use_sn=PARTIAL_SN) - self.style_2 = sn(ln.Linear(latent_size, 2 * outputs, gain=1),use_sn=PARTIAL_SN) + # self.instance_norm_1 = 
nn.InstanceNorm2d(outputs, affine=False, eps=1e-8) + # if temporal_w: + # self.style_1 = sn(ln.Conv1d(latent_size, 2 * outputs,1,1,0, gain=1),use_sn=PARTIAL_SN) + # self.style_2 = sn(ln.Conv1d(latent_size, 2 * outputs,1,1,0, gain=1),use_sn=PARTIAL_SN) + # else: + # self.style_1 = sn(ln.Linear(latent_size, 2 * outputs, gain=1),use_sn=PARTIAL_SN) + # self.style_2 = sn(ln.Linear(latent_size, 2 * outputs, gain=1),use_sn=PARTIAL_SN) + self.style_1 = AdaIN(latent_size,outputs,temporal_w=temporal_w,global_w=global_w,temporal_global_cat=temporal_global_cat) + self.style_2 = AdaIN(latent_size,outputs,temporal_w=temporal_w,global_w=global_w,temporal_global_cat=temporal_global_cat) self.conv_2 = sn(ln.Conv2d(outputs, outputs, 3, 1, 1, bias=False),use_sn=PARTIAL_SN) self.noise_weight_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1)) self.noise_weight_2.data.zero_() self.bias_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1)) - self.instance_norm_2 = nn.InstanceNorm2d(outputs, affine=False, eps=1e-8) + # self.instance_norm_2 = nn.InstanceNorm2d(outputs, affine=False, eps=1e-8) if residual and has_first_conv: if inputs==outputs: @@ -390,7 +609,7 @@ def __init__(self, inputs, outputs, latent_size, has_first_conv=True, fused_scal self.bias_1.zero_() self.bias_2.zero_() - def forward(self, x, s1, s2, noise): + def forward(self, x, s1, s2, noise, s1_global=None, s2_global=None): if self.has_first_conv: if self.residual: x = F.leaky_relu(x) @@ -412,8 +631,12 @@ def forward(self, x, s1, s2, noise): x = x + s * torch.exp(-x * x / (2.0 * s * s)) / math.sqrt(2 * math.pi) * 0.8 x = x + self.bias_1 - x = self.instance_norm_1(x) - x = style_mod(x, self.style_1(s1)) + # x = self.instance_norm_1(x) + # x = style_mod(x, self.style_1(s1)) + if self.temporal_w and self.global_w: + x = self.style_1(x,s1,s1_global) + else: + x = self.style_1(x,s1) x = F.leaky_relu(x, 0.2) @@ -428,13 +651,16 @@ def forward(self, x, s1, s2, noise): tensor2=torch.randn([x.shape[0], 1, x.shape[2], x.shape[3]])) else: s = math.pow(self.layer + 1, 0.5) - x = x + s * torch.exp(-x * x / (2.0 * s * s)) / math.sqrt(2 * math.pi) * 0.8 + x = x + s * torch.exp(-x * x / (2.0 * s * s)) / math.sqrt(2 * math.pi) * 0.8 x = x + self.bias_2 - x = self.instance_norm_2(x) - - x = style_mod(x, self.style_2(s2)) + # x = self.instance_norm_2(x) + # x = style_mod(x, self.style_2(s2)) + if self.temporal_w and self.global_w: + x = self.style_2(x,s2,s2_global) + else: + x = self.style_2(x,s2) if self.residual: if self.has_first_conv: @@ -444,39 +670,412 @@ def forward(self, x, s1, s2, noise): return x +class DemodDecodeBlock(nn.Module): + def __init__(self, inputs, outputs, latent_size, has_first_conv=True, fused_scale = True, layer=0,temporal_w=False,attention=False,attentional_style=False,heads=1,channels=1): + super(DemodDecodeBlock, self).__init__() + self.has_first_conv = has_first_conv + self.inputs = inputs + self.has_first_conv = has_first_conv + self.temporal_w = temporal_w + self.attention = attention + self.layer = layer + if has_first_conv: + if fused_scale: + self.conv1 = ln.StyleConv2dtest(inputs, outputs, kernel_size=3, latent_size=latent_size, stride=2 , padding=1, + bias=True,upsample=True,temporal_w=temporal_w,transform_kernel=True,transpose = True) + else: + self.conv1 = ln.StyleConv2dtest(inputs, outputs, kernel_size=3, latent_size=latent_size, stride=1 , padding=1, + bias=True,upsample=True,temporal_w=temporal_w,transform_kernel=False,transpose = False) + self.conv2 = ln.StyleConv2dtest(outputs, outputs, kernel_size=3, latent_size=latent_size, 
stride=1 , padding=1, + bias=True,upsample=False,temporal_w=temporal_w,transform_kernel=False) + else: + self.conv1 = ln.StyleConv2dtest(inputs, outputs, kernel_size=3, latent_size=latent_size, stride=1 , padding=1, + bias=True,upsample=False,temporal_w=temporal_w,transform_kernel=False) + self.skip = ToRGB(outputs,channels,style=False,residual=False,temporal_w=temporal_w,latent_size=latent_size) + # self.skip = ToRGB(outputs,channels,style=True,residual=False,temporal_w=temporal_w,latent_size=latent_size) + if attention: + self.att = Attention(outputs,temporal_w=temporal_w,attentional_style=attentional_style,decoding=True,latent_size=latent_size,heads=heads,demod=True) + self.blur = Blur(channels) + + def forward(self,x,y,w,noise): + x = F.leaky_relu(self.conv1(x,w,noise=noise),0.2) + if not noise: + s = math.pow(self.layer + 1, 0.5) + x = x + s * torch.exp(-x * x / (2.0 * s * s)) / math.sqrt(2 * math.pi) * 0.8 + if self.has_first_conv: + x = F.leaky_relu(self.conv2(x,w,noise=noise),0.2) + if not noise: + x = x + s * torch.exp(-x * x / (2.0 * s * s)) / math.sqrt(2 * math.pi) * 0.8 + if self.attention: + x = F.leaky_relu(self.att(x,w)) + skip = self.skip(x,w) + if y is not None: + y = upscale2d(y) + y = self.blur(y) + # y = F.interpolate(y,scale_factor=2,mode='bilinear') + return (y+skip, x) if (y is not None) else (skip,x) + + +class FromECoG(nn.Module): + def __init__(self, outputs,residual=False): + super().__init__() + self.residual=residual + self.from_ecog = sn(ln.Conv3d(1, outputs, [9,1,1], 1, [4,0,0])) + + def forward(self, x): + x = self.from_ecog(x) + if not self.residual: + x = F.leaky_relu(x, 0.2) + return x class FromRGB(nn.Module): - def __init__(self, channels, outputs,residual=False): + def __init__(self, channels, outputs,style = False,residual=False,temporal_w=False,latent_size=None): super(FromRGB, self).__init__() self.residual=residual self.from_rgb = sn(ln.Conv2d(channels, outputs, 1, 1, 0)) + self.style = style + self.temporal_w = temporal_w + if style: + self.stylelayer = ToWLatent(outputs,latent_size,temporal_w=temporal_w) def forward(self, x): x = self.from_rgb(x) + if self.style: + w = self.stylelayer(x) if not self.residual: x = F.leaky_relu(x, 0.2) - return x + return x if not self.style else (x, w) class ToRGB(nn.Module): - def __init__(self, inputs, channels,residual=False): + def __init__(self, inputs, channels,style = False,residual=False,temporal_w=False,latent_size=None): super(ToRGB, self).__init__() self.inputs = inputs self.channels = channels self.residual = residual - self.to_rgb = sn(ln.Conv2d(inputs, channels, 1, 1, 0, gain=0.03),use_sn=PARTIAL_SN) + self.style = style + if style: + self.to_rgb = ln.StyleConv2dtest(inputs, channels, kernel_size=1, latent_size=latent_size, stride=1 , padding=0, gain=0.03, + bias=True,upsample=False,temporal_w=temporal_w,transform_kernel=False,demod=False) + else: + self.to_rgb = sn(ln.Conv2d(inputs, channels, 1, 1, 0, gain=0.03),use_sn=PARTIAL_SN) - def forward(self, x): + def forward(self, x,w=None): if self.residual: x = F.leaky_relu(x, 0.2) - x = self.to_rgb(x) + x = self.to_rgb(x,w) if (self.style and (w is not None) ) else self.to_rgb(x) return x +@ECOG_ENCODER.register("ECoGMappingDilation") +class ECoGMapping_Dilation(nn.Module): + def __init__(self, latent_size,average_w = False,temporal_w=False,global_w=True,attention=None,temporal_samples=None,attentional_style=False,heads=1): + super(ECoGMapping_Dilation, self).__init__() + self.temporal_w = temporal_w + self.global_w = global_w + self.from_ecog = 
FromECoG(16,residual=True) + self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,dilation=[2,1,1]) + self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,dilation = [4,1,1]) + self.norm_mask = nn.GroupNorm(32,64) + self.mask = ln.Conv3d(64,1,[3,1,1],1,[1,0,0]) + # self.mask = ln.Conv3d(64,1,[3,1,1],1,[4,0,0],dilation = [4,1,1]) + self.conv3 = ECoGMappingBlock(64,128,[3,3,3],residual=True,dilation = [8,2,2]) + self.conv4 = ECoGMappingBlock(128,256,[3,3,3],residual=True,dilation = [16,4,4]) + self.norm = nn.GroupNorm(32,256) + self.conv5 = ln.Conv1d(256,256,3,1,16,dilation=16) + if self.temporal_w: + self.norm2 = nn.GroupNorm(32,256) + self.conv6 = ln.Conv1d(256,256,3,1,1) + self.norm3 = nn.GroupNorm(32,256) + self.conv7 = ln.Conv1d(256,latent_size,3,1,1) + if self.global_w: + self.linear1 = ln.Linear(256*8,512) + self.linear2 = ln.Linear(512,latent_size) + def forward(self,ecog,mask_prior): + x_all_global = [] + x_all_local = [] + for d in range(len(ecog)): + x = ecog[d] + x = x.reshape([-1,1,x.shape[1],15,15]) + mask_prior_d = mask_prior[d].reshape(-1,1,1,15,15) + x = self.from_ecog(x) + x = self.conv1(x) + x = self.conv2(x) + mask = torch.sigmoid(self.mask(F.leaky_relu(self.norm_mask(x),0.2))) + mask = mask[:,:,8:-8] + if mask_prior is not None: + mask = mask*mask_prior_d + x = x[:,:,8:-8] + x = x*mask + x = self.conv3(x) + x = self.conv4(x) + x = x.max(-1)[0].max(-1)[0] + x_common = self.conv5(F.leaky_relu(self.norm(x),0.2)) + if self.global_w: + x_global = F.max_pool1d(x_common,16,16) + x_global = x_global.flatten(1) + x_global = self.linear1(F.leaky_relu(x_global,0.2)) + x_global = self.linear2(F.leaky_relu(x_global,0.2)) + x_global = F.leaky_relu(x_global,0.2) + x_all_global += [x_global] + if self.temporal_w: + x_local = self.conv6(F.leaky_relu(self.norm2(x_common),0.2)) + x_local = self.conv7(F.leaky_relu(self.norm3(x_local),0.2)) + x_local = F.leaky_relu(x_local,0.2) + x_all_local += [x_local] + if self.global_w and self.temporal_w: + x_all = (torch.cat(x_all_local,dim=0),torch.cat(x_all_global,dim=0)) + else: + if self.temporal_w: + x_all = torch.cat(x_all_local,dim=0) + else: + x_all = torch.cat(x_all_global,dim=0) + return x_all + +@ECOG_ENCODER.register("ECoGMappingBottleneck") +class ECoGMapping_Bottleneck(nn.Module): + def __init__(self, latent_size,average_w = False,temporal_w=False,global_w=True,attention=None,temporal_samples=None,attentional_style=False,heads=1): + super(ECoGMapping_Bottleneck, self).__init__() + self.temporal_w = temporal_w + self.global_w = global_w + self.from_ecog = FromECoG(16,residual=True) + self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,resample = [2,1,1]) + self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,resample = [2,1,1]) + self.norm_mask = nn.GroupNorm(32,64) + self.mask = ln.Conv3d(64,1,[3,1,1],1,[1,0,0]) + self.conv3 = ECoGMappingBlock(64,128,[3,3,3],residual=True,resample = [2,2,2]) + self.conv4 = ECoGMappingBlock(128,256,[3,3,3],residual=True,resample = [2,2,2]) + self.norm = nn.GroupNorm(32,256) + self.conv5 = ln.Conv1d(256,256,3,1,1) + if self.temporal_w: + self.norm2 = nn.GroupNorm(32,256) + self.conv6 = ln.ConvTranspose1d(256, 128, 3, 2, 1, transform_kernel=True) + self.norm3 = nn.GroupNorm(32,128) + self.conv7 = ln.ConvTranspose1d(128, 64, 3, 2, 1, transform_kernel=True) + self.norm4 = nn.GroupNorm(32,64) + self.conv8 = ln.ConvTranspose1d(64, 32, 3, 2, 1, transform_kernel=True) + self.norm5 = nn.GroupNorm(32,32) + self.conv9 = ln.ConvTranspose1d(32, latent_size, 3, 2, 1, transform_kernel=True) + if 
self.global_w: + self.linear1 = ln.Linear(256,128) + self.linear2 = ln.Linear(128,latent_size) + def forward(self,ecog,mask_prior): + x_all_global = [] + x_all_local = [] + for d in range(len(ecog)): + x = ecog[d] + x = x.reshape([-1,1,x.shape[1],15,15]) + mask_prior_d = mask_prior[d].reshape(-1,1,1,15,15) + x = self.from_ecog(x) + x = self.conv1(x) + x = self.conv2(x) + mask = torch.sigmoid(self.mask(F.leaky_relu(self.norm_mask(x),0.2))) + mask = mask[:,:,2:-2] + if mask_prior is not None: + mask = mask*mask_prior_d + x = x[:,:,2:-2] + x = x*mask + x = self.conv3(x) + x = self.conv4(x) + x = x.max(-1)[0].max(-1)[0] + x_common = self.conv5(F.leaky_relu(self.norm(x),0.2)) + if self.global_w: + x_global = x_common.max(-1)[0] + x_global = self.linear1(F.leaky_relu(x_global,0.2)) + x_global = self.linear2(F.leaky_relu(x_global,0.2)) + x_global = F.leaky_relu(x_global,0.2) + x_all_global += [x_global] + if self.temporal_w: + x_local = self.conv6(F.leaky_relu(self.norm2(x_common),0.2)) + x_local = self.conv7(F.leaky_relu(self.norm3(x_local),0.2)) + x_local = self.conv8(F.leaky_relu(self.norm4(x_local),0.2)) + x_local = self.conv9(F.leaky_relu(self.norm5(x_local),0.2)) + x_local = F.leaky_relu(x_local,0.2) + x_all_local += [x_local] + if self.global_w and self.temporal_w: + x_all = (torch.cat(x_all_local,dim=0),torch.cat(x_all_global,dim=0)) + else: + if self.temporal_w: + x_all = torch.cat(x_all_local,dim=0) + else: + x_all = torch.cat(x_all_global,dim=0) + return x_all + +@ECOG_ENCODER.register("ECoGMappingDefault") +class ECoGMapping(nn.Module): + def __init__(self, latent_size,average_w = False,temporal_w=False,global_w=True,attention=None,temporal_samples=None,attentional_style=False,heads=1): + super(ECoGMapping, self).__init__() + self.temporal_w = temporal_w + self.global_w = global_w + self.from_ecog = FromECoG(16,residual=True) + self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,resample = [2,1,1]) + self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,resample = [2,1,1]) + self.norm_mask = nn.GroupNorm(32,64) + self.mask = ln.Conv3d(64,1,[3,1,1],1,[1,0,0]) + self.conv3 = ECoGMappingBlock(64,128,[3,3,3],residual=True,resample = [2,2,2]) + self.conv4 = ECoGMappingBlock(128,256,[3,3,3],residual=True,resample = [2,2,2]) + self.norm = nn.GroupNorm(32,256) + self.conv5 = ln.Conv1d(256,256,3,1,1) + if self.temporal_w: + self.norm2 = nn.GroupNorm(32,256) + self.conv6 = ln.Conv1d(256,256,3,1,1) + self.norm3 = nn.GroupNorm(32,256) + self.conv7 = ln.Conv1d(256,latent_size,3,1,1) + if self.global_w: + self.linear1 = ln.Linear(256*8,512) + self.linear2 = ln.Linear(512,latent_size,gain=1) + def forward(self,ecog,mask_prior): + x_all_global = [] + x_all_local = [] + for d in range(len(ecog)): + x = ecog[d] + x = x.reshape([-1,1,x.shape[1],15,15]) + mask_prior_d = mask_prior[d].reshape(-1,1,1,15,15) + x = self.from_ecog(x) + x = self.conv1(x) + x = self.conv2(x) + mask = torch.sigmoid(self.mask(F.leaky_relu(self.norm_mask(x),0.2))) + mask = mask[:,:,2:-2] + if mask_prior is not None: + mask = mask*mask_prior_d + x = x[:,:,2:-2] + x = x*mask + x = self.conv3(x) + x = self.conv4(x) + x = x.max(-1)[0].max(-1)[0] + x_common = self.conv5(F.leaky_relu(self.norm(x),0.2)) + if self.global_w: + x_global = x_common.flatten(1) + x_global = self.linear1(F.leaky_relu(x_global,0.2)) + x_global = self.linear2(F.leaky_relu(x_global,0.2)) + x_all_global += [x_global] + if self.temporal_w: + x_local = self.conv6(F.leaky_relu(self.norm2(x_common),0.2)) + x_local = 
self.conv7(F.leaky_relu(self.norm3(x_local),0.2)) + x_all_local += [x_local] + if self.global_w and self.temporal_w: + x_all = (torch.cat(x_all_local,dim=0),torch.cat(x_all_global,dim=0)) + else: + if self.temporal_w: + x_all = torch.cat(x_all_local,dim=0) + else: + x_all = torch.cat(x_all_global,dim=0) + return x_all + + + + +@ENCODERS.register("EncoderDemod") +class Encoder_Demod(nn.Module): + def __init__(self, startf, maxf, layer_count, latent_size, channels=3,average_w = False,temporal_w=False,residual=False,attention=None,temporal_samples=None,spec_chans=None,attentional_style=False,heads=1): + super(Encoder_Demod, self).__init__() + self.maxf = maxf + self.startf = startf + self.layer_count = layer_count + self.channels = channels + self.latent_size = latent_size + self.average_w = average_w + self.temporal_w = temporal_w + self.attentional_style = attentional_style + mul = 2 + inputs = startf + self.encode_block = nn.ModuleList() + self.attention_block = nn.ModuleList() + resolution = 2 ** (self.layer_count + 1) + for i in range(self.layer_count): + outputs = min(self.maxf, startf * mul) + apply_attention = attention and attention[self.layer_count-i-1] + current_spec_chans = spec_chans // 2**i + current_temporal_samples = temporal_samples // 2**i + last = i==(self.layer_count-1) + fused_scale = resolution >= 128 + resolution //= 2 + block = DemodEncodeBlock(inputs, outputs, latent_size, last,temporal_w=temporal_w,fused_scale=fused_scale,resample=True,temporal_samples=current_temporal_samples,spec_chans=current_spec_chans, + attention=apply_attention,attentional_style=attentional_style,heads=heads,channels=channels) + #print("encode_block%d %s styles out: %d" % ((i + 1), millify(count_parameters(block)), inputs)) + self.encode_block.append(block) + inputs = outputs + mul *= 2 + + def encode(self, spec, lod): + if self.temporal_w: + styles = torch.zeros(spec.shape[0], 1, self.latent_size,128) + else: + styles = torch.zeros(spec.shape[0], 1, self.latent_size) + + x = None + for i in range(self.layer_count - lod - 1, self.layer_count): + spec, x, w = self.encode_block[i](spec,x) + if self.temporal_w and i!=0: + w = F.interpolate(w,scale_factor=2**i) + styles[:, 0] += w + if self.average_w: + styles /= (lod+1) + return styles + + def forward(self, x, lod, blend): + if blend == 1: + return self.encode(x, lod) + else: + return self.encode2(x, lod, blend) + + +@ENCODERS.register("EncoderFormant") +class FormantEncoder(nn.Module): + def __init__(self, n_mels=64, n_formants=4, k=30): + super(FormantEncoder, self).__init__() + self.n_mels = n_mels + self.conv1 = ln.Conv1d(n_mels,64,3,1,1) + self.norm1 = nn.GroupNorm(32,64) + self.conv2 = ln.Conv1d(64,128,3,1,1) + self.norm2 = nn.GroupNorm(32,128) + + self.conv_fundementals = ln.Conv1d(128,128,3,1,1) + self.norm_fundementals = nn.GroupNorm(32,128) + self.conv_f0 = ln.Conv1d(128,1,1,1,0) + self.conv_amplitudes = ln.Conv1d(128,2,1,1,0) + # self.conv_loudness = ln.Conv1d(128,1,1,1,0) + + self.conv_formants = ln.Conv1d(128,128,3,1,1) + self.norm_formants = nn.GroupNorm(32,128) + self.conv_formants_freqs = ln.Conv1d(128,n_formants,1,1,0) + self.conv_formants_bandwidth = ln.Conv1d(128,n_formants,1,1,0) + self.conv_formants_amplitude = ln.Conv1d(128,n_formants,1,1,0) + + self.amplifier = Parameter(torch.Tensor(1)) + with torch.no_grad(): + nn.init.constant_(self.amplifier,1.0) + + def forward(self,x): + x = x.squeeze(dim=1).permute(0,2,1) #B * f * T + loudness = torch.mean(x*0.5+0.5,dim=1,keepdim=True) + loudness = F.softplus(self.amplifier)*loudness 
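+        # [editor's sketch, not part of the original patch]
+        # `x` here is B x n_mels x T in [-1, 1]; x*0.5+0.5 maps it back to
+        # [0, 1] and the mean over the mel axis (dim=1) gives one loudness
+        # value per frame, shape B x 1 x T. F.softplus(a) = log(1 + exp(a))
+        # keeps the learned scalar gain strictly positive, so loudness can be
+        # rescaled but never sign-flipped. Standalone equivalent of the two
+        # lines above, on dummy data (x_demo/loud are illustrative names):
+        #   x_demo = torch.rand(4, 64, 128) * 2 - 1               # B x mel x T
+        #   loud = (x_demo * 0.5 + 0.5).mean(dim=1, keepdim=True) # B x 1 x T
+        #   loud = F.softplus(torch.tensor(1.0)) * loud           # positive gain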
+        x = F.leaky_relu(self.norm1(self.conv1(x)),0.2)
+        x_common = F.leaky_relu(self.norm2(self.conv2(x)),0.2)
+
+        # loudness = F.relu(self.conv_loudness(x_common))
+        amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1)
+
+        x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2)
+        # f0 = F.sigmoid(self.conv_f0(x_fundementals))
+        # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz
+        f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz
+        # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice
+        # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice
+        x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2)
+        formants_freqs = F.sigmoid(self.conv_formants_freqs(x_formants))
+        formants_freqs = torch.cumsum(formants_freqs,dim=1)
+        formants_freqs = formants_freqs
+        # formants_freqs = formants_freqs + f0
+        formants_bandwidth = F.sigmoid(self.conv_formants_bandwidth(x_formants))
+        formants_amplitude = F.softmax(self.conv_formants_amplitude(x_formants),dim=1)
+
+        return f0,loudness,amplitudes,formants_freqs,formants_bandwidth,formants_amplitude
+
 @ENCODERS.register("EncoderDefault")
 class Encoder_old(nn.Module):
-    def __init__(self, startf, maxf, layer_count, latent_size, channels=3,average_w = False,temporal_w=False,residual=False,attention=None,temporal_samples=None,spec_chans=None,attentional_style=False,heads=1):
+    def __init__(self, startf, maxf, layer_count, latent_size, channels=3,average_w = False,temporal_w=False,global_w=True,temporal_global_cat = False,residual=False,attention=None,temporal_samples=None,spec_chans=None,attentional_style=False,heads=1):
         super(Encoder_old, self).__init__()
         self.maxf = maxf
         self.startf = startf
@@ -486,6 +1085,8 @@ def __init__(self, startf, maxf, layer_count, latent_size, channels=3,average_w
         self.latent_size = latent_size
         self.average_w = average_w
         self.temporal_w = temporal_w
+        self.global_w = global_w
+        self.temporal_global_cat = temporal_global_cat
         self.attentional_style = attentional_style
         mul = 2
         inputs = startf
@@ -498,13 +1099,13 @@ def __init__(self, startf, maxf, layer_count, latent_size, channels=3,average_w
             self.from_rgb.append(FromRGB(channels, inputs,residual=residual))
             apply_attention = attention and attention[self.layer_count-i-1]
-            non_local = Attention(inputs,temporal_w=temporal_w,attentional_style=attentional_style,decoding=False,latent_size=latent_size,heads=heads) if apply_attention else None
+            non_local = Attention(inputs,temporal_w=temporal_w,global_w=global_w,temporal_global_cat=temporal_global_cat,attentional_style=attentional_style,decoding=False,latent_size=latent_size,heads=heads) if apply_attention else None
             self.attention_block.append(non_local)
             fused_scale = resolution >= 128
             current_spec_chans = spec_chans // 2**i
             current_temporal_samples = temporal_samples // 2**i
             islast = i==(self.layer_count-1)
-            block = EncodeBlock(inputs, outputs, latent_size, False, islast, fused_scale=fused_scale,temporal_w=temporal_w,residual=residual,resample=True,temporal_samples=current_temporal_samples,spec_chans=current_spec_chans)
+            block = EncodeBlock(inputs, outputs, latent_size, False, islast, 
fused_scale=fused_scale,temporal_w=temporal_w,global_w=global_w,temporal_global_cat=temporal_global_cat,residual=residual,resample=True,temporal_samples=current_temporal_samples,spec_chans=current_spec_chans) resolution //= 2 @@ -515,10 +1116,14 @@ def __init__(self, startf, maxf, layer_count, latent_size, channels=3,average_w mul *= 2 def encode(self, x, lod): - if self.temporal_w: + if self.temporal_w and self.global_w: styles = torch.zeros(x.shape[0], 1, self.latent_size,128) + styles_global = torch.zeros(x.shape[0], 1, self.latent_size) else: - styles = torch.zeros(x.shape[0], 1, self.latent_size) + if self.temporal_w: + styles = torch.zeros(x.shape[0], 1, self.latent_size,128) + else: + styles = torch.zeros(x.shape[0], 1, self.latent_size) x = self.from_rgb[self.layer_count - lod - 1](x) x = F.leaky_relu(x, 0.2) @@ -527,33 +1132,66 @@ def encode(self, x, lod): if self.attention_block[i]: x = self.attention_block[i](x) if self.attentional_style: - x,s = x - x, s1, s2 = self.encode_block[i](x) + if self.temporal_w and self.global_w: + x,s,s_global = x + else: + x,s = x + if self.temporal_w: + s = F.interpolate(s,scale_factor=2**i,mode='linear') + if self.temporal_w and self.global_w: + x, s1, s1_global, s2, s2_global = self.encode_block[i](x) + else: + x, s1, s2 = self.encode_block[i](x) if self.temporal_w and i!=0: - s1 = F.interpolate(s1,scale_factor=2**i) - s2 = F.interpolate(s2,scale_factor=2**i) - styles[:, 0] += s1 + s2 + (s if (self.attention_block[i] and self.attentional_style) else 0) + s1 = F.interpolate(s1,scale_factor=2**i,mode='linear') + s2 = F.interpolate(s2,scale_factor=2**i,mode='linear') + if self.temporal_w and self.global_w: + styles_global[:, 0] += s1_global + s2_global + (s_global if (self.attention_block[i] and self.attentional_style) else 0) + styles[:, 0] += s1 + s2 + (s if (self.attention_block[i] and self.attentional_style) else 0) + if self.temporal_w and self.global_w: + styles_global[:, 0] += s1_global + s2_global + (s_global if (self.attention_block[i] and self.attentional_style) else 0) if self.average_w: styles /= (lod+1) - return styles + if self.temporal_w and self.global_w: + styles_global/=(lod+1) + + if self.temporal_w and self.global_w: + return styles,styles_global + else: + return styles def encode2(self, x, lod, blend): x_orig = x - if self.temporal_w: + if self.temporal_w and self.global_w: styles = torch.zeros(x.shape[0], 1, self.latent_size,128) + styles_global = torch.zeros(x.shape[0], 1, self.latent_size) else: - styles = torch.zeros(x.shape[0], 1, self.latent_size) + if self.temporal_w: + styles = torch.zeros(x.shape[0], 1, self.latent_size,128) + else: + styles = torch.zeros(x.shape[0], 1, self.latent_size) x = self.from_rgb[self.layer_count - lod - 1](x) x = F.leaky_relu(x, 0.2) if self.attention_block[self.layer_count - lod - 1]: x = self.attention_block[self.layer_count - lod - 1](x) if self.attentional_style: - x,s = x - x, s1, s2 = self.encode_block[self.layer_count - lod - 1](x) - if self.temporal_w and i!=0: - s1 = F.interpolate(s1,scale_factor=2**(layer_count - lod - 1)) - s2 = F.interpolate(s2,scale_factor=2**(layer_count - lod - 1)) + if self.temporal_w and self.global_w: + x,s,s_global = x + else: + x,s = x + if self.temporal_w: + s = F.interpolate(s,scale_factor=2**(self.layer_count - lod - 1),mode='linear') + if self.temporal_w and self.global_w: + x, s1, s1_global, s2, s2_global = self.encode_block[self.layer_count - lod - 1](x) + else: + x, s1, s2 = self.encode_block[self.layer_count - lod - 1](x) + if self.temporal_w and 
(self.layer_count - lod - 1)!=0: + s1 = F.interpolate(s1,scale_factor=2**(self.layer_count - lod - 1),mode='linear') + s2 = F.interpolate(s2,scale_factor=2**(self.layer_count - lod - 1),mode='linear') styles[:, 0] += s1 * blend + s2 * blend + (s*blend if (self.attention_block[self.layer_count - lod - 1] and self.attentional_style) else 0) + if self.temporal_w and self.global_w: + styles_global[:, 0] += s1_global * blend + s2_global * blend + (s_global*blend if (self.attention_block[self.layer_count - lod - 1] and self.attentional_style) else 0) + x_prev = F.avg_pool2d(x_orig, 2, 2) @@ -566,15 +1204,30 @@ def encode2(self, x, lod, blend): if self.attention_block[i]: x = self.attention_block[i](x) if self.attentional_style: - x,s = x - x, s1, s2 = self.encode_block[i](x) + if self.temporal_w and self.global_w: + x,s,s_global = x + else: + x,s = x + if self.temporal_w: + s = F.interpolate(s,scale_factor=2**i,mode='linear') + if self.temporal_w and self.global_w: + x, s1, s1_global, s2, s2_global = self.encode_block[i](x) + else: + x, s1, s2 = self.encode_block[i](x) if self.temporal_w and i!=0: - s1 = F.interpolate(s1,scale_factor=2**i) - s2 = F.interpolate(s2,scale_factor=2**i) + s1 = F.interpolate(s1,scale_factor=2**i,mode='linear') + s2 = F.interpolate(s2,scale_factor=2**i,mode='linear') styles[:, 0] += s1 + s2 + (s if (self.attention_block[i] and self.attentional_style) else 0) + if self.temporal_w and self.global_w: + styles_global[:, 0] += s1_global + s2_global + (s_global if (self.attention_block[i] and self.attentional_style) else 0) if self.average_w: styles /= (lod+1) - return styles + if self.temporal_w and self.global_w: + styles_global/=(lod+1) + if self.temporal_w and self.global_w: + return styles,styles_global + else: + return styles def forward(self, x, lod, blend): if blend == 1: @@ -909,10 +1562,147 @@ def forward(self, x, lod, blend): else: return self.encode2(x, lod, blend) +@GENERATORS.register("GeneratorDemod") +class Generator_Demod(nn.Module): + def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels=3, temporal_samples=128,spec_chans=128,temporal_w=False,init_zeros=False,residual=False,attention=None,attentional_style=False,heads=1): + super(Generator_Demod, self).__init__() + self.maxf = maxf + self.startf = startf + self.layer_count = layer_count + + self.channels = channels + self.latent_size = latent_size + self.temporal_w = temporal_w + self.init_zeros = init_zeros + self.attention = attention + self.attentional_style = attentional_style + mul = 2 ** (self.layer_count - 1) + inputs = min(self.maxf, startf * mul) + self.initial_inputs = inputs + self.init_specchans = spec_chans//2**(self.layer_count-1) + self.init_temporalsamples = temporal_samples//2**(self.layer_count-1) + self.layer_to_resolution = [0 for _ in range(layer_count)] + resolution = 2 + + self.style_sizes = [] + + to_rgb = nn.ModuleList() + self.attention_block = nn.ModuleList() + self.decode_block: nn.ModuleList[DemodDecodeBlock] = nn.ModuleList() + for i in range(self.layer_count): + outputs = min(self.maxf, startf * mul) + + has_first_conv = i != 0 + fused_scale = resolution * 2 >= 128 + block = DemodDecodeBlock(inputs, outputs, latent_size, has_first_conv, layer=i,temporal_w=temporal_w,fused_scale=fused_scale,attention=attention and attention[i],attentional_style=attentional_style,heads=heads,channels=channels) + + resolution *= 2 + self.layer_to_resolution[i] = resolution + self.decode_block.append(block) + inputs = outputs + mul //= 2 + + def decode(self, styles, lod, 
noise): + x = torch.randn([styles.shape[0], self.initial_inputs, self.init_temporalsamples, self.init_specchans]) + spec = None + self.std_each_scale = [] + for i in range(lod + 1): + if self.temporal_w and i!=self.layer_count-1: + w1 = F.interpolate(styles[:, 2 * i + 0],scale_factor=2**-(self.layer_count-i-1),mode='linear') + w2 = F.interpolate(styles[:, 2 * i + 1],scale_factor=2**-(self.layer_count-i-1),mode='linear') + else: + w1 = styles[:, 2 * i + 0] + w2 = styles[:, 2 * i + 1] + spec, x = self.decode_block[i](x, spec, w1, noise) + self.std_each_scale.append(spec.std()) + self.std_each_scale = torch.stack(self.std_each_scale) + self.std_each_scale/=self.std_each_scale.sum() + return spec + + def forward(self, styles, lod, blend, noise): + if blend == 1: + return self.decode(styles, lod, noise) + else: + return self.decode2(styles, lod, blend, noise) + + +@GENERATORS.register("GeneratorFormant") +class FormantSysth(nn.Module): + def __init__(self, n_mels=64, k=30): + super(FormantSysth, self).__init__() + self.n_mels = n_mels + self.k = k + self.timbre = Parameter(torch.Tensor(1,1,n_mels)) + self.silient = -1 + with torch.no_grad(): + nn.init.constant_(self.timbre,1.0) + # nn.init.constant_(self.silient,0.0) + + def formant_mask(self,freq,bandwith,amplitude): + # freq, bandwith, amplitude: B*formants*time + freq_cord = torch.arange(self.n_mels) + time_cord = torch.arange(freq.shape[2]) + grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) + grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + freq = freq.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + bandwith = bandwith.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + amplitude = amplitude.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + masks = amplitude*torch.exp(-(grid_freq-freq)**2/(2*bandwith**2)) #B,time,freqchans, formants + masks = masks.unsqueeze(dim=1) #B,1,time,freqchans + return masks + + def mel_scale(self,hz): + return (torch.log2(hz/440)+31/24)*24*self.n_mels/126 + + def inverse_mel_scale(self,mel): + return 440*2**(mel*126/24-31/24) + + def voicing(self,f0): + #f0: B*1*time + freq_cord = torch.arange(self.n_mels) + time_cord = torch.arange(f0.shape[2]) + grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) + grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + f0 = f0.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, 1 + f0 = f0.repeat([1,1,1,self.k]) #B,time,1, self.k + f0 = f0*(torch.arange(self.k)+1).reshape([1,1,1,self.k]) + bandwith = 24.7*(f0*4.37/1000+1) + bandwith_lower = torch.clamp(f0-bandwith/2,min=0.001) + bandwith_upper = f0+bandwith/2 + bandwith = self.mel_scale(bandwith_upper) - self.mel_scale(bandwith_lower) + f0 = self.mel_scale(f0) + # hamonics = torch.exp(-(grid_freq-f0)**2/(2*bandwith**2)) #gaussian + hamonics = (1-((grid_freq-f0)/(3*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(3*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = torch.cos(np.pi*torch.abs(grid_freq-f0)/(4*bandwith))**2*(-torch.sign(torch.abs(grid_freq-f0)/(4*bandwith)-0.5)*0.5+0.5) #hanning + hamonics = (hamonics.sum(dim=-1)*self.timbre).unsqueeze(dim=1) # B,1,T,F + return hamonics + + def unvoicing(self,f0): + return torch.ones([f0.shape[0],1,f0.shape[2],self.n_mels]) + + def forward(self,f0,loudness,amplitudes,freq_formants,bandwidth_formants,amplitude_formants): + # f0: B*1*T, amplitudes: 
B*2(voicing,unvoicing)*T, freq_formants,bandwidth_formants,amplitude_formants: B*formants*T
+        amplitudes = amplitudes.unsqueeze(dim=-1)
+        loudness = loudness.unsqueeze(dim=-1)
+        f0_hz = self.inverse_mel_scale(f0)
+        self.hamonics = self.voicing(f0_hz)
+        self.noise = self.unvoicing(f0_hz)
+        freq_formants = freq_formants*self.n_mels
+        bandwidth_formants = bandwidth_formants*self.n_mels
+        # excitation = amplitudes[:,0:1]*hamonics
+        # excitation = loudness*(amplitudes[:,0:1]*hamonics)
+        self.excitation = loudness*(amplitudes[:,0:1]*self.hamonics + amplitudes[:,-1:]*self.noise)
+        self.mask = self.formant_mask(freq_formants,bandwidth_formants,amplitude_formants)
+        self.mask_sum = self.mask.sum(dim=-1)
+        speech = self.excitation*self.mask_sum + self.silient*torch.ones(self.mask_sum.shape)
+        return speech
+
 @GENERATORS.register("GeneratorDefault")
 class Generator(nn.Module):
-    def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels=3, temporal_samples=128,spec_chans=128,temporal_w=False,init_zeros=False,residual=False,attention=None,attentional_style=False,heads=1):
+    def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels=3, temporal_samples=128,spec_chans=128,temporal_w=False,global_w=True,temporal_global_cat = False,init_zeros=False,residual=False,attention=None,attentional_style=False,heads=1):
         super(Generator, self).__init__()
         self.maxf = maxf
         self.startf = startf
@@ -921,6 +1711,8 @@ def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels
         self.channels = channels
         self.latent_size = latent_size
         self.temporal_w = temporal_w
+        self.global_w=global_w
+        self.temporal_global_cat = temporal_global_cat and (temporal_w and global_w)
         self.init_zeros = init_zeros
         self.attention = attention
         self.attentional_style = attentional_style
@@ -949,7 +1741,7 @@ def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels
             has_first_conv = i != 0
             fused_scale = resolution * 2 >= 128
-            block = DecodeBlock(inputs, outputs, latent_size, has_first_conv, fused_scale=fused_scale, layer=i,temporal_w=temporal_w,residual=residual,resample=True)
+            block = DecodeBlock(inputs, outputs, latent_size, has_first_conv, fused_scale=fused_scale, layer=i,temporal_w=temporal_w, global_w=global_w, temporal_global_cat=temporal_global_cat, residual=residual,resample=True)
             resolution *= 2
             self.layer_to_resolution[i] = resolution
@@ -961,7 +1753,7 @@ def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels
             #print("decode_block%d %s styles in: %dl out resolution: %d" % (
             #    (i + 1), millify(count_parameters(block)), outputs, resolution))
             apply_attention = attention and attention[i]
-            non_local = Attention(outputs,temporal_w=temporal_w,attentional_style=attentional_style,decoding=True,latent_size=latent_size,heads=heads) if apply_attention else None
+            non_local = Attention(outputs,temporal_w=temporal_w, global_w=global_w, temporal_global_cat=temporal_global_cat, attentional_style=attentional_style,decoding=True,latent_size=latent_size,heads=heads) if apply_attention else None
             self.decode_block.append(block)
             self.attention_block.append(non_local)
             inputs = outputs
@@ -970,46 +1762,87 @@ def __init__(self, startf=32, maxf=256, layer_count=3, latent_size=128, channels
         self.to_rgb = to_rgb
 
     def decode(self, styles, lod, noise):
+        if self.temporal_w and self.global_w:
+            styles,styles_global = styles
         x = self.const
-
+        self.std_each_scale = []
         for i in range(lod + 1):
             if self.temporal_w and i!=self.layer_count-1:
-                w1 = 
F.interpolate(styles[:, 2 * i + 0],scale_factor=2**-(self.layer_count-i-1)) - w2 = F.interpolate(styles[:, 2 * i + 1],scale_factor=2**-(self.layer_count-i-1)) + w1 = F.interpolate(styles[:, 2 * i + 0],scale_factor=2**-(self.layer_count-i-1),mode='linear') + w2 = F.interpolate(styles[:, 2 * i + 1],scale_factor=2**-(self.layer_count-i-1),mode='linear') + # if self.temporal_w and self.global_w: + # w1_global = F.interpolate(styles_global[:, 2 * i + 0],scale_factor=2**-(self.layer_count-i-1),mode='linear') + # w2_global = F.interpolate(styles_global[:, 2 * i + 1],scale_factor=2**-(self.layer_count-i-1),mode='linear') else: w1 = styles[:, 2 * i + 0] w2 = styles[:, 2 * i + 1] - x = self.decode_block[i](x, w1, w2, noise) + if self.temporal_w and self.global_w: + w1_global = styles_global[:, 2 * i + 0] + w2_global = styles_global[:, 2 * i + 1] + x = self.decode_block[i](x, w1, w2, noise, w1_global, w2_global) + else: + x = self.decode_block[i](x, w1, w2, noise) if self.attention_block[i]: - x = self.attention_block[i](x,w1) if self.attentional_style else self.attention_block[i](x) + if self.temporal_w and self.global_w: + x = self.attention_block[i](x,w2, w2_global) if self.attentional_style else self.attention_block[i](x) + else: + x = self.attention_block[i](x,w2) if self.attentional_style else self.attention_block[i](x) + self.std_each_scale.append(x.std()) + self.std_each_scale = torch.stack(self.std_each_scale) + self.std_each_scale/=self.std_each_scale.sum() x = self.to_rgb[lod](x) return x def decode2(self, styles, lod, blend, noise): + if self.temporal_w and self.global_w: + styles,styles_global = styles x = self.const for i in range(lod): if self.temporal_w and i!=self.layer_count-1: - w1 = F.interpolate(styles[:, 2 * i + 0],scale_factor=2**-(self.layer_count-i-1)) - w2 = F.interpolate(styles[:, 2 * i + 1],scale_factor=2**-(self.layer_count-i-1)) + w1 = F.interpolate(styles[:, 2 * i + 0],scale_factor=2**-(self.layer_count-i-1),mode='linear') + w2 = F.interpolate(styles[:, 2 * i + 1],scale_factor=2**-(self.layer_count-i-1),mode='linear') + # if self.temporal_w and self.global_w: + # w1_global = F.interpolate(styles_global[:, 2 * i + 0],scale_factor=2**-(self.layer_count-i-1),mode='linear') + # w2_global = F.interpolate(styles_global[:, 2 * i + 1],scale_factor=2**-(self.layer_count-i-1),mode='linear') else: w1 = styles[:, 2 * i + 0] w2 = styles[:, 2 * i + 1] - x = self.decode_block[i](x, w1, w2, noise) + if self.temporal_w and self.global_w: + w1_global = styles_global[:, 2 * i + 0] + w2_global = styles_global[:, 2 * i + 1] + x = self.decode_block[i](x, w1, w2, noise, w1_global, w2_global) + else: + x = self.decode_block[i](x, w1, w2, noise) if self.attention_block[i]: - x = self.attention_block[i](x,w1) if self.attentional_style else self.attention_block[i](x) + if self.temporal_w and self.global_w: + x = self.attention_block[i](x,w2,w2_global) if self.attentional_style else self.attention_block[i](x) + else: + x = self.attention_block[i](x,w2) if self.attentional_style else self.attention_block[i](x) x_prev = self.to_rgb[lod - 1](x) if self.temporal_w and lod!=self.layer_count-1: - w1 = F.interpolate(styles[:, 2 * lod + 0],scale_factor=2**-(self.layer_count-lod-1)) - w2 = F.interpolate(styles[:, 2 * lod + 1],scale_factor=2**-(self.layer_count-lod-1)) + w1 = F.interpolate(styles[:, 2 * lod + 0],scale_factor=2**-(self.layer_count-lod-1),mode='linear') + w2 = F.interpolate(styles[:, 2 * lod + 1],scale_factor=2**-(self.layer_count-lod-1),mode='linear') + if self.temporal_w and self.global_w: 
+ w1_global = F.interpolate(styles_global[:, 2 * lod + 0],scale_factor=2**-(self.layer_count-lod-1),mode='linear') + w2_global = F.interpolate(styles_global[:, 2 * lod + 1],scale_factor=2**-(self.layer_count-lod-1),mode='linear') else: w1 = styles[:, 2 * lod + 0] w2 = styles[:, 2 * lod + 1] - x = self.decode_block[lod](x, w1, w2, noise) + if self.temporal_w and self.global_w: + w1_global = styles_global[:, 2 * lod + 0] + w2_global = styles_global[:, 2 * lod + 1] + if self.temporal_w and self.global_w: + x = self.decode_block[lod](x, w1, w2, noise, w1_global, w2_global) + else: + x = self.decode_block[lod](x, w1, w2, noise) if self.attention_block[lod]: - x = self.attention_block[lod](x,w1) if self.attentional_style else self.attention_block[lod](x) + if self.temporal_w and self.global_w: + x = self.attention_block[lod](x,w2,w2_global) if self.attentional_style else self.attention_block[lod](x) + else: + x = self.attention_block[lod](x,w2) if self.attentional_style else self.attention_block[lod](x) x = self.to_rgb[lod](x) needed_resolution = self.layer_to_resolution[lod] @@ -1159,10 +1992,11 @@ def forward(self, z): @MAPPINGS.register("MappingToLatent") class VAEMappingToLatent_old(nn.Module): - def __init__(self, mapping_layers=5, latent_size=256, dlatent_size=256, mapping_fmaps=256,temporal_w=False): + def __init__(self, mapping_layers=5, latent_size=256, dlatent_size=256, mapping_fmaps=256,temporal_w=False, global_w=True): super(VAEMappingToLatent_old, self).__init__() self.temporal_w = temporal_w - inputs = latent_size + self.global_w = global_w + inputs = 2* latent_size if (temporal_w and global_w) else latent_size self.mapping_layers = mapping_layers self.map_blocks: nn.ModuleList[MappingBlock] = nn.ModuleList() for i in range(mapping_layers): @@ -1176,9 +2010,11 @@ def __init__(self, mapping_layers=5, latent_size=256, dlatent_size=256, mapping_ #print("dense %d %s" % ((i + 1), millify(count_parameters(block)))) if temporal_w: self.Linear = sn(ln.Linear(inputs*8,2 * dlatent_size,lrmul=0.1)) - def forward(self, x): - if x.dim()==3: - x = torch.mean(x,dim=2) + def forward(self, x, x_global=None): + # if x.dim()==3: + # x = torch.mean(x,dim=2) + if (self.temporal_w and self.global_w): + x = torch.cat((x,x_global.unsqueeze(2).repeat(1,1,x.shape[2])),dim=1) for i in range(self.mapping_layers): x = self.map_blocks[i](x) if self.temporal_w: @@ -1241,13 +2077,16 @@ def forward(self, x): @MAPPINGS.register("MappingFromLatent") class VAEMappingFromLatent(nn.Module): - def __init__(self, num_layers, mapping_layers=5, latent_size=256, dlatent_size=256, mapping_fmaps=256,temporal_w=False): + def __init__(self, num_layers, mapping_layers=5, latent_size=256, dlatent_size=256, mapping_fmaps=256,temporal_w=False,global_w = True): super(VAEMappingFromLatent, self).__init__() self.mapping_layers = mapping_layers self.num_layers = num_layers self.temporal_w = temporal_w + self.global_w = global_w self.latent_size = latent_size self.map_blocks: nn.ModuleList[MappingBlock] = nn.ModuleList() + if temporal_w and global_w: + self.map_blocks_global: nn.ModuleList[MappingBlock] = nn.ModuleList() if temporal_w: self.Linear = sn(ln.Linear(dlatent_size,8*(latent_size//8)),use_sn=PARTIAL_SN) inputs = latent_size//8 @@ -1259,8 +2098,16 @@ def __init__(self, num_layers, mapping_layers=5, latent_size=256, dlatent_size=2 inputs = outputs self.map_blocks.append(block) #print("dense %d %s" % ((i + 1), millify(count_parameters(block)))) + + if temporal_w and global_w: + inputs = dlatent_size + for i in 
range(mapping_layers): + outputs = latent_size if i == mapping_layers - 1 else mapping_fmaps + block_global = MappingBlock(inputs, outputs, stride = i%2+1, lrmul=0.1,temporal_w=False,transform_kernel=True if i%2==1 else False, transpose=True,use_sn=PARTIAL_SN) + inputs = outputs + self.map_blocks_global.append(block_global) - def forward(self, x): + def forward(self, x,x_global=None): x = pixel_norm(x) if self.temporal_w: x = self.Linear(x) @@ -1268,10 +2115,17 @@ def forward(self, x): x = x.view(x.shape[0],self.latent_size//8,8) for i in range(self.mapping_layers): x = self.map_blocks[i](x) - if self.temporal_w: - return x.view(x.shape[0], 1, x.shape[1],x.shape[2]).repeat(1, self.num_layers, 1,1) + + if self.temporal_w and self.global_w: + x_global = pixel_norm(x_global) + for i in range(self.mapping_layers): + x_global = self.map_blocks_global[i](x_global) + return x.view(x.shape[0], 1, x.shape[1],x.shape[2]).repeat(1, self.num_layers, 1,1), x_global.view(x_global.shape[0], 1, x_global.shape[1]).repeat(1, self.num_layers, 1) else: - return x.view(x.shape[0], 1, x.shape[1]).repeat(1, self.num_layers, 1) + if self.temporal_w: + return x.view(x.shape[0], 1, x.shape[1],x.shape[2]).repeat(1, self.num_layers, 1,1) + else: + return x.view(x.shape[0], 1, x.shape[1]).repeat(1, self.num_layers, 1) @ENCODERS.register("EncoderFC") diff --git a/registry.py b/registry.py index f6e25be6..cc588777 100644 --- a/registry.py +++ b/registry.py @@ -5,3 +5,4 @@ GENERATORS = Registry() MAPPINGS = Registry() DISCRIMINATORS = Registry() +ECOG_ENCODER = Registry() diff --git a/run.s b/run.s index 5f3fff46..ab8e5ab8 100644 --- a/run.s +++ b/run.s @@ -1,8 +1,8 @@ #!/bin/bash #SBATCH --nodes=1 #SBATCH --ntasks-per-node=1 -#SBATCH --cpus-per-task=2 -#SBATCH --gres=gpu:p40:2 +#SBATCH --cpus-per-task=1 +#SBATCH --gres=gpu:v100:1 #SBATCH --time=60:00:00 #SBATCH --mem=64GB #SBATCH --job-name=myTest diff --git a/tracker.py b/tracker.py index 3fcbe0a4..55969dab 100644 --- a/tracker.py +++ b/tracker.py @@ -51,11 +51,11 @@ def __iadd__(self, value): def reset(self): self.values = [] - def mean(self): + def mean(self,dim=[]): with torch.no_grad(): if len(self.values) == 0: return 0.0 - return float(torch.cat(self.values).mean().item()) + return torch.cat(self.values).mean(dim=dim).numpy() class LossTracker: @@ -87,17 +87,18 @@ def register_means(self, epoch): for key in self.means_over_epochs.keys(): if key in self.tracks: value = self.tracks[key] - self.means_over_epochs[key].append(value.mean()) + self.means_over_epochs[key].append(value.mean(dim=0)) value.reset() else: self.means_over_epochs[key].append(None) with open(os.path.join(self.output_folder, 'log.csv'), mode='w') as csv_file: - fieldnames = ['epoch'] + list(self.tracks.keys()) + fieldnames = ['epoch'] + [key+str(i) for key in list(self.tracks.keys()) for i in range(self.means_over_epochs[key][0].size)] + # fieldnames = ['epoch'] + [list(self.tracks.keys())] writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) writer.writerow(fieldnames) for i in range(len(self.epochs)): - writer.writerow([self.epochs[i]] + [self.means_over_epochs[x][i] for x in self.tracks.keys()]) + writer.writerow([self.epochs[i]] + [self.means_over_epochs[x][i][j] if self.means_over_epochs[x][i].size>1 else self.means_over_epochs[x][i] for x in self.tracks.keys() for j in range(self.means_over_epochs[x][i].size) ]) def __str__(self): result = "" diff --git a/train_alae.py b/train_alae.py index 8094dc5b..1cbd3834 100644 --- a/train_alae.py +++ b/train_alae.py 
@@ -32,8 +32,9 @@ import lod_driver from PIL import Image import numpy as np - -def save_sample(lod2batch, tracker, sample, samplez, x, logger, model, cfg, encoder_optimizer, decoder_optimizer,filename=None): +from torch import autograd +from ECoGDataSet import concate_batch +def save_sample(lod2batch, tracker, sample, samplez, samplez_global, x, logger, model, cfg, encoder_optimizer, decoder_optimizer,filename=None,ecog=None,mask_prior=None,mode='test'): os.makedirs('results', exist_ok=True) logger.info('\n[%d/%d] - ptime: %.2f, %s, blend: %.3f, lr: %.12f, %.12f, max mem: %f",' % ( (lod2batch.current_epoch + 1), cfg.TRAIN.TRAIN_EPOCHS, lod2batch.per_epoch_ptime, str(tracker), @@ -53,8 +54,12 @@ def save_sample(lod2batch, tracker, sample, samplez, x, logger, model, cfg, enco g_rec_all = torch.tensor([]) for i in range(0,sample.shape[0],9): sample_in = sample[i:np.minimum(i+9,sample.shape[0])] + if ecog is not None: + ecog_in = [ecog[j][i:np.minimum(i+9,sample.shape[0])] for j in range(len(ecog))] + mask_prior_in = [mask_prior[j][i:np.minimum(i+9,sample.shape[0])] for j in range(len(mask_prior))] x_in = x[i:np.minimum(i+9,sample.shape[0])] samplez_in = samplez[i:np.minimum(i+9,sample.shape[0])] + samplez_global_in = samplez_global[i:np.minimum(i+9,sample.shape[0])] while sample_in.shape[2] > needed_resolution: sample_in = F.avg_pool2d(sample_in, 2, 2) assert sample_in.shape[2] == needed_resolution @@ -67,14 +72,20 @@ def save_sample(lod2batch, tracker, sample, samplez, x, logger, model, cfg, enco sample_in = sample_in * blend_factor + sample_in_prev_2x * (1.0 - blend_factor) Z, _ = model.encode(sample_in, lod2batch.lod, blend_factor) - + if cfg.MODEL.TEMPORAL_W and cfg.MODEL.GLOBAL_W: + Z,Z_global = Z if cfg.MODEL.Z_REGRESSION: Z = model.mapping_fl(Z[:, 0]) else: - if cfg.MODEL.TEMPORAL_W: + if cfg.MODEL.TEMPORAL_W and cfg.MODEL.GLOBAL_W: Z = Z.repeat(1, model.mapping_fl.num_layers, 1,1) + Z_global = Z_global.repeat(1, model.mapping_fl.num_layers, 1) + Z = (Z, Z_global) else: - Z = Z.repeat(1, model.mapping_fl.num_layers, 1) + if cfg.MODEL.TEMPORAL_W: + Z = Z.repeat(1, model.mapping_fl.num_layers, 1,1) + else: + Z = Z.repeat(1, model.mapping_fl.num_layers, 1) rec1 = model.decoder(Z, lod2batch.lod, blend_factor, noise=False) rec2 = model.decoder(Z, lod2batch.lod, blend_factor, noise=True) @@ -82,21 +93,41 @@ def save_sample(lod2batch, tracker, sample, samplez, x, logger, model, cfg, enco # rec1 = F.interpolate(rec1, sample.shape[2]) # rec2 = F.interpolate(rec2, sample.shape[2]) # sample_in = F.interpolate(sample_in, sample.shape[2]) - - Z = model.mapping_fl(samplez_in) + + if ecog is not None: + Z = model.ecog_encoder(ecog = ecog_in, mask_prior = mask_prior_in) + if cfg.MODEL.TEMPORAL_W and cfg.MODEL.GLOBAL_W: + Z, Z_global = Z + Z = Z.view(Z.shape[0], 1, Z.shape[1],Z.shape[2]).repeat(1, model.mapping_fl.num_layers, 1, 1) + Z_global = Z_global.view(Z_global.shape[0], 1, Z_global.shape[1]).repeat(1, model.mapping_fl.num_layers, 1) + Z = (Z,Z_global) + else: + if cfg.MODEL.TEMPORAL_W: + Z = Z.view(Z.shape[0], 1, Z.shape[1],Z.shape[2]).repeat(1, model.mapping_fl.num_layers, 1, 1) + else: + Z = Z.view(Z.shape[0], 1, Z.shape[1]).repeat(1, model.mapping_fl.num_layers, 1) + else: + Z = model.mapping_fl(samplez_in,samplez_global_in) g_rec = model.decoder(Z, lod2batch.lod, blend_factor, noise=True) + + # g_rec = model.generate(lod2batch.lod, blend_factor, count=ecog_in[0].shape[0], z=samplez_in.detach(), z_global=samplez_global_in, noise=True,return_styles=False,ecog=ecog_in,mask_prior=mask_prior_in) 
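+            # [editor's sketch, not part of the original patch]
+            # At this point each 9-sample slice yields three reconstructions:
+            #   rec1, rec2 : decoder outputs of the encoded spectrogram,
+            #                without and with per-layer noise injection;
+            #   g_rec      : decoder output driven by the ECoG encoder when
+            #                `ecog` is given, otherwise by the mapped random
+            #                latents samplez_in / samplez_global_in.
+            # The "suploss" printed after the loop is a plain L1 distance:
+            #   torch.mean((g_rec_all - sample_in_all).abs())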
+ + # g_rec = F.interpolate(g_rec, sample.shape[2]) sample_in_all = torch.cat([sample_in_all,sample_in],dim=0) rec1_all = torch.cat([rec1_all,rec1],dim=0) rec2_all = torch.cat([rec2_all,rec2],dim=0) g_rec_all = torch.cat([g_rec_all,g_rec],dim=0) + + print(mode+' suploss is',torch.mean((g_rec_all-sample_in_all).abs())) resultsample = torch.cat([sample_in_all, rec1_all, rec2_all, g_rec_all], dim=0) if cfg.DATASET.BCTS: resultsample = resultsample.transpose(-2,-1) - @utils.async_func + # @utils.async_func def save_pic(x_rec): - tracker.register_means(lod2batch.current_epoch + lod2batch.iteration * 1.0 / lod2batch.get_dataset_size()) + if mode=='test': + tracker.register_means(lod2batch.current_epoch + lod2batch.iteration * 1.0 / lod2batch.get_dataset_size()) # tracker.plot() result_sample = x_rec * 0.5 + 0.5 @@ -104,11 +135,18 @@ def save_pic(x_rec): if filename: f =filename else: - f = os.path.join(cfg.OUTPUT_DIR, - 'sample_%d_%d.jpg' % ( - lod2batch.current_epoch + 1, - lod2batch.iteration // 1000) - ) + if mode == 'test': + f = os.path.join(cfg.OUTPUT_DIR, + 'sample_%d_%d.jpg' % ( + lod2batch.current_epoch + 1, + lod2batch.iteration // 1000) + ) + else: + f = os.path.join(cfg.OUTPUT_DIR, + 'sample_train_%d_%d.jpg' % ( + lod2batch.current_epoch + 1, + lod2batch.iteration // 1000) + ) print("Saved to %s" % f) # save_image(result_sample, f, nrow=min(32, lod2batch.get_per_GPU_batch_size())) save_image(result_sample, f, nrow=x_rec.shape[0]//4) @@ -129,9 +167,12 @@ def train(cfg, logger, local_rank, world_size, distributed): channels=cfg.MODEL.CHANNELS, generator=cfg.MODEL.GENERATOR, encoder=cfg.MODEL.ENCODER, + ecog_encoder=cfg.MODEL.MAPPING_FROM_ECOG, z_regression=cfg.MODEL.Z_REGRESSION, average_w = cfg.MODEL.AVERAGE_W, temporal_w = cfg.MODEL.TEMPORAL_W, + global_w = cfg.MODEL.GLOBAL_W, + temporal_global_cat = cfg.MODEL.TEMPORAL_GLOBAL_CAT, spec_chans = cfg.DATASET.SPEC_CHANS, temporal_samples = cfg.DATASET.TEMPORAL_SAMPLES, init_zeros = cfg.MODEL.TEMPORAL_W, @@ -144,6 +185,14 @@ def train(cfg, logger, local_rank, world_size, distributed): cycle_weight=cfg.TRAIN.CYCLE_WEIGHT, attentional_style=cfg.MODEL.ATTENTIONAL_STYLE, heads = cfg.MODEL.HEADS, + suploss_on_ecog = cfg.MODEL.SUPLOSS_ON_ECOGF, + less_temporal_feature = cfg.MODEL.LESS_TEMPORAL_FEATURE, + ppl_weight=cfg.MODEL.PPL_WEIGHT, + ppl_global_weight=cfg.MODEL.PPL_GLOBAL_WEIGHT, + ppld_weight=cfg.MODEL.PPLD_WEIGHT, + ppld_global_weight=cfg.MODEL.PPLD_GLOBAL_WEIGHT, + common_z = cfg.MODEL.COMMON_Z, + with_ecog = cfg.MODEL.ECOG, ) model.cuda(local_rank) model.train() @@ -160,11 +209,14 @@ def train(cfg, logger, local_rank, world_size, distributed): channels=cfg.MODEL.CHANNELS, generator=cfg.MODEL.GENERATOR, encoder=cfg.MODEL.ENCODER, + ecog_encoder=cfg.MODEL.MAPPING_FROM_ECOG, z_regression=cfg.MODEL.Z_REGRESSION, average_w = cfg.MODEL.AVERAGE_W, spec_chans = cfg.DATASET.SPEC_CHANS, temporal_samples = cfg.DATASET.TEMPORAL_SAMPLES, temporal_w = cfg.MODEL.TEMPORAL_W, + global_w = cfg.MODEL.GLOBAL_W, + temporal_global_cat = cfg.MODEL.TEMPORAL_GLOBAL_CAT, init_zeros = cfg.MODEL.TEMPORAL_W, residual = cfg.MODEL.RESIDUAL, w_classifier = cfg.MODEL.W_CLASSIFIER, @@ -175,11 +227,19 @@ def train(cfg, logger, local_rank, world_size, distributed): cycle_weight=cfg.TRAIN.CYCLE_WEIGHT, attentional_style=cfg.MODEL.ATTENTIONAL_STYLE, heads = cfg.MODEL.HEADS, + suploss_on_ecog = cfg.MODEL.SUPLOSS_ON_ECOGF, + less_temporal_feature = cfg.MODEL.LESS_TEMPORAL_FEATURE, + ppl_weight=cfg.MODEL.PPL_WEIGHT, + ppl_global_weight=cfg.MODEL.PPL_GLOBAL_WEIGHT, + 
ppld_weight=cfg.MODEL.PPLD_WEIGHT, + ppld_global_weight=cfg.MODEL.PPLD_GLOBAL_WEIGHT, + common_z = cfg.MODEL.COMMON_Z, + with_ecog = cfg.MODEL.ECOG, ) model_s.cuda(local_rank) model_s.eval() model_s.requires_grad_(False) - print(model) + # print(model) if distributed: model = nn.parallel.DistributedDataParallel( model, @@ -194,6 +254,10 @@ def train(cfg, logger, local_rank, world_size, distributed): mapping_tl = model.module.mapping_tl mapping_fl = model.module.mapping_fl dlatent_avg = model.module.dlatent_avg + ppl_mean = model.module.ppl_mean + ppl_d_mean = model.module.ppl_d_mean + if hasattr(model,'ecog_encoder'): + ecog_encoder = model.module.ecog_encoder if cfg.MODEL.W_CLASSIFIER: mapping_tw = model.module.mapping_tw else: @@ -202,6 +266,10 @@ def train(cfg, logger, local_rank, world_size, distributed): mapping_tl = model.mapping_tl mapping_fl = model.mapping_fl dlatent_avg = model.dlatent_avg + ppl_mean = model.ppl_mean + if hasattr(model,'ecog_encoder'): + ecog_encoder = model.ecog_encoder + ppl_d_mean = model.ppl_d_mean if cfg.MODEL.W_CLASSIFIER: mapping_tw = model.mapping_tw @@ -216,10 +284,21 @@ def train(cfg, logger, local_rank, world_size, distributed): arguments = dict() arguments["iteration"] = 0 - decoder_optimizer = LREQAdam([ - {'params': decoder.parameters()}, - {'params': mapping_fl.parameters()} - ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) + if cfg.MODEL.ECOG: + decoder_optimizer = LREQAdam([ + {'params': decoder.parameters()}, + {'params': ecog_encoder.parameters()} + ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) + else: + decoder_optimizer = LREQAdam([ + {'params': decoder.parameters()}, + {'params': mapping_fl.parameters()} + ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) + + if cfg.MODEL.ECOG: + ecog_encoder_optimizer = LREQAdam([ + {'params': ecog_encoder.parameters()} + ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) if cfg.MODEL.W_CLASSIFIER: encoder_optimizer = LREQAdam([ @@ -246,28 +325,37 @@ def train(cfg, logger, local_rank, world_size, distributed): 'generator': decoder, 'mapping_tl': mapping_tl, 'mapping_fl': mapping_fl, - 'dlatent_avg': dlatent_avg + 'dlatent_avg': dlatent_avg, + 'ppl_mean':ppl_mean, + 'ppl_d_mean':ppl_d_mean, } + if hasattr(model,'ecog_encoder'): + model_dict['ecog_encoder'] = ecog_encoder if local_rank == 0: model_dict['discriminator_s'] = model_s.encoder model_dict['generator_s'] = model_s.decoder model_dict['mapping_tl_s'] = model_s.mapping_tl model_dict['mapping_fl_s'] = model_s.mapping_fl + if hasattr(model_s,'ecog_encoder'): + model_dict['ecog_encoder_s'] = model_s.ecog_encoder tracker = LossTracker(cfg.OUTPUT_DIR) + auxiliary = {'encoder_optimizer': encoder_optimizer, + 'decoder_optimizer': decoder_optimizer, + 'scheduler': scheduler, + 'tracker': tracker + } + if cfg.MODEL.ECOG: + auxiliary['ecog_encoder_optimizer']=ecog_encoder_optimizer checkpointer = Checkpointer(cfg, model_dict, - { - 'encoder_optimizer': encoder_optimizer, - 'decoder_optimizer': decoder_optimizer, - 'scheduler': scheduler, - 'tracker': tracker - }, + auxiliary, logger=logger, save=local_rank == 0) - extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,file_name='./training_artifacts/ecog_residual_cycle_attention3264wIN_specchan64_more_attentfeatures_fixINencoderwithaffineture/model_tmp_lod3.pth') + # 
extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=True,file_name='./training_artifacts/ecog_residual_cycle/model_tmp_lod4.pth') + extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_noprogressive_HBw_ppl_ppld_localreg_debug/model_tmp_lod6.pth') logger.info("Starting from epoch: %d" % (scheduler.start_epoch())) arguments.update(extra_checkpoint_data) @@ -275,15 +363,15 @@ def train(cfg, logger, local_rank, world_size, distributed): layer_to_resolution = decoder.layer_to_resolution with open('train_param.json','r') as rfile: param = json.load(rfile) - data_param, train_param, test_param = param['Data'], param['Train'], param['Test'] + # data_param, train_param, test_param = param['Data'], param['Train'], param['Test'] dataset = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS,param=param) dataset_test = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS,train=False,param=param) rnd = np.random.RandomState(3456) - latents = rnd.randn(len(dataset_test.dataset), cfg.MODEL.LATENT_SPACE_SIZE) - samplez = torch.tensor(latents).float().cuda() + # latents = rnd.randn(len(dataset_test.dataset), cfg.MODEL.LATENT_SPACE_SIZE) + # samplez = torch.tensor(latents).float().cuda() - lod2batch = lod_driver.LODDriver(cfg, logger, world_size, dataset_size=len(dataset) * world_size) + lod2batch = lod_driver.LODDriver(cfg, logger, world_size, dataset_size=len(dataset) * world_size, progressive = (not(cfg.FINETUNE.FINETUNE) and cfg.TRAIN.PROGRESSIVE)) if cfg.DATASET.SAMPLES_PATH: path = cfg.DATASET.SAMPLES_PATH @@ -299,10 +387,25 @@ def train(cfg, logger, local_rank, world_size, distributed): x = x[:3] src.append(x) sample = torch.stack(src) + latents = rnd.randn(sample.shape[0], cfg.MODEL.LATENT_SPACE_SIZE) + latents_global = latents if cfg.MODEL.COMMON_Z else rnd.randn(sample.shape[0], cfg.MODEL.LATENT_SPACE_SIZE) + samplez = torch.tensor(latents).float().cuda() + samplez_global = torch.tensor(latents_global).float().cuda() else: dataset_test.reset(cfg.DATASET.MAX_RESOLUTION_LEVEL, len(dataset_test.dataset)) sample_dict_test = next(iter(dataset_test.iterator)) + # sample_dict_test = concate_batch(sample_dict_test) sample_spec_test = sample_dict_test['spkr_re_batch_all'].to('cuda').float() + latents = rnd.randn(sample_spec_test.shape[0], cfg.MODEL.LATENT_SPACE_SIZE) + latents_global = latents if cfg.MODEL.COMMON_Z else rnd.randn(sample_spec_test.shape[0], cfg.MODEL.LATENT_SPACE_SIZE) + samplez = torch.tensor(latents).float().cuda() + samplez_global = torch.tensor(latents_global).float().cuda() + if cfg.MODEL.ECOG: + ecog_test = [sample_dict_test['ecog_re_batch_all'][i].to('cuda').float() for i in range(len(sample_dict_test['ecog_re_batch_all']))] + mask_prior_test = [sample_dict_test['mask_all'][i].to('cuda').float() for i in range(len(sample_dict_test['mask_all']))] + else: + ecog_test = None + mask_prior_test = None # sample = next(make_dataloader(cfg, logger, dataset, 32, local_rank)) # sample = (sample / 127.5 - 1.) 
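The `COMMON_Z` branch in the hunk above is easy to miss: it decides whether the temporal and global style streams are decoded from one shared latent draw or from two independent ones. A minimal sketch of just that sampling convention, assuming the same seeded `rnd = np.random.RandomState(3456)` as in the code above (`sample_latents` itself is a hypothetical helper, not part of the patch):

```python
import numpy as np
import torch

def sample_latents(n, latent_size, common_z, rnd):
    # The temporal style stream always gets a fresh draw.
    latents = rnd.randn(n, latent_size)
    # With COMMON_Z the global stream reuses the same draw, so both styles
    # are decoded from a single z; otherwise the two draws are independent.
    latents_global = latents if common_z else rnd.randn(n, latent_size)
    return (torch.tensor(latents).float().cuda(),
            torch.tensor(latents_global).float().cuda())

# e.g. rnd = np.random.RandomState(3456)
# samplez, samplez_global = sample_latents(8, 128, common_z=True, rnd=rnd)
```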
@@ -334,10 +437,17 @@ def train(cfg, logger, local_rank, world_size, distributed): i = 0 for sample_dict_train in tqdm(iter(dataset.iterator)): + # sample_dict_train = concate_batch(sample_dict_train) i += 1 x_orig = sample_dict_train['spkr_re_batch_all'].to('cuda').float() words = sample_dict_train['word_batch_all'].to('cuda').long() words = words.view(words.shape[0]*words.shape[1]) + if cfg.MODEL.ECOG: + ecog = [sample_dict_train['ecog_re_batch_all'][j].to('cuda').float() for j in range(len(sample_dict_train['ecog_re_batch_all']))] + mask_prior = [sample_dict_train['mask_all'][j].to('cuda').float() for j in range(len(sample_dict_train['mask_all']))] + else: + ecog = None + mask_prior = None with torch.no_grad(): # if x_orig.shape[0] != lod2batch.get_per_GPU_batch_size(): # continue @@ -357,36 +467,41 @@ def train(cfg, logger, local_rank, world_size, distributed): x = x * blend_factor + x_prev_2x * (1.0 - blend_factor) x.requires_grad = True - - encoder_optimizer.zero_grad() - if cfg.MODEL.W_CLASSIFIER: - loss_d,loss_word = model(x, lod2batch.lod, blend_factor, d_train=True, ae=False,words=words) - tracker.update(dict(loss_d=loss_d,loss_word=loss_word)) - (loss_d+loss_word).backward() - else: - loss_d = model(x, lod2batch.lod, blend_factor, d_train=True, ae=False) - tracker.update(dict(loss_d=loss_d)) - loss_d.backward() - encoder_optimizer.step() - + apply_cycle = cfg.MODEL.CYCLE and True + apply_w_classifier = cfg.MODEL.W_CLASSIFIER and True + apply_gp = True + apply_ppl = cfg.MODEL.APPLY_PPL and True + apply_ppl_d = cfg.MODEL.APPLY_PPL_D and True + apply_encoder_guide = (cfg.FINETUNE.ENCODER_GUIDE or cfg.MODEL.W_SUP) and True + apply_sup = cfg.FINETUNE.SPECSUP + + if not (cfg.FINETUNE.FINETUNE): + encoder_optimizer.zero_grad() + loss_d = model(x, lod2batch.lod, blend_factor, tracker = tracker, d_train=True, ae=False,words=words,apply_w_classifier=apply_w_classifier, apply_gp = apply_gp,apply_ppl_d=apply_ppl_d,ecog=ecog,mask_prior=mask_prior) + (loss_d).backward() + encoder_optimizer.step() + + if cfg.MODEL.ECOG: + ecog_encoder_optimizer.zero_grad() decoder_optimizer.zero_grad() - loss_g = model(x, lod2batch.lod, blend_factor, d_train=False, ae=False) - tracker.update(dict(loss_g=loss_g)) - loss_g.backward() - decoder_optimizer.step() - - encoder_optimizer.zero_grad() - decoder_optimizer.zero_grad() - if cfg.MODEL.CYCLE: - lae,lcycle = model(x, lod2batch.lod, blend_factor, d_train=True, ae=True) - tracker.update(dict(lae=lae,lcycle=lcycle)) - (lae+lcycle).backward() - else: - lae = model(x, lod2batch.lod, blend_factor, d_train=True, ae=True) - tracker.update(dict(lae=lae)) + loss_g = model(x, lod2batch.lod, blend_factor, tracker = tracker, d_train=False, ae=False,apply_encoder_guide=apply_encoder_guide,apply_ppl=apply_ppl,ecog=ecog,sup=apply_sup,mask_prior=mask_prior,gan=cfg.MODEL.GAN) + if (cfg.MODEL.ECOG and cfg.MODEL.SUPLOSS_ON_ECOGF) or (cfg.FINETUNE.FINETUNE and cfg.FINETUNE.FIX_GEN ): + loss_g,loss_sup = loss_g + # tracker.update(dict(std_scale=model.decoder.std_each_scale)) + if not (cfg.FINETUNE.FINETUNE and cfg.FINETUNE.FIX_GEN): + (loss_g).backward(retain_graph=True) + decoder_optimizer.step() + if (cfg.MODEL.ECOG and cfg.MODEL.SUPLOSS_ON_ECOGF) or (cfg.FINETUNE.FINETUNE and cfg.FINETUNE.FIX_GEN): + loss_sup.backward() + ecog_encoder_optimizer.step() + + if not cfg.FINETUNE.FINETUNE: + encoder_optimizer.zero_grad() + decoder_optimizer.zero_grad() + lae = model(x, lod2batch.lod, blend_factor, tracker = tracker, d_train=True, 
ae=True,apply_cycle=apply_cycle,ecog=ecog,mask_prior=mask_prior) (lae).backward() - encoder_optimizer.step() - decoder_optimizer.step() + encoder_optimizer.step() + decoder_optimizer.step() if local_rank == 0: betta = 0.5 ** (lod2batch.get_batch_size() / (10 * 1000.0)) @@ -395,20 +510,27 @@ def train(cfg, logger, local_rank, world_size, distributed): epoch_end_time = time.time() per_epoch_ptime = epoch_end_time - epoch_start_time - lod_for_saving_model = lod2batch.lod + lod_for_saving_model = lod2batch.lod if cfg.TRAIN.PROGRESSIVE else int(epoch//1) lod2batch.step() if local_rank == 0: if lod2batch.is_time_to_save(): checkpointer.save("model_tmp_intermediate_lod%d" % lod_for_saving_model) if lod2batch.is_time_to_report(): - save_sample(lod2batch, tracker, sample_spec_test, samplez, x, logger, model_s, cfg, encoder_optimizer, - decoder_optimizer) + save_sample(lod2batch, tracker, sample_spec_test, samplez, samplez_global, x, logger, model_s, cfg, encoder_optimizer, + decoder_optimizer,ecog=ecog_test,mask_prior=mask_prior_test) + if ecog is not None: + save_sample(lod2batch, tracker, x_orig, samplez, samplez_global, x, logger, model_s, cfg, encoder_optimizer, + decoder_optimizer,ecog=ecog,mask_prior=mask_prior,mode='train') scheduler.step() if local_rank == 0: checkpointer.save("model_tmp_lod%d" % lod_for_saving_model) - save_sample(lod2batch, tracker, sample_spec_test, samplez, x, logger, model_s, cfg, encoder_optimizer, decoder_optimizer) + save_sample(lod2batch, tracker, sample_spec_test, samplez, samplez_global, x, logger, model_s, cfg, encoder_optimizer, decoder_optimizer, + ecog=ecog_test,mask_prior=mask_prior_test) + if ecog is not None: + save_sample(lod2batch, tracker, x_orig, samplez, samplez_global, x, logger, model_s, cfg, encoder_optimizer, + decoder_optimizer,ecog=ecog,mask_prior=mask_prior,mode='train') logger.info("Training finish!... 
save training results") if local_rank == 0: @@ -417,5 +539,5 @@ def train(cfg, logger, local_rank, world_size, distributed): if __name__ == "__main__": gpu_count = torch.cuda.device_count() - run(train, get_cfg_defaults(), description='StyleGAN', default_config='configs/ecog.yaml', + run(train, get_cfg_defaults(), description='StyleGAN', default_config='configs/ecog_style2.yaml', world_size=gpu_count) diff --git a/train_param.json b/train_param.json index 5a94da99..43ccc0b7 100644 --- a/train_param.json +++ b/train_param.json @@ -9,16 +9,22 @@ "DOWN_TF_FS": 125, "DOWN_ECOG_FS": 125, "Subj":{ + "NY717":{ + "Crop": null, + "Task": ["VisRead","SenComp","PicN","AudN","AudRep"], + "TestNum":[10,10,10,10,10] + }, "NY742":{ "Crop": null, "Task": ["VisRead","SenComp","PicN","AudN","AudRep"], - "TestNum":[50,15,50,15,50] + "TestNum":[10,10,10,10,10] + }, + "NY749":{ + "Crop": null, + "Task": ["VisRead","SenComp","PicN","AudN","AudRep"], + "TestNum":[10,10,10,10,10] } }, - "Data":{ - "Subj":"NY742", - "T": 100 - }, "Train":{ "lr": 0.001, "gamma": 0.8, From df82e166408baa7eeb558ef680dba976e392fa1f Mon Sep 17 00:00:00 2001 From: Ran Wang Date: Mon, 3 Aug 2020 16:23:27 -0400 Subject: [PATCH 06/14] formant systh --- model_formant.py | 81 ++++++++++++++ net_formant.py | 285 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 366 insertions(+) create mode 100644 model_formant.py create mode 100644 net_formant.py diff --git a/model_formant.py b/model_formant.py new file mode 100644 index 00000000..a32dd3c3 --- /dev/null +++ b/model_formant.py @@ -0,0 +1,81 @@ +import random +import losses +from net_formant import * +import numpy as np +class Model(nn.Module): + def __init__(self, generator="", encoder="", ecog_encoder="", + spec_chans = 128, n_formants=2, with_ecog = False): + super(Model, self).__init__() + self.spec_chans = spec_chans + self.with_ecog = with_ecog + self.decoder = GENERATORS[generator]( + n_mels = spec_chans, + k = 30, + ) + self.encoder = ENCODERS[encoder]( + n_mels = spec_chans, + n_formants = n_formants, + ) + if with_ecog: + self.ecog_encoder = ECOG_ENCODER[ecog_encoder]( + n_mels = spec_chans,n_formants = n_formants, + ) + + def generate_fromecog(self, ecog = None, mask_prior = None, return_components=False): + components = self.ecog_encoder(ecog, mask_prior) + rec = self.decoder.forward(components) + if return_components: + return rec, components + else: + return rec + + def generate_fromspec(self, spec, return_components=False): + components = self.encoder(spec) + rec = self.decoder.forward(components) + if return_components: + return rec, components + else: + return rec + + def encode(self, spec): + components = self.encoder(spec) + return components + + def forward(self, spec, ecog, mask_prior, on_stage, ae, tracker, encoder_guide): + if ae: + self.encoder.requires_grad_(True) + rec = self.generate_fromspec(spec) + Lae = torch.mean((rec - spec).abs()) + tracker.update(dict(Lae=Lae)) + return Lae + else: + self.encoder.requires_grad_(False) + rec,components_ecog = self.generate_fromecog(ecog,mask_prior,return_components=True) + Lrec = torch.mean((rec - spec).abs()) + tracker.update(dict(Lrec=Lrec)) + Lcomp = 0 + if encoder_guide: + components_guide = self.encode(spec) + consonant_weight = 100*(torch.sign(components_guide['amplitudes'][:,1:]-0.5)*0.5+0.5) + for key in components_guide.keys(): + if key == 'loudness': + diff = torch.mean((components_guide[key] - components_ecog[key])**2) + torch.mean((components_guide[key] - components_ecog[key])**2 * on_stage * 
consonant_weight) + elif key in ['freq_formants', 'bandwidth_formants', 'amplitude_formants']: + diff = torch.mean((components_guide[key] - components_ecog[key])**2 * on_stage * consonant_weight) + else: + diff = torch.mean((components_guide[key] - components_ecog[key])**2 * on_stage) + tracker.update({key : diff}) + Lcomp += diff + + Loss = Lrec+Lcomp + return Loss + + def lerp(self, other, betta,w_classifier=False): + if hasattr(other, 'module'): + other = other.module + with torch.no_grad(): + params = list(self.decoder.parameters()) + list(self.encoder.parameters()) + (list(self.ecog_encoder.parameters()) if self.with_ecog else []) + other_param = list(other.decoder.parameters()) + list(other.encoder.parameters()) + (list(other.ecog_encoder.parameters()) if self.with_ecog else []) + for p, p_other in zip(params, other_param): + p.data.lerp_(p_other.data, 1.0 - betta) + diff --git a/net_formant.py b/net_formant.py new file mode 100644 index 00000000..dcb99ae2 --- /dev/null +++ b/net_formant.py @@ -0,0 +1,285 @@ +import os +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn import Parameter as P +from torch.nn import init +from torch.nn.parameter import Parameter +import numpy as np +import lreq as ln +import math +from registry import * + +@GENERATORS.register("GeneratorFormant") +class FormantSysth(nn.Module): + def __init__(self, n_mels=64, k=30): + super(FormantSysth, self).__init__() + self.n_mels = n_mels + self.k = k + self.timbre = Parameter(torch.Tensor(1,1,n_mels)) + # self.silient = Parameter(torch.Tensor(1,1,n_mels)) + self.silient = -1 + with torch.no_grad(): + nn.init.constant_(self.timbre,1.0) + # nn.init.constant_(self.silient,-1.0) + + def formant_mask(self,freq,bandwith,amplitude): + # freq, bandwith, amplitude: B*formants*time + freq_cord = torch.arange(self.n_mels) + time_cord = torch.arange(freq.shape[2]) + grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) + grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + freq = freq.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + bandwith = bandwith.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + amplitude = amplitude.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + masks = amplitude*torch.exp(-(grid_freq-freq)**2/(2*bandwith**2)) #B,time,freqchans, formants + masks = masks.unsqueeze(dim=1) #B,1,time,freqchans + return masks + + def mel_scale(self,hz): + return (torch.log2(hz/440)+31/24)*24*self.n_mels/126 + + def inverse_mel_scale(self,mel): + return 440*2**(mel*126/24-31/24) + + def voicing(self,f0): + #f0: B*1*time + freq_cord = torch.arange(self.n_mels) + time_cord = torch.arange(f0.shape[2]) + grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) + grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + f0 = f0.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, 1 + f0 = f0.repeat([1,1,1,self.k]) #B,time,1, self.k + f0 = f0*(torch.arange(self.k)+1).reshape([1,1,1,self.k]) + bandwith = 24.7*(f0*4.37/1000+1) + bandwith_lower = torch.clamp(f0-bandwith/2,min=0.001) + bandwith_upper = f0+bandwith/2 + bandwith = self.mel_scale(bandwith_upper) - self.mel_scale(bandwith_lower) + f0 = self.mel_scale(f0) + # hamonics = torch.exp(-(grid_freq-f0)**2/(2*bandwith**2)) #gaussian + hamonics = 
(1-((grid_freq-f0)/(3*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(3*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = torch.cos(np.pi*torch.abs(grid_freq-f0)/(4*bandwith))**2*(-torch.sign(torch.abs(grid_freq-f0)/(4*bandwith)-0.5)*0.5+0.5) #hanning + hamonics = (hamonics.sum(dim=-1)*self.timbre).unsqueeze(dim=1) # B,1,T,F + return hamonics + + def unvoicing(self,f0): + return torch.ones([f0.shape[0],1,f0.shape[2],self.n_mels]) + + def forward(self,components): + # f0: B*1*T, amplitudes: B*2(voicing,unvoicing)*T, freq_formants,bandwidth_formants,amplitude_formants: B*formants*T + amplitudes = components['amplitudes'].unsqueeze(dim=-1) + loudness = components['loudness'].unsqueeze(dim=-1) + f0_hz = self.inverse_mel_scale(components['f0']) + self.hamonics = self.voicing(f0_hz) + self.noise = self.unvoicing(f0_hz) + freq_formants = components['freq_formants']*self.n_mels + bandwidth_formants = components['bandwidth_formants']*self.n_mels + # excitation = amplitudes[:,0:1]*hamonics + # excitation = loudness*(amplitudes[:,0:1]*hamonics) + self.excitation = loudness*(amplitudes[:,0:1]*self.hamonics + amplitudes[:,-1:]*self.noise) + self.mask = self.formant_mask(freq_formants,bandwidth_formants,components['amplitude_formants']) + self.mask_sum = self.mask.sum(dim=-1) + speech = self.excitation*self.mask_sum + self.silient*torch.ones(self.mask_sum.shape) + return speech + +@ENCODERS.register("EncoderFormant") +class FormantEncoder(nn.Module): + def __init__(self, n_mels=64, n_formants=4): + super(FormantEncoder, self).__init__() + self.n_mels = n_mels + self.conv1 = ln.Conv1d(n_mels,64,3,1,1) + self.norm1 = nn.GroupNorm(32,64) + self.conv2 = ln.Conv1d(64,128,3,1,1) + self.norm2 = nn.GroupNorm(32,128) + + self.conv_fundementals = ln.Conv1d(128,128,3,1,1) + self.norm_fundementals = nn.GroupNorm(32,128) + self.conv_f0 = ln.Conv1d(128,1,1,1,0) + self.conv_amplitudes = ln.Conv1d(128,2,1,1,0) + # self.conv_loudness = ln.Conv1d(128,1,1,1,0) + + self.conv_formants = ln.Conv1d(128,128,3,1,1) + self.norm_formants = nn.GroupNorm(32,128) + self.conv_formants_freqs = ln.Conv1d(128,n_formants,1,1,0) + self.conv_formants_bandwidth = ln.Conv1d(128,n_formants,1,1,0) + self.conv_formants_amplitude = ln.Conv1d(128,n_formants,1,1,0) + + self.amplifier = Parameter(torch.Tensor(1)) + with torch.no_grad(): + nn.init.constant_(self.amplifier,1.0) + + def forward(self,x): + x = x.squeeze(dim=1).permute(0,2,1) #B * f * T + loudness = torch.mean(x*0.5+0.5,dim=1,keepdim=True) + loudness = F.softplus(self.amplifier)*loudness + x = F.leaky_relu(self.norm1(self.conv1(x)),0.2) + x_common = F.leaky_relu(self.norm2(self.conv2(x)),0.2) + + # loudness = F.relu(self.conv_loudness(x_common)) + amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1) + + x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2) + # f0 = F.sigmoid(self.conv_f0(x_fundementals)) + # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz + f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz + # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice + # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice + x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2) + formants_freqs = F.sigmoid(self.conv_formants_freqs(x_formants)) + formants_freqs = 
torch.cumsum(formants_freqs,dim=1) + formants_freqs = formants_freqs + # formants_freqs = formants_freqs + f0 + formants_bandwidth = F.sigmoid(self.conv_formants_bandwidth(x_formants)) + formants_amplitude = F.softmax(self.conv_formants_amplitude(x_formants),dim=1) + + components = { 'f0':f0, + 'loudness':loudness, + 'amplitudes':amplitudes, + 'freq_formants':formants_freqs, + 'bandwidth_formants':formants_bandwidth, + 'amplitude_formants':formants_amplitude, + } + return components + +class FromECoG(nn.Module): + def __init__(self, outputs,residual=False): + super().__init__() + self.residual=residual + self.from_ecog = ln.Conv3d(1, outputs, [9,1,1], 1, [4,0,0]) + + def forward(self, x): + x = self.from_ecog(x) + if not self.residual: + x = F.leaky_relu(x, 0.2) + return x + +class ECoGMappingBlock(nn.Module): + def __init__(self, inputs, outputs, kernel_size,dilation=1,fused_scale=True,residual=False,resample=[]): + super(ECoGMappingBlock, self).__init__() + self.residual = residual + self.inputs_resample = resample + self.dim_missmatch = (inputs!=outputs) + self.resample = resample + if not self.resample: + self.resample=1 + self.padding = list(np.array(dilation)*(np.array(kernel_size)-1)//2) + # self.padding = [dilation[i]*(kernel_size[i]-1)//2 for i in range(len(dilation))] + if residual: + self.norm1 = nn.GroupNorm(min(inputs,32),inputs) + else: + self.norm1 = nn.GroupNorm(min(outputs,32),outputs) + self.conv1 = ln.Conv3d(inputs, outputs, kernel_size, self.resample, self.padding, dilation=dilation, bias=False) + if self.inputs_resample or self.dim_missmatch: + self.convskip = ln.Conv3d(inputs, outputs, kernel_size, self.resample, self.padding, dilation=dilation, bias=False) + + self.conv2 = ln.Conv3d(outputs, outputs, kernel_size, 1, self.padding, dilation=dilation, bias=False) + self.norm2 = nn.GroupNorm(min(outputs,32),outputs) + + def forward(self,x): + if self.residual: + x = F.leaky_relu(self.norm1(x),0.2) + if self.inputs_resample or self.dim_missmatch: + # x_skip = F.avg_pool3d(x,self.resample,self.resample) + x_skip = self.convskip(x) + else: + x_skip = x + x = F.leaky_relu(self.norm2(self.conv1(x)),0.2) + x = self.conv2(x) + x = x_skip + x + else: + x = F.leaky_relu(self.norm1(self.conv1(x)),0.2) + x = F.leaky_relu(self.norm2(self.conv2(x)),0.2) + return x + + +@ECOG_ENCODER.register("ECoGMappingBottleneck") +class ECoGMapping_Bottleneck(nn.Module): + def __init__(self,n_mels,n_formants): + super(ECoGMapping_Bottleneck, self).__init__() + self.n_formants = n_formants + self.n_mels = n_mels + self.from_ecog = FromECoG(16,residual=True) + self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,resample = [2,1,1]) + self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,resample = [2,1,1]) + self.norm_mask = nn.GroupNorm(32,64) + self.mask = ln.Conv3d(64,1,[3,1,1],1,[1,0,0]) + self.conv3 = ECoGMappingBlock(64,128,[3,3,3],residual=True,resample = [2,2,2]) + self.conv4 = ECoGMappingBlock(128,256,[3,3,3],residual=True,resample = [2,2,2]) + self.norm = nn.GroupNorm(32,256) + self.conv5 = ln.Conv1d(256,256,3,1,1) + self.norm2 = nn.GroupNorm(32,256) + self.conv6 = ln.ConvTranspose1d(256, 128, 3, 2, 1, transform_kernel=True) + self.norm3 = nn.GroupNorm(32,128) + self.conv7 = ln.ConvTranspose1d(128, 64, 3, 2, 1, transform_kernel=True) + self.norm4 = nn.GroupNorm(32,64) + self.conv8 = ln.ConvTranspose1d(64, 32, 3, 2, 1, transform_kernel=True) + self.norm5 = nn.GroupNorm(32,32) + self.conv9 = ln.ConvTranspose1d(32, 32, 3, 2, 1, transform_kernel=True) + self.norm6 = 
nn.GroupNorm(32,32) + + self.conv_fundementals = ln.Conv1d(32,32,3,1,1) + self.norm_fundementals = nn.GroupNorm(32,32) + self.conv_f0 = ln.Conv1d(32,1,1,1,0) + self.conv_amplitudes = ln.Conv1d(32,2,1,1,0) + self.conv_loudness = ln.Conv1d(32,1,1,1,0) + + self.conv_formants = ln.Conv1d(32,32,3,1,1) + self.norm_formants = nn.GroupNorm(32,32) + self.conv_formants_freqs = ln.Conv1d(32,n_formants,1,1,0) + self.conv_formants_bandwidth = ln.Conv1d(32,n_formants,1,1,0) + self.conv_formants_amplitude = ln.Conv1d(32,n_formants,1,1,0) + + def forward(self,ecog,mask_prior): + x_common_all = [] + for d in range(len(ecog)): + x = ecog[d] + x = x.reshape([-1,1,x.shape[1],15,15]) + mask_prior_d = mask_prior[d].reshape(-1,1,1,15,15) + x = self.from_ecog(x) + x = self.conv1(x) + x = self.conv2(x) + mask = torch.sigmoid(self.mask(F.leaky_relu(self.norm_mask(x),0.2))) + mask = mask[:,:,4:] + if mask_prior is not None: + mask = mask*mask_prior_d + x = x[:,:,4:] + x = x*mask + x = self.conv3(x) + x = self.conv4(x) + x = x.max(-1)[0].max(-1)[0] + x = self.conv5(F.leaky_relu(self.norm(x),0.2)) + x = self.conv6(F.leaky_relu(self.norm2(x),0.2)) + x = self.conv7(F.leaky_relu(self.norm3(x),0.2)) + x = self.conv8(F.leaky_relu(self.norm4(x),0.2)) + x = self.conv9(F.leaky_relu(self.norm5(x),0.2)) + x_common = F.leaky_relu(self.norm6(x),0.2) + x_common_all += [x_common] + + x_common = torch.cat(x_common_all,dim=0) + loudness = F.relu(self.conv_loudness(x_common)) + amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1) + + x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2) + # f0 = F.sigmoid(self.conv_f0(x_fundementals)) + # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz + f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz + # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice + # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice + x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2) + formants_freqs = F.sigmoid(self.conv_formants_freqs(x_formants)) + formants_freqs = torch.cumsum(formants_freqs,dim=1) + formants_freqs = formants_freqs + # formants_freqs = formants_freqs + f0 + formants_bandwidth = F.sigmoid(self.conv_formants_bandwidth(x_formants)) + formants_amplitude = F.softmax(self.conv_formants_amplitude(x_formants),dim=1) + + components = { 'f0':f0, + 'loudness':loudness, + 'amplitudes':amplitudes, + 'freq_formants':formants_freqs, + 'bandwidth_formants':formants_bandwidth, + 'amplitude_formants':formants_amplitude, + } + return components \ No newline at end of file From 10440643e064420c7ea3faa254da421371bc9530 Mon Sep 17 00:00:00 2001 From: Ran Wang Date: Mon, 3 Aug 2020 16:27:58 -0400 Subject: [PATCH 07/14] formant systh --- train_formant.py | 247 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 247 insertions(+) create mode 100644 train_formant.py diff --git a/train_formant.py b/train_formant.py new file mode 100644 index 00000000..2d6b2a05 --- /dev/null +++ b/train_formant.py @@ -0,0 +1,247 @@ +# Copyright 2019-2020 Stanislav Pidhorskyi +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +import json +import torch.utils.data +from torchvision.utils import save_image +from net_formant import * +import os +import utils +from checkpointer import Checkpointer +from scheduler import ComboMultiStepLR +from custom_adam import LREQAdam +from dataloader_ecog import * +from tqdm import tqdm +from dlutils.pytorch import count_parameters +import dlutils.pytorch.count_parameters as count_param_override +from tracker import LossTracker +from model_formant import Model +from launcher import run +from defaults import get_cfg_defaults +import lod_driver +from PIL import Image +import numpy as np +from torch import autograd +from ECoGDataSet import concate_batch +from formant_systh import save_sample + + +def train(cfg, logger, local_rank, world_size, distributed): + torch.cuda.set_device(local_rank) + model = Model( + generator=cfg.MODEL.GENERATOR, + encoder=cfg.MODEL.ENCODER, + ecog_encoder=cfg.MODEL.MAPPING_FROM_ECOG, + spec_chans = cfg.DATASET.SPEC_CHANS, + n_formants = cfg.MODEL.N_FORMANTS, + with_ecog = cfg.MODEL.ECOG, + ) + model.cuda(local_rank) + model.train() + + if local_rank == 0: + model_s = Model( + generator=cfg.MODEL.GENERATOR, + encoder=cfg.MODEL.ENCODER, + ecog_encoder=cfg.MODEL.MAPPING_FROM_ECOG, + spec_chans = cfg.DATASET.SPEC_CHANS, + n_formants = cfg.MODEL.N_FORMANTS, + with_ecog = cfg.MODEL.ECOG, + ) + model_s.cuda(local_rank) + model_s.eval() + model_s.requires_grad_(False) + # print(model) + if distributed: + model = nn.parallel.DistributedDataParallel( + model, + device_ids=[local_rank], + broadcast_buffers=False, + bucket_cap_mb=25, + find_unused_parameters=True) + model.device_ids = None + decoder = model.module.decoder + encoder = model.module.encoder + if hasattr(model,'ecog_encoder'): + ecog_encoder = model.module.ecog_encoder + else: + decoder = model.decoder + encoder = model.encoder + if hasattr(model,'ecog_encoder'): + ecog_encoder = model.ecog_encoder + + count_param_override.print = lambda a: logger.info(a) + + logger.info("Trainable parameters generator:") + count_parameters(decoder) + + logger.info("Trainable parameters discriminator:") + count_parameters(encoder) + + arguments = dict() + arguments["iteration"] = 0 + + if cfg.MODEL.ECOG: + if cfg.MODEL.SUPLOSS_ON_ECOGF: + optimizer = LREQAdam([ + {'params': ecog_encoder.parameters()} + ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) + else: + optimizer = LREQAdam([ + {'params': ecog_encoder.parameters()}, + {'params': decoder.parameters()}, + ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) + + else: + optimizer = LREQAdam([ + {'params': encoder.parameters()}, + {'params': decoder.parameters()} + ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) + + scheduler = ComboMultiStepLR(optimizers= + {'optimizer': optimizer}, + milestones=cfg.TRAIN.LEARNING_DECAY_STEPS, + gamma=cfg.TRAIN.LEARNING_DECAY_RATE, + reference_batch_size=32, 
base_lr=cfg.TRAIN.LEARNING_RATES) + model_dict = { + 'encoder': encoder, + 'generator': decoder, + } + if hasattr(model,'ecog_encoder'): + model_dict['ecog_encoder'] = ecog_encoder + if local_rank == 0: + model_dict['encoder_s'] = model_s.encoder + model_dict['generator_s'] = model_s.decoder + if hasattr(model_s,'ecog_encoder'): + model_dict['ecog_encoder_s'] = model_s.ecog_encoder + + tracker = LossTracker(cfg.OUTPUT_DIR) + + auxiliary = { + 'optimizer': optimizer, + 'scheduler': scheduler, + 'tracker': tracker + } + + checkpointer = Checkpointer(cfg, + model_dict, + auxiliary, + logger=logger, + save=local_rank == 0) + + # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=True,file_name='./training_artifacts/ecog_residual_cycle/model_tmp_lod4.pth') + extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/formantsyth_NY742/model_epoch29.pth') + logger.info("Starting from epoch: %d" % (scheduler.start_epoch())) + + arguments.update(extra_checkpoint_data) + + with open('train_param.json','r') as rfile: + param = json.load(rfile) + # data_param, train_param, test_param = param['Data'], param['Train'], param['Test'] + dataset = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS,param=param) + dataset_test = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS,train=False,param=param) + + rnd = np.random.RandomState(3456) + # latents = rnd.randn(len(dataset_test.dataset), cfg.MODEL.LATENT_SPACE_SIZE) + # samplez = torch.tensor(latents).float().cuda() + + + if cfg.DATASET.SAMPLES_PATH: + path = cfg.DATASET.SAMPLES_PATH + src = [] + with torch.no_grad(): + for filename in list(os.listdir(path))[:32]: + img = np.asarray(Image.open(os.path.join(path, filename))) + if img.shape[2] == 4: + img = img[:, :, :3] + im = img.transpose((2, 0, 1)) + x = torch.tensor(np.asarray(im, dtype=np.float32), requires_grad=True).cuda() / 127.5 - 1. + if x.shape[0] == 4: + x = x[:3] + src.append(x) + sample = torch.stack(src) + else: + dataset_test.reset(cfg.DATASET.MAX_RESOLUTION_LEVEL, len(dataset_test.dataset)) + sample_dict_test = next(iter(dataset_test.iterator)) + # sample_dict_test = concate_batch(sample_dict_test) + sample_spec_test = sample_dict_test['spkr_re_batch_all'].to('cuda').float() + if cfg.MODEL.ECOG: + ecog_test = [sample_dict_test['ecog_re_batch_all'][i].to('cuda').float() for i in range(len(sample_dict_test['ecog_re_batch_all']))] + mask_prior_test = [sample_dict_test['mask_all'][i].to('cuda').float() for i in range(len(sample_dict_test['mask_all']))] + else: + ecog_test = None + mask_prior_test = None + # sample = next(make_dataloader(cfg, logger, dataset, 32, local_rank)) + # sample = (sample / 127.5 - 1.) 
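The epoch loop that follows only ever runs one of two regimes, fixed up front by `cfg.MODEL.ECOG`: supervised ECoG-to-spectrogram decoding, or a plain spectrogram autoencoder. A condensed sketch of the per-batch step under that reading (all names follow this file; `train_step` is a hypothetical wrapper, and the loss is named `Lrec` in the actual loop):

```python
def train_step(model, optimizer, x, ecog, mask_prior, on_stage, tracker, cfg):
    """One optimization step; `model` is model_formant.Model, returning a scalar loss."""
    optimizer.zero_grad()
    if cfg.MODEL.ECOG:
        # ECoG encoder -> formant components -> synthesizer; optionally guided
        # by the frozen spectrogram encoder when cfg.MODEL.W_SUP is set.
        loss = model(x, ecog=ecog, mask_prior=mask_prior, on_stage=on_stage,
                     ae=False, tracker=tracker, encoder_guide=cfg.MODEL.W_SUP)
    else:
        # Autoencoder path: spectrogram encoder -> components -> synthesizer,
        # trained with an L1 reconstruction loss (Lae in Model.forward).
        loss = model(x, ecog=None, mask_prior=None, on_stage=None,
                     ae=True, tracker=tracker, encoder_guide=cfg.MODEL.W_SUP)
    loss.backward()
    optimizer.step()
    return loss
```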
+ + for epoch in range(cfg.TRAIN.TRAIN_EPOCHS): + model.train() + + # batches = make_dataloader(cfg, logger, dataset, lod2batch.get_per_GPU_batch_size(), local_rank) + model.train() + need_permute = False + epoch_start_time = time.time() + + i = 0 + for sample_dict_train in tqdm(iter(dataset.iterator)): + # sample_dict_train = concate_batch(sample_dict_train) + i += 1 + x_orig = sample_dict_train['spkr_re_batch_all'].to('cuda').float() + on_stage = sample_dict_train['on_stage_re_batch_all'].to('cuda').float() + # import pdb;pdb.set_trace() + words = sample_dict_train['word_batch_all'].to('cuda').long() + words = words.view(words.shape[0]*words.shape[1]) + if cfg.MODEL.ECOG: + ecog = [sample_dict_train['ecog_re_batch_all'][j].to('cuda').float() for j in range(len(sample_dict_train['ecog_re_batch_all']))] + mask_prior = [sample_dict_train['mask_all'][j].to('cuda').float() for j in range(len(sample_dict_train['mask_all']))] + else: + ecog = None + mask_prior = None + + x = x_orig + # x.requires_grad = True + # apply_cycle = cfg.MODEL.CYCLE and True + # apply_w_classifier = cfg.MODEL.W_CLASSIFIER and True + # apply_gp = True + # apply_ppl = cfg.MODEL.APPLY_PPL and True + # apply_ppl_d = cfg.MODEL.APPLY_PPL_D and True + # apply_encoder_guide = (cfg.FINETUNE.ENCODER_GUIDE or cfg.MODEL.W_SUP) and True + # apply_sup = cfg.FINETUNE.SPECSUP + + if (cfg.MODEL.ECOG): + optimizer.zero_grad() + Lrec = model(x, ecog=ecog, mask_prior=mask_prior, on_stage = on_stage, ae = False, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP) + (Lrec).backward() + optimizer.step() + else: + optimizer.zero_grad() + Lrec = model(x, ecog=None, mask_prior=None, on_stage = None, ae = True, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP) + (Lrec).backward() + optimizer.step() + + + epoch_end_time = time.time() + per_epoch_ptime = epoch_end_time - epoch_start_time + + + if local_rank == 0: + checkpointer.save("model_epoch%d" % epoch) + save_sample(sample_spec_test,ecog_test,mask_prior_test,encoder,decoder,ecog_encoder=ecog_encoder if cfg.MODEL.ECOG else None,epoch=epoch,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker) + save_sample(x,ecog,mask_prior,encoder,decoder,ecog_encoder=ecog_encoder if cfg.MODEL.ECOG else None,epoch=epoch,mode='train',path=cfg.OUTPUT_DIR,tracker = tracker) + + +if __name__ == "__main__": + gpu_count = torch.cuda.device_count() + run(train, get_cfg_defaults(), description='StyleGAN', default_config='configs/ecog_style2.yaml', + world_size=gpu_count) From 9c8bf1f36e5eaea8eb58c62a1ab074534a57461f Mon Sep 17 00:00:00 2001 From: Ran Wang Date: Mon, 3 Aug 2020 16:30:42 -0400 Subject: [PATCH 08/14] formant systh --- configs/ecog_style2.yaml | 95 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 configs/ecog_style2.yaml diff --git a/configs/ecog_style2.yaml b/configs/ecog_style2.yaml new file mode 100644 index 00000000..52ebde67 --- /dev/null +++ b/configs/ecog_style2.yaml @@ -0,0 +1,95 @@ + # Config for training ALAE on FFHQ at resolution 1024x1024 + +NAME: ecog +DATASET: + PART_COUNT: 16 + SIZE: 60000 + FFHQ_SOURCE: /data/datasets/ffhq-dataset/tfrecords/ffhq/ffhq-r%02d.tfrecords + PATH: /data/datasets/ffhq-dataset_new/tfrecords/ffhq/splitted/ffhq-r%02d.tfrecords.%03d + + FLIP_IMAGES: False + + PART_COUNT_TEST: 4 + PATH_TEST: /data/datasets/ffhq-dataset_new/tfrecords/ffhq-test/splitted/ffhq-r%02d.tfrecords.%03d + + SAMPLES_PATH: '' + STYLE_MIX_PATH: style_mixing/test_images/set_ecog + SPEC_CHANS: 64 + TEMPORAL_SAMPLES: 128 + BCTS: True + MAX_RESOLUTION_LEVEL: 7 + 
SUBJECT: ['NY742'] +MODEL: + #####TAKE OFF CHECKLIST!!!######## + N_FORMANTS: 2 + LESS_TEMPORAL_FEATURE: True + LATENT_SPACE_SIZE: 128 + LAYER_COUNT: 6 + MAX_CHANNEL_COUNT: 512 + START_CHANNEL_COUNT: 16 + DLATENT_AVG_BETA: 0.995 + MAPPING_LAYERS: 8 + TRUNCATIOM_CUTOFF: 5 + CHANNELS: 1 + UNIQ_WORDS: 50 + MAPPING_FROM_ECOG: "ECoGMappingBottleneck" + ECOG: False #will be overloaded if FINETUNE + SUPLOSS_ON_ECOGF: False # will be overloaded to FIX_GEN if FINETUNE,spec supervise loss only apply to ecog encoder + W_SUP: False + GAN: True + GENERATOR: "GeneratorFormant" + ENCODER: "EncoderFormant" + AVERAGE_W: True + TEMPORAL_W: True + GLOBAL_W: True + TEMPORAL_GLOBAL_CAT: True + RESIDUAL: True + W_CLASSIFIER: False + CYCLE: False + ATTENTIONAL_STYLE: True + #T 4 8 16 32 64 128 + ATTENTION: [False, False, False, False, False, False] + HEADS: 1 + APPLY_PPL: True + APPLY_PPL_D: True + PPL_WEIGHT: 100 + PPL_GLOBAL_WEIGHT: 0 + PPLD_WEIGHT: 1 + PPLD_GLOBAL_WEIGHT: 0 + COMMON_Z: True + # ATTENTION: [] +# OUTPUT_DIR: training_artifacts/debug +OUTPUT_DIR: training_artifacts/formantsyth_ecogfinetune_NY742 +# OUTPUT_DIR: training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_noprogressive_HBw_ppl_ppld_localreg_ecogf_w_spec_sup +# OUTPUT_DIR: training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_ppl_ppld +# OUTPUT_DIR: training_artifacts/ecog_residual_cycle_attention3264wStyleIN_specchan64_more_attentfeatures_heads4 + +FINETUNE: + FINETUNE: False + FIX_GEN: True + ENCODER_GUIDE: True + SPECSUP: False +##################################### + +TRAIN: + PROGRESSIVE: False + W_WEIGHT: 1 + CYCLE_WEIGHT: 1 + BASE_LEARNING_RATE: 0.002 + EPOCHS_PER_LOD: 16 + LEARNING_DECAY_RATE: 0.1 + LEARNING_DECAY_STEPS: [96] + TRAIN_EPOCHS: 60 + # 4 8 16 32 64 128 256 + LOD_2_BATCH_8GPU: [512, 256, 128, 64, 32, 32] # If GPU memory ~16GB reduce last number from 32 to 24 + LOD_2_BATCH_4GPU: [64, 64, 64, 64, 32, 16] + LOD_2_BATCH_2GPU: [64, 64, 64, 64, 32, 16] + # LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 16] + # LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 32] + # LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 16] + # LOD_2_BATCH_1GPU: [128, 128, 128, 128, 64, 32] + # LOD_2_BATCH_1GPU: [512, 256, 256, 128, 64, 16] + LOD_2_BATCH_1GPU: [64, 64, 64, 64, 32, 16] + + LEARNING_RATES: [0.0015, 0.0015, 0.0015, 0.002, 0.003, 0.003] + # LEARNING_RATES: [0.0015, 0.0015, 0.0005, 0.0003, 0.0003, 0.0002] From a9b212b4716ab65003e82c71afb2e05579e23aaf Mon Sep 17 00:00:00 2001 From: Ran Wang Date: Tue, 4 Aug 2020 16:34:03 -0400 Subject: [PATCH 09/14] formant systh --- formant_systh.py | 152 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 152 insertions(+) create mode 100755 formant_systh.py diff --git a/formant_systh.py b/formant_systh.py new file mode 100755 index 00000000..3c70315a --- /dev/null +++ b/formant_systh.py @@ -0,0 +1,152 @@ +import torch +from torch import nn +# from torch.nn import functional as F +# from registry import * +import lreq as ln +import json +from tqdm import tqdm +import os +import numpy as np +from torch.nn import functional as F +from torchvision.utils import save_image +from torch.nn.parameter import Parameter +from custom_adam import LREQAdam +from ECoGDataSet import ECoGDataset + + +def freq_coloring(sample,ind,color='r'): + for b in range(sample.shape[0]): + for t in range(sample.shape[2]): + if color == 'r': + sample[b,0,t,ind[b,0,t]]=1 + if color == 'g': + sample[b,1,t,ind[b,0,t]]=1 + if color == 'b': + sample[b,2,t,ind[b,0,t]]=1 + if color == 'y': + 
sample[b,0,t,ind[b,0,t]]=1;sample[b,1,t,ind[b,0,t]]=1 + if color == 'c': + sample[b,1,t,ind[b,0,t]]=1;sample[b,2,t,ind[b,0,t]]=1 + if color == 'm': + sample[b,0,t,ind[b,0,t]]=1;sample[b,2,t,ind[b,0,t]]=1 + return sample + +def voicing_coloring(sample,amplitude): + for b in range(sample.shape[0]): + for t in range(sample.shape[2]): + sample[b,:,t]=sample[b,0:1,t]*(amplitude[b,0,t]*torch.tensor([0.,0.35,0.67]) + amplitude[b,1,t]*torch.tensor([1.0,0.87,0.])).unsqueeze(dim=-1) # voicing for blue, unvoicing for yellow + return sample + +def color_spec(spec,components,n_mels): + clrs = ['g','y','b','m','c'] + sample_in = spec.repeat(1,3,1,1) + sample_in = sample_in * 0.5 + 0.5 + f0=(components['f0']*n_mels).int().clamp(min=0,max=n_mels-1) + formants_freqs=(components['freq_formants']*n_mels).int().clamp(min=0,max=n_mels-1) + sample_in_color_voicing = sample_in.clone() + sample_in_color_voicing = voicing_coloring(sample_in_color_voicing,components['amplitudes']) + sample_in_color_freq = sample_in.clone() + sample_in_color_freq = sample_in_color_freq/2 + sample_in_color_freq = freq_coloring(sample_in_color_freq,f0,'r') + for j in range(formants_freqs.shape[1]): + sample_in_color_freq = freq_coloring(sample_in_color_freq,formants_freqs[:,j].unsqueeze(1),clrs[j]) + return sample_in_color_voicing,sample_in_color_freq + +def save_sample(sample,ecog,mask_prior,encoder,decoder,ecog_encoder,epoch,mode='test',path='training_artifacts/formantsysth_voicingandunvoicing_loudness',tracker=None): + os.makedirs(path, exist_ok=True) + with torch.no_grad(): + encoder.eval() + decoder.eval() + if ecog_encoder is not None: + ecog_encoder.eval() + sample_in_all = torch.tensor([]) + sample_in_color_freq_all = torch.tensor([]) + sample_in_color_voicing_all = torch.tensor([]) + rec_all = torch.tensor([]) + if ecog_encoder is not None: + sample_in_color_freq_ecog_all = torch.tensor([]) + sample_in_color_voicing_ecog_all = torch.tensor([]) + rec_ecog_all = torch.tensor([]) + n_mels = sample.shape[-1] + for i in range(0,sample.shape[0],9): + sample_in = sample[i:np.minimum(i+9,sample.shape[0])] + if ecog_encoder is not None: + ecog_in = [ecog[j][i:np.minimum(i+9,sample.shape[0])] for j in range(len(ecog))] + mask_prior_in = [mask_prior[j][i:np.minimum(i+9,sample.shape[0])] for j in range(len(ecog))] + components = encoder(sample_in) + rec = decoder(components) + sample_in_color_voicing,sample_in_color_freq = color_spec(sample_in,components,n_mels) + rec = rec.repeat(1,3,1,1) + rec = rec * 0.5 + 0.5 + sample_in_all = torch.cat([sample_in_all,sample_in],dim=0) + sample_in_color_freq_all = torch.cat([sample_in_color_freq_all,sample_in_color_freq],dim=0) + sample_in_color_voicing_all = torch.cat([sample_in_color_voicing_all,sample_in_color_voicing],dim=0) + rec_all = torch.cat([rec_all,rec],dim=0) + if ecog_encoder is not None: + components_ecog = ecog_encoder(ecog_in,mask_prior_in) + rec_ecog = decoder(components_ecog) + rec_ecog = rec_ecog.repeat(1,3,1,1) + rec_ecog = rec_ecog * 0.5 + 0.5 + sample_in_color_voicing_ecog,sample_in_color_freq_ecog = color_spec(sample_in,components_ecog,n_mels) + sample_in_color_freq_ecog_all = torch.cat([sample_in_color_freq_ecog_all,sample_in_color_freq_ecog],dim=0) + sample_in_color_voicing_ecog_all = torch.cat([sample_in_color_voicing_ecog_all,sample_in_color_voicing_ecog],dim=0) + rec_ecog_all = torch.cat([rec_ecog_all,rec_ecog],dim=0) + sample_in_all = sample_in_all.repeat(1,3,1,1)*0.5+0.5 + if ecog_encoder is None: + resultsample = torch.cat([sample_in_all, 
sample_in_color_freq_all,sample_in_color_voicing_all, rec_all], dim=0)
+ else:
+ resultsample = torch.cat([sample_in_all, sample_in_color_freq_all,sample_in_color_voicing_all, rec_all,sample_in_color_freq_ecog_all,sample_in_color_voicing_ecog_all, rec_ecog_all], dim=0)
+ resultsample = resultsample.transpose(-2,-1)
+ resultsample = resultsample.cpu()
+ # import pdb;pdb.set_trace()
+ if mode == 'train':
+ f = os.path.join(path,'sample_train_%d.png' % (epoch + 1))
+ if mode == 'test':
+ f = os.path.join(path,'sample_%d.png' % (epoch + 1))
+ save_image(resultsample, f, nrow=resultsample.shape[0]//(4 if ecog_encoder is None else 7))
+ if mode == 'test':
+ tracker.register_means(epoch)
+ return
+
+def main():
+ OUTPUT_DIR = 'training_artifacts/formantsysth_voicingandunvoicing_loudness_NY742'
+ LOAD_DIR = ''
+ # LOAD_DIR = 'training_artifacts/formantsysth_voicingandunvoicing_loudness_'
+ # local imports: the encoder/synthesizer live in net_formant and the loss
+ # tracker in tracker.py; neither is imported at the top of this file
+ from net_formant import FormantEncoder, FormantSysth
+ from tracker import LossTracker
+ torch.set_default_tensor_type('torch.cuda.FloatTensor')
+ device = torch.device("cuda:0")
+ encoder = FormantEncoder(n_mels=64,n_formants=2) # FormantEncoder takes no k argument
+ decoder = FormantSysth(n_mels=64,k=30)
+ encoder.cuda()
+ decoder.cuda()
+ if LOAD_DIR != '': # '!=', not 'is not': identity checks against string literals are unreliable
+ encoder.load_state_dict(torch.load(os.path.join(LOAD_DIR,'encoder_60.pth')))
+ decoder.load_state_dict(torch.load(os.path.join(LOAD_DIR,'decoder_60.pth')))
+ optimizer = LREQAdam([
+ {'params': encoder.parameters()},
+ {'params': decoder.parameters()}
+ ], lr=0.01, weight_decay=0)
+ tracker = LossTracker(OUTPUT_DIR)
+ with open('train_param.json','r') as rfile:
+ param = json.load(rfile)
+ dataset = torch.utils.data.DataLoader(ECoGDataset(['NY742'],mode='train'),batch_size=32,shuffle=True, drop_last=True)
+ dataset_test = torch.utils.data.DataLoader(ECoGDataset(['NY742'],mode='test'),batch_size=50,shuffle=False, drop_last=False)
+ sample_dict_test = next(iter(dataset_test))
+ sample_spec_test = sample_dict_test['spkr_re_batch_all'].to('cuda').float()
+ for epoch in range(60):
+ encoder.train()
+ decoder.train()
+ for sample_dict_train in tqdm(iter(dataset)):
+ x = sample_dict_train['spkr_re_batch_all'].to('cuda').float()
+ optimizer.zero_grad()
+ # the encoder returns a components dict (see FormantEncoder.forward),
+ # which the synthesizer consumes directly
+ components = encoder(x)
+ x_rec = decoder(components)
+ loss = torch.mean((x-x_rec).abs())
+ loss.backward()
+ optimizer.step()
+ save_sample(sample_spec_test,None,None,encoder,decoder,None,epoch,mode='test',path=OUTPUT_DIR,tracker=tracker)
+ save_sample(x,None,None,encoder,decoder,None,epoch,mode='train',path=OUTPUT_DIR,tracker=tracker)
+ torch.save(encoder.state_dict(),os.path.join(OUTPUT_DIR,'encoder_%d.pth' % (epoch+1)))
+ torch.save(decoder.state_dict(),os.path.join(OUTPUT_DIR,'decoder_%d.pth' % (epoch+1)))
+
+
+if __name__ == "__main__":
+ main()
From b14bc9f54c8934947cf4983dd63bcc360c361a56 Mon Sep 17 00:00:00 2001
From: Ran Wang
Date: Fri, 21 Aug 2020 15:54:22 -0400
Subject: [PATCH 10/14] formant_systh

---
 ECoGDataSet.py | 9 +-
 configs/ecog_style2.yaml | 14 ++-
 dataloader_ecog.py | 4 +-
 defaults.py | 10 +-
 formant_systh.py | 13 ++-
 model_formant.py | 31 +++--
 net.py | 2 +
 net_formant.py | 239 ++++++++++++++++++++++++++++++++++-----
 run.s | 7 +-
 train_formant.py | 60 ++++++----
 10 files changed, 310 insertions(+), 79 deletions(-)

diff --git a/ECoGDataSet.py b/ECoGDataSet.py
index 54a2b3f6..1fbdfa9a 100644
--- a/ECoGDataSet.py
+++ b/ECoGDataSet.py
@@ -106,9 +106,10 @@ def select_block(self,ecog,regions,mask,mni_coord,select,block):
 region_ind = np.delete(np.arange(regions.shape[0]),region_ind)
 region_ind = region_ind.astype(np.int64)
 return 
ecog[:,region_ind],regions[region_ind],mask[region_ind],mni_coord[region_ind] - def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None): + def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,world_size=1): """ ReqSubjDict can be a list of multiple subjects""" super(ECoGDataset, self).__init__() + self.world_size = world_size self.current_lod=2 self.ReqSubjDict = ReqSubjDict self.mode = mode @@ -135,9 +136,9 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None): [self.SelectRegion.extend(self.cortex[area]) for area in train_param["SelectRegion"]] self.BlockRegion = [] [self.BlockRegion.extend(self.cortex[area]) for area in train_param["BlockRegion"]] - self.Prod,self.UseGridOnly,self.ReshapeAsGrid,self.SeqLen = train_param['Prod'],\ + self.ReshapeAsGrid = False if 'Transformer' in cfg.MODEL.MAPPING_FROM_ECOG else True + self.Prod,self.UseGridOnly,self.SeqLen = train_param['Prod'],\ train_param['UseGridOnly'],\ - train_param['ReshapeAsGrid'],\ train_param['SeqLen'], self.ahead_onset_test = train_param['Test']['ahead_onset'] self.ahead_onset_train = train_param['Train']['ahead_onset'] @@ -638,7 +639,7 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None): def __len__(self): if self.mode == 'train': if self.Prod: - return np.array([start_ind_re_alldataset.shape[0]*128 for start_ind_re_alldataset in self.meta_data['start_ind_re_alldataset']]).sum() + return np.array([start_ind_re_alldataset.shape[0]*128//self.world_size for start_ind_re_alldataset in self.meta_data['start_ind_re_alldataset']]).sum() else: return np.array([start_ind_alldataset.shape[0]*128 for start_ind_alldataset in self.meta_data['start_ind_alldataset']]).sum() else: diff --git a/configs/ecog_style2.yaml b/configs/ecog_style2.yaml index 52ebde67..10916c6a 100644 --- a/configs/ecog_style2.yaml +++ b/configs/ecog_style2.yaml @@ -32,7 +32,7 @@ MODEL: TRUNCATIOM_CUTOFF: 5 CHANNELS: 1 UNIQ_WORDS: 50 - MAPPING_FROM_ECOG: "ECoGMappingBottleneck" + MAPPING_FROM_ECOG: "ECoGMappingTransformer" ECOG: False #will be overloaded if FINETUNE SUPLOSS_ON_ECOGF: False # will be overloaded to FIX_GEN if FINETUNE,spec supervise loss only apply to ecog encoder W_SUP: False @@ -57,9 +57,16 @@ MODEL: PPLD_WEIGHT: 1 PPLD_GLOBAL_WEIGHT: 0 COMMON_Z: True + TRANSFORMER: + HIDDEN_DIM : 256 + DIM_FEEDFORWARD : 256 + ENCODER_ONLY : False + ATTENTIONAL_MASK : False + N_HEADS : 4 + NON_LOCAL: True # ATTENTION: [] # OUTPUT_DIR: training_artifacts/debug -OUTPUT_DIR: training_artifacts/formantsyth_ecogfinetune_NY742 +OUTPUT_DIR: training_artifacts/formantsyth_NY742_constraintonFB_Bconstrainrefined_absfreq # OUTPUT_DIR: training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_noprogressive_HBw_ppl_ppld_localreg_ecogf_w_spec_sup # OUTPUT_DIR: training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_ppl_ppld # OUTPUT_DIR: training_artifacts/ecog_residual_cycle_attention3264wStyleIN_specchan64_more_attentfeatures_heads4 @@ -90,6 +97,7 @@ TRAIN: # LOD_2_BATCH_1GPU: [128, 128, 128, 128, 64, 32] # LOD_2_BATCH_1GPU: [512, 256, 256, 128, 64, 16] LOD_2_BATCH_1GPU: [64, 64, 64, 64, 32, 16] - + BATCH_SIZE : 32 + # BATCH_SIZE : 2 LEARNING_RATES: [0.0015, 0.0015, 0.0015, 0.002, 0.003, 0.003] # LEARNING_RATES: [0.0015, 0.0015, 0.0005, 0.0003, 0.0003, 0.0002] diff --git a/dataloader_ecog.py b/dataloader_ecog.py index 620f5f25..21bdbcf4 100644 --- a/dataloader_ecog.py +++ b/dataloader_ecog.py @@ -34,7 +34,7 @@ class TFRecordsDataset: def 
__init__(self, cfg, logger, rank=0, world_size=1, buffer_size_mb=200, channels=3, seed=None, train=True, needs_labels=False,param=None): self.param = param - self.dataset = ECoGDataset(cfg.DATASET.SUBJECT,mode='train' if train else 'test') + self.dataset = ECoGDataset(cfg.DATASET.SUBJECT,mode='train' if train else 'test',world_size=world_size) self.cfg = cfg self.logger = logger self.rank = rank @@ -49,7 +49,7 @@ def __init__(self, cfg, logger, rank=0, world_size=1, buffer_size_mb=200, channe self.workers_active = 0 self.iterator = None self.filenames = {} - self.batch_size = 32 if train else len(self.dataset) + self.batch_size = cfg.TRAIN.BATCH_SIZE//world_size if train else len(self.dataset) self.features = {} self.channels = channels self.seed = seed diff --git a/defaults.py b/defaults.py index 30161cfd..801f8899 100644 --- a/defaults.py +++ b/defaults.py @@ -85,6 +85,14 @@ _C.MODEL.COMMON_Z = True _C.MODEL.GAN = True +_C.MODEL.TRANSFORMER = CN() +_C.MODEL.TRANSFORMER.HIDDEN_DIM = 256 +_C.MODEL.TRANSFORMER.DIM_FEEDFORWARD = 256 +_C.MODEL.TRANSFORMER.ENCODER_ONLY = True +_C.MODEL.TRANSFORMER.ATTENTIONAL_MASK = False +_C.MODEL.TRANSFORMER.N_HEADS = 1 +_C.MODEL.TRANSFORMER.NON_LOCAL = False + _C.FINETUNE = CN() _C.FINETUNE.FINETUNE = False _C.FINETUNE.ENCODER_GUIDE= False @@ -108,7 +116,7 @@ _C.TRAIN.LOD_2_BATCH_4GPU = [512, 256, 128, 64, 32, 16] _C.TRAIN.LOD_2_BATCH_2GPU = [256, 256, 128, 64, 32, 16] _C.TRAIN.LOD_2_BATCH_1GPU = [64, 64, 64, 64, 32, 16] - +_C.TRAIN.BATCH_SIZE = 4 _C.TRAIN.SNAPSHOT_FREQ = [300, 300, 300, 100, 50, 30, 20, 20, 10] diff --git a/formant_systh.py b/formant_systh.py index 3c70315a..c1f099f1 100755 --- a/formant_systh.py +++ b/formant_systh.py @@ -52,7 +52,7 @@ def color_spec(spec,components,n_mels): sample_in_color_freq = freq_coloring(sample_in_color_freq,formants_freqs[:,j].unsqueeze(1),clrs[j]) return sample_in_color_voicing,sample_in_color_freq -def save_sample(sample,ecog,mask_prior,encoder,decoder,ecog_encoder,epoch,mode='test',path='training_artifacts/formantsysth_voicingandunvoicing_loudness',tracker=None): +def save_sample(sample,ecog,mask_prior,mni,encoder,decoder,ecog_encoder,epoch,mode='test',path='training_artifacts/formantsysth_voicingandunvoicing_loudness',tracker=None): os.makedirs(path, exist_ok=True) with torch.no_grad(): encoder.eval() @@ -68,11 +68,12 @@ def save_sample(sample,ecog,mask_prior,encoder,decoder,ecog_encoder,epoch,mode=' sample_in_color_voicing_ecog_all = torch.tensor([]) rec_ecog_all = torch.tensor([]) n_mels = sample.shape[-1] - for i in range(0,sample.shape[0],9): - sample_in = sample[i:np.minimum(i+9,sample.shape[0])] + for i in range(0,sample.shape[0],1): + sample_in = sample[i:np.minimum(i+1,sample.shape[0])] if ecog_encoder is not None: - ecog_in = [ecog[j][i:np.minimum(i+9,sample.shape[0])] for j in range(len(ecog))] - mask_prior_in = [mask_prior[j][i:np.minimum(i+9,sample.shape[0])] for j in range(len(ecog))] + ecog_in = [ecog[j][i:np.minimum(i+1,sample.shape[0])] for j in range(len(ecog))] + mask_prior_in = [mask_prior[j][i:np.minimum(i+1,sample.shape[0])] for j in range(len(ecog))] + mni_in = mni[i:np.minimum(i+1,sample.shape[0])] components = encoder(sample_in) rec = decoder(components) sample_in_color_voicing,sample_in_color_freq = color_spec(sample_in,components,n_mels) @@ -83,7 +84,7 @@ def save_sample(sample,ecog,mask_prior,encoder,decoder,ecog_encoder,epoch,mode=' sample_in_color_voicing_all = torch.cat([sample_in_color_voicing_all,sample_in_color_voicing],dim=0) rec_all = torch.cat([rec_all,rec],dim=0) if 
ecog_encoder is not None: - components_ecog = ecog_encoder(ecog_in,mask_prior_in) + components_ecog = ecog_encoder(ecog_in,mask_prior_in,mni=mni_in) rec_ecog = decoder(components_ecog) rec_ecog = rec_ecog.repeat(1,3,1,1) rec_ecog = rec_ecog * 0.5 + 0.5 diff --git a/model_formant.py b/model_formant.py index a32dd3c3..85e5db86 100644 --- a/model_formant.py +++ b/model_formant.py @@ -3,11 +3,13 @@ from net_formant import * import numpy as np class Model(nn.Module): - def __init__(self, generator="", encoder="", ecog_encoder="", - spec_chans = 128, n_formants=2, with_ecog = False): + def __init__(self, generator="", encoder="", ecog_encoder_name="", + spec_chans = 128, n_formants=2, with_ecog = False, + hidden_dim=256,dim_feedforward=256,encoder_only=True,attentional_mask=False,n_heads=1,non_local=False): super(Model, self).__init__() self.spec_chans = spec_chans self.with_ecog = with_ecog + self.ecog_encoder_name = ecog_encoder_name self.decoder = GENERATORS[generator]( n_mels = spec_chans, k = 30, @@ -17,12 +19,19 @@ def __init__(self, generator="", encoder="", ecog_encoder="", n_formants = n_formants, ) if with_ecog: - self.ecog_encoder = ECOG_ENCODER[ecog_encoder]( - n_mels = spec_chans,n_formants = n_formants, - ) + if 'Transformer' in ecog_encoder_name: + self.ecog_encoder = ECOG_ENCODER[ecog_encoder_name]( + n_mels = spec_chans,n_formants = n_formants, + hidden_dim=hidden_dim,dim_feedforward=dim_feedforward,n_heads=n_heads, + encoder_only=encoder_only,attentional_mask=attentional_mask,non_local=non_local, + ) + else: + self.ecog_encoder = ECOG_ENCODER[ecog_encoder_name]( + n_mels = spec_chans,n_formants = n_formants, + ) - def generate_fromecog(self, ecog = None, mask_prior = None, return_components=False): - components = self.ecog_encoder(ecog, mask_prior) + def generate_fromecog(self, ecog = None, mask_prior = None, mni=None,return_components=False): + components = self.ecog_encoder(ecog, mask_prior,mni) rec = self.decoder.forward(components) if return_components: return rec, components @@ -41,7 +50,7 @@ def encode(self, spec): components = self.encoder(spec) return components - def forward(self, spec, ecog, mask_prior, on_stage, ae, tracker, encoder_guide): + def forward(self, spec, ecog, mask_prior, on_stage, ae, tracker, encoder_guide, mni=None): if ae: self.encoder.requires_grad_(True) rec = self.generate_fromspec(spec) @@ -50,16 +59,16 @@ def forward(self, spec, ecog, mask_prior, on_stage, ae, tracker, encoder_guide): return Lae else: self.encoder.requires_grad_(False) - rec,components_ecog = self.generate_fromecog(ecog,mask_prior,return_components=True) + rec,components_ecog = self.generate_fromecog(ecog,mask_prior,mni=mni,return_components=True) Lrec = torch.mean((rec - spec).abs()) tracker.update(dict(Lrec=Lrec)) Lcomp = 0 if encoder_guide: components_guide = self.encode(spec) - consonant_weight = 100*(torch.sign(components_guide['amplitudes'][:,1:]-0.5)*0.5+0.5) + consonant_weight = 1#100*(torch.sign(components_guide['amplitudes'][:,1:]-0.5)*0.5+0.5) for key in components_guide.keys(): if key == 'loudness': - diff = torch.mean((components_guide[key] - components_ecog[key])**2) + torch.mean((components_guide[key] - components_ecog[key])**2 * on_stage * consonant_weight) + diff = torch.mean((components_guide[key] - components_ecog[key])**2) #+ torch.mean((components_guide[key] - components_ecog[key])**2 * on_stage * consonant_weight) elif key in ['freq_formants', 'bandwidth_formants', 'amplitude_formants']: diff = torch.mean((components_guide[key] - components_ecog[key])**2 * 
on_stage * consonant_weight) else: diff --git a/net.py b/net.py index d262f4ce..aced7ea0 100644 --- a/net.py +++ b/net.py @@ -2192,3 +2192,5 @@ def decode(self, x, lod, blend_factor, noise): def forward(self, x, lod, blend_factor, noise): return self.decode(x, lod, blend_factor, noise) + + diff --git a/net_formant.py b/net_formant.py index dcb99ae2..cd632ed3 100644 --- a/net_formant.py +++ b/net_formant.py @@ -9,6 +9,17 @@ import lreq as ln import math from registry import * +from transformer_models.position_encoding import build_position_encoding +from transformer_models.transformer import Transformer as TransformerTS +from transformer_models.transformer_nonlocal import Transformer as TransformerNL + +def mel_scale(n_mels,hz): + #take absolute hz, return abs mel + return (torch.log2(hz/440)+31/24)*24*n_mels/126 + +def inverse_mel_scale(mel): + #take normalized mel, return absolute hz + return 440*2**(mel*126/24-31/24) @GENERATORS.register("GeneratorFormant") class FormantSysth(nn.Module): @@ -33,18 +44,12 @@ def formant_mask(self,freq,bandwith,amplitude): freq = freq.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants bandwith = bandwith.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants amplitude = amplitude.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants - masks = amplitude*torch.exp(-(grid_freq-freq)**2/(2*bandwith**2)) #B,time,freqchans, formants + masks = amplitude*torch.exp(-0.693*(grid_freq-freq)**2/(2*(bandwith+0.001)**2)) #B,time,freqchans, formants masks = masks.unsqueeze(dim=1) #B,1,time,freqchans return masks - def mel_scale(self,hz): - return (torch.log2(hz/440)+31/24)*24*self.n_mels/126 - - def inverse_mel_scale(self,mel): - return 440*2**(mel*126/24-31/24) - def voicing(self,f0): - #f0: B*1*time + #f0: B*1*time, hz freq_cord = torch.arange(self.n_mels) time_cord = torch.arange(f0.shape[2]) grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) @@ -54,10 +59,10 @@ def voicing(self,f0): f0 = f0.repeat([1,1,1,self.k]) #B,time,1, self.k f0 = f0*(torch.arange(self.k)+1).reshape([1,1,1,self.k]) bandwith = 24.7*(f0*4.37/1000+1) - bandwith_lower = torch.clamp(f0-bandwith/2,min=0.001) + bandwith_lower = torch.clamp(f0-bandwith/2,min=1) bandwith_upper = f0+bandwith/2 - bandwith = self.mel_scale(bandwith_upper) - self.mel_scale(bandwith_lower) - f0 = self.mel_scale(f0) + bandwith = mel_scale(self.n_mels,bandwith_upper) - mel_scale(self.n_mels,bandwith_lower) + f0 = mel_scale(self.n_mels,f0) # hamonics = torch.exp(-(grid_freq-f0)**2/(2*bandwith**2)) #gaussian hamonics = (1-((grid_freq-f0)/(3*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(3*bandwith)-0.5)*0.5+0.5) #welch # hamonics = torch.cos(np.pi*torch.abs(grid_freq-f0)/(4*bandwith))**2*(-torch.sign(torch.abs(grid_freq-f0)/(4*bandwith)-0.5)*0.5+0.5) #hanning @@ -71,10 +76,20 @@ def forward(self,components): # f0: B*1*T, amplitudes: B*2(voicing,unvoicing)*T, freq_formants,bandwidth_formants,amplitude_formants: B*formants*T amplitudes = components['amplitudes'].unsqueeze(dim=-1) loudness = components['loudness'].unsqueeze(dim=-1) - f0_hz = self.inverse_mel_scale(components['f0']) + f0_hz = inverse_mel_scale(components['f0']) self.hamonics = self.voicing(f0_hz) self.noise = self.unvoicing(f0_hz) freq_formants = components['freq_formants']*self.n_mels + # ### ratio on mel + # freq_formants_hz = inverse_mel_scale(freq_formants/self.n_mels) + # bandwidth_formants_hz = self.formant_bandwitdh_ratio*freq_formants_hz + # bandwith_lower = torch.clamp(freq_formants_hz-bandwidth_formants_hz/2,min=0.001) + # bandwith_upper 
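The two module-level helpers above carry all of the hz/mel bookkeeping for the synthesizer, so a quick round-trip check (my own sketch, not part of the patch) makes the convention explicit: mel_scale returns an absolute mel coordinate, while inverse_mel_scale expects that coordinate pre-normalized by n_mels.

import torch

n_mels = 64
f_hz = torch.tensor([180., 440., 3400.])
mel_abs = mel_scale(n_mels, f_hz)             # absolute mel-bin coordinate
f_rec = inverse_mel_scale(mel_abs / n_mels)   # normalize before inverting
assert torch.allclose(f_hz, f_rec, rtol=1e-4) # the two are exact inverses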
= freq_formants_hz+bandwidth_formants_hz/2 + # bandwidth_formants = mel_scale(self.n_mels,bandwith_upper) - mel_scale(self.n_mels,bandwith_lower) + # ### + # ### ratio on hz + # bandwidth_formants = self.formant_bandwitdh_ratio*freq_formants + # ##### bandwidth_formants = components['bandwidth_formants']*self.n_mels # excitation = amplitudes[:,0:1]*hamonics # excitation = loudness*(amplitudes[:,0:1]*hamonics) @@ -89,6 +104,23 @@ class FormantEncoder(nn.Module): def __init__(self, n_mels=64, n_formants=4): super(FormantEncoder, self).__init__() self.n_mels = n_mels + self.n_formants = n_formants + + self.formant_freq_limits_diff = torch.tensor([950.,2450.,2100.]).reshape([1,3,1]) #freq difference + self.formant_freq_limits_diff_low = torch.tensor([300.,300.,0.]).reshape([1,3,1]) #freq difference + self.formant_freq_limits_abs = torch.tensor([950.,2800.,3400.]).reshape([1,3,1]) #freq difference + self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.]).reshape([1,3,1]) #freq difference + + self.formant_bandwitdh_ratio = Parameter(torch.Tensor(1)) + self.formant_bandwitdh_slop = Parameter(torch.Tensor(1)) + with torch.no_grad(): + nn.init.constant_(self.formant_bandwitdh_ratio,0) + nn.init.constant_(self.formant_bandwitdh_slop,0) + + # self.formant_freq_limits = torch.cumsum(self.formant_freq_limits_diff,dim=0) + # self.formant_freq_limits_mel = torch.cat([torch.tensor([0.]),mel_scale(n_mels,self.formant_freq_limits)/n_mels]) + # self.formant_freq_limits_mel_diff = torch.reshape(self.formant_freq_limits_mel[1:]-self.formant_freq_limits_mel[:-1],[1,3,1]) + self.conv1 = ln.Conv1d(n_mels,64,3,1,1) self.norm1 = nn.GroupNorm(32,64) self.conv2 = ln.Conv1d(64,128,3,1,1) @@ -121,17 +153,39 @@ def forward(self,x): amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1) x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2) + # f0 in mel: # f0 = F.sigmoid(self.conv_f0(x_fundementals)) # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz - f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz + # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice + + # f0 in hz: + f0 = F.sigmoid(self.conv_f0(x_fundementals)) * 240 + 180 # 180hz < f0 < 420 hz + f0 = torch.clamp(mel_scale(self.n_mels,f0)/self.n_mels,min=0.0001) + x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2) formants_freqs = F.sigmoid(self.conv_formants_freqs(x_formants)) - formants_freqs = torch.cumsum(formants_freqs,dim=1) - formants_freqs = formants_freqs + # # relative freq: + # formants_freqs_hz = formants_freqs*(self.formant_freq_limits_diff[:,:self.n_formants]-self.formant_freq_limits_diff_low[:,:self.n_formants])+self.formant_freq_limits_diff_low[:,:self.n_formants] + # # formants_freqs_hz = formants_freqs*6839 + # formants_freqs_hz = torch.cumsum(formants_freqs_hz,dim=1) + + # abs freq: + formants_freqs_hz = formants_freqs*(self.formant_freq_limits_abs[:,:self.n_formants]-self.formant_freq_limits_abs_low[:,:self.n_formants])+self.formant_freq_limits_abs_low[:,:self.n_formants] + # formants_freqs_hz = formants_freqs*6839 + formants_freqs = 
torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/self.n_mels,min=0) + # formants_freqs = formants_freqs + f0 - formants_bandwidth = F.sigmoid(self.conv_formants_bandwidth(x_formants)) + # formants_bandwidth_hz = F.sigmoid(self.conv_formants_bandwidth(x_formants)) *6839 + # formants_bandwidth_hz = F.sigmoid(self.conv_formants_bandwidth(x_formants)) * 150 + formants_bandwidth_hz = F.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*F.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+100) + # formants_bandwidth_hz = F.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*F.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+50) + # formants_bandwidth_hz = F.sigmoid(self.conv_formants_bandwidth(x_formants)) * (0.075*3*F.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+(2*F.sigmoid(self.formant_bandwitdh_ratio)+1)*50) + # formants_bandwidth_hz = F.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*F.sigmoid(self.formant_bandwitdh_ratio)*(0.075*F.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+50) + formants_bandwidth_upper = formants_freqs_hz+formants_bandwidth_hz/2 + formants_bandwidth_lower = torch.clamp(formants_freqs_hz-formants_bandwidth_hz/2,min=1) + formants_bandwidth = (mel_scale(self.n_mels,formants_bandwidth_upper) - mel_scale(self.n_mels,formants_bandwidth_lower))/self.n_mels formants_amplitude = F.softmax(self.conv_formants_amplitude(x_formants),dim=1) components = { 'f0':f0, @@ -144,10 +198,13 @@ def forward(self,x): return components class FromECoG(nn.Module): - def __init__(self, outputs,residual=False): + def __init__(self, outputs,residual=False,shape='3D'): super().__init__() self.residual=residual - self.from_ecog = ln.Conv3d(1, outputs, [9,1,1], 1, [4,0,0]) + if shape =='3D': + self.from_ecog = ln.Conv3d(1, outputs, [9,1,1], 1, [4,0,0]) + else: + self.from_ecog = ln.Conv2d(1, outputs, [9,1], 1, [4,0]) def forward(self, x): x = self.from_ecog(x) @@ -156,25 +213,42 @@ def forward(self, x): return x class ECoGMappingBlock(nn.Module): - def __init__(self, inputs, outputs, kernel_size,dilation=1,fused_scale=True,residual=False,resample=[]): + def __init__(self, inputs, outputs, kernel_size,dilation=1,fused_scale=True,residual=False,resample=[],pool=None,shape='3D'): super(ECoGMappingBlock, self).__init__() self.residual = residual + self.pool = pool self.inputs_resample = resample self.dim_missmatch = (inputs!=outputs) self.resample = resample if not self.resample: self.resample=1 self.padding = list(np.array(dilation)*(np.array(kernel_size)-1)//2) + if shape=='2D': + conv=ln.Conv2d + maxpool = nn.MaxPool2d + avgpool = nn.AvgPool2d + if shape=='3D': + conv=ln.Conv3d + maxpool = nn.MaxPool3d + avgpool = nn.AvgPool3d # self.padding = [dilation[i]*(kernel_size[i]-1)//2 for i in range(len(dilation))] if residual: self.norm1 = nn.GroupNorm(min(inputs,32),inputs) else: self.norm1 = nn.GroupNorm(min(outputs,32),outputs) - self.conv1 = ln.Conv3d(inputs, outputs, kernel_size, self.resample, self.padding, dilation=dilation, bias=False) + if pool is None: + self.conv1 = conv(inputs, outputs, kernel_size, self.resample, self.padding, dilation=dilation, bias=False) + else: + self.conv1 = conv(inputs, outputs, kernel_size, 1, self.padding, dilation=dilation, bias=False) + self.pool1 = maxpool(self.resample,self.resample) if self.pool=='Max' else avgpool(self.resample,self.resample) if self.inputs_resample or self.dim_missmatch: - self.convskip = ln.Conv3d(inputs, outputs, 
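The active branch above defines the bandwidth heuristic in hz and converts it to mel by transforming the two band edges separately, which keeps widths consistent on the warped axis. A small numeric sketch (my own; values illustrative, and the learned 3*sigmoid(formant_bandwitdh_ratio) gate is omitted for brevity):

import torch

n_mels = 64
center_hz = torch.tensor([500., 1500., 2500.])
bw_hz = 0.075 * torch.relu(center_hz - 1000.) + 100.   # 100, 137.5, 212.5 Hz
upper = center_hz + bw_hz / 2
lower = torch.clamp(center_hz - bw_hz / 2, min=1.)
bw_mel = (mel_scale(n_mels, upper) - mel_scale(n_mels, lower)) / n_mels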
kernel_size, self.resample, self.padding, dilation=dilation, bias=False)
+            if pool is None:
+                self.convskip = conv(inputs, outputs, kernel_size, self.resample, self.padding, dilation=dilation, bias=False)
+            else:
+                self.convskip = conv(inputs, outputs, kernel_size, 1, self.padding, dilation=dilation, bias=False)
+                self.poolskip = maxpool(self.resample,self.resample) if self.pool=='Max' else avgpool(self.resample,self.resample)
-        self.conv2 = ln.Conv3d(outputs, outputs, kernel_size, 1, self.padding, dilation=dilation, bias=False)
+        self.conv2 = conv(outputs, outputs, kernel_size, 1, self.padding, dilation=dilation, bias=False)
         self.norm2 = nn.GroupNorm(min(outputs,32),outputs)

     def forward(self,x):
@@ -183,9 +257,13 @@ def forward(self,x):
             if self.inputs_resample or self.dim_missmatch:
                 # x_skip = F.avg_pool3d(x,self.resample,self.resample)
                 x_skip = self.convskip(x)
+                if self.pool is not None:
+                    x_skip = self.poolskip(x_skip)
             else:
                 x_skip = x
             x = F.leaky_relu(self.norm2(self.conv1(x)),0.2)
+            if self.pool is not None:
+                x = self.poolskip(x)
             x = self.conv2(x)
             x = x_skip + x
         else:
@@ -194,6 +272,7 @@ def forward(self,x):
         return x

+
 @ECOG_ENCODER.register("ECoGMappingBottleneck")
 class ECoGMapping_Bottleneck(nn.Module):
     def __init__(self,n_mels,n_formants):
@@ -201,12 +280,12 @@ def __init__(self,n_mels,n_formants):
         self.n_formants = n_formants
         self.n_mels = n_mels
         self.from_ecog = FromECoG(16,residual=True)
-        self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,resample = [2,1,1])
-        self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,resample = [2,1,1])
+        self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,resample = [2,1,1],pool='Max')
+        self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,resample = [2,1,1],pool='Max')
         self.norm_mask = nn.GroupNorm(32,64)
         self.mask = ln.Conv3d(64,1,[3,1,1],1,[1,0,0])
-        self.conv3 = ECoGMappingBlock(64,128,[3,3,3],residual=True,resample = [2,2,2])
-        self.conv4 = ECoGMappingBlock(128,256,[3,3,3],residual=True,resample = [2,2,2])
+        self.conv3 = ECoGMappingBlock(64,128,[3,3,3],residual=True,resample = [2,2,2],pool='Max')
+        self.conv4 = ECoGMappingBlock(128,256,[3,3,3],residual=True,resample = [2,2,2],pool='Max')
         self.norm = nn.GroupNorm(32,256)
         self.conv5 = ln.Conv1d(256,256,3,1,1)
         self.norm2 = nn.GroupNorm(32,256)
@@ -231,7 +310,7 @@ def __init__(self,n_mels,n_formants):
         self.conv_formants_bandwidth = ln.Conv1d(32,n_formants,1,1,0)
         self.conv_formants_amplitude = ln.Conv1d(32,n_formants,1,1,0)

-    def forward(self,ecog,mask_prior):
+    def forward(self,ecog,mask_prior,mni):
         x_common_all = []
         for d in range(len(ecog)):
             x = ecog[d]
@@ -282,4 +361,108 @@ def forward(self,ecog,mask_prior):
             'bandwidth_formants':formants_bandwidth,
             'amplitude_formants':formants_amplitude,
         }
-        return components
\ No newline at end of file
+        return components
+
+
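A usage sketch for the pooled block variant (my own; shapes illustrative): when pool is given, conv1 keeps stride 1 and an explicit Max/Avg pooling layer applies the resample factor on both the main and skip paths, so the temporal downsampling is decoupled from the convolution. Assumes net_formant's definitions are importable.

import torch

block = ECoGMappingBlock(16, 32, [5, 1, 1], residual=True,
                         resample=[2, 1, 1], pool='Max', shape='3D')
x = torch.randn(2, 16, 32, 15, 15)   # batch, channels, time, 15x15 electrode grid
y = block(x)                          # -> (2, 32, 16, 15, 15): time halved by MaxPool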
+class BackBone(nn.Module):
+    def __init__(self,attentional_mask=True):
+        super(BackBone, self).__init__()
+        self.attentional_mask = attentional_mask
+        self.from_ecog = FromECoG(16,residual=True,shape='2D')
+        self.conv1 = ECoGMappingBlock(16,32,[5,1],residual=True,resample = [1,1],shape='2D')
+        self.conv2 = ECoGMappingBlock(32,64,[3,1],residual=True,resample = [1,1],shape='2D')
+        self.norm_mask = nn.GroupNorm(32,64)
+        self.mask = ln.Conv2d(64,1,[3,1],1,[1,0])
+
+    def forward(self,ecog):
+        x_common_all = []
+        mask_all=[]
+        for d in range(len(ecog)):
+            x = ecog[d]
+            x = x.unsqueeze(1)
+            x = self.from_ecog(x)
+            x = self.conv1(x)
+            x = self.conv2(x)
+            if self.attentional_mask:
+                mask = F.relu(self.mask(F.leaky_relu(self.norm_mask(x),0.2)))
+                mask = mask[:,:,16:]
+                x = x[:,:,16:]
+                mask_all +=[mask]
+            else:
+                # mask = torch.sigmoid(self.mask(F.leaky_relu(self.norm_mask(x),0.2)))
+                # mask = mask[:,:,16:]
+                x = x[:,:,16:]
+                # x = x*mask
+
+            x_common_all +=[x]
+
+        x_common = torch.cat(x_common_all,dim=0)
+        if self.attentional_mask:
+            mask = torch.cat(mask_all,dim=0)
+        return x_common, (mask.squeeze(1) if self.attentional_mask else None)
+
+class ECoGEncoderFormantHeads(nn.Module):
+    def __init__(self,inputs,n_mels,n_formants):
+        super(ECoGEncoderFormantHeads,self).__init__()
+        self.n_mels = n_mels
+        self.f0 = ln.Conv1d(inputs,1,1)
+        self.loudness = ln.Conv1d(inputs,1,1)
+        self.amplitudes = ln.Conv1d(inputs,2,1)
+        self.freq_formants = ln.Conv1d(inputs,n_formants,1)
+        self.bandwidth_formants = ln.Conv1d(inputs,n_formants,1)
+        self.amplitude_formants = ln.Conv1d(inputs,n_formants,1)
+
+    def forward(self,x):
+        loudness = F.relu(self.loudness(x))
+        f0 = F.sigmoid(self.f0(x)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz
+        # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice
+        # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice
+        amplitudes = F.softmax(self.amplitudes(x),dim=1)
+        freq_formants = F.sigmoid(self.freq_formants(x))
+        freq_formants = torch.cumsum(freq_formants,dim=1)
+        bandwidth_formants = F.sigmoid(self.bandwidth_formants(x))
+        amplitude_formants = F.softmax(self.amplitude_formants(x),dim=1)
+        return {'f0':f0,
+                'loudness':loudness,
+                'amplitudes':amplitudes,
+                'freq_formants':freq_formants,
+                'bandwidth_formants':bandwidth_formants,
+                'amplitude_formants':amplitude_formants,}
+
+@ECOG_ENCODER.register("ECoGMappingTransformer")
+class ECoGMapping_Transformer(nn.Module):
+    def __init__(self,n_mels,n_formants,SeqLen=128,hidden_dim=256,dim_feedforward=256,encoder_only=False,attentional_mask=False,n_heads=1,non_local=False):
+        super(ECoGMapping_Transformer, self).__init__()
+        self.n_mels = n_mels
+        self.n_formant = n_formants
+        self.encoder_only = encoder_only
+        self.attentional_mask = attentional_mask
+        self.backbone = BackBone(attentional_mask=attentional_mask)
+        self.position_encoding = build_position_encoding(SeqLen,hidden_dim,'MNI')
+        self.input_proj = ln.Conv2d(64, hidden_dim, kernel_size=1)
+        if non_local:
+            Transformer = TransformerNL
+        else:
+            Transformer = TransformerTS
+        self.transformer = Transformer(d_model=hidden_dim, nhead=n_heads, num_encoder_layers=6,
+                                       num_decoder_layers=6, dim_feedforward=dim_feedforward, dropout=0.1,
+                                       activation="relu", normalize_before=False,
+                                       return_intermediate_dec=False,encoder_only = encoder_only)
+        self.output_proj = ECoGEncoderFormantHeads(hidden_dim,n_mels,n_formants)
+        self.query_embed = nn.Embedding(SeqLen, hidden_dim)
+
+    def forward(self,x,mask_prior,mni):
+        features,mask = self.backbone(x)
+        pos = self.position_encoding(mni)
+        hs = self.transformer(self.input_proj(features), mask if self.attentional_mask else None, self.query_embed.weight, pos)
+        if not self.encoder_only:
+            hs,encoded = hs
+            out = self.output_proj(hs)
+        else:
+            _,encoded = hs
+            encoded = encoded.max(-1)[0]
+            out = self.output_proj(encoded)
+        return out
+
+
diff --git a/run.s b/run.s
index ab8e5ab8..93e71065 100644
--- a/run.s
+++ b/run.s
@@ -1,8 +1,8 @@
 #!/bin/bash
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --cpus-per-task=1
-#SBATCH --gres=gpu:v100:1
+#SBATCH
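A shape walk-through of the new transformer mapper (an untested sketch of my reading; sizes illustrative, and the exact return format depends on transformer_models/transformer.py, which is added later in this series):

import torch

enc = ECoGMapping_Transformer(n_mels=64, n_formants=2, SeqLen=128,
                              hidden_dim=256, dim_feedforward=256,
                              encoder_only=True, attentional_mask=False,
                              n_heads=4, non_local=False)
ecog = [torch.randn(2, 144, 64)]   # one sampling group: B x T x electrodes;
                                   # the backbone trims 16 steps, so T=144 -> SeqLen=128
mni = torch.rand(2, 3, 64)         # B x 3 x electrodes, used only for position encoding
components = enc(ecog, mask_prior=None, mni=mni)
# -> dict of per-component tensors over time (f0, loudness, formants, ...)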
--cpus-per-task=2 +#SBATCH --gres=gpu:p40:2 #SBATCH --time=60:00:00 #SBATCH --mem=64GB #SBATCH --job-name=myTest @@ -14,4 +14,5 @@ module purge module load cudnn/10.0v7.6.2.24 module load cuda/10.0.130 source $HOME/python3.7/bin/activate -python train_alae.py +export PYTHONPATH=$PYTHONPATH:$(pwd) +python train_formant.py diff --git a/train_formant.py b/train_formant.py index 2d6b2a05..1a71b796 100644 --- a/train_formant.py +++ b/train_formant.py @@ -42,26 +42,37 @@ def train(cfg, logger, local_rank, world_size, distributed): model = Model( generator=cfg.MODEL.GENERATOR, encoder=cfg.MODEL.ENCODER, - ecog_encoder=cfg.MODEL.MAPPING_FROM_ECOG, + ecog_encoder_name=cfg.MODEL.MAPPING_FROM_ECOG, spec_chans = cfg.DATASET.SPEC_CHANS, n_formants = cfg.MODEL.N_FORMANTS, with_ecog = cfg.MODEL.ECOG, + hidden_dim=cfg.MODEL.TRANSFORMER.HIDDEN_DIM, + dim_feedforward=cfg.MODEL.TRANSFORMER.DIM_FEEDFORWARD, + encoder_only=cfg.MODEL.TRANSFORMER.ENCODER_ONLY, + attentional_mask=cfg.MODEL.TRANSFORMER.ATTENTIONAL_MASK, + n_heads = cfg.MODEL.TRANSFORMER.N_HEADS, + non_local = cfg.MODEL.TRANSFORMER.NON_LOCAL, ) model.cuda(local_rank) model.train() - if local_rank == 0: - model_s = Model( - generator=cfg.MODEL.GENERATOR, - encoder=cfg.MODEL.ENCODER, - ecog_encoder=cfg.MODEL.MAPPING_FROM_ECOG, - spec_chans = cfg.DATASET.SPEC_CHANS, - n_formants = cfg.MODEL.N_FORMANTS, - with_ecog = cfg.MODEL.ECOG, - ) - model_s.cuda(local_rank) - model_s.eval() - model_s.requires_grad_(False) + model_s = Model( + generator=cfg.MODEL.GENERATOR, + encoder=cfg.MODEL.ENCODER, + ecog_encoder_name=cfg.MODEL.MAPPING_FROM_ECOG, + spec_chans = cfg.DATASET.SPEC_CHANS, + n_formants = cfg.MODEL.N_FORMANTS, + with_ecog = cfg.MODEL.ECOG, + hidden_dim=cfg.MODEL.TRANSFORMER.HIDDEN_DIM, + dim_feedforward=cfg.MODEL.TRANSFORMER.DIM_FEEDFORWARD, + encoder_only=cfg.MODEL.TRANSFORMER.ENCODER_ONLY, + attentional_mask=cfg.MODEL.TRANSFORMER.ATTENTIONAL_MASK, + n_heads = cfg.MODEL.TRANSFORMER.N_HEADS, + non_local = cfg.MODEL.TRANSFORMER.NON_LOCAL, + ) + model_s.cuda(local_rank) + model_s.eval() + model_s.requires_grad_(False) # print(model) if distributed: model = nn.parallel.DistributedDataParallel( @@ -73,7 +84,7 @@ def train(cfg, logger, local_rank, world_size, distributed): model.device_ids = None decoder = model.module.decoder encoder = model.module.encoder - if hasattr(model,'ecog_encoder'): + if hasattr(model.module,'ecog_encoder'): ecog_encoder = model.module.ecog_encoder else: decoder = model.decoder @@ -141,7 +152,7 @@ def train(cfg, logger, local_rank, world_size, distributed): save=local_rank == 0) # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=True,file_name='./training_artifacts/ecog_residual_cycle/model_tmp_lod4.pth') - extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/formantsyth_NY742/model_epoch29.pth') + extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/formantsyth_NY742_constraintonFB_Bconstrainrefined_absfreq/model_epoch29.pth') logger.info("Starting from epoch: %d" % (scheduler.start_epoch())) arguments.update(extra_checkpoint_data) @@ -179,9 +190,11 @@ def train(cfg, logger, local_rank, world_size, distributed): if cfg.MODEL.ECOG: ecog_test = [sample_dict_test['ecog_re_batch_all'][i].to('cuda').float() for i in range(len(sample_dict_test['ecog_re_batch_all']))] mask_prior_test = 
[sample_dict_test['mask_all'][i].to('cuda').float() for i in range(len(sample_dict_test['mask_all']))] + mni_coordinate_test = sample_dict_test['mni_coordinate_all'].to('cuda').float() else: ecog_test = None mask_prior_test = None + mni_coordinate_test = None # sample = next(make_dataloader(cfg, logger, dataset, 32, local_rank)) # sample = (sample / 127.5 - 1.) @@ -205,10 +218,11 @@ def train(cfg, logger, local_rank, world_size, distributed): if cfg.MODEL.ECOG: ecog = [sample_dict_train['ecog_re_batch_all'][j].to('cuda').float() for j in range(len(sample_dict_train['ecog_re_batch_all']))] mask_prior = [sample_dict_train['mask_all'][j].to('cuda').float() for j in range(len(sample_dict_train['mask_all']))] + mni_coordinate = sample_dict_train['mni_coordinate_all'].to('cuda').float() else: ecog = None mask_prior = None - + mni_coordinate = None x = x_orig # x.requires_grad = True # apply_cycle = cfg.MODEL.CYCLE and True @@ -218,27 +232,31 @@ def train(cfg, logger, local_rank, world_size, distributed): # apply_ppl_d = cfg.MODEL.APPLY_PPL_D and True # apply_encoder_guide = (cfg.FINETUNE.ENCODER_GUIDE or cfg.MODEL.W_SUP) and True # apply_sup = cfg.FINETUNE.SPECSUP - + + if (cfg.MODEL.ECOG): optimizer.zero_grad() - Lrec = model(x, ecog=ecog, mask_prior=mask_prior, on_stage = on_stage, ae = False, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP) + Lrec = model(x, ecog=ecog, mask_prior=mask_prior, on_stage = on_stage, ae = False, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP,mni=mni_coordinate) (Lrec).backward() optimizer.step() else: optimizer.zero_grad() - Lrec = model(x, ecog=None, mask_prior=None, on_stage = None, ae = True, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP) + Lrec = model(x, ecog=None, mask_prior=None, on_stage = None, ae = True, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP,mni=mni_coordinate) (Lrec).backward() optimizer.step() + betta = 0.5 ** (cfg.TRAIN.BATCH_SIZE / (10 * 1000.0)) + model_s.lerp(model, betta,w_classifier = cfg.MODEL.W_CLASSIFIER) epoch_end_time = time.time() per_epoch_ptime = epoch_end_time - epoch_start_time if local_rank == 0: + print(3*torch.sigmoid(model.encoder.formant_bandwitdh_ratio)) checkpointer.save("model_epoch%d" % epoch) - save_sample(sample_spec_test,ecog_test,mask_prior_test,encoder,decoder,ecog_encoder=ecog_encoder if cfg.MODEL.ECOG else None,epoch=epoch,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker) - save_sample(x,ecog,mask_prior,encoder,decoder,ecog_encoder=ecog_encoder if cfg.MODEL.ECOG else None,epoch=epoch,mode='train',path=cfg.OUTPUT_DIR,tracker = tracker) + save_sample(sample_spec_test,ecog_test,mask_prior_test,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,epoch=epoch,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker) + save_sample(x,ecog,mask_prior,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,epoch=epoch,mode='train',path=cfg.OUTPUT_DIR,tracker = tracker) if __name__ == "__main__": From c0bab317fd7a373663236f575c716564e0d671b4 Mon Sep 17 00:00:00 2001 From: Ran Wang Date: Sun, 23 Aug 2020 14:12:29 -0400 Subject: [PATCH 11/14] formant_systh --- transformer_models/._backbone.py | Bin 0 -> 4096 bytes transformer_models/._detr.py | Bin 0 -> 4096 bytes transformer_models/._position_encoding.py | Bin 0 -> 4096 bytes transformer_models/._transformer.py | Bin 0 -> 4096 bytes transformer_models/__init__.py | 6 + transformer_models/backbone.py | 119 +++++++ transformer_models/detr.py | 349 ++++++++++++++++++++ transformer_models/matcher.py | 86 +++++ 
 transformer_models/position_encoding.py    | 120 +++++
 transformer_models/segmentation.py         | 363 +++++++++++++++++++++
 transformer_models/transformer.py          | 302 +++++++++++++++++
 transformer_models/transformer_nonlocal.py | 342 +++++++++++++++++++
 12 files changed, 1687 insertions(+)
 create mode 100644 transformer_models/__init__.py
 create mode 100644 transformer_models/backbone.py
 create mode 100644 transformer_models/detr.py
 create mode 100644 transformer_models/matcher.py
 create mode 100644 transformer_models/position_encoding.py
 create mode 100644 transformer_models/segmentation.py
 create mode 100644 transformer_models/transformer.py
 create mode 100644 transformer_models/transformer_nonlocal.py

diff --git a/transformer_models/backbone.py b/transformer_models/backbone.py
new file mode 100644
--- /dev/null
+++ b/transformer_models/backbone.py
+def build_backbone(args):
+    position_embedding = build_position_encoding(args)
+    train_backbone = args.lr_backbone > 0
+    return_interm_layers = args.masks
+    backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
+    model = Joiner(backbone, position_embedding)
+    model.num_channels = backbone.num_channels
+    return model

diff --git a/transformer_models/detr.py b/transformer_models/detr.py
new file mode 100644
index 00000000..d58dcd60
--- /dev/null
+++ b/transformer_models/detr.py
@@ -0,0 +1,349 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+DETR model and criterion classes.
+""" +import torch +import torch.nn.functional as F +from torch import nn + +from util import box_ops +from util.misc import (NestedTensor, nested_tensor_from_tensor_list, + accuracy, get_world_size, interpolate, + is_dist_avail_and_initialized) + +from .backbone import build_backbone +from .matcher import build_matcher +from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, + dice_loss, sigmoid_focal_loss) +from .transformer import build_transformer + + +class DETR(nn.Module): + """ This is the DETR module that performs object detection """ + def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False): + """ Initializes the model. + Parameters: + backbone: torch module of the backbone to be used. See backbone.py + transformer: torch module of the transformer architecture. See transformer.py + num_classes: number of object classes + num_queries: number of object queries, ie detection slot. This is the maximal number of objects + DETR can detect in a single image. For COCO, we recommend 100 queries. + aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. + """ + super().__init__() + self.num_queries = num_queries + self.transformer = transformer + hidden_dim = transformer.d_model + self.class_embed = nn.Linear(hidden_dim, num_classes + 1) + self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) + self.query_embed = nn.Embedding(num_queries, hidden_dim) + self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1) + self.backbone = backbone + self.aux_loss = aux_loss + + def forward(self, samples: NestedTensor): + """Ā The forward expects a NestedTensor, which consists of: + - samples.tensor: batched images, of shape [batch_size x 3 x H x W] + - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels + + It returns a dict with the following elements: + - "pred_logits": the classification logits (including no-object) for all queries. + Shape= [batch_size x num_queries x (num_classes + 1)] + - "pred_boxes": The normalized boxes coordinates for all queries, represented as + (center_x, center_y, height, width). These values are normalized in [0, 1], + relative to the size of each individual image (disregarding possible padding). + See PostProcess for information on how to retrieve the unnormalized bounding box. + - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of + dictionnaries containing the two above keys for each decoder layer. + """ + if not isinstance(samples, NestedTensor): + samples = nested_tensor_from_tensor_list(samples) + features, pos = self.backbone(samples) + + src, mask = features[-1].decompose() + assert mask is not None + hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0] + + outputs_class = self.class_embed(hs) + outputs_coord = self.bbox_embed(hs).sigmoid() + out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]} + if self.aux_loss: + out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord) + return out + + @torch.jit.unused + def _set_aux_loss(self, outputs_class, outputs_coord): + # this is a workaround to make torchscript happy, as torchscript + # doesn't support dictionary with non-homogeneous values, such + # as a dict having both a Tensor and a list. + return [{'pred_logits': a, 'pred_boxes': b} + for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] + + +class SetCriterion(nn.Module): + """ This class computes the loss for DETR. 
+ The process happens in two steps: + 1) we compute hungarian assignment between ground truth boxes and the outputs of the model + 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) + """ + def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses): + """ Create the criterion. + Parameters: + num_classes: number of object categories, omitting the special no-object category + matcher: module able to compute a matching between targets and proposals + weight_dict: dict containing as key the names of the losses and as values their relative weight. + eos_coef: relative classification weight applied to the no-object category + losses: list of all the losses to be applied. See get_loss for list of available losses. + """ + super().__init__() + self.num_classes = num_classes + self.matcher = matcher + self.weight_dict = weight_dict + self.eos_coef = eos_coef + self.losses = losses + empty_weight = torch.ones(self.num_classes + 1) + empty_weight[-1] = self.eos_coef + self.register_buffer('empty_weight', empty_weight) + + def loss_labels(self, outputs, targets, indices, num_boxes, log=True): + """Classification loss (NLL) + targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] + """ + assert 'pred_logits' in outputs + src_logits = outputs['pred_logits'] + + idx = self._get_src_permutation_idx(indices) + target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) + target_classes = torch.full(src_logits.shape[:2], self.num_classes, + dtype=torch.int64, device=src_logits.device) + target_classes[idx] = target_classes_o + + loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight) + losses = {'loss_ce': loss_ce} + + if log: + # TODO this should probably be a separate loss, not hacked in this one here + losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] + return losses + + @torch.no_grad() + def loss_cardinality(self, outputs, targets, indices, num_boxes): + """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes + This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients + """ + pred_logits = outputs['pred_logits'] + device = pred_logits.device + tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) + # Count the number of predictions that are NOT "no-object" (which is the last class) + card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) + card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) + losses = {'cardinality_error': card_err} + return losses + + def loss_boxes(self, outputs, targets, indices, num_boxes): + """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss + targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] + The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 
+ """ + assert 'pred_boxes' in outputs + idx = self._get_src_permutation_idx(indices) + src_boxes = outputs['pred_boxes'][idx] + target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) + + loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') + + losses = {} + losses['loss_bbox'] = loss_bbox.sum() / num_boxes + + loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( + box_ops.box_cxcywh_to_xyxy(src_boxes), + box_ops.box_cxcywh_to_xyxy(target_boxes))) + losses['loss_giou'] = loss_giou.sum() / num_boxes + return losses + + def loss_masks(self, outputs, targets, indices, num_boxes): + """Compute the losses related to the masks: the focal loss and the dice loss. + targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] + """ + assert "pred_masks" in outputs + + src_idx = self._get_src_permutation_idx(indices) + tgt_idx = self._get_tgt_permutation_idx(indices) + + src_masks = outputs["pred_masks"] + + # TODO use valid to mask invalid areas due to padding in loss + target_masks, valid = nested_tensor_from_tensor_list([t["masks"] for t in targets]).decompose() + target_masks = target_masks.to(src_masks) + + src_masks = src_masks[src_idx] + # upsample predictions to the target size + src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], + mode="bilinear", align_corners=False) + src_masks = src_masks[:, 0].flatten(1) + + target_masks = target_masks[tgt_idx].flatten(1) + + losses = { + "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), + "loss_dice": dice_loss(src_masks, target_masks, num_boxes), + } + return losses + + def _get_src_permutation_idx(self, indices): + # permute predictions following indices + batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) + src_idx = torch.cat([src for (src, _) in indices]) + return batch_idx, src_idx + + def _get_tgt_permutation_idx(self, indices): + # permute targets following indices + batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) + tgt_idx = torch.cat([tgt for (_, tgt) in indices]) + return batch_idx, tgt_idx + + def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): + loss_map = { + 'labels': self.loss_labels, + 'cardinality': self.loss_cardinality, + 'boxes': self.loss_boxes, + 'masks': self.loss_masks + } + assert loss in loss_map, f'do you really want to compute {loss} loss?' + return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) + + def forward(self, outputs, targets): + """ This performs the loss computation. + Parameters: + outputs: dict of tensors, see the output specification of the model for the format + targets: list of dicts, such that len(targets) == batch_size. 
+ The expected keys in each dict depends on the losses applied, see each loss' doc + """ + outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} + + # Retrieve the matching between the outputs of the last layer and the targets + indices = self.matcher(outputs_without_aux, targets) + + # Compute the average number of target boxes accross all nodes, for normalization purposes + num_boxes = sum(len(t["labels"]) for t in targets) + num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) + if is_dist_avail_and_initialized(): + torch.distributed.all_reduce(num_boxes) + num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() + + # Compute all the requested losses + losses = {} + for loss in self.losses: + losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) + + # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. + if 'aux_outputs' in outputs: + for i, aux_outputs in enumerate(outputs['aux_outputs']): + indices = self.matcher(aux_outputs, targets) + for loss in self.losses: + if loss == 'masks': + # Intermediate masks losses are too costly to compute, we ignore them. + continue + kwargs = {} + if loss == 'labels': + # Logging is enabled only for the last layer + kwargs = {'log': False} + l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs) + l_dict = {k + f'_{i}': v for k, v in l_dict.items()} + losses.update(l_dict) + + return losses + + +class PostProcess(nn.Module): + """ This module converts the model's output into the format expected by the coco api""" + @torch.no_grad() + def forward(self, outputs, target_sizes): + """ Perform the computation + Parameters: + outputs: raw outputs of the model + target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch + For evaluation, this must be the original image size (before any data augmentation) + For visualization, this should be the image size after data augment, but before padding + """ + out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] + + assert len(out_logits) == len(target_sizes) + assert target_sizes.shape[1] == 2 + + prob = F.softmax(out_logits, -1) + scores, labels = prob[..., :-1].max(-1) + + # convert to [x0, y0, x1, y1] format + boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) + # and from relative [0, 1] to absolute [0, height] coordinates + img_h, img_w = target_sizes.unbind(1) + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) + boxes = boxes * scale_fct[:, None, :] + + results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] + + return results + + +class MLP(nn.Module): + """ Very simple multi-layer perceptron (also called FFN)""" + + def __init__(self, input_dim, hidden_dim, output_dim, num_layers): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x + + +def build(args): + num_classes = 20 if args.dataset_file != 'coco' else 91 + if args.dataset_file == "coco_panoptic": + num_classes = 250 + device = torch.device(args.device) + + backbone = build_backbone(args) + + transformer = build_transformer(args) + + model = DETR( + backbone, + transformer, + num_classes=num_classes, + 
num_queries=args.num_queries, + aux_loss=args.aux_loss, + ) + if args.masks: + model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None)) + matcher = build_matcher(args) + weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef} + weight_dict['loss_giou'] = args.giou_loss_coef + if args.masks: + weight_dict["loss_mask"] = args.mask_loss_coef + weight_dict["loss_dice"] = args.dice_loss_coef + # TODO this is a hack + if args.aux_loss: + aux_weight_dict = {} + for i in range(args.dec_layers - 1): + aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()}) + weight_dict.update(aux_weight_dict) + + losses = ['labels', 'boxes', 'cardinality'] + if args.masks: + losses += ["masks"] + criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict, + eos_coef=args.eos_coef, losses=losses) + criterion.to(device) + postprocessors = {'bbox': PostProcess()} + if args.masks: + postprocessors['segm'] = PostProcessSegm() + if args.dataset_file == "coco_panoptic": + is_thing_map = {i: i <= 90 for i in range(201)} + postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85) + + return model, criterion, postprocessors diff --git a/transformer_models/matcher.py b/transformer_models/matcher.py new file mode 100644 index 00000000..0c291473 --- /dev/null +++ b/transformer_models/matcher.py @@ -0,0 +1,86 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Modules to compute the matching cost and solve the corresponding LSAP. +""" +import torch +from scipy.optimize import linear_sum_assignment +from torch import nn + +from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou + + +class HungarianMatcher(nn.Module): + """This class computes an assignment between the targets and the predictions of the network + + For efficiency reasons, the targets don't include the no_object. Because of this, in general, + there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, + while the others are un-matched (and thus treated as non-objects). 
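A minimal usage sketch (mine; random illustrative values, logits over 91 classes plus no-object):

import torch

outputs = {'pred_logits': torch.randn(2, 100, 92),
           'pred_boxes':  torch.rand(2, 100, 4)}   # cxcywh in [0, 1]
targets = [{'labels': torch.tensor([3, 7]), 'boxes': torch.rand(2, 4)},
           {'labels': torch.tensor([17]),   'boxes': torch.rand(1, 4)}]
matcher = HungarianMatcher(cost_class=1, cost_bbox=5, cost_giou=2)
indices = matcher(outputs, targets)
# -> one (pred_idx, tgt_idx) pair of index tensors per image,
#    each of length min(num_queries, num_target_boxes)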
+ """ + + def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1): + """Creates the matcher + + Params: + cost_class: This is the relative weight of the classification error in the matching cost + cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost + cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost + """ + super().__init__() + self.cost_class = cost_class + self.cost_bbox = cost_bbox + self.cost_giou = cost_giou + assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0" + + @torch.no_grad() + def forward(self, outputs, targets): + """ Performs the matching + + Params: + outputs: This is a dict that contains at least these entries: + "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates + + targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: + "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth + objects in the target) containing the class labels + "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates + + Returns: + A list of size batch_size, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: + len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + bs, num_queries = outputs["pred_logits"].shape[:2] + + # We flatten to compute the cost matrices in a batch + out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] + out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] + + # Also concat the target labels and boxes + tgt_ids = torch.cat([v["labels"] for v in targets]) + tgt_bbox = torch.cat([v["boxes"] for v in targets]) + + # Compute the classification cost. Contrary to the loss, we don't use the NLL, + # but approximate it in 1 - proba[target class]. + # The 1 is a constant that doesn't change the matching, it can be ommitted. + cost_class = -out_prob[:, tgt_ids] + + # Compute the L1 cost between boxes + cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1) + + # Compute the giou cost betwen boxes + cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)) + + # Final cost matrix + C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou + C = C.view(bs, num_queries, -1).cpu() + + sizes = [len(v["boxes"]) for v in targets] + indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))] + return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] + + +def build_matcher(args): + return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou) diff --git a/transformer_models/position_encoding.py b/transformer_models/position_encoding.py new file mode 100644 index 00000000..3ba13535 --- /dev/null +++ b/transformer_models/position_encoding.py @@ -0,0 +1,120 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Various positional encodings for the transformer. 
+""" +import math +import torch +from torch import nn + +from util.misc import NestedTensor + + +class PositionEmbeddingSine(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention is all you need paper, generalized to work on images. + """ + def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, tensor_list: NestedTensor): + x = tensor_list.tensors + mask = tensor_list.mask + assert mask is not None + not_mask = ~mask + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + +class PositionEmbeddingLearned(nn.Module): + """ + Absolute pos embedding, learned. + """ + def __init__(self, num_pos_feats=256): + super().__init__() + self.row_embed = nn.Embedding(50, num_pos_feats) + self.col_embed = nn.Embedding(50, num_pos_feats) + self.reset_parameters() + + def reset_parameters(self): + nn.init.uniform_(self.row_embed.weight) + nn.init.uniform_(self.col_embed.weight) + + def forward(self, tensor_list: NestedTensor): + x = tensor_list.tensors + h, w = x.shape[-2:] + i = torch.arange(w, device=x.device) + j = torch.arange(h, device=x.device) + x_emb = self.col_embed(i) + y_emb = self.row_embed(j) + pos = torch.cat([ + x_emb.unsqueeze(0).repeat(h, 1, 1), + y_emb.unsqueeze(1).repeat(1, w, 1), + ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1) #BxCxWxH + return pos + +class PositionEmbeddingLearnedECoG(nn.Module): + """ + Absolute pos embedding, learned. 
+ """ + def __init__(self, SeqLength, num_pos_feats=256): + super().__init__() + self.SeqLength = SeqLength + self.time_embed = nn.Embedding(SeqLength, num_pos_feats) + # self.elec_embed = nn.Embedding(50, num_pos_feats) + self.elec_embed = nn.Linear(3,num_pos_feats) + self.reset_parameters() + + def reset_parameters(self): + nn.init.uniform_(self.time_embed.weight) + # nn.init.uniform_(self.elec_embed.weight) + + def forward(self, x): + # x: MNI Bx3xE + e = x.shape[-1] + t = self.SeqLength + j = torch.arange(t, device=x.device) + x = x.permute(0,2,1) # BxEx3 + elec_emb = self.elec_embed(x) #BxExnum_pos_feats + time_emb = self.time_embed(j) #Txnum_pos_feats + pos = torch.cat([ + elec_emb.unsqueeze(1).repeat(1, t, 1, 1), + time_emb.unsqueeze(1).repeat(1, e, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1), + ], dim=-1).permute(0, 3, 1, 2)# BxCxTxE + return pos + +def build_position_encoding(SeqLength,hidden_dim,method='MNI'): + N_steps = hidden_dim // 2 + if method in ('v2', 'sine'): + # TODO find a better way of exposing other arguments + position_embedding = PositionEmbeddingSine(N_steps, normalize=True) + elif method in ('v3', 'learned'): + position_embedding = PositionEmbeddingLearned(N_steps) + elif method in ('MNI'): + position_embedding = PositionEmbeddingLearnedECoG(SeqLength,N_steps) + else: + raise ValueError(f"not supported {method}") + + return position_embedding diff --git a/transformer_models/segmentation.py b/transformer_models/segmentation.py new file mode 100644 index 00000000..edfc32ef --- /dev/null +++ b/transformer_models/segmentation.py @@ -0,0 +1,363 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +This file provides the definition of the convolutional heads used to predict masks, as well as the losses +""" +import io +from collections import defaultdict +from typing import List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from PIL import Image + +import util.box_ops as box_ops +from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list + +try: + from panopticapi.utils import id2rgb, rgb2id +except ImportError: + pass + + +class DETRsegm(nn.Module): + def __init__(self, detr, freeze_detr=False): + super().__init__() + self.detr = detr + + if freeze_detr: + for p in self.parameters(): + p.requires_grad_(False) + + hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead + self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0) + self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim) + + def forward(self, samples: NestedTensor): + if not isinstance(samples, NestedTensor): + samples = nested_tensor_from_tensor_list(samples) + features, pos = self.detr.backbone(samples) + + bs = features[-1].tensors.shape[0] + + src, mask = features[-1].decompose() + assert mask is not None + src_proj = self.detr.input_proj(src) + hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1]) + + outputs_class = self.detr.class_embed(hs) + outputs_coord = self.detr.bbox_embed(hs).sigmoid() + out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]} + if self.detr.aux_loss: + out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord) + + # FIXME h_boxes takes the last one computed, keep this in mind + bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask) + + seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, 
features[0].tensors]) + outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1]) + + out["pred_masks"] = outputs_seg_masks + return out + + +def _expand(tensor, length: int): + return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) + + +class MaskHeadSmallConv(nn.Module): + """ + Simple convolutional head, using group norm. + Upsampling is done using a FPN approach + """ + + def __init__(self, dim, fpn_dims, context_dim): + super().__init__() + + inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] + self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1) + self.gn1 = torch.nn.GroupNorm(8, dim) + self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1) + self.gn2 = torch.nn.GroupNorm(8, inter_dims[1]) + self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) + self.gn3 = torch.nn.GroupNorm(8, inter_dims[2]) + self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) + self.gn4 = torch.nn.GroupNorm(8, inter_dims[3]) + self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) + self.gn5 = torch.nn.GroupNorm(8, inter_dims[4]) + self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1) + + self.dim = dim + + self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1) + self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1) + self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_uniform_(m.weight, a=1) + nn.init.constant_(m.bias, 0) + + def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]): + x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) + + x = self.lay1(x) + x = self.gn1(x) + x = F.relu(x) + x = self.lay2(x) + x = self.gn2(x) + x = F.relu(x) + + cur_fpn = self.adapter1(fpns[0]) + if cur_fpn.size(0) != x.size(0): + cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) + x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") + x = self.lay3(x) + x = self.gn3(x) + x = F.relu(x) + + cur_fpn = self.adapter2(fpns[1]) + if cur_fpn.size(0) != x.size(0): + cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) + x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") + x = self.lay4(x) + x = self.gn4(x) + x = F.relu(x) + + cur_fpn = self.adapter3(fpns[2]) + if cur_fpn.size(0) != x.size(0): + cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) + x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") + x = self.lay5(x) + x = self.gn5(x) + x = F.relu(x) + + x = self.out_lay(x) + return x + + +class MHAttentionMap(nn.Module): + """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" + + def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True): + super().__init__() + self.num_heads = num_heads + self.hidden_dim = hidden_dim + self.dropout = nn.Dropout(dropout) + + self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) + self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) + + nn.init.zeros_(self.k_linear.bias) + nn.init.zeros_(self.q_linear.bias) + nn.init.xavier_uniform_(self.k_linear.weight) + nn.init.xavier_uniform_(self.q_linear.weight) + self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 + + def forward(self, q, k, mask: Optional[Tensor] = None): + q = self.q_linear(q) + k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), 
self.k_linear.bias) + qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) + kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) + weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh) + + if mask is not None: + weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf")) + weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights) + weights = self.dropout(weights) + return weights + + +def dice_loss(inputs, targets, num_boxes): + """ + Compute the DICE loss, similar to generalized IOU for masks + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + """ + inputs = inputs.sigmoid() + inputs = inputs.flatten(1) + numerator = 2 * (inputs * targets).sum(1) + denominator = inputs.sum(-1) + targets.sum(-1) + loss = 1 - (numerator + 1) / (denominator + 1) + return loss.sum() / num_boxes + + +def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): + """ + Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + alpha: (optional) Weighting factor in range (0,1) to balance + positive vs negative examples. Default = -1 (no weighting). + gamma: Exponent of the modulating factor (1 - p_t) to + balance easy vs hard examples. 
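A worked number for the focal weighting (my own check, using the function above): a confidently-correct positive is down-weighted by (1 - p_t)**gamma, so easy examples contribute far less than under plain BCE.

import torch

logits  = torch.tensor([[2.1972]])   # sigmoid(2.1972) ~= 0.9
targets = torch.tensor([[1.0]])
loss = sigmoid_focal_loss(logits, targets, num_boxes=1)
# plain BCE would give -log(0.9) ~= 0.105; the focal term scales it by
# (1 - 0.9)**2 = 0.01 and by alpha = 0.25, giving ~= 2.6e-4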
+    Returns:
+        Loss tensor
+    """
+    prob = inputs.sigmoid()
+    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
+    p_t = prob * targets + (1 - prob) * (1 - targets)
+    loss = ce_loss * ((1 - p_t) ** gamma)
+
+    if alpha >= 0:
+        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
+        loss = alpha_t * loss
+
+    return loss.mean(1).sum() / num_boxes
+
+
+class PostProcessSegm(nn.Module):
+    def __init__(self, threshold=0.5):
+        super().__init__()
+        self.threshold = threshold
+
+    @torch.no_grad()
+    def forward(self, results, outputs, orig_target_sizes, max_target_sizes):
+        assert len(orig_target_sizes) == len(max_target_sizes)
+        max_h, max_w = max_target_sizes.max(0)[0].tolist()
+        outputs_masks = outputs["pred_masks"].squeeze(2)
+        outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False)
+        outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()
+
+        for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):
+            img_h, img_w = t[0], t[1]
+            results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
+            results[i]["masks"] = F.interpolate(
+                results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest"
+            ).byte()
+
+        return results
+
+
+class PostProcessPanoptic(nn.Module):
+    """This class converts the output of the model to the final panoptic result, in the format expected by the
+    coco panoptic API """
+
+    def __init__(self, is_thing_map, threshold=0.85):
+        """
+        Parameters:
+            is_thing_map: This is a dict whose keys are the class ids, and the values a boolean indicating whether
+                          the class is a thing (True) or a stuff (False) class
+            threshold: confidence threshold: segments with confidence lower than this will be deleted
+        """
+        super().__init__()
+        self.threshold = threshold
+        self.is_thing_map = is_thing_map
+
+    def forward(self, outputs, processed_sizes, target_sizes=None):
+        """ This function computes the panoptic prediction from the model's predictions.
+        Parameters:
+            outputs: This is a dict coming directly from the model. See the model doc for the content.
+            processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to
+                             the model, i.e. the size after data augmentation but before batching.
+            target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
+                          of each prediction. If left to None, it will default to the processed_sizes
+        """
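+        # Illustrative usage; the names and the "first 80 classes are things" split are
+        # assumptions for the example, not part of this file:
+        #   postprocess = PostProcessPanoptic(is_thing_map={c: c < 80 for c in range(201)})
+        #   pred = postprocess(outputs, processed_sizes)[0]
+        #   pred["png_string"]    # PNG-encoded id map of the merged segments
+        #   pred["segments_info"] # one dict per segment: id / isthing / category_id / area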
+        if target_sizes is None:
+            target_sizes = processed_sizes
+        assert len(processed_sizes) == len(target_sizes)
+        out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"]
+        assert len(out_logits) == len(raw_masks) == len(target_sizes)
+        preds = []
+
+        def to_tuple(tup):
+            if isinstance(tup, tuple):
+                return tup
+            return tuple(tup.cpu().tolist())
+
+        for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
+            out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
+        ):
+            # we filter empty queries and detections below threshold
+            scores, labels = cur_logits.softmax(-1).max(-1)
+            keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold)
+            cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
+            cur_scores = cur_scores[keep]
+            cur_classes = cur_classes[keep]
+            cur_masks = cur_masks[keep]
+            cur_masks = interpolate(cur_masks[None], to_tuple(size), mode="bilinear").squeeze(0)
+            cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])
+
+            h, w = cur_masks.shape[-2:]
+            assert len(cur_boxes) == len(cur_classes)
+
+            # It may be that we have several predicted masks for the same stuff class.
+            # In the following, we track the list of mask ids for each stuff class (they are merged later on)
+            cur_masks = cur_masks.flatten(1)
+            stuff_equiv_classes = defaultdict(lambda: [])
+            for k, label in enumerate(cur_classes):
+                if not self.is_thing_map[label.item()]:
+                    stuff_equiv_classes[label.item()].append(k)
+
+            def get_ids_area(masks, scores, dedup=False):
+                # This helper function creates the final panoptic segmentation image
+                # It also returns the area of the masks that appear on the image
+
+                m_id = masks.transpose(0, 1).softmax(-1)
+
+                if m_id.shape[-1] == 0:
+                    # We didn't detect any mask :(
+                    m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
+                else:
+                    m_id = m_id.argmax(-1).view(h, w)
+
+                if dedup:
+                    # Merge the masks corresponding to the same stuff class
+                    for equiv in stuff_equiv_classes.values():
+                        if len(equiv) > 1:
+                            for eq_id in equiv:
+                                m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
+
+                final_h, final_w = to_tuple(target_size)
+
+                seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
+                seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)
+
+                np_seg_img = (
+                    torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()
+                )
+                m_id = torch.from_numpy(rgb2id(np_seg_img))
+
+                area = []
+                for i in range(len(scores)):
+                    area.append(m_id.eq(i).sum().item())
+                return area, seg_img
+
+            area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
+            if cur_classes.numel() > 0:
+                # We now filter empty masks as long as we find some
+                while True:
+                    filtered_small = torch.as_tensor(
+                        [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device
+                    )
+                    if filtered_small.any().item():
+                        cur_scores = cur_scores[~filtered_small]
+                        cur_classes = cur_classes[~filtered_small]
+                        cur_masks = cur_masks[~filtered_small]
+                        area, seg_img = get_ids_area(cur_masks, cur_scores)
+                    else:
+                        break
+
+            else:
+                cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
+
+            segments_info = []
+            for i, a in enumerate(area):
+                cat = cur_classes[i].item()
+                segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a})
+            del cur_classes
+
+            with io.BytesIO() as out:
+                seg_img.save(out, format="PNG")
+                predictions = {"png_string":
out.getvalue(), "segments_info": segments_info} + preds.append(predictions) + return preds diff --git a/transformer_models/transformer.py b/transformer_models/transformer.py new file mode 100644 index 00000000..611e4877 --- /dev/null +++ b/transformer_models/transformer.py @@ -0,0 +1,302 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +DETR Transformer class. + +Copy-paste from torch.nn.Transformer with modifications: + * positional encodings are passed in MHattention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers +""" +import copy +from typing import Optional, List + +import torch +import torch.nn.functional as F +from torch import nn, Tensor + +class Transformer(nn.Module): + def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, + num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False, + return_intermediate_dec=False,encoder_only = False): + super().__init__() + self.encoder_only = encoder_only + encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, + dropout, activation, normalize_before) + encoder_norm = nn.LayerNorm(d_model) if normalize_before else None + self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) + + if not encoder_only: + decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, + dropout, activation, normalize_before) + decoder_norm = nn.LayerNorm(d_model) + self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, + return_intermediate=return_intermediate_dec) + + self._reset_parameters() + + self.d_model = d_model + self.nhead = nhead + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, src, mask, query_embed, pos_embed): + # flatten NxCxHxW to HWxNxC, mask = NxHxW + bs, c, h, w = src.shape + src = src.flatten(2).permute(2, 0, 1) + pos_embed = pos_embed.flatten(2).permute(2, 0, 1) + if mask is not None: + mask = mask.flatten(1) + try: + memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) + except: + import pdb;pdb.set_trace() + if not self.encoder_only: + query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) + tgt = torch.zeros_like(query_embed) + hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, + pos=pos_embed, query_pos=query_embed) + return hs.permute(1, 2, 0), memory.permute(1, 2, 0).view(bs, c, h, w) + else: + return None,memory.permute(1, 2, 0).view(bs, c, h, w) + + +class TransformerEncoder(nn.Module): + + def __init__(self, encoder_layer, num_layers, norm=None): + super().__init__() + self.layers = _get_clones(encoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + + def forward(self, src, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + output = src + + for layer in self.layers: + output = layer(output, src_mask=mask, + src_key_padding_mask=src_key_padding_mask, pos=pos) + + if self.norm is not None: + output = self.norm(output) + + return output + + +class TransformerDecoder(nn.Module): + + def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): + super().__init__() + self.layers = _get_clones(decoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + self.return_intermediate = return_intermediate + + def forward(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + 
memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + output = tgt + + intermediate = [] + + for layer in self.layers: + output = layer(output, memory, tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + pos=pos, query_pos=query_pos) + if self.return_intermediate: + intermediate.append(self.norm(output)) + + if self.norm is not None: + output = self.norm(output) + if self.return_intermediate: + intermediate.pop() + intermediate.append(output) + + if self.return_intermediate: + return torch.stack(intermediate) + + return output + + +class TransformerEncoderLayer(nn.Module): + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, + src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + q = k = self.with_pos_embed(src, pos) + src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, + key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src = self.norm1(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) + src = src + self.dropout2(src2) + src = self.norm2(src) + return src + + def forward_pre(self, src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + src2 = self.norm1(src) + q = k = self.with_pos_embed(src2, pos) + src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, + key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src2 = self.norm2(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) + src = src + self.dropout2(src2) + return src + + def forward(self, src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(src, src_mask, src_key_padding_mask, pos) + return self.forward_post(src, src_mask, src_key_padding_mask, pos) + + +class TransformerDecoderLayer(nn.Module): + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = 
nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + q = k = self.with_pos_embed(tgt, query_pos) + tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt + + def forward_pre(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + tgt2 = self.norm1(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt2 = self.norm2(tgt) + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout2(tgt2) + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + def forward(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(tgt, memory, tgt_mask, memory_mask, + tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) + return self.forward_post(tgt, memory, tgt_mask, memory_mask, + tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +def build_transformer(args): + return Transformer( + d_model=args.hidden_dim, + dropout=args.dropout, + nhead=args.nheads, + dim_feedforward=args.dim_feedforward, + num_encoder_layers=args.enc_layers, + num_decoder_layers=args.dec_layers, + normalize_before=args.pre_norm, + return_intermediate_dec=True, + ) + + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(F"activation should be relu/gelu, not {activation}.") diff --git a/transformer_models/transformer_nonlocal.py b/transformer_models/transformer_nonlocal.py new file mode 100644 index 00000000..d34b87e7 --- /dev/null +++ 
b/transformer_models/transformer_nonlocal.py @@ -0,0 +1,342 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +DETR Transformer class. + +Copy-paste from torch.nn.Transformer with modifications: + * positional encodings are passed in MHattention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers +""" +import copy +from typing import Optional, List + +import torch +import torch.nn.functional as F +from torch import nn, Tensor +import lreq as ln + + +class NonlocalAttention(nn.Module): + def __init__(self, d_model, nhead, dropout, att_denorm=2): + super(NonlocalAttention, self).__init__() + # Channel multiplier + self.d_model = d_model + self.heads = nhead + self.dropout = dropout + self.att_denorm = att_denorm + self.theta = ln.Conv2d(d_model, d_model // self.att_denorm, 1,1,0, bias=True) #query + self.phi = ln.Conv2d(d_model, d_model // self.att_denorm, 1,1,0, bias=True) #key + self.g = ln.Conv2d(d_model, d_model, 1,1,0, bias=True) #value + self.drop = nn.Dropout(dropout) + + # # Learnable gain parameter + # self.gamma = P(torch.tensor(0.), requires_grad=True) + + def forward(self, query, key, value, attn_mask=None, key_padding_mask=None): + # Apply convs + query = query.contiguous() + key = key.contiguous() + value = value.contiguous() + theta = self.theta(query) + phi = F.max_pool2d(self.phi(key), [2,1]) + g = F.max_pool2d(self.g(value), [2,1]) + # Perform reshapes + self.theta_ = theta.reshape(-1, self.d_model // self.att_denorm//self.heads, self.heads ,query.shape[2] * query.shape[3]) + self.phi_ = phi.reshape(-1, self.d_model // self.att_denorm//self.heads, self.heads, key.shape[2] * key.shape[3] // 2) + g = g.reshape(-1, self.d_model//self.heads, self.heads, value.shape[2] * value.shape[3] // 2) + # Matmul and softmax to get attention maps + self.beta = F.softmax(torch.einsum('bchi,bchj->bhij',self.theta_, self.phi_), -1) + self.beta = self.drop(self.beta) + # self.beta = F.softmax(torch.bmm(self.theta_, self.phi_), -1) + # Attention map times g path + o = torch.einsum('bchj,bhij->bchi',g, self.beta).reshape(-1, self.d_model, query.shape[2], query.shape[3]) + # o = self.o(torch.bmm(g, self.beta.transpose(1,2)).view(-1, self.inputs // 2, x.shape[2], x.shape[3])) + return o, self.beta + +class Transformer(nn.Module): + def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, + num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False, + return_intermediate_dec=False,encoder_only = False): + super().__init__() + self.encoder_only = encoder_only + encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, + dropout, activation, normalize_before) + encoder_norm = nn.LayerNorm([d_model,128,80]) if normalize_before else None + self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) + + if not encoder_only: + decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, + dropout, activation, normalize_before) + decoder_norm = nn.LayerNorm([d_model,128,1]) + self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, + return_intermediate=return_intermediate_dec) + + self._reset_parameters() + + self.d_model = d_model + self.nhead = nhead + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, src, mask, query_embed, pos_embed): + # flatten NxCxHxW to HWxNxC, mask = NxHxW, query_embed: LxC + bs, c, h, w 
= src.shape + # src = src.flatten(2).permute(2, 0, 1) + # pos_embed = pos_embed.flatten(2).permute(2, 0, 1) + if mask is not None: + mask = mask.flatten(1) + memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) + if not self.encoder_only: + query_embed = query_embed.unsqueeze(0).repeat(bs, 1, 1) + query_embed = query_embed.permute(0,2,1).unsqueeze(-1) #BxCxLx1 + tgt = torch.zeros_like(query_embed) + hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, + pos=pos_embed, query_pos=query_embed) + hs = hs.squeeze(-1) + return hs, memory + else: + return None,memory + + +class TransformerEncoder(nn.Module): + + def __init__(self, encoder_layer, num_layers, norm=None): + super().__init__() + self.layers = _get_clones(encoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + + def forward(self, src, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + output = src + + for layer in self.layers: + output = layer(output, src_mask=mask, + src_key_padding_mask=src_key_padding_mask, pos=pos) + + if self.norm is not None: + output = self.norm(output) + + return output + + +class TransformerDecoder(nn.Module): + + def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): + super().__init__() + self.layers = _get_clones(decoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + self.return_intermediate = return_intermediate + + def forward(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + output = tgt + + intermediate = [] + + for layer in self.layers: + output = layer(output, memory, tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + pos=pos, query_pos=query_pos) + if self.return_intermediate: + intermediate.append(self.norm(output)) + + if self.norm is not None: + output = self.norm(output) + if self.return_intermediate: + intermediate.pop() + intermediate.append(output) + + if self.return_intermediate: + return torch.stack(intermediate) + + return output + + +class TransformerEncoderLayer(nn.Module): + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False): + super().__init__() + self.self_attn = NonlocalAttention(d_model, nhead, dropout=dropout) + + # Implementation of Feedforward model + self.linear1 = ln.Conv2d(d_model, dim_feedforward,1) + self.dropout = nn.Dropout(dropout) + self.linear2 = ln.Conv2d(dim_feedforward, d_model,1) + + self.norm1 = nn.LayerNorm([d_model,128,80]) + self.norm2 = nn.LayerNorm([d_model,128,80]) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, + src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + q = k = self.with_pos_embed(src, pos) + src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, + key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src = self.norm1(src) + src2 = 
self.linear2(self.dropout(self.activation(self.linear1(src)))) + src = src + self.dropout2(src2) + src = self.norm2(src) + return src + + def forward_pre(self, src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + src2 = self.norm1(src) + q = k = self.with_pos_embed(src2, pos) + src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, + key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src2 = self.norm2(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) + src = src + self.dropout2(src2) + return src + + def forward(self, src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(src, src_mask, src_key_padding_mask, pos) + return self.forward_post(src, src_mask, src_key_padding_mask, pos) + + +class TransformerDecoderLayer(nn.Module): + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False): + super().__init__() + self.self_attn = NonlocalAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = NonlocalAttention(d_model, nhead, dropout=dropout) + + # Implementation of Feedforward model + self.linear1 = ln.Conv2d(d_model, dim_feedforward,1) + self.dropout = nn.Dropout(dropout) + self.linear2 = ln.Conv2d(dim_feedforward, d_model,1) + + self.norm1 = nn.LayerNorm([d_model,128,1]) + self.norm2 = nn.LayerNorm([d_model,128,1]) + self.norm3 = nn.LayerNorm([d_model,128,1]) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + q = k = self.with_pos_embed(tgt, query_pos) + tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt + + def forward_pre(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + tgt2 = self.norm1(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt2 = self.norm2(tgt) + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = 
tgt + self.dropout2(tgt2) + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + def forward(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(tgt, memory, tgt_mask, memory_mask, + tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) + return self.forward_post(tgt, memory, tgt_mask, memory_mask, + tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +def build_transformer(args): + return Transformer( + d_model=args.hidden_dim, + dropout=args.dropout, + nhead=args.nheads, + dim_feedforward=args.dim_feedforward, + num_encoder_layers=args.enc_layers, + num_decoder_layers=args.dec_layers, + normalize_before=args.pre_norm, + return_intermediate_dec=True, + ) + + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(F"activation should be relu/gelu, not {activation}.") From b5ff3692dbe18912c42714ad33526afd760247da Mon Sep 17 00:00:00 2001 From: Ran Wang Date: Wed, 26 Aug 2020 15:17:18 -0400 Subject: [PATCH 12/14] formantv2 --- ECoGDataSet.py | 3 +- configs/ecog_style2.yaml | 4 +- formant_systh.py | 105 +++++++++++----- model_formant.py | 25 +++- net_formant.py | 254 ++++++++++++++++++++++++++++----------- sample1_1.npy | Bin 131200 -> 0 bytes sample1_2.npy | Bin 131200 -> 0 bytes sample2_1.npy | Bin 131200 -> 0 bytes sample2_2.npy | Bin 131200 -> 0 bytes sample3_1.npy | Bin 131200 -> 0 bytes sample3_2.npy | Bin 131200 -> 0 bytes train_formant.py | 9 +- 12 files changed, 293 insertions(+), 107 deletions(-) delete mode 100644 sample1_1.npy delete mode 100644 sample1_2.npy delete mode 100644 sample2_1.npy delete mode 100644 sample2_2.npy delete mode 100644 sample3_1.npy delete mode 100644 sample3_2.npy diff --git a/ECoGDataSet.py b/ECoGDataSet.py index 1fbdfa9a..db5b21f2 100644 --- a/ECoGDataSet.py +++ b/ECoGDataSet.py @@ -429,7 +429,8 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor wave_+=[wavearray] if self.Prod: - spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_denoise_16k.mat'),'r') + spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_denoise_16k_wide.mat'),'r') + # spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_denoise_16k.mat'),'r') # spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_16k.mat'),'r') spkr_re = np.asarray(spkr_redata['TFlog']) spkr_re = signal.resample(spkr_re,int(1.0*spkr_re.shape[0]/self.ORG_TF_FS*self.DOWN_TF_FS),axis=0) diff --git a/configs/ecog_style2.yaml b/configs/ecog_style2.yaml index 10916c6a..a7ebdb10 100644 --- a/configs/ecog_style2.yaml +++ b/configs/ecog_style2.yaml @@ -21,7 +21,7 @@ DATASET: SUBJECT: ['NY742'] MODEL: #####TAKE OFF CHECKLIST!!!######## - N_FORMANTS: 2 + N_FORMANTS: 3 LESS_TEMPORAL_FEATURE: True LATENT_SPACE_SIZE: 128 LAYER_COUNT: 6 @@ -66,7 +66,7 @@ MODEL: NON_LOCAL: True # ATTENTION: [] # OUTPUT_DIR: 
training_artifacts/debug -OUTPUT_DIR: training_artifacts/formantsyth_NY742_constraintonFB_Bconstrainrefined_absfreq +OUTPUT_DIR: training_artifacts/formantsythv2wide_NY742_constraintonFB_Bconstrainrefined_absfreq_3formants # OUTPUT_DIR: training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_noprogressive_HBw_ppl_ppld_localreg_ecogf_w_spec_sup # OUTPUT_DIR: training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_ppl_ppld # OUTPUT_DIR: training_artifacts/ecog_residual_cycle_attention3264wStyleIN_specchan64_more_attentfeatures_heads4 diff --git a/formant_systh.py b/formant_systh.py index c1f099f1..f981e83a 100755 --- a/formant_systh.py +++ b/formant_systh.py @@ -12,6 +12,9 @@ from torch.nn.parameter import Parameter from custom_adam import LREQAdam from ECoGDataSet import ECoGDataset +from net_formant import mel_scale +import matplotlib.pyplot as plt +# from matplotlib.pyplot import ion; ion() def freq_coloring(sample,ind,color='r'): @@ -39,21 +42,53 @@ def voicing_coloring(sample,amplitude): def color_spec(spec,components,n_mels): clrs = ['g','y','b','m','c'] - sample_in = spec.repeat(1,3,1,1) - sample_in = sample_in * 0.5 + 0.5 + # sample_in = spec.repeat(1,3,1,1) + # sample_in = sample_in * 0.5 + 0.5 f0=(components['f0']*n_mels).int().clamp(min=0,max=n_mels-1) - formants_freqs=(components['freq_formants']*n_mels).int().clamp(min=0,max=n_mels-1) + formants_freqs=(components['freq_formants_hamon']*n_mels).int().clamp(min=0,max=n_mels-1) + sample_in = spec sample_in_color_voicing = sample_in.clone() sample_in_color_voicing = voicing_coloring(sample_in_color_voicing,components['amplitudes']) - sample_in_color_freq = sample_in.clone() - sample_in_color_freq = sample_in_color_freq/2 - sample_in_color_freq = freq_coloring(sample_in_color_freq,f0,'r') - for j in range(formants_freqs.shape[1]): - sample_in_color_freq = freq_coloring(sample_in_color_freq,formants_freqs[:,j].unsqueeze(1),clrs[j]) - return sample_in_color_voicing,sample_in_color_freq + sample_in_color_hamon_voicing = sample_in.clone() + sample_in_color_hamon_voicing = voicing_coloring(sample_in_color_hamon_voicing,components['amplitudes_h']) + # sample_in_color_freq = sample_in.clone() + # sample_in_color_freq = sample_in_color_freq/2 + # sample_in_color_freq = freq_coloring(sample_in_color_freq,f0,'r') + # for j in range(formants_freqs.shape[1]): + # sample_in_color_freq = freq_coloring(sample_in_color_freq,formants_freqs[:,j].unsqueeze(1),clrs[j]) + return sample_in_color_voicing,sample_in_color_hamon_voicing -def save_sample(sample,ecog,mask_prior,mni,encoder,decoder,ecog_encoder,epoch,mode='test',path='training_artifacts/formantsysth_voicingandunvoicing_loudness',tracker=None): +def subfigure_plot(ax,spec,components,n_mels,which_formant='hamon',formant_line=True,title=None): + clrs = ['g','y','b','m','c'] + + if formant_line: + + ax.imshow(np.clip(1-spec.detach().cpu().numpy().squeeze().T,0,1),vmin=0.0,vmax=1.0) + f0=(components['f0']*n_mels).clamp(min=0,max=n_mels-1) + formants_freqs=(components['freq_formants_'+which_formant]*n_mels).clamp(min=0,max=n_mels-1) + formants_freqs_hz=(components['freq_formants_'+which_formant+'_hz']) + ax.plot(f0.squeeze().detach().cpu().numpy(),color='r',linewidth=1,label='f0') + for i in range(formants_freqs.shape[1]): + alpha = components['amplitude_formants_'+which_formant][:,i].squeeze().detach().cpu().numpy() + ax.plot(formants_freqs[:,i].squeeze().detach().cpu().numpy(),color=clrs[i],linewidth=1,label='f'+str(i+1)) + minimum = 
mel_scale(n_mels,formants_freqs_hz[:,i] - components['bandwidth_formants_'+which_formant+'_hz'][:,i]/2).clamp(min=0).squeeze().detach().cpu().numpy() + maximum = mel_scale(n_mels,formants_freqs_hz[:,i] + components['bandwidth_formants_'+which_formant+'_hz'][:,i]/2).clamp(max=n_mels-1).squeeze().detach().cpu().numpy() + # minimum = (formants_freqs[:,i] - components['bandwidth_formants_'+which_formant][:,i]/2).squeeze().detach().cpu().numpy() + # maximum = (formants_freqs[:,i] + components['bandwidth_formants_'+which_formant][:,i]/2).squeeze().detach().cpu().numpy() + ax.fill_between(range(minimum.shape[0]),minimum,maximum,where=(minimum<=maximum),color=clrs[i],alpha=0.2) + ax.legend() + else: + ax.imshow(np.clip(spec.detach().cpu().numpy().squeeze().T,0,1),vmin=0.0,vmax=1.0) + ax.set_ylim(0,64) + if title is not None: + ax.set_title(title) + + +def save_sample(sample,ecog,mask_prior,mni,encoder,decoder,ecog_encoder,epoch,label,mode='test',path='training_artifacts/formantsysth_voicingandunvoicing_loudness',tracker=None): os.makedirs(path, exist_ok=True) + labels = () + for i in range(len(label)): + labels += label[i] with torch.no_grad(): encoder.eval() decoder.eval() @@ -68,6 +103,7 @@ def save_sample(sample,ecog,mask_prior,mni,encoder,decoder,ecog_encoder,epoch,mo sample_in_color_voicing_ecog_all = torch.tensor([]) rec_ecog_all = torch.tensor([]) n_mels = sample.shape[-1] + fig,axs = plt.subplots(6,sample.shape[0],figsize=(5*sample.shape[0],15)) if ecog_encoder is None else plt.subplots(11,sample.shape[0],figsize=(5*sample.shape[0],30)) for i in range(0,sample.shape[0],1): sample_in = sample[i:np.minimum(i+1,sample.shape[0])] if ecog_encoder is not None: @@ -76,35 +112,50 @@ def save_sample(sample,ecog,mask_prior,mni,encoder,decoder,ecog_encoder,epoch,mo mni_in = mni[i:np.minimum(i+1,sample.shape[0])] components = encoder(sample_in) rec = decoder(components) - sample_in_color_voicing,sample_in_color_freq = color_spec(sample_in,components,n_mels) + sample_in = sample_in.repeat(1,3,1,1) + sample_in = sample_in * 0.5 + 0.5 rec = rec.repeat(1,3,1,1) rec = rec * 0.5 + 0.5 - sample_in_all = torch.cat([sample_in_all,sample_in],dim=0) - sample_in_color_freq_all = torch.cat([sample_in_color_freq_all,sample_in_color_freq],dim=0) - sample_in_color_voicing_all = torch.cat([sample_in_color_voicing_all,sample_in_color_voicing],dim=0) - rec_all = torch.cat([rec_all,rec],dim=0) + sample_in_color_voicing,sample_in_color_hamon_voicing = color_spec(sample_in,components,n_mels) + subfigure_plot(axs[0,i],sample_in,components,n_mels,formant_line=False,title='\''+labels[i]+'\'') + subfigure_plot(axs[1,i],sample_in,components,n_mels,formant_line=True,title='formants_hamon') + subfigure_plot(axs[2,i],sample_in,components,n_mels,which_formant='noise',formant_line=True,title='formants_noise') + subfigure_plot(axs[3,i],sample_in_color_voicing,components,n_mels,formant_line=False,title='alpha') + subfigure_plot(axs[4,i],sample_in_color_hamon_voicing,components,n_mels,formant_line=False,title='beta') + subfigure_plot(axs[5,i],rec,components,n_mels,formant_line=False,title='rec') + + + # sample_in_all = torch.cat([sample_in_all,sample_in],dim=0) + # sample_in_color_freq_all = torch.cat([sample_in_color_freq_all,sample_in_color_freq],dim=0) + # sample_in_color_voicing_all = torch.cat([sample_in_color_voicing_all,sample_in_color_voicing],dim=0) + # rec_all = torch.cat([rec_all,rec],dim=0) if ecog_encoder is not None: components_ecog = ecog_encoder(ecog_in,mask_prior_in,mni=mni_in) rec_ecog = decoder(components_ecog) 
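+                # components_ecog is expected to carry the same keys as the speech
+                # encoder's output (f0, loudness, amplitudes, formant frequencies /
+                # bandwidths / amplitudes), so the same differentiable decoder renders
+                # both; axs rows 6-10 below mirror rows 1-5 with ECoG-predicted components.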
                rec_ecog = rec_ecog.repeat(1,3,1,1)
                rec_ecog = rec_ecog * 0.5 + 0.5
-                sample_in_color_voicing_ecog,sample_in_color_freq_ecog = color_spec(sample_in,components_ecog,n_mels)
-                sample_in_color_freq_ecog_all = torch.cat([sample_in_color_freq_ecog_all,sample_in_color_freq_ecog],dim=0)
-                sample_in_color_voicing_ecog_all = torch.cat([sample_in_color_voicing_ecog_all,sample_in_color_voicing_ecog],dim=0)
-                rec_ecog_all = torch.cat([rec_ecog_all,rec_ecog],dim=0)
-        sample_in_all = sample_in_all.repeat(1,3,1,1)*0.5+0.5
-        if ecog_encoder is None:
-            resultsample = torch.cat([sample_in_all, sample_in_color_freq_all,sample_in_color_voicing_all, rec_all], dim=0)
-        else:
-            resultsample = torch.cat([sample_in_all, sample_in_color_freq_all,sample_in_color_voicing_all, rec_all,sample_in_color_freq_ecog_all,sample_in_color_voicing_ecog_all, rec_ecog_all], dim=0)
-        resultsample = resultsample.transpose(-2,-1)
-        resultsample = resultsample.cpu()
+                sample_in_color_voicing_ecog,sample_in_color_hamon_voicing_ecog = color_spec(sample_in,components_ecog,n_mels)
+                subfigure_plot(axs[6,i],sample_in,components_ecog,n_mels,formant_line=True,title='formants_ecog')
+                subfigure_plot(axs[7,i],sample_in,components_ecog,n_mels,which_formant='noise',formant_line=True,title='formants_noise_ecog')
+                subfigure_plot(axs[8,i],sample_in_color_voicing_ecog,components_ecog,n_mels,formant_line=False,title='alpha_ecog')
+                subfigure_plot(axs[9,i],sample_in_color_hamon_voicing_ecog,components_ecog,n_mels,formant_line=False,title='beta_ecog')
+                subfigure_plot(axs[10,i],rec_ecog,components_ecog,n_mels,formant_line=False,title='rec_ecog')
+                # sample_in_color_freq_ecog_all = torch.cat([sample_in_color_freq_ecog_all,sample_in_color_freq_ecog],dim=0)
+                # sample_in_color_voicing_ecog_all = torch.cat([sample_in_color_voicing_ecog_all,sample_in_color_voicing_ecog],dim=0)
+                # rec_ecog_all = torch.cat([rec_ecog_all,rec_ecog],dim=0)
+        # sample_in_all = sample_in_all.repeat(1,3,1,1)*0.5+0.5
+        # import pdb;pdb.set_trace()
         if mode == 'train':
             f = os.path.join(path,'sample_train_%d.png' % (epoch + 1))
         if mode == 'test':
             f = os.path.join(path,'sample_%d.png' % (epoch + 1))
-        save_image(resultsample, f, nrow=resultsample.shape[0]//(4 if ecog_encoder is None else 7))
+        # import pdb;pdb.set_trace()
+        fig.savefig(f, bbox_inches='tight',dpi=80)
+        plt.close(fig)
+        # save_image(resultsample, f, nrow=resultsample.shape[0]//(4 if ecog_encoder is None else 7))
+
         if mode == 'test':
             tracker.register_means(epoch)
     return
diff --git a/model_formant.py b/model_formant.py
index 85e5db86..fcbe8a00 100644
--- a/model_formant.py
+++ b/model_formant.py
@@ -53,14 +53,33 @@ def encode(self, spec):
     def forward(self, spec, ecog, mask_prior, on_stage, ae, tracker, encoder_guide, mni=None):
         if ae:
             self.encoder.requires_grad_(True)
-            rec = self.generate_fromspec(spec)
+            # rec = self.generate_fromspec(spec)
+            components = self.encoder(spec)
+
+            rec = self.decoder.forward(components)
             Lae = torch.mean((rec - spec).abs())
+            if components['freq_formants_hamon'].shape[1] > 1:
+                for formant in range(components['freq_formants_hamon'].shape[1]-1,0,-1):
+                    components_copy = components.copy()  # shallow dict copy: slicing below must not clobber the encoder output
+                    components_copy['freq_formants_hamon'] = components['freq_formants_hamon'][:,:formant]
+                    components_copy['freq_formants_hamon_hz'] = components['freq_formants_hamon_hz'][:,:formant]
+                    components_copy['bandwidth_formants_hamon'] = components['bandwidth_formants_hamon'][:,:formant]
+                    components_copy['bandwidth_formants_hamon_hz'] = components['bandwidth_formants_hamon_hz'][:,:formant]
+
components_copy['amplitude_formants_hamon'] = components['amplitude_formants_hamon'][:,:formant] + rec = self.decoder.forward(components_copy) + Lae += torch.mean((rec - spec).abs()) tracker.update(dict(Lae=Lae)) - return Lae + + from net_formant import mel_scale + thres = mel_scale(self.spec_chans,4000,pt=False).astype(np.int32) + explosive=torch.sign(torch.mean(spec[...,thres:],dim=-1)-torch.mean(spec[...,:thres],dim=-1))*0.5+0.5 + Lexp = torch.mean((components['amplitudes'][:,0:1]-components['amplitudes'][:,1:2])*explosive) + return Lae + Lexp else: self.encoder.requires_grad_(False) rec,components_ecog = self.generate_fromecog(ecog,mask_prior,mni=mni,return_components=True) - Lrec = torch.mean((rec - spec).abs()) + Lrec = torch.mean((rec - spec)**2) + # Lrec = torch.mean((rec - spec).abs()) tracker.update(dict(Lrec=Lrec)) Lcomp = 0 if encoder_guide: diff --git a/net_formant.py b/net_formant.py index cd632ed3..9aa43e77 100644 --- a/net_formant.py +++ b/net_formant.py @@ -13,13 +13,28 @@ from transformer_models.transformer import Transformer as TransformerTS from transformer_models.transformer_nonlocal import Transformer as TransformerNL -def mel_scale(n_mels,hz): +# def mel_scale(n_mels,hz,min_octave=-31.,max_octave=95.): +def mel_scale(n_mels,hz,min_octave=-58.,max_octave=100.,pt=True): #take absolute hz, return abs mel - return (torch.log2(hz/440)+31/24)*24*n_mels/126 - -def inverse_mel_scale(mel): + # return (torch.log2(hz/440)+31/24)*24*n_mels/126 + if pt: + return (torch.log2(hz/440.)-min_octave/24.)*24*n_mels/(max_octave-min_octave) + else: + return (np.log2(hz/440.)-min_octave/24.)*24*n_mels/(max_octave-min_octave) + +# def inverse_mel_scale(mel,min_octave=-31.,max_octave=95.): +def inverse_mel_scale(mel,min_octave=-58.,max_octave=100.): #take normalized mel, return absolute hz - return 440*2**(mel*126/24-31/24) + # return 440*2**(mel*126/24-31/24) + return 440*2**(mel*(max_octave-min_octave)/24.+min_octave/24.) + +def bandwidth_mel(freqs_hz,bandwidth_hz,n_mels): + # input hz bandwidth, output abs bandwidth on mel + bandwidth_upper = freqs_hz+bandwidth_hz/2. 
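+    # Mel is logarithmic in hz, so a fixed hz bandwidth spans more mel bins at low
+    # frequencies than at high ones. Rough example, assuming n_mels=64 and the default
+    # min/max_octave above: a 200 hz band centered at 1000 hz covers ~2.8 mel bins,
+    # while the same band centered at 4000 hz covers only ~0.7 bins.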
+ bandwidth_lower = torch.clamp(freqs_hz-bandwidth_hz/2.,min=1) + bandwidth = mel_scale(n_mels,bandwidth_upper) - mel_scale(n_mels,bandwidth_lower) + return bandwidth + @GENERATORS.register("GeneratorFormant") class FormantSysth(nn.Module): @@ -28,10 +43,22 @@ def __init__(self, n_mels=64, k=30): self.n_mels = n_mels self.k = k self.timbre = Parameter(torch.Tensor(1,1,n_mels)) + self.timbre_mapping = nn.Sequential( + ln.Conv1d(1,128,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,128,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,2,1), + nn.Sigmoid(), + ) + self.prior_exp = np.array([0.4963,0.0745,1.9018]) + self.timbre_parameter = Parameter(torch.Tensor(2)) # self.silient = Parameter(torch.Tensor(1,1,n_mels)) self.silient = -1 with torch.no_grad(): nn.init.constant_(self.timbre,1.0) + nn.init.constant_(self.timbre_parameter[0],7) + nn.init.constant_(self.timbre_parameter[1],0.004) # nn.init.constant_(self.silient,-1.0) def formant_mask(self,freq,bandwith,amplitude): @@ -44,29 +71,64 @@ def formant_mask(self,freq,bandwith,amplitude): freq = freq.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants bandwith = bandwith.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants amplitude = amplitude.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants - masks = amplitude*torch.exp(-0.693*(grid_freq-freq)**2/(2*(bandwith+0.001)**2)) #B,time,freqchans, formants - masks = masks.unsqueeze(dim=1) #B,1,time,freqchans + # masks = amplitude*torch.exp(-0.693*(grid_freq-freq)**2/(2*(bandwith+0.001)**2)) #B,time,freqchans, formants + masks = amplitude*torch.exp(-(grid_freq-freq)**2/(2*(bandwith/np.sqrt(2*np.log(2))+0.001)**2)) #B,time,freqchans, formants + masks = masks.unsqueeze(dim=1) #B,1,time,freqchans, formants + return masks + + def formant_mask_hz2mel(self,freq_hz,bandwith_hz,amplitude): + # freq, bandwith, amplitude: B*formants*time + freq_cord = torch.arange(self.n_mels) + time_cord = torch.arange(freq_hz.shape[2]) + grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) + grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq_hz = inverse_mel_scale(grid_freq/(self.n_mels*1.0)) + freq_hz = freq_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + bandwith_hz = bandwith_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + amplitude = amplitude.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + masks = amplitude*torch.exp(-0.693*(grid_freq_hz-freq_hz)**2/(2*(bandwith_hz+0.01)**2)) #B,time,freqchans, formants + masks = masks.unsqueeze(dim=1) #B,1,time,freqchans, formants return masks - def voicing(self,f0): + def voicing(self,f0_hz): #f0: B*1*time, hz freq_cord = torch.arange(self.n_mels) - time_cord = torch.arange(f0.shape[2]) + time_cord = torch.arange(f0_hz.shape[2]) grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 - f0 = f0.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, 1 - f0 = f0.repeat([1,1,1,self.k]) #B,time,1, self.k - f0 = f0*(torch.arange(self.k)+1).reshape([1,1,1,self.k]) - bandwith = 24.7*(f0*4.37/1000+1) - bandwith_lower = torch.clamp(f0-bandwith/2,min=1) - bandwith_upper = f0+bandwith/2 - bandwith = mel_scale(self.n_mels,bandwith_upper) - mel_scale(self.n_mels,bandwith_lower) - f0 = mel_scale(self.n_mels,f0) - # hamonics = torch.exp(-(grid_freq-f0)**2/(2*bandwith**2)) #gaussian - hamonics = 
(1-((grid_freq-f0)/(3*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(3*bandwith)-0.5)*0.5+0.5) #welch + grid_freq_hz = inverse_mel_scale(grid_freq/(self.n_mels*1.0)) + f0_hz = f0_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, 1 + f0_hz = f0_hz.repeat([1,1,1,self.k]) #B,time,1, self.k + f0_hz = f0_hz*(torch.arange(self.k)+1).reshape([1,1,1,self.k]) + bandwith_hz = 24.7*(f0_hz*4.37/1000+1) + bandwith = bandwidth_mel(f0_hz,bandwith_hz,self.n_mels) + # bandwith_lower = torch.clamp(f0-bandwith/2,min=1) + # bandwith_upper = f0+bandwith/2 + # bandwith = mel_scale(self.n_mels,bandwith_upper) - mel_scale(self.n_mels,bandwith_lower) + f0 = mel_scale(self.n_mels,f0_hz) + # sigma = bandwith/(np.sqrt(2*np.log(2))); + sigma = bandwith/(2*np.sqrt(2*np.log(2))); + # hamonics = torch.exp(-(grid_freq-f0)**2/(2*sigma**2)) #gaussian + # hamonics = (1-((grid_freq_hz-f0_hz)/(2*bandwith_hz/2))**2)*(-torch.sign(torch.abs(grid_freq_hz-f0_hz)/(2*bandwith_hz)-0.5)*0.5+0.5) #welch + switch = mel_scale(self.n_mels,torch.abs(self.timbre_parameter[0])*f0_hz[...,0]).unsqueeze(1) + slop = (torch.abs(self.timbre_parameter[1])*f0_hz[...,0]).unsqueeze(1) + freq_cord_reshape = freq_cord.reshape([1,1,1,self.n_mels]) + hamonics = (1-((grid_freq-f0)/(2.5*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(2.5*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = (1-((grid_freq-f0)/(3*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(3*bandwith)-0.5)*0.5+0.5) #welch # hamonics = torch.cos(np.pi*torch.abs(grid_freq-f0)/(4*bandwith))**2*(-torch.sign(torch.abs(grid_freq-f0)/(4*bandwith)-0.5)*0.5+0.5) #hanning - hamonics = (hamonics.sum(dim=-1)*self.timbre).unsqueeze(dim=1) # B,1,T,F + # hamonics = (hamonics.sum(dim=-1)).unsqueeze(dim=1) # B,1,T,F + # condition = (torch.sign(freq_cord_reshape-switch)*0.5+0.5) + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-slop*(freq_cord_reshape-switch)*condition)-1)*condition) # B,1,T,F + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)-torch.abs(self.prior_exp_parameter[2])) * torch.exp(-torch.abs(self.prior_exp_parameter[1])*freq_cord.reshape([1,1,1,self.n_mels])) + torch.abs(self.prior_exp_parameter[2]) # B,1,T,F + + timbre_parameter = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]).unsqueeze(1) + condition = (torch.sign(freq_cord_reshape-timbre_parameter[...,0:1]*self.n_mels)*0.5+0.5) + hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*timbre_parameter[...,1:2]*(freq_cord_reshape-timbre_parameter[...,0:1]*self.n_mels)*condition)-1)*condition) # B,1,T,F + # timbre = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]) + # hamonics = (hamonics.sum(dim=-1)*timbre).unsqueeze(dim=1) # B,1,T,F + # hamonics = (hamonics.sum(dim=-1)*self.timbre).unsqueeze(dim=1) # B,1,T,F return hamonics def unvoicing(self,f0): @@ -75,41 +137,45 @@ def unvoicing(self,f0): def forward(self,components): # f0: B*1*T, amplitudes: B*2(voicing,unvoicing)*T, freq_formants,bandwidth_formants,amplitude_formants: B*formants*T amplitudes = components['amplitudes'].unsqueeze(dim=-1) + amplitudes_h = components['amplitudes_h'].unsqueeze(dim=-1) loudness = components['loudness'].unsqueeze(dim=-1) f0_hz = inverse_mel_scale(components['f0']) + # import pdb;pdb.set_trace() self.hamonics = self.voicing(f0_hz) self.noise = self.unvoicing(f0_hz) - freq_formants = components['freq_formants']*self.n_mels - # ### ratio on mel - # freq_formants_hz = inverse_mel_scale(freq_formants/self.n_mels) - # bandwidth_formants_hz = 
self.formant_bandwitdh_ratio*freq_formants_hz
-        # bandwith_lower = torch.clamp(freq_formants_hz-bandwidth_formants_hz/2,min=0.001)
-        # bandwith_upper = freq_formants_hz+bandwidth_formants_hz/2
-        # bandwidth_formants = mel_scale(self.n_mels,bandwith_upper) - mel_scale(self.n_mels,bandwith_lower)
-        # ###
-        # ### ratio on hz
-        # bandwidth_formants = self.formant_bandwitdh_ratio*freq_formants
-        # #####
-        bandwidth_formants = components['bandwidth_formants']*self.n_mels
+        # freq_formants = components['freq_formants']*self.n_mels
+        # bandwidth_formants = components['bandwidth_formants']*self.n_mels
         # excitation = amplitudes[:,0:1]*hamonics
         # excitation = loudness*(amplitudes[:,0:1]*hamonics)
-        self.excitation = loudness*(amplitudes[:,0:1]*self.hamonics + amplitudes[:,-1:]*self.noise)
-        self.mask = self.formant_mask(freq_formants,bandwidth_formants,components['amplitude_formants'])
-        self.mask_sum = self.mask.sum(dim=-1)
-        speech = self.excitation*self.mask_sum + self.silient*torch.ones(self.mask_sum.shape)
+        self.excitation_hamon = loudness*amplitudes[:,0:1]*(amplitudes_h[:,0:1]*self.hamonics + amplitudes_h[:,-1:]*self.noise)
+        # self.excitation_hamon = loudness*amplitudes[:,0:1]*self.hamonics
+        self.excitation_noise = loudness*amplitudes[:,-1:]*self.noise
+        self.mask_hamon = self.formant_mask_hz2mel(components['freq_formants_hamon_hz'],components['bandwidth_formants_hamon_hz'],components['amplitude_formants_hamon'])
+        self.mask_noise = self.formant_mask_hz2mel(components['freq_formants_noise_hz'],components['bandwidth_formants_noise_hz'],components['amplitude_formants_noise'])
+        # self.mask_hamon = self.formant_mask(components['freq_formants_hamon']*self.n_mels,components['bandwidth_formants_hamon'],components['amplitude_formants_hamon'])
+        # self.mask_noise = self.formant_mask(components['freq_formants_noise']*self.n_mels,components['bandwidth_formants_noise'],components['amplitude_formants_noise'])
+        self.mask_hamon_sum = self.mask_hamon.sum(dim=-1)
+        self.mask_noise_sum = self.mask_noise.sum(dim=-1)
+        speech = self.excitation_hamon*self.mask_hamon_sum + self.excitation_noise*self.mask_noise_sum + self.silient*torch.ones(self.mask_hamon_sum.shape)
         return speech
 
 @ENCODERS.register("EncoderFormant")
 class FormantEncoder(nn.Module):
-    def __init__(self, n_mels=64, n_formants=4):
+    def __init__(self, n_mels=64, n_formants=4,min_octave=-31,max_octave=96):
         super(FormantEncoder, self).__init__()
         self.n_mels = n_mels
         self.n_formants = n_formants
+        self.min_octave = min_octave
+        self.max_octave = max_octave
 
         self.formant_freq_limits_diff = torch.tensor([950.,2450.,2100.]).reshape([1,3,1]) #freq difference
         self.formant_freq_limits_diff_low = torch.tensor([300.,300.,0.]).reshape([1,3,1]) #freq difference
-        self.formant_freq_limits_abs = torch.tensor([950.,2800.,3400.]).reshape([1,3,1]) #freq difference
-        self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.]).reshape([1,3,1]) #freq difference
+        # self.formant_freq_limits_abs = torch.tensor([950.,2800.,3400.,4700.]).reshape([1,4,1]) #freq difference
+        # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2800.,3400]).reshape([1,4,1]) #freq difference
+        self.formant_freq_limits_abs = torch.tensor([950.,3300.,3600.,4700.]).reshape([1,4,1]) #freq difference
+        self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.,3400]).reshape([1,4,1]) #freq difference
+        self.formant_freq_limits_abs_noise = torch.tensor([7000.]).reshape([1,1,1]) #freq difference
+        self.formant_freq_limits_abs_noise_low = torch.tensor([4000.]).reshape([1,1,1]) #freq difference
 
         self.formant_bandwitdh_ratio = Parameter(torch.Tensor(1))
         self.formant_bandwitdh_slop = Parameter(torch.Tensor(1))
@@ -128,8 +194,11 @@ def __init__(self, n_mels=64, n_formants=4):
 
         self.conv_fundementals = ln.Conv1d(128,128,3,1,1)
         self.norm_fundementals = nn.GroupNorm(32,128)
+        self.f0_drop = nn.Dropout()
         self.conv_f0 = ln.Conv1d(128,1,1,1,0)
+
         self.conv_amplitudes = ln.Conv1d(128,2,1,1,0)
+        self.conv_amplitudes_h = ln.Conv1d(128,2,1,1,0)
         # self.conv_loudness = ln.Conv1d(128,1,1,1,0)
 
         self.conv_formants = ln.Conv1d(128,128,3,1,1)
@@ -138,6 +207,10 @@ def __init__(self, n_mels=64, n_formants=4):
         self.conv_formants_bandwidth = ln.Conv1d(128,n_formants,1,1,0)
         self.conv_formants_amplitude = ln.Conv1d(128,n_formants,1,1,0)
 
+        self.conv_formants_freqs_noise = ln.Conv1d(128,1,1,1,0)
+        self.conv_formants_bandwidth_noise = ln.Conv1d(128,1,1,1,0)
+        self.conv_formants_amplitude_noise = ln.Conv1d(128,1,1,1,0)
+
         self.amplifier = Parameter(torch.Tensor(1))
         with torch.no_grad():
             nn.init.constant_(self.amplifier,1.0)
@@ -151,21 +224,24 @@ def forward(self,x):
         # loudness = F.relu(self.conv_loudness(x_common))
 
         amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1)
+        amplitudes_h = F.softmax(self.conv_amplitudes_h(x_common),dim=1)
 
-        x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2)
+        # x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2)
+        x_fundementals = self.f0_drop(F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2))
         # f0 in mel:
-        # f0 = F.sigmoid(self.conv_f0(x_fundementals))
+        # f0 = torch.sigmoid(self.conv_f0(x_fundementals))
         # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz
-        # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz
-        # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice
-        # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice
+        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz
+        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice
+        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice
 
         # f0 in hz:
-        f0 = F.sigmoid(self.conv_f0(x_fundementals)) * 240 + 180 # 180hz < f0 < 420 hz
-        f0 = torch.clamp(mel_scale(self.n_mels,f0)/self.n_mels,min=0.0001)
+        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * 302 + 118 # 118hz < f0 < 420 hz
+        f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * 240 + 180 # 180hz < f0 < 420 hz
+        f0 = torch.clamp(mel_scale(self.n_mels,f0)/(self.n_mels*1.0),min=0.0001)
 
         x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2)
-        formants_freqs = F.sigmoid(self.conv_formants_freqs(x_formants))
+        formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants))
         # # relative freq:
         # formants_freqs_hz = formants_freqs*(self.formant_freq_limits_diff[:,:self.n_formants]-self.formant_freq_limits_diff_low[:,:self.n_formants])+self.formant_freq_limits_diff_low[:,:self.n_formants]
         # # formants_freqs_hz = formants_freqs*6839
@@ -174,26 +250,62 @@ def forward(self,x):
 
         # abs freq:
         formants_freqs_hz = formants_freqs*(self.formant_freq_limits_abs[:,:self.n_formants]-self.formant_freq_limits_abs_low[:,:self.n_formants])+self.formant_freq_limits_abs_low[:,:self.n_formants]
         # formants_freqs_hz = formants_freqs*6839
-        formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/self.n_mels,min=0)
+        formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/(self.n_mels*1.0),min=0)
 
         # formants_freqs = formants_freqs + f0
-        # formants_bandwidth_hz = F.sigmoid(self.conv_formants_bandwidth(x_formants)) *6839
-        # formants_bandwidth_hz = F.sigmoid(self.conv_formants_bandwidth(x_formants)) * 150
-        formants_bandwidth_hz = F.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*F.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+100)
-        # formants_bandwidth_hz = F.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*F.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+50)
-        # formants_bandwidth_hz = F.sigmoid(self.conv_formants_bandwidth(x_formants)) * (0.075*3*F.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+(2*F.sigmoid(self.formant_bandwitdh_ratio)+1)*50)
-        # formants_bandwidth_hz = F.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*F.sigmoid(self.formant_bandwitdh_ratio)*(0.075*F.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+50)
-        formants_bandwidth_upper = formants_freqs_hz+formants_bandwidth_hz/2
-        formants_bandwidth_lower = torch.clamp(formants_freqs_hz-formants_bandwidth_hz/2,min=1)
-        formants_bandwidth = (mel_scale(self.n_mels,formants_bandwidth_upper) - mel_scale(self.n_mels,formants_bandwidth_lower))/self.n_mels
+        # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) *6839
+        # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 150
+        # formants_bandwidth_hz = 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+100)
+        formants_bandwidth_hz = (torch.sigmoid(self.conv_formants_bandwidth(x_formants))) * (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100)
+        # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+50)
+        # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * (0.075*3*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+(2*torch.sigmoid(self.formant_bandwitdh_ratio)+1)*50)
+        # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+50)
+        formants_bandwidth = bandwidth_mel(formants_freqs_hz,formants_bandwidth_hz,self.n_mels)
+        # formants_bandwidth_upper = formants_freqs_hz+formants_bandwidth_hz/2
+        # formants_bandwidth_lower = torch.clamp(formants_freqs_hz-formants_bandwidth_hz/2,min=1)
+        # formants_bandwidth = (mel_scale(self.n_mels,formants_bandwidth_upper) - mel_scale(self.n_mels,formants_bandwidth_lower))/(self.n_mels*1.0)
+        # formants_amplitude = F.softmax(torch.cumsum(-F.relu(self.conv_formants_amplitude(x_formants)),dim=1),dim=1)
         formants_amplitude = F.softmax(self.conv_formants_amplitude(x_formants),dim=1)
 
+        formants_freqs_noise = torch.sigmoid(self.conv_formants_freqs_noise(x_formants))
+        # # relative freq:
+        # formants_freqs_hz = formants_freqs*(self.formant_freq_limits_diff[:,:self.n_formants]-self.formant_freq_limits_diff_low[:,:self.n_formants])+self.formant_freq_limits_diff_low[:,:self.n_formants]
+        # # formants_freqs_hz = formants_freqs*6839
+        # formants_freqs_hz = torch.cumsum(formants_freqs_hz,dim=1)
+
+        # abs freq:
+        formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:1]-self.formant_freq_limits_abs_noise_low[:,:1])+self.formant_freq_limits_abs_noise_low[:,:1]
+        # formants_freqs_hz = formants_freqs*6839
+        formants_freqs_noise = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz_noise)/(self.n_mels*1.0),min=0)
+
+        # formants_freqs = formants_freqs + f0
+        # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) *6839
+        # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 150
+        formants_bandwidth_hz_noise = torch.sigmoid(self.conv_formants_bandwidth_noise(x_formants)) * 4000 + 1000
+        # formants_bandwidth_hz_noise = torch.sigmoid(self.conv_formants_bandwidth_noise(x_formants)) * 4000
+        # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+50)
+        # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * (0.075*3*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+(2*torch.sigmoid(self.formant_bandwitdh_ratio)+1)*50)
+        # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+50)
+        formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels)
+        # formants_bandwidth_upper = formants_freqs_hz+formants_bandwidth_hz/2
+        # formants_bandwidth_lower = torch.clamp(formants_freqs_hz-formants_bandwidth_hz/2,min=1)
+        # formants_bandwidth = (mel_scale(self.n_mels,formants_bandwidth_upper) - mel_scale(self.n_mels,formants_bandwidth_lower))/(self.n_mels*1.0)
+        formants_amplitude_noise = F.softmax(self.conv_formants_amplitude_noise(x_formants),dim=1)
+
         components = { 'f0':f0,
                        'loudness':loudness,
                        'amplitudes':amplitudes,
-                       'freq_formants':formants_freqs,
-                       'bandwidth_formants':formants_bandwidth,
-                       'amplitude_formants':formants_amplitude,
+                       'amplitudes_h':amplitudes_h,
+                       'freq_formants_hamon':formants_freqs,
+                       'bandwidth_formants_hamon':formants_bandwidth,
+                       'freq_formants_hamon_hz':formants_freqs_hz,
+                       'bandwidth_formants_hamon_hz':formants_bandwidth_hz,
+                       'amplitude_formants_hamon':formants_amplitude,
+                       'freq_formants_noise':formants_freqs_noise,
+                       'bandwidth_formants_noise':formants_bandwidth_noise,
+                       'freq_formants_noise_hz':formants_freqs_hz_noise,
+                       'bandwidth_formants_noise_hz':formants_bandwidth_hz_noise,
+                       'amplitude_formants_noise':formants_amplitude_noise,
         }
         return components
@@ -341,17 +453,17 @@ def forward(self,ecog,mask_prior,mni):
         amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1)
 
         x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2)
-        # f0 = F.sigmoid(self.conv_f0(x_fundementals))
+        # f0 = torch.sigmoid(self.conv_f0(x_fundementals))
         # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz
-        f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz
-        # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice
-        # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice
+        f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz
+        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice
+        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice
 
         x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2)
-        formants_freqs = F.sigmoid(self.conv_formants_freqs(x_formants))
+        formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants))
         formants_freqs = torch.cumsum(formants_freqs,dim=1)
         formants_freqs = formants_freqs
 
         # formants_freqs = formants_freqs + f0
-        formants_bandwidth = F.sigmoid(self.conv_formants_bandwidth(x_formants))
+        formants_bandwidth = torch.sigmoid(self.conv_formants_bandwidth(x_formants))
         formants_amplitude = F.softmax(self.conv_formants_amplitude(x_formants),dim=1)
 
         components = { 'f0':f0,
@@ -414,13 +526,13 @@ def __init__(self,inputs,n_mels,n_formants):
 
     def forward(self,x):
        loudness = F.relu(self.loudness(x))
-        f0 = F.sigmoid(self.f0(x)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz
-        # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice
-        # f0 = F.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice
+        f0 = torch.sigmoid(self.f0(x)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz
+        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice
+        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice
         amplitudes = F.softmax(self.amplitudes(x),dim=1)
-        freq_formants = F.sigmoid(self.freq_formants(x))
+        freq_formants = torch.sigmoid(self.freq_formants(x))
         freq_formants = torch.cumsum(freq_formants,dim=1)
-        bandwidth_formants = F.sigmoid(self.bandwidth_formants(x))
+        bandwidth_formants = torch.sigmoid(self.bandwidth_formants(x))
         amplitude_formants = F.softmax(self.amplitude_formants(x),dim=1)
         return {'f0':f0,
                 'loudness':loudness,
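Note on the helpers the hunks above call: bandwidth_mel(freqs_hz, bandwidth_hz, n_mels), mel_scale(n_mels, hz) and formant_mask_hz2mel(...) are defined elsewhere in net.py and are not part of this excerpt. A minimal sketch of the first two, consistent with the commented-out upper/lower-edge computation that bandwidth_mel replaces; the HTK-style mel formula and the 0-8000 Hz range in mel_scale are assumptions, not taken from the repo:

    import math
    import torch

    def mel_scale(n_mels, hz, f_min=0.0, f_max=8000.0):
        # Assumed mapping: frequency in Hz (a tensor) to a fractional
        # mel-bin index in [0, n_mels], using the HTK mel formula.
        mel = 2595.0 * torch.log10(1.0 + hz / 700.0)
        mel_min = 2595.0 * math.log10(1.0 + f_min / 700.0)
        mel_max = 2595.0 * math.log10(1.0 + f_max / 700.0)
        return n_mels * (mel - mel_min) / (mel_max - mel_min)

    def bandwidth_mel(freqs_hz, bandwidth_hz, n_mels):
        # Mirrors the commented-out lines it replaces: take the mel positions
        # of the band edges around each formant centre, then normalize.
        upper = mel_scale(n_mels, freqs_hz + bandwidth_hz / 2)
        lower = mel_scale(n_mels, torch.clamp(freqs_hz - bandwidth_hz / 2, min=1))
        return (upper - lower) / (n_mels * 1.0)

Under this reading, the decoder's new synthesis step reduces to speech = excitation_hamon*mask_hamon_sum + excitation_noise*mask_noise_sum + silient: the harmonic and noise excitations are shaped by separate formant masks (formant_mask_hz2mel presumably builds those masks over mel bins from the *_hz components) before being mixed. The noise path's ranges follow directly from the constants above: centre frequency 4000-7000 Hz (formant_freq_limits_abs_noise_low/abs_noise) and bandwidth sigmoid(x)*4000+1000, i.e. 1000-5000 Hz.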
diff --git a/sample1_1.npy b/sample1_1.npy
deleted file mode 100644
index bc9718d0da11774978f46470dd011776b88b14b6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 131200
[131,200-byte base85-encoded binary payload omitted]
zF78-wje5O8ZY%P6r(X(Xlvjc@I%3@Se@4V?>ZRE#O9}IG+a0xA>X7F(QWbO>^~YYS z=Gc@Lk%X;Po#rQfL7#Ek@chgOO_JV?i(@;(}`VA|c&AouU9$6!)hb@8V zXHj}yCX_k~W_hhQtMf*{phU}e?D`1EuCCEvKt9O&`2>C2&7%+{c*8T=d$+t`2}t?7)o*dsjt)L7yUk=k1LDi_MSXB4nOy= z*+NiXu*dcA&5iBoOR81e+@gc}eYN2$Qc)js`}q6b@^UJ;pGg|lK8!wL)H{VDZ?V4^ z5nvr~aR`#QWBR`Gpw3^8&;7C;R4|F)$vIz%{zOt9Cm3o5pp3;_v+g(zEKZyFIJo`E zSLxS5kMw!@_n3a&^hkf59_jD0l%5B}zmo8Ac71~Hg@@2(D<4)2{r1U~C4=~&| zs_RjK>saT=25*yAk|V;TR3B4M)QvmV&4+&`>LI_s9XWo zQwx{Ev@$KLv465+^;R{kJB~&vnmxVL1ZEs!F)X@~;Kz5&?YMd+i4_*f$>SPXwueKLM-8i>A_D$|XAO?E+)!f4{x<8_D zh4L^crWh#ZM2)~)Q~EjScO!7{2G{E;9`wb`iG34-dfcJ;oj=#eqCcWnw@|Dc>d>W1 ztzx_02l0B0%f1ct!e~`eakpzf#2peH_S{5;fI!|mc37wTt!JyT>GL4`+OKZ;d)+X! zl&*CyL)-^_UeNCk`u*NhsPN1cdG;MmQ*k$x#v%AbA>(cPQ4q6Vd2bQc53}RtAGl8= zj_gQf`$5#@`N6&H=si))D=%1a4_Oa^EZZ*o&68B{*5X>SbAA9W4l`V_NuokkanQ^M zDH`ycfAf^F9eD|IEh=UEXs~BW#7Nqf28L6jH-9P&gI_#rUKsj(_}!X)B{GP7Maj1U zwF>{f{?f0T9`BiVwtr6ia~}Qo&|jyI|Bm$6|EE#9?)F#^M=vniPCwA)L!a${&wPjT z8^9rkEqV7*0BNmvVtTLM5P9RE&APHBq$Hdr@WF-#!t8v+(r$Sx@$+Z$J8s@Vgqq&Y zMmlv8GnM&@x5qmO^(13s-G){&o%Bpy^Eu+v^hdgGczh;e71fU0@R(xJM22U@38uW`9 zhtOjtThp+Q+QB-h6<0X{?QU^*qfd>2K<^?aOLH2mkciIUWo|*=yB4a!o)GlmC{($9 zv6@thn!Z2bj=H;cTyK^3_ajbWZx-LtLBb-}D0)GIMv@X*FO?n}B#tVxeJXDKBr{Ko zi~k<-87>|aHOj9e4(F@CJ^36*JU-ZO36-w~Gu`}K-l#iv{i!1{b)drWz#m60IrqZX zZ0iki1EuK8RKs(0G@Eoh8gmFm{DEJkm}XvOCnHc)*TCo zuU?lkfBjf7VJlrnT|k_ks)d%BO=uh3%q}yn#d<@Edd-=1mVB^sGv>X4`!Z*t82;qz z$OFnO(Q(2&t3;}eV(3*v=I)3*@n4ZoD4TE9)G8(c|KYd?X-%~tl=1jm-h3vwnGK10 zwkMP3#jT%KD;JVFhQi5azI_K`mfjSA&*q>?%A>(*q06WT^_I(`*vQ=zc^G-|4viNzRV7BJ=deN;`Yk-P@^8v+$*z5eI$dk8G#s^$#6Epie*8HN$?tvnC!t7t;qNLpWf&HbiR=BsWUJ8CSXJT&(%y9Cm`7^YuVFP z<4|(?{I=1vBhb&Nc>AO55Y(PXX?k!HaYrVv_3iItJ@TT|-hc5C|LJr8>39V*PgDAh zKlA&a&ikJp|4+{Y`s?)1rH`u`Z9AHckbkG0y`v(fQ$qcc@cYt7vVS4@1%C#F)(+VRVyU)RTrz z$aTm%;D$Qn+^$@oEEW)-A$Ih;u`Lxcf5zAEJ2?bAbyrq0G5yK^XaDaH`J)42Skad9 zdT>!WEc`2e=eWW3_VU8R9efUpS z-#U>#NEmJ(=?v_l5{o6@SYK@JCrSe=eP1`Cj>(H=D=HV_8128o-5XIPT4K1To}&g1 zH-&OVp|0hEZe@nfe)OljA-L!2S_()nny9=1Tu~fW&VnRZA@H*cl6LCc8aQywO*iym}GI&tL5p_v_esdt@6-4J?=iFnvkA%Un zJUGB86{HjIM}0JIg1W`$>RM-T-(pqBB;{BHfgwA$?<{|hyf^0Ip`m#2I~}u@?*!_4 zlxJoJZmkE^RF+w0)a$dld|i5bF!FYGI%xiA>W52v8pAXX_d@o`4bL_r|5{(|WV%Jw z0EF@$p#4A{-0-XKEhLcd!Y$h=`x~F*=VWqe+~SCX@fhd4piG5F)GtSM=8$?*yF%dyhhymTC-1k+7V_TakA{1c3k?{ir*@h;1~QTPdYB~M=c$@A%X zz&}O$eed}5-cVBH7$~HgQG#-ZLH6yJP@lkI;29m;cv)-|%HD43a2vpVgy`mt5s$D> zAaIrHq>sFkjIq$qs1tZjg_^66IHDn$2(CJX5%3G>IQne^)}3S-`EG7OJYs3-j;=Q& zutd{=V{OaWzvU+#=E5HSZzixm!oXr7_UFE(Umrcve+PY@(#Q1I|F>e`FW%cps9PrN zUj0#n6?GOf7cWfYA)ZyNP%Bj$^&KsBTXkN405|vf`aL0UNr#Pxz1X5sV%28)q0S2P z#`T@d;%cbxci?a!$5bn!c%7v=df@$itS4{p7o7jMDiyx6G?MAYaEu^Xnh@b)B888eSob zF>gIPp`7em2TFmVK98Fm2}SyDWWz!^VYb*2@ajnuQ8#4o@!@MDft*aWj^A5|R?j1g zh?zRV$s)GjT`(W}B=;Q;FZ%!@y1iw*n&^vAl5p4YDC%&E3EPiLB0t?S_xTR)hY*=r zUXp@(>Y0no-Ha|FPb<8MTSg9j24t9$g4=}(Sy2Bb3%`1d*F=fs@z`&eegC~WrbyQ zAIKlb=&W0gxCdrU_nYYyn4IL5W)$uP!Bo8^E~vwNn7?J=g~A}vy64|yz8iuIsWgRt zj^N<4Z)no<8mo!^u<&1ZBoI0T!MuX?M*lhQckn59D6+o>)(ASOF1(H%_DrL zZc#^|XCrTQ8TSa-^YEH_AkMe_l)cjJPV~{v6!~qyIsu4JjkebQ&+93D9{oLU=*Q`i zKF_2*vye-Ly5UM1Rku+m!+~b0P~S2Nr*s9m7qmuUN$D{x99(VJisIoa#6-xh``Nb?_5r!-sQ%8*2u(q;=$O(!~U{@2+h-2Kl32(+<9ZqP5vy5yeY<(`byA6iF@-a(vc7epn%T~wrDX=-pvoHpE z6-REv4c|7zE#}P`pPcOgDSViZe`y3Qp;qUWn18E>+_sQTjfc^?xzZ~CWa6>=pq#nVD_DjyQ|d0yFZ41L*@IYXBF z6cH^qrpj5<3X=0|MsM?X)NPKsD!(}+ohU8au`f{l160qP{$#wX84g_4`F?*_FQn^9 zN{${!9b1h!>bt;R;Lx}DwE7Aa{dlvA$Gt~@Vr{dy82x1az90LxF)W!dpMcGhv(iRA zxPHF={au=W2)4a>;c2sL2-0^_CI%kTU^=wXF7v_=d^cFUZ{hQwb^gcy{lGu-_UUGi zES)&hg~qBO^mCTe6z`%zrbKWlhZo`mocFy;TsaEG^JYA*mnY!G$<6!RbjBg` 
zqVe2N+X#62>|2tmG76=;S6`A&`uFRl&+EVE$*9KIZpIhmu;WC_rPVfLz^4Cr;r@B7 zQ`~6U6QhW{p=WkVi%$>3^6HBRLexg#UU1i;C6RwV|L@QJ`|m^_|5K!2PqVgg)YA9~ zh&_M3o!RKm-{tT3(a-G{AXXq9R>ZCUe?%3A{(OdWVcg|AbSIUnb z$6nlrDPFm@HVXA_xHUTX_tt=6rEbvXf;gZgt_Z7p3CI^YTBUimnz+GY&i5@1#JT8{ zRh@G)iB)vt56f>Rp>MoaRFpN6NSR`RuG(7Uw|~riq*Y8ZEmu8~;CM}@H~!{2o1F~_ zs-~wK)tW%2P^UqE74ib8&m@Ka#p@m^WZH-Lsb?SD4S%T*L${j)r#<>BER0E;yS9%3 zYeIoCZx$8wWW(S0F;l=RTeUx-pabHc4<%pPiF%FV)2sJCYJuovnMgmeMyQm%uy0Xk zB$0lwIl=>Z`U1PQ@qY;DCe&*}Yz}F?g#82eBh6QR#5;E9ZKj}JB5ii^^XSKJ68g#| z-scMPfh~0w&KG|sk&;3JX?H^)vM9bcKe7&1ro^^v_X6@I+ z$~-#nU27T`AFR7Bt62|;YkvujY{dLKT+OY~W&m^wj6&=J>VdcF24}#QSfJ!xjyRc$ zYuERxY`>yHLTwr6-*g7~`k*G>6(2w%zg`+1JXHYvrF#Na88pG%N)eFV-vz;Q0u^i5 zqK{ItkB66EFI+s=&gEw~2n&X;T}-7zP#nbdCQcjcWI@yEMR_Y<_mkoo?HMh*P z9e;3mbo|{v&5M9nZI*d{V?brfi9O^)gSbH5xnu2A2oGXg_Hic-W~;AP#;-#h==wUN z1K!BiwxGo@{k_GjNm{~mgz&lCE*pnv~AMf$#x{=S~e=j2qLO~5eKs{YEt zI7p;F0sp>nP^{1pAKf$o0i&@w)QNFWKf^h{vKpT|sXN#_*8G{L^zWr#51o%f|9<*$ z`j{T+$La5cCRU*#Kq`x=&TRvopR(v;bjw8aC!#X^5 zN{I6>k5rihb;RnIglc{r*1H(bime2!6Z9?q(kI_Y;?LiZ^fo{pO`lW8ZK^(z=Jo~V z`!n~5pR?!(#op|I(Ff<#_M_fl z!rS8sy1q||cZ!1NajDOQ&3a{6qXYKOwzQ^}&@|3JE z=zgsJNGOAZTaTJV3V-*VlXw{pY??iG1}UiHPmw-zW^*&_bCd{3%5H<4!eFk>18s24 z$@Y@rjXDrMbhKPvs1Ws=YD<0{$^hQ;+sX?c00^1RWOqeWL!z^>-I{;l@)mi13LK%q zcJUZtZoHrO{W{mKuJH$VSsi*gZ`1rZ=)U)`EKwW*eTR*+s+AIenZd_RKF>a{?H(B6RN$j)29Tb<`R)j5jWi0rPOKuG+*f+}%aCP3ez- zh}#(rL*yI%Jr5JFHH+DZqYml2pNsZnjKlX^pQR#F#=-6QG5yEB|Nb1Izn>oI*Z=pJ ze*8bhe|n!OSBcKT&2G3AEx)nD264EJ}JigRg%%}L@n`-lYJhw5`6-T{!e>% z9!=%`E&!Z#)Tu}jg^o%kl$^*=X)7s_gia|W8mW{z8l)&`Fr`sRNRcU2M1`j^PnB8b zVef5YQ=&N3&wb8a>s{8;?Vfwry7%|j^VhrHz2CiUd%xe$JA9vEsPmpa<^g=)<6fhJ z^HVYgs=D>XEG>Ta;vuX%7boTG=R75nC9uY~MpNiH)E-!R#A>%k<0Hw>YnGP@+mr58p4} zq~*mI?4S2bw5a;&Hi+)Hu{r5G#)I%<`fkTyUytoRD%Rycj-Qc#k6HiOIB=4Ljazi_ z`0vd-xIS&zLFRS;v{d}lSIqSJN4(B-aQdczd39!&C+6XOfq#14|MvNxeV_mIJ|q8c zX1!2cLN0g(_SuX(QSt)o70=;y$D3e&l!Zv%oq=um`3rb2xOks5=+w$|hj6?vC~D%c zGqVKv9}JTOZ(*I7H$q>3m55|jT8!~if0@Nn;~I_;#=3WxgS6)Al$WvujE5R)+i|{7 zAU#ucTp8|^vW zaL%EjIxu{yZuq#q9^4-$g}%C92SKcmNj#@AkP!ZSNlFpp)^pYS8r(0l40gtI%OZ8m#j}dw%jA1Wu`Z0b?GF75;WuOIgIgRDHZxLpRsT~OQFei^6|OR6=3eJ94&eh`&_P{dX6QA{dVQG9aVpCh2t`PFE8eDp-X0; zNYtn{kPm(n8+ft{;$CcdxB};`R|zGp5d7MX^FWufhA^-GsbuM7`9p1B#$KqC^Q{fa z5|iRBE%;!4@u1~hQ><^Tq{PX5hH<>BGNqo`;5dF<>wV||)}c%?=AP2SKDFM}mWi0> z*YDwR>4Y&4#@Stp7_jDoROTX&Q|q`;W7Xw7-w)@51q90q|p73E~Bp15lEYj>d5s&O=L+SLa^!Kf`!Z4~|K%Y+n=T>{SduJhuY04CY$c?a#t|mh}S)Yp=q%NoL+R zv|h6Gl9J|}vc*1H&wIiyZYyF{ZoVJCe?cK$N8eo8d_D{NY6m{C8k@+fH2qXF(1(3; zkCv)k-04RhOLLK(y4wbrH@jaCd_hw9jn@x4=F7UmIiJ#}gO*xms-K zE#t23@4>p;S(>~n6WgF=RZFakBo`ic?H5lwh;`1)$J^_8uwjvUd;W0?4wUJi)s$V) z3X9g*``MYbg4dA)6`tW-z;N>UuHCKhPDr~(*a72#{&UA0?%SLBD(370=`M(hj^2L= z>v8H%+Hi=s0`D_S&Ndd7$GDpJO`_8{TxhaVX%51=9h~zDv!rGZkN4~Z&Yq_gUGS&F z8iTx9AHl=uP0rXeA3++tZkJ=cPg8^KxiZYJQRz;Y9E|7teNleroO;kF_S@X_X&ull zR2Z;NqaA)Nw$eT8(+($3i%Q0nW1X=4eYplsKaOi=+%V&d8RsJ(XP#%q7qc9>9kb3n z&nz?Qv_z8d`hr>i|6aNacV_qEe$hGpMntXz8|15WSB%DbjMLXRFAZJY2%f6i;{La* zVX!B$HD0R}6cgiiJPFH&2kshgclIWMr*eq+o^3~2W)lKPga%<=|K6n53TKS#wG|(m>4S9;_YeN*V~Bmz_J_FbHP687S4FR$ygUxD zrDJo;TU$UmsCbj8bPE*B_1`*(ab?2u{BQn-aYava`PCX&U#RnLaNguLJWdetboY$>-P~3 ztD7&*d*5n+*gSt{PsJv9BVJH^9{b?vsp*?v6~+EBXUf9DvoH_BJ|2{AuNEO$2NGj!3r(=DQl#RoBCT&6;2e1B^htPlzm~YS z*zn~$tea)vXm8BHd3)XdSsPZfK}a-G$`P--w>#wI?o3pV!8T#%=ImdgW8si`%oad;kaINil6nFp_=?zY*)c0k59AZh)t}Sb23d!%iI`#kk3spFsjA*MuRg)_oc$p#JXtnZ`CTXn-pcn$b(C?z%qHm0 z%BCOl!A5?s%=bHToV+$L(y<@g3B3seGyAsk!9aD(bQb1~kKAtLI`e+al6ijNQkPX~ zIm6>}Z zZ^6Fgf4F1`G=phdM9Kv`PWc9pj-82h?W-prth_g`0mA)W^J;c~fQdN;7iGOmAa0wT 
z%}QsiOLvEF6GPHKSsqmpgbp-muPs=qZZs0G{?j8%Cf%K`hC0}r#8@St&zr(&;5 z2iQul;z~4QUdR)rQJefbVAA8fcggMTV4&+Id`M1NpIYFrUdPG1=3u@`URWh z>~VcsE98CXW!YlfigY;7%V;_mIxhw$aGzm)O0m1kIA zV84q~wYzpnV!!qPoj~?+%-^n++_}7*2U_WS$G>#t!MhKKGI$etur@u2e=~^DeO=S?`g++zqVe%`5o~37I#5i0M5UiJ|4M274yD= z6^xqjKEY)T`<&WrJ`60&GHQH_u5|J?SDST$($3~ZAJc}X)?%(yegX<-ep~WBJtHq3OLpaY9AX)CC(uwna z9Ot5YSGic%`oIGxP23;8%v-$>`xm@)x|cX~qZI-o&L)1wJja=8C1W#nt3YH)>yh0X zuwFs{D@xN8;{~|CYu4W`fAC!5sQ@k`+W{c_ZXZ%`xb~yuj*vb8U07xtNC{VB}|jeL&nE zClvicEU4Rt$U zoqE(6y`ol7jMJ`Fz9RcSpU>o+TKq}-~$+LiS-Ogt%9W`>c8BT}AxL;{ivN1-lUEmfu22c%u?D6FT$%Ud@o!-8jKUN@ zy#Ca%Ygy^r0kLydF4~iX<4uCK{O|bO8W+*}!%x~EvRwGZRaeZb(YZTOF_sVAt_ESt z@p!#u7h{SGagu3?CG7jC%5Mi-tFcyCor<{AG z*JtzKaG6JS)OU2&Cq3P!q8*C(y606f|K*lsxU=BdHkhvceN2NXp68DrKku_m2RPnr z&rU|aWF*ereZgvnO1Z8BdDv$Ua~uzjncWVKH62G@9~<_M8K1{bSfp$k)ehsdxAZn* zJl6t~E%W{GW}^8(iCAhm2P7WL|4|~?4jJ#xKiKj2xIc3%+zqc+H_4o>l~(@ocVOOk zWy`?@0)~NeuC#U9WGN+USeKD>0zr@j8BZnNmlgw;6DKF|H@GS diff --git a/sample3_2.npy b/sample3_2.npy deleted file mode 100644 index 17be947158892fb118caf96daef5c986102e51ab..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 131200 zcmeF(c{G*p`#5?kB^8PyLr57a5}87`G9;xzg`}iV$~=_}DJjX22%#iHWGF+X(uHl# zxXoJWe@0|1BdG;UO>)Gwe_FS*$zOMVag`YTQaOylO zi#v;_q@|V1RcFZqqLS*?Dw2CdC9NHtU7aoL%^jRAt^RfYS!XvZ>~5Fq7LHcfq13)T zqWk1k{{4^i9#L=6{|7e?mVe*A0Y3ig8^FH@{5-(V13V6J2XF^)2XF^)2XF^)2XF^) z2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^) z2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^) z2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^) z2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^) z2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^) z2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^) z2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^) z2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^) z2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^)2XF^) z2XF^)2XF^)2XF`e|LZ{QVCaMBm2_|^=@PrDJp$HB2eq_PhamO4-A020L*V+`?Uru- z5D@Pk{^;#H45O^;H*G&Q0x{%kCgz){@M%||xK|1l3Js^(9}{R0(Dn1pZD|H9W)p9; zBrpKX<<5CMVnAiN>zfm=XyE@!Mv9Fv49u=wbE+1Da3{a))AdR+#I3tjdix_8EHk+d z@@^f3gi15nqYA^&mL2p_Yr`Pe*byHb{?rS5R#%&I?&|>G{raiiif!;?_w+7x%?=P* zKlKJhlR*628r4rHDbQxb->%_J0*4a$7YeSOFn5Q2WN%|T6jDm=TwKu!D`o2ZHW_uo zJ~LlC)F1m2AuX? 
zHrsseD@b4Dd(D5K4w!`bfSB4QIC*!Z&tqpRP>)H5hu&`o%Rjqs%3p7Xx`%0&MQ^%5 zyOxjrk^c>4$pF`t1<$>er{|a!sIiBKk() z7!d*oS+#$=8wE4Lr=f+2%#c8|N z8sjk#rHfC0sb)gNUB>$J5e(o+tQ-sdL5CZ6C_J6l#~@-s?IxjX0)mImU%K&X5{!R8 ze_0?r3D$~6>PN;VU`A=!($aJimal*BLN9N=vN#E~svdHpFPKoVa#P^(C!2Yn!&cJP6dD4_9#r!MT-AS_Mp$`SuD3^isiN7vbm z!t&=@exC%9S3*KJN5Ri);>?3-DjeO%<$NuY24^DAy)?2Pg{H#K0nyqMpb&Oz_q;R} z6wWm+skaY+UD~#jpdOx=ze5b!EIs=S5(C6-6$$~?;;=IlVa+gK%V1O|Bp z-);umRj~nXE!_atrrN)5^?=STY3JwjEx_B-q2_K?57d&a-7c4|cH-)0~7YNARFNSkMqzWx| zY2D0gsYWDUx3#)`H7IE|PvTa6C2BbOqGi8937T^qE0-L}L>fKp>21US(6y{;lz!HNZA%%H8;r2=O*%=k3n+ zLWsnt57aISkh~Wc;zNdE)0B+m@!1i$y{=t(4|Y8eOtn8WAwz?jGt;T5IwO#J^oELB z!w{Sdz80)#Hw1weP5Xi?hM+o1O-PzN2tEz6>!V`^fy4fJ+ip(^FgSji3tk@pQlpYU z^A{3mS&zxx+T9K!JXZ_Ek5xnAb*7#4o?K9{W4`+%l?5WR$8vk^%D`!}j>hdAA~eqa zDM%Bi0JTTW`0?ry5c24$3As7~X1k*uPq9%kUurq*CWoP_@zJqUb;EGstkAn_u~gWf zn)PbHk`5M=t)g+VbTEA#vS#`y9S(?VJUr3B0D^#f&Ot*aD389CFzuX#*U=BkAJGy;C3-}N6SF(B2zC}LKB z3|cDtBA8lZ;IHq@K+>boc~Sa(RuKbAY#Z40s z|B0Vp{|==8j^pL{S{|3`&wV1`ITM;Q=hZ7z}Z zt0CVqy;PZ%2(_ap{lspQVB}Djjaw!Smj3tmbS7DD$;Ds-PVecz8^%h3(D~n5CvFje z>u_C`jw}I~^R~&nk8^;+Nq4ifO2N)2QJSMj282e)ZC8>mgt<4L9b(jrA!}o9i?c!w z_V~EC(f6rfmd3H`Sa~`yqcT}9Iv0WeTp|~zQ9clUzLY9F76tx8>(Z1DJ_Fw40Z-#{ zieP46EdNw55%#!$opj>8kJy`JuCFS=xbbKg6dJ9QS z^G6#{+MCp0^eH0Zdi{c3bPEv$T6_>6;{A%)if>t~&XpqTit+tkzsgaT$GGJ#wGzZa zzRRA`cOPkVgj2`U3xE>b-stxB3ph!fQT_C^0=Vo?`F%W52_m~M+Vm z%P0uDFlc-}49M>}^5=)^D8!$2Gl|M#!hnyZfHbbL z0!=aOIvpz^BAU_zvwmiN8U=lzW1zXM?S3y%5|_$O^tFI(*H|f^U^z(hmOm}M76#I? zomLU!?-9W+@=W~n7bN2UyW2^SfT%;KYs2{o$WUZjY_PlpN&nFJ$kfO})~oiM$cjw@ zLP%Kl87l%f8J$=^AW4MQbCaj*mOA0Gc0d-_`5q8+cJ7%?ZHJn`VY{-;jZk>`QN5iO z0YX_VhSseqgqTN1?Hd=fLC`o6t{_%^&yMK z1u_R;A7XQq&FSszMkVd9K82JKQH<9c!Czk3`%pID%HGwE*j^kyyic|R)u;`&s!0)1 zk^@ob&q5XQx1J4gcB?~dy%SoYJxwTL;Y@Wx(O2ZexHv$b0%RSzCzj&k2o3sty%uf& zp4+n*Ht?51TGveB!I5&H+&wPjuTcYLBQblb|9k?4?yVX>yYhf-?4!*03uWNBq3`pC zv&}%-{{BVBY&RtISVT3Ali+mR;hV1-`@!y!7yVr(8Q6cXjlaIN4_p>?1`=kvL0

$m+v-zb=0X6`@aLIy35y!x$p{?$*!nN9J?(0ewe$aymbny!6q(L70m8%8A-Zf)$p z_0ykvitk79<1Bp2*I#_fe~+)n_>^DHe~w?Dq(Y;XK8~+nXVhlDJIa92w(+?O&fvI; ze^7GTOfu*{w_&QdQ($j4MLPuR+1!SPHeFMkzhKC9eQc&ez0YsL4IeVh(oK%@Ps9A> z?KA%AIt>!rfO}~f3r5dGWk`@&K%aZ>w!~`|+zmOs&r%QDgMa;|r;`|i9{Lg&Wjj7Aq4m`zi*7{>NRxIQ~_M#l~^QWfw0sj10vpQ*;>-=TqhdA$Lpx`xWhn$J2J)ztzJp zKa?6Uxo15YGHzVbWzHIe5RF_5{p&vw@8qG77rS~apF|>i3#*8Fw+}!J(HT?M zEr#HA6#dAs-?Uw!Bg{ z^IbQrDsegTv#bpQ^G_w%#HN7{w>m{_A`jJN3umQ&t3p;DENKB=9kO>RebL3OK_TN` zpFgrMN9+v`uoxJ4fRKZhStWRW3W9~d^gJ9dNEf0F?L36)DfX?L_SaCRe?fpd! z5Eei1`;u=f*hF}5>3q`*4syJtips^#3fm!%=ce1-mSn;Auo)IB7qX$cOPsh* zkPbf$g0x(;2SI$1bKYgU9_$CsdHQTqJLIXv_+0aBg^s?{zPH=jfVpWtwQ5B-w5N#H z`*{w4&gKix$i+S3mA~?XDKS3ONEA( z;sYT$*uLs%D9Wg#!~W;_y6xDmr=sxG*WwHGE-Y+8>Pe ztrVe!V*RlAy=_MUj>ARmU%LrS-{;FEza6_VOZ!h{)A#x94Wlg{MlC$BJ=*`KeHIUb zwn`OFW#W7Z>z)};f(uDqM8(kZ|L^m0G3eogS*Lm6^W)e^`_QyK!EcXG`TgQw{~P5$ z$EW<~>|K-vhVi~hoqu(=o_9CQ@_ymg`KlYX^~juH5xb#I^qz0vk{-}0&JWz`&MdpHK+lEf$lqa<9wZ?9 zwoeY)!Cw$DZ1TjqfEHxj7u&@4?M6D`zSj$-2T=IKO$Vng5|9sd-Lbu111Qt%;B>ukbGLsE-OXqMl}{J9iXZL=18VNmaM^xfdek+& zi0T20mwC6gCUirroCaH^u@lV6_shzwtD$rUc(R_SL0$gCV7d0|D1=FAda8R9jkPL^ zpIMm*lXYJNIfu(2WUWAjNL33oePVuIB7^;bP@x4`nt0#k5g1)mGK7ZxY@O-Q2XRsr z5_HX0&@|M(ptz|4hLf&LUfW#<%1JDpB=trJHM~4(FVzB>zb(G?>vY1`mzoEkyZ7UI zvhuPD20^F2^7W5)612zbM1+wPg>XTF^9`HD|#O~Q8DnPWdf zKz>rLh6S(4CoWm2{_{OPUn;tmbgKvAxOEck;iY;e&U;jlp5wfrURBnCH#okS{cGFy z1zZ1J?)m-rS-5sqKpF>boHD-eu!jxnXUxrL(P3jhwmA0+j{D~7yt?;*@lSh$|31Gz z{A)htm-ENRw?qD|od4dxJ;$%-&-bGbeV~0T{HLDbKmXt8pY`#d|Fis7(dHj-oVs9y zyyn_j({5N&yVtG!ZZC9hx09BYBmhU>{5H#C0L-%NUtt8c~9Ivi{$jsImkYwjRYbv@idy*7xd8+N@D?j)l9*L-K% zND`3Zl4YbLpSw_b*v^_d*G3due1a{Wm5Z9M9ggxID2L9=BgUko{ov8yZtMPz0CT;| zwsAN2LWF+dn+vu*u%uk~liq9sB&JR}PJAMRp4YG6`+`{T=lJ;JmXMQjYd(W<7`MEn zIr)Q-xIOOlFG2-$JbOxtuP#AU+ARZmV+Ybwds3NGK|o- zZ@6Y28O5DyR*&`}qPn>1DbtEBWUszzVM}Ti((G!swyVvC<}dT!oWS#CzmIixap3g& z4HWGvkD{<)@eMhjJ^3V1{@|GTyQ~G+HXEeB>*s-rZC>*EXZMf_DeC+agA|l|z*g=F zEfSS(so12pG#j?=7c;Y_HbGOktcKKMB9spo9}*N>V=vk{-Ahv|`-TU%w%^^QczO zWfu0w4N63}w+z6Fla+&%r){wRhDdy~aSLpaG@lpA?Eoc-spT&niSYRR`ES>1=-@T6 z=Ojmx34P}0e~;q)ZJFZf&?t;+*cEiUFu|S-+_e5Ty|1aTX;foT!8tn2ja=hL)5G>c z^(G0gEi4G%Y`;nx``h9hhlG#UQ(@7Gc@jl>R46(gLVcS>g}l)k)iel6n;Ub5!o?ac%bplq10QridVjI3F#L%lF|!oN1* zViyP+wXe8-z7l-8Z5FD@#3Pm1eUs%6s*&+a;m{fMb~Ltd4M8x!4-L3#6s4>nqQp=$ z1990wlrm{XOZ!Sh?Ao}6cW)4olaubU^^rY@=&Yi6G^82nxyra7`kjOHwz&)BZ_k5$ zR*mP&*j*q{<@CkiOfMA6&Yly$yB8XIN@9PA6JS#zOFQ=`2~v(aR$gnOL&W!Zi+J&W z&cBP6U1-Tu4x}ciM0^j#@s8bEuNq@8Za~-4XJoP&h!f!(bZAB>-APKpdRY^i?2A2m z$hQyK4aQwa8XQDhn`*?^G8AMo*}7Kr6a@uGEgW=BBBAS-9=sGdKtP6$5-zK@wIiR$ z$&~s1SPyQvDsIa91jCn}MqCZ=1Vh!}*`c*$ka^BrE^dkQEudDjTA2mH4kNPrd~iGr z*{PfpYy`IT#U(0tGC@(+!@ur&7*d(~=}WVSL9Sa#Cr6jR0@r1qukF242EQs9MkC}l zuy(zBFr@*eMR^IrU6^|Ga-jNK_?EoK zJecVFNVSz;2YlHVJh)Ougz1xc8kNp{OSmQwR;rkv3oBrJ{=C}KQ zN{J>VcRpbMV<&s7=CyScFk~F^R$oDZaL1t>=>UvJs@*vhJ2~Axf1l^mKilP(*B?J5 za^p1*DjHgTNdDl$hUi)AFJaslU!TS8TO?w^=7GA-Dch6U|K#gG>;1pIR;>{u=3F9! 
zQFN~7uqX*^-}v?4)gOe4#>+vTx&2`8>PgktZ32;EI)u!-gcS8<;2bX0)v9tZ9RXPfAAZoCOG25O=0?7qAfti}9);Gc$w;vF zO2LjzM6}}GKAER&ok-SFaJ_|m1sd~C`7N#e39J;tzHTw@gF1^ZGfm{NUo16rI|%0| z%{?4KyuERL+v-N)7?!sUK06Im<*3SUeii6te4B7OnFX$D*9+ILih_>7Tz8k> z?;&d4lCR6&l)~^%EBBuYZQ!w5-E7~NK^RCbFI)YT4f-;MbDoj8@HWcR_46qf7`rh_ z**Gs(uk0rw|G5wPO7(ARzw88+)Z&eww|2qTh6m-#P7XlcF*VJEl@ySzZS)(*dHddz z9?xVXNib_Fl37>W40;DgZrfXZ1{Iy+>pkL;ApIqF#b93|jP*2K&w5?~OF#ekq2=8L z+wl8rmEL@E>0vq?zDqN$^~HJT0H4W`3OZa}@g-oP856ciKc!D$Tn0Zr$G3y| z^1v_GygqiQWt0s*n}zkpG0q@omq1UK72d}sl%KYIV1sIOENLvA3EzdzN?E+X_&@V3 zv;3gxeUx7xpAY_|eEH(jff*H4gCrg(Y1vM##{7}2c0)x$mIbSPvX?IZ&VXA&iT#to z7|*2IKuN}WZD-3uIDZu9Eeb-~&IaTB-J^`8RU*^!$*=d%cKO%*`+rjY{r^|g&U4el zIg1D2$Eqf|9&ZB7qcv5A@QCn1a9Dig77=7`cKj|LYk>}PjY)N>R0vK~-Y`3*3@J0@ zu2g%sAkEnB@!{f%jn|KDsAKaR8Q>y{Q&Cjat((ZuS#umcK zF9$&O5^JTGCkYPHmn=u~>EJW`UMGn8@Af`_KAj9>Z|Lc=Vf&Ac*RrBGe)&o|HP);M zWF?pG7bKLS-2J~ai;i?5=f-7Jkw-+7dF+BjgccdC@JwcX>LnvPwVEIma}u(m&Ggvm zG=QkQxu+&PT2ZNJaN&1_EEMKbPHvZ~h9y@--C|!6q4MsViI7MJ5c5uzv21Rsv&H+L}T;!Mv(gz zwLcuoZA2+o^1+Qk*gyHHEZ3a|m21YAIN&@lA*y0YP8=0#Z*mP5V_Z$siZkoNrv@QH z^5M6KrWnuP7kz8%F#_xyZhP_$>&(0?efT4Pp}A zAaF-o_+olB*s~e>6ZW4V_VJM|DM}wfGgHvgrlbUHh{B&##G7ClI&*9XwHsz%kYb)F zB|?b3&eu;?WH`1&aKd0dw)btnt@3+GhSV8^zCvLt%$E5&)RK<#d8CosRM01IKgb;{Drp=W(~ACOmj|@g&5IVt*n-@@@K+Y5RcR z{(qx<{qWzO`~T~{0&nrM&JiMT6EEh**kjz~?lI$Pj6eOgX?CizYBPvDw2=3EmOJCZzSSX?9Bhk6};2A&8ap#2?E9r34#C{`(Hp~fTu z*<@aSEL+!)M98epA#x}3bp5U@=Z5|LtW{!*f|5|+>bcIGZ#9s8wJ>_<(Et$6U-VV* z_~-cy{&@eKpMTT){C(w$jQoSNDGF4l3pEqhw}E}dx*Gz#+ep*wRFQCPBQo?@-`Db_ z4_)7%Fe7?=5X}N#kK9!hR8B5b(KVx>a~%eIJYq;F)=EFuRGENMY(^SB=Cz~Xx9{W^ zkCdX^$~B4)3E8l_=6K73@h(`}CMUY(G!>RDI@5k>hzhb@-%B1`BLmw{xm_WV2)oVF zu6?rZ1t*_h74yeC;AC>efLnA2Xc(H4GIM$${rk3Gy6>?6eyB$w0^@V`>%ao{V=Smr z^9VETWI^=(kqbKxVcgKDi++p^7xYYzswJU=I~`pS^PNf@2@BSKGe5T;Brb&JR{P z8Z`k`zvmeIsgX+&$`;04#Je0;g*H#`=KH4{!EkVPEaU*YmnaD0a|83 z34U9zZ%UXe@9G-$4rTV#DcmC;Y0?xR5){@fVTd0nc7gB*%2x z&gSz`pF!zJ+HD@BC>I~ko5z7&S5>LoaJ<1$w}15aF&Z3K&~Upd%mBB%L$_~7PveUD z{8+4gWZNB#A6$3PIL6^K8<@>2sFpr#pr2iysKLPJMKt&IKEeL}`E+~VC27<40{{I# zDZgKQ`rq#JpZk;k`QE=>k9Eo(k-FIp=K>lw=9u)usNRBz4?i&9P_-NqmSDWj`%Iw) zvV~Y~!fh8QrXxMGgnIpx@*{r}Xb{BiTgmvZ}t!mE`mIJVIH=0GnE z9BN$N)2s-P`+dEJ(*DuYbEp0#{i9ySCX}#z{@DhWwkqY;t0_ITf5@2t1`X%!3F4(k2 z_Nml?PPnTgB+*dV1NH6)KbQ{@VcxR_ot+p*b1UT6>w+RWXrFpxT6c|r?KuxMH@{Zs zl|H_an_dH&)=zbLp4E^TBXjvoKre3}nln=w5P|CZ1HH$Bc@U+c*7 zH`y3(e(}j^6F(depvq4=dUdg&MJjH5yY}>cp}9BuaSZmazXl9!$jAP<<-?#ZNogKX z?^EdZC0ux76ClMh$LAPoyN1-?V0o4A{qb`n8>EG}&m?|gyx2G943ZEHwhOqaiUJi* zmJB(%VLOf)aVY7z1sgnm&m7s*&jHTy=sTN4r}>fJe|~=-E+a4bRfprUfucf%4NS;7 zo&MI~3=3lCaJH!*o_>$NzAeXz7?Yt?sidqMfLLZNd2;M}MqZy*J?$;ni zS@#w9SGFSw@y+y0^j?(Ge)`D^KLV=TmM{4I9FFrF6Y7p{BcjrpnT73b14vd-?7*aY z528Le{A~5?Hq_y$a7^8#66x7QE&N;-gREk5LoeHWf>pdKCDq|3h}u4D^XV^Lu+nq3 z_KNgA*!5+GtwA+DC)2lmX`}=JW>j<7WIqCsco&(~2MA!RwOW1Nkv*1R z&o7^}o1u4^NYp9kT2L;({Iqbi3fL#Vn>eSIV0%*8=a*eM3RYnplUUb_t{dJCKY-(V zu`dq=2NzJ$LRW`kbu#?jYm|N0X3oz3;QE@BK*Ca{pxN+BW3$JNUg- zRW;62rY{a!Re+=g_jm5ya{<{KtQz^TB_9OX9*H_SHBfn9ReX4E5lBX#s>zXmhf1wx zm;U-$igZ-XE-2a6ps?4)$v=jxQAq0G(yuzD$aqQd_R62{kjTr;dwc^v0a?CkOi!-~ z!nDU@iOKyC?{^}|596;(e+#TycD4@`wOzwos#<|@;c@uX-eypb+^_e^xebC`{Z!~@ z`oQCsuhXr)_}p(|NsNR64I(dPe+)WCf&!2CZoJfX2+sQ&Aa7p{A=>xM-*C%7`9N;N zh7Gk~w4}0DX?F*_-sdboOd&vJmy1dx#(%lSIVf?v${^)wa>0%{8IYS0a&t>i1}G+S z@2(gs0}EB^Q^SU0uZl<1H(vW|nebeW>T}HVGb- zy*cMb3&HZ=KN9*pmjfqrhN1)({d1q?%hR9oawPapfvY^m@!Z*Cc4PqK`-9JAuNS7n zhVU%;8Q-W7Q$XSl;dn;f1u?mtJSNP&$)phWv0!zI+og7l`+s;f=kn%W7Hp^b`tO^~ zgDV?fvV>H5P|UI0k}1If;Zm*j8!&D%OmCp;_+^a8Q<5&!jbuZ`aizdg<$vmdKl}IR 
z^}mfH%5_J{*Wb8ZdnFXx%L9=B{T=)8xoS;?g)L5AToBB(worWe&v_9Oatvw2=Z4E} zF)n#`lYv0;qwP}agDa{p(zuVBV2n`N>-6{&q}*Np>d3oHH0Hm(VIr;pb)4?C$y?ol zGQ-6#`0VaO3x7&TH9BE?KlG~Q3n?Np_uF{TC4qp3FXS0LRp>|dNv1D9#&#mbeIs}1 zOPY}VGAgCJCc_rpZ-F|^Lz)^p2_U&K^uxEK1mGHX^gJdG0J*y7 z_aVyxSQ@gm_?+(m1Ti`d@0Sd~B5P{edMyGFXD!G#&LsfVn+C2tVfRI1C5m3E`xc2^aKeGMu_yw3tv zZ$L*#N3YbW2Fk^w_ivABf?kWl;qj~*h!x&jzg+ASaOwBM zn?=b;ri+lI|Co%zS84S4bq}K6sC!!5Ci;*_t}@S@)PzKqAG=@b5QBz$CSka|2~3KX zFX+@EK)FeyE_&DkPELE)P&OBV@eq;Q;u#Cx&6I58Bz|&*^gh#hZlrZhC59f z;i8H%7_9As5DrRBN^S<}uyb-HJ?UWVo5FZ9GXeRS$abHoO-0gs?R|`XJqFKRd$L>B zR>D$i#>vW4IIqB6UR|@k8SacOak#441nJjg##K(X1Cw-AUs-JcM%(KbzI#c81-VO? zoV-ndgtdJxT~CQ{py80H8$RcB-ZzB4^92J=yS7UgbV1=<Pl)Vf?hPo_cL zfc^UWIDYe8q40z0-VU&Ov?6!xP7$c|NeOzc%s@^-!RZx^4any~aYf;-4pg^Uo*TZb z4`n`53JzBxpmIZYVbezf`kuS4JY){`mk%oId7tY=t}CV*9vQczx>;*9J#=y0FJ5Bt z#XHexGDfGHMyv$Ac*)Jb^1DE3*@N>3d}z>Qc%bGaaaupFc$oG2WE>Bl`^&qbMP-8$ zm2Tg$VfsFQJb%&&TjQTi9%LZy-&=R;Q5C#MAzh16FGqpG*S^fy(1ZN?Zk;G9C7}=z z7^@7XA@j$QM!w1nBr9=mMei3n%Kn8B`r1^KzGNA}HyZmh-9P5CW!sU;fcMgOo6`{W z`cEAL{|1O^YjI7Zcfq*EUxtr}$^mFiZF^Wh8>v*(qw1olIk8$@cZ@9ssaG2~yQ zSrth6Mxj()X(>qGJ%2*+dlQ6oR2<(`UI>aW&&CDMItRv``-MaEqLBT=`7xHtPr&f0 zO4#vbrLgJbv4kvgJ6LQ&OWrRZ0H=}2H*KaE$N6ZzQ=R$Us&LecU;5fe;Tk=dhM5Mm6JAAbdR{z$2bfkm`&vQt@OI{E`sDN`} zsCp2#o0;3lQYgUfkNVl9NP{C+SGasTjL#3=1~(_{$FC5qsa(3RV~A?(-Y}0)B)m z8>;Q9K>4XKyZm|$)GcRDB^R^+>4^HVn#mq0cvx&7v3&q2H5YELZ|Z{&dg z-p#@m$4U3Tw!~jsA%gO)zFK?EAdH?eth7Bm2r4g)Y+Uu*fLK}l`t(E|NN1&-?@0fM z7M|QXGU(Na5_{4<7u@bZR$@{3Ab%e!-Mg*EZ7Bf}`vPP>`(U|GR2x=w9YC7DTckx( zdeB(lsb$CBw4u_UtnwdTHK^n4((@<1-=gLfMvLb5l!1G$k$>*at(U=Mn9U2db^wlmSWs;ifC+#qG$_JG^310 zT}#A%&FYK94^^--GLt2@vjdL3_O+E4>H@`V7mtLg1n@L_xzEqD1?lg-`3Ou&NcDcq z)&LVG^2xePrQGA9|EwcgpO9elD4mCF#I95*d|@Mtl%UeJ)ryh{HZDU3S|m@3ju zszJ(|UGLxgSPXefv`-tV;d2>2?+mpJ%fU)?OP)$d7(VBG^KLxx8G2E*x-RuZ6ui)6 zez-YW4iitmwA(-Gf|m5wm+3A9C_Ov#sftA%gq+D!tf;m{sl86j zivNTPjE7_b3^9%~V!^>7>v~8`sY&S|D=n z`mf7eJHT!1*6ktzd=9F+L_89olOFEN`o1E82%YK0^Jc1(0f%w53~>Iza(>w9#sDfD z|J~=r5+i}QJJTfjWFJ(eHIE6qcY(9|_C&R^c2K$JAYEhD2*JCmL>ymMLh0^p%jHyx zp~GeS$75CL;JQKM*$~!qiSEyv-`}o=>gQd}8o3=%D)v@xFUHXob=j(wa@%3eTT2z< z*-m`E>3!gdkRDi8Bt2ral>oCZD%pCsVjN0Qw_uwM{=SK?c3K4f-W6Zo`IIl;e^Q_5 z>cA)*_dU4ql*BQdSG6CEN@C)3ez{ht%1#%5zhy^+x$pm+^J%To-dV^2`!rtQm6;r< zj6a=_iof%cE^c5*JV=MtbDABOU&7~Z7fRo1q+&7wQ|j_ zDoA;+t(Tng7F47wmg|)jBAxtvl^NHXuzhds`*orNsW$R9T+zVsf5WZu&qN4F`86>v z_c#GXmF6V`&K^M0dBfsv3o+g&?3CPiVJp(K3%Izmv=T`=m$1AKo7P;DK*Af5z8){J0rEZ_J;cKg;>g^Pda%QM|QpJ3enIJRhk(#^;&edOjrP zWq|R#_fc;pasJ}|R2<2?4?WnVbLI#Y+y770)?DtVBE=g=lp3pOsQZI6N4uViHZ6Sr zp^QU9_Fsj@JsSH_te=N+^Y#|xqg}J8tuGx(93d)B#8<*_$8rU>BoRcd1zmxBT<1;n=3uD^ZuWQ0O!73_w83N~u?jLwj08V4I=T5rd_(ni|umh(9DGzrIF*Z#3qBp<)$MdBsUpH2MHKyvVt)com=;%IQ4vvmRR4?%F6>*$RR? zx;N;`HbOyau##nO6;%G5adDW|03jP`xtcYd5Wde?bj^7JT=+&Uw$;Snt8lhA5~K}6 zy6nhBCdN&OIA4xxT0aOeSL)6a&k7%dAq3i37u+Ap2dhy{$J3U{V7z_xo5b==c=5|jGufgFEbj?8?_1IV*7fo} zd&IGyc`>K&0*#I_Y z3%eaDhDhq0CiWst?IaY8z5~uH*RhQZCXA__0g$ zL>;90lV(539)u$YzCXzHrGT7;Nw=&Y{=Sb`)q%@jA`yEn;T%i53-#)Xj;uOH#W<(V zr~*aKKjfY-|5lkcX&d`F=)rFZgL)M@3a+b^nx{!Xl{syS#5)YJ5RxhSghFwt*b5xBl0 zQLV6FtNLWG>zfE%&lVl2zz>;7mS=iEFb|)9sQFp_fQ#d@XS#>u2l7zI<_P#k#JG=B z^Y+}H*9eRn^j))@m79+l|K$F=L>7AAZ9mb{fnob;N z2|8-nNb5wdi;sRSZ|Fygj!y^ba{Ey3IrRl|R=1;EM*P=7sZyl)zes!YXs*}q|68Vn z$dHhTNQTIeNV-f(1EQirQWBw(GK4ZVNQQ{0NM@PmdE;%Ky$x^kJbNP%qT#;2_ga4E zUZ1l*>%Q;b@BDKu=Y3AA_seTv*S_|CJ|9~=idJ#uC4}!-X1luW1CX^er2N$i!TTen z>BFIBFfwN=dhOi`iRrH+9VZ$fnZN0&^y4+|AB+`N_hdSYQ<27O_l(Gttw(Nq!Lce7`ZoL{}@4AO!R_f zCnUCsZXZ%4!Rki=tvP`%Fz{puyPwwu#{ym_c=~mNl%&3QDG_s=a$nGk>W;z6lyuR_ z$GARV%5E6Z;G?x2GiiK+5NT>IGxG4R>bj7Qw$JXX3IrKHs0! 
z5$~coNR=rYlqAi9t>nqLJ{&&-$Hkd5unyL{AYV@U1@`;i3_c$sF$E(*YSoQc2nDEzo-6pnS|GvYwBi-4+e$uy4F|Gg+wOdx?DBDnY zYnU)=2iEaeOBBpX4WRW^md>yEzRw^3%Z6Eh7=?43dAPzbgfutr=u;i*Lz@ixPwGWF zk^h0gQ@;N7XvF8}(p{TKWSXblLsG2e?;EIpjaU(%mN3g zZ!GUoH?Vt%J$q9h3-`L-4^UrjMMkSl@?(tyXivqnvZJ5y`5*e5ahz!qIjd{X=vhyq z{FpIq9<2M%zt&Q3t~r88iR?BlJ8=C{=j?Z#&s8Y(88vmQQy#o}pV^!9yBj2|!v>y_ zXW;q0jRamD?CYtRYB}echRBm=S2>INkk3l|$hNani0U|iI7 zSv`p z{ngvLW}EdYG-==PrPZ+-8KIPixB<9N1RE{1n=l0tDenuNX<^mz^AnvsjZy;I~q2Cm?!Cs z`D}6B<=j1ijTRyrdA9d(OG7Tww4{GfndXaDuSNDwDSbvU>Saf!e#cw*Da#vL? zexrZR{jGEEzpd^1R@e4@`)408w$_&q#Pxo@L*jbMO8CBiPbb{kcOF(Z%c5z6=CQ6^ zEjzSv9^}7-%<^F!$);{xFrD=@%v`2O%q*XRm^%~88SRT;lz33pVc#^wZ7Nipv>E{U zA;+jX(?*EdKXo(ecm#we?M-YamZR05%zxiz6H#!5EgR#XZbbN)Vo^3UfXsi0I`m?F z%pQG)Z};@D4shG>LlqcAeETZbGoyMDN$RQJ9nB8ZBva4vl)n}^cbv~NSh|mDnH|gb zE!IQQ?V2b2Q9a=L$>G#7n+X`Pyy1INT3am37Z>F1gl z_E&gyTw|^cM>JEqMkYsbJyt>XtM7plB$8JBBHgVWsaY>xVEx&H@}oO1Cr0<8)k|rz zebb$Yne&wA9*$Z>Aa#E@Mv(;U=R6kv%Huq@XB$_TP7$yxD!P($vr)`LUu{WzZ!o1b zV0)xfiG3Z+-103+h@#gcd^EfqJ|(iir z)h7AojS3)tz5ZJ8Ru(X4a#-_XKe(Hz$XskW3H3LLHhP5OeNe*V#ojmFs6U~h{;z*C zqG(-M>hn)WMjEv$b^Ei>NV7Ct_R2=|@xNboSbBmQ|BhLfi3*ssVlMToApl`F-NJcC z0@!!y(;2p4U-j#Zic+o~%%8ejPh{^0m$a)Fo$LqTT6}XMH|~3`%j%xW$s@tSUJqY(t0bZMgqICR}_#0PkI4iuCdsm?t>1nqZX$P4TH~ zr>ODs_Obk3l2?lNjhHh%lGR|Yn?qxIt_?ntq}ivPI>2NxFZ4$k?r#_#k!+aKGu~9!sWv_*j)yyK8b394NHj{A9#B5d+hJ@S9{9kzw_aP5uAA z54g+@B&Dn_f@p!|%E5$raB;|@biux9gP3@xG<@!-28B1oV!z{=J&_TcOmm=lXvA9d z!!+1DIIS#v9P=(}KR;D2ocv#P$46%#chlg$2u)V|h?>I)bmZ?;Q59>2aKnnntT%CA zt1%%p>0kxg+!5~oYZLo+bnO^BrFxLk$idb>y#vU|@{!Og!!R2Eo87{W*Hl~4)MFEP zzi;sOUU}4mMBtd!IRX)pK9LleZ&o5g4~uP6!)M67_sM%ruNC|zGmdiZ8-U^6fsz#+ z*so*d?c{ewlE8TS228>hEJtv|npOCA)+B(w+{YmXLVo zJ-%Zob<{%b@8=2Rc_-i7E@lE{CG>1srj8-&sm=uF3q#0OC#(HqP&;a3Fr-A^GEs@8 z-*er|jo@_gLC(%ABjA|zm|rPw0%~;){%EPS!0LxftOLtch$>1`vqN_nC7zDRvZ$R# zZkfMtiro5l{Cl2^o|73_Ks1u?^R?7wQF!^w2nI8(JARSAvR|hIk=p4@CQF=voWF~U zakUsWU$t)ExmJPfiQh+1O()`3p(0JP_ak=W#hwmdRcAg)K!I@p-mBCv!G*VBs=-BRslGZFioIw_do7KSh<)M>qGcnL^+;ro60@O7II6jzE}ood-!jtkjg;IM|kMXi%QUZ^s-~Zu@Y+Y zQ&zW4Gy?rE$)v9|9iWrL#MJS=9pvZYa{arTLB8FlOZFvxzC5SZCS%)yhqs_SI)((& zf1>o9j0b_Q=w?=OBi6Bp)tE@*dpP^s=YdXE^KfgeiiZ03B9t-F${ocz;;nnY)_O=` z`+k8|yiXYU9CE`x1?%sz9~L#quv)Qh^~M5ohr%0dV=zCEW=-%h1MYhWTK8K>dtx3| zaMJHninBm5a;bh=b{32-{2CH2oCYcp@8l)SEuDDMen+F}fBK-l9p>LgVXxJ0d760= z_+&)RYHwD7QqpXQ+3{PbHnP+2#Dz-a!;_flg83dLx8=TjaQ7gCvwDK_P6J5OgEO?) 
zc?ikdglFG8iPw`=FAos=(a4dXPcQLyquM@=cvJp1FF*Eszn*X|7MO-<^4M;pIqWBky&XL7f_Za3x*~>Am^TJL zH{Wy2!mnn3xtlo8F)rn+YZcGb$8A0B6c3~7H;bS~C)UbXxEGZFKehMh|O#HJv%EUyE7 zhsqG2+WMEIOZkXG@YnY{fsr8JpR2C7F0+!bF6!}VMB9$& zPrXWHy+S~w)Gn@CCP4C!-45%#sz;HVm-o$Iz&a=fle(3m0>rNLst2#51weVcI9?r_;EAlxaVzj~n%g7x*c z%ZQbM(uuvl#$MJ!@OjmVB857zeJHl=uX{5{3`ui5@N5I({F0K|-*$+qDP-59>V&1v z!-Quhov`P%i?2x%zRx}%{4#d98|&!%ht!V`z+S^hH$gwFugf@+ycmNykLBmjIOhNR z+~11(t$V=M&;B+~oaS)*#Hn zPRt!yN_o_+5VQyczUV7T*#Ay5i&|42%mVrK^QfCiGZ5}ONcD?x93C~+w+3AugGA9k zYgz|}AbbxmTgUTOSe5heaPvwAb6&cOUdyG(+VIEiOPOs*>le$pKr#stx@G-1J^N8V zXV0wO)!$E&4P3B*e#Zq{@3oD@x_;DX(BCM$W(YC5sj2 z0rAe^Qc_9>*z4KwshbSLx@EQBuFwhStv45NZW@Q`X!DJNgmEz2bh;CoHUXASM_E5g zO@sf_?I#?U|Cz_J^_-H}-nk#beEwRGQnKbw%)1Ubvp?Z}6V{nkM+QiIMMk>P&%VdD zBbvXQpIC&3klehq&JN2l#G~=)dkEJAk~+QG;Zu$KeP-WB249aN<}9lYr-XiF_8_j6 zy0aCLP3lTde2>CBvmx1x@fPTnlx#nEcLel4S`Ch8VNT<@pd-uW)j)n9^2Us!2@zba zg?Y;5`f>IYO~7LarLTqEPi4Cbg@P296Oii~c5Nj?$P zj$%GjH~g-0gAuO>t@%nZ;5?kEd2de>sy!ocs2+0&G?h)ewqso)g{N({)`daD?$Jef zaIG6j*wa%m72)$gI`7x@lmz@9tMT>w;{2cXjta+E0Z>$G3|>9=6_GwtrxywpBlf@K z?hvL-;w;Fo@AObzWxrahP(z zlq(++EvNgV!|P$c@v#kOQ4;ubTL%7G?t;?1w+6 z6y1H&s2%!~ueb}J=z=JI@!RVQ0}#kud6t_r2DuC4c3~6v{%y&ae%=Vz?P>PLa2)*i zKCty(|F_laClX5%!~21etkJGx^U%e;jegq@p7UW=p7V7U`$I3Cb#EG*2eRbTA7Ag! zfwKf--|J^s4-q(j?Gf`V^p5FprRpuf4eh^YUmn4{M4Nqoss$%ud1`*&+m=qC5U`G} zuF3;v{TpkSP81>o;WvBK+1d~>)iN#GorDrkvgxD=^&{>>{Vm1E2NAQNqqEX)tOsy+ zV2yl%`+kKMVnQw0C*-ZNOSY&5X~q(6`B)U=`*UXYHrE{ZNGD{H9oY_jX4h>aI`RC0 z1uvaZqe*y6fc_`g*FKWXp_ayp`Ky1K_ljFjz%X4+z;&xB*kHfPS%i6W_M>eVX)ocq zK}u&tD)<+mXyG!)9L{@Af5`6j!aU7Q`LO`B-Uu=T^VY9AX-Mlp&+`i$9mqe1@?DYs z5Tc7ytJKvSL-{t36g(QR@6S>!{M%8i|M%gP4v5E`k4VxU_rLgFeL}aso39@6DNL%A zmBc~W{yeJ7c3rSkvQnD*b{w2bIlcIo#v#1(ujvW5GN3!f^gTxx*GVJjZtiLrMKf&a zw!RN$k&lAyCWZRH<3D%Twv(dq^T^Hf04Us;MtoCzd)PXL5w2f77<lHt0^sx=4UcYC_w$X=ZGRJAVgfLeno>|~n36B3Wii(HZNr;}N+CJiW z4bmG;`q5XH2TQk8RiDTaf#T6i%@0v|5F>E0RxI%=irGBga6YmC$q31t>}tV!zKN=d z#DaX##Im6*)eg9pk@0Ehb}x`F1P#9JZ-oAz27~7AWe}5>SXEq74w`(zVhMyMAg#6K zf8Z_vpI>77E&1i>?O^u7-%_|QLDBX*tdWQ$=B~I+JKRo(^Ne-jpz~BM zkVw63!S2)!Z?vCklo)hEhS`U)JJ&jazrWat&y@%}Ew8G7(Ii5x%-x7Pm>0=B_qWoi z6WI09SSGD)4@p(q@7fQnZM+usJ=12b@ys@owekdC0lb5~v_9Emeb0e**;IPWH@canvD}3D!nr|W zC++chIG=R+uEac~mSrY?#=LO~tRHQ}_w0m&*8F?4C!sE}!t-;?FetiaN!=0e1T}>| zZqbGcFbZWoA^rL-GCz9i`>0(NA`IuT2-~!y{G9w3p}(r?7~c{%~=0?Zi)0aAEmCJdRf+-kNswb;Rnas zAt>*J?j+?99PIyoG757$RW>aQOIvU~ayTZ*pD+hLRCLcxQs*J=)vKC^crJ@&JZ(cb zCH8&gfBlt-xl%mig0V&3b8vRV^!o{{`*gDs8@*yM3X^YaNeseGU>^56cyTrj^`H6W zEUD0eR^vI9`JN1+YeIFRZ%r}hhsoP>zGocCyr_6QqdAVqabKc^>PJw%{V}s2&vE_o zCdaFweT^ue(#$X`I}oA*^cWZ{+u`r}cDoj#QFySsSVCQ96etGlR38WxK=9(>i%A(o z zO`|5H6l`lAu$%!gQ7pIaJV{3~`aQnEmK~_XK7id{a}eb!uXq)3Vc$j9bfTR&)?sjV z|8>QEM?Lp7vB<0S$T|NJMT>L>7&-qi&+=#ncKQ21k8Z}{IV9O#L(X}a!^$fun_GrP z0`}P&u4N)}^4k@cnM`opshlx^bwBqyX&hKxNkGa8q*f7W1j6}_yKy4~Fqn>ZZ4Idb zqMX~>nP>Rio8OGHx?Tf%zRH^I)*sNOX@hD+VHslQ;g5~h1C$>}Yx#u72RY|fSB4fv zLrmQB`Z$XUWII}TjVZ1X5q>&@T?Dlt#^(*2!B7cmCy>cZq-CNw;e;D^e>;n1jYUab{yPsK&8f6ga zA47Yzvl5h6Z+?pQt$_tPCGkp{Ht6+FO}mKoYFcc)#{FI$KvnbV+#i-U2tRwYbttJ7 z=Lrp(!8DlP*r7qClhO*G4O?!U-6Vm->Dv!%B=CF}xaTp5`2zF;j-AnU|K=BO#sAjI z=#u#Uo$~^&ho{;L4P#Eij2DAI`aE!iw3`Lty8n`5wCVc->}zrvI>xq&Img@sZo3iY zr)jr8UBx{3wsS#2`!CD^Uok83*UmZ664CZC`Hp?hr{7*UP&5v*sbQiPbmPEnKwmGv z(hZ55>E}FNR{~#b)ZaPL_sE-;QQ^c$HFB-%Y_C-owUz0Q;ObURTn^{2I_%MD1W_}HHNqUnflf`#vB76EK{ z5#Ny?J#hBGPixDjG02$PDM+^y&-+_bO|j)6Lz*p%oI}Px^^k`=C5Eq)vA-ehLdo}l z1)y4IG@)4grysJ1mtXYh@*>F6G7UC#VZP?M+QfO6M*MpSEeUVUKxz~1O}|n)P%1s> zrcVzcGsWO`nXoY=CEeOJ#5sX-A@zA7I_aXX}zbq*}*e_T; z(9*6JfN0)z5-Z)?K(X)^jXU7~$q@G( 
zUy8wT#Wr`-^Hu_Q$4iS%f2#+j>sbX1#sMHQ9aM2uv=l`)j1y;P2}r3(&w1c^0W#Xr z#kn#S4}4mL`Rn+7Z_z7xzp}3f6sOF}t#LmkvBhR(wsRvWkaq?G;(z@)tomI@p17U?x9OXBbSdSKg4|k z@1JkpM!s)F>~8tJr>-`j)m|q=l+8q|hYCK{JxBx1CAvtZeV;)l>?cfxqyux$f!s%B zwUD^-vEZ9!H>8C&KUTOg2xOL3ZJvlhFsW(kaJ)={NK@Ty{JBIhS03|}xzq;MDvVLr zKURSZZ^xpZa{-v=9?Y!1UkcXZ`fi^O5@1!nwNq`R9Q#EXKIDC<0q)$8palVe}xqQsOW^4W1_y_BLY%}ci7urC4CZ_O?Fs=(I-5B{h+5$sG z6e)J!ux`R4yLddP4}PA#b!l(I7+f;0U?oXmKhfuSUWW7kJ@5a|d|+!oc(^89ZRoxL z&Z{-T3VB#3(`&(d#uL~1$Llu`_A$0E-8Rfn$2YV0RsOTRmLAK>Hl=y&=3y zu&73aa@H`heH}<%QMBUWvmO-OHox=oAsp{l{Ia|6527XM#<>eN14v>!AKRdQFEV(_ z{^-ea2TBx6+HKcbk4VK``lqAPQR1E_8X7msLGf^-ijQR%WM;h7X>}h4ZB~6V_nVme zX6`6`2Xj6&OAjC6iNkZB)|@2E8>d0TzU>=l;S5ww+Eo;MnT3>kwm;f!4V?L?sj>BiNJJ+(OH+1HU3M7%{nm8C zzK$WP?wkXDv=e9~k{}dij(I452kv&ikMKa>Pi#5Bt z`kz{y*HpXa*yciL;qvQ8hON-VF3uADFcDJeA3Y6xoQu4d)&jbBlp#V9b^39+43xh= zb$OGR4UTMP`kJNK2mJQC{C;6v-?Bg0TK)vp0hU4#7}oy!p|xE>m6Rs32%2RYxPph|X0L7S55t4;IR z{}_CP=8zr{`MYZ z2`sfBum37F2cPfUn$;Rjn-!R25>)u?aTS<8@9Y)0KmhB^QRz?Ib)a>nfhg7A3_eqm z2PTZ#fc5a>_X6JSpy;`&U19+)URmEDgy{*@N>uDSEGAeA(3f9u$PkN>TB-&%bQR&9JY7op0v z`mdzhKj+@o=Y{Iwi?uBa&^`A_Kq`t1&r9^wC49+HIp8;TYlsZ<ML;iW6cM5h$=$%r3i2ZU_hiCut5zv+$Y6sdwtNC!gKP#x_i|gcicb;8x62<+%_urdOQD8oTi@rut@NB0ReQ1S z;Xgl@?kuPMAUX%tuaESU;r@>ni>TZ5^-2h58ce2qS%@gSC&K2RW1S3DYGK?Qj;{-@ zGS}3{(DLaWm*t(tk@=sl)z-8z6xrXlLH&0aku9gXHN3HYEzec$A#V*z^@yILHqHRK z(+0fjXYswW)?fS6B%Zs_8@!zY^M;MuUdXX{B!PJg=4;z_Ac_vhpTEM#kjWz!ZdFfQ z*ALs{7nk@?{1cAG@{Nuzpz)7|+Ae43&_Zp{o^PHLn1^IQK}m^qKNm!=j^EEmS&Tn< zjdxT5-^htz)};azEQ096EN zu8#DpMoKMiY|F`o5NWc5*-E()*o~yPg4JVzo%C6PVloR+JZTf<<0(Y`j$ zv<-jFUBLaX%!}l!zp%b7^P>g5SSPr7GR6M%t^&>Lmwvx~gt=#=@xN9nW#Fbm?kb+g zb0Ao<6BCq6K&f|=O^>w@$sFZ4bniq1a+_^k30rJH`I+9Vd4lCg@2F*I-Y!5(2gJOt z+qWUo*yAVfojQ?bOmCHO0`4#Gav+)KRG^WhHR;goc%V4*fKqD!?=yz`2WKs@A1C^0 zUGIxd&@UDeVl*6vBgd#H*fEbNoC_t$Bn|?RNLIwY!W`SKls!RN)#Ls&6t~`_nDjWhHoo~ z7r41z$9=$cWed^$c-~8ZoJ#tsA*@4A2nx<#nF8_C;yxO@&E46WYX6q<&Alu zUiT6Yd5Guz(4Lt+jqev^rc85H%sonFWqQuwhxtTKC@gP|3~JAUr=s@t!INESHwvcu zVB={0y%5$O%#G=ecENRzRb}Ck<1yYq%vss#J3~O>Zk-l)_qL;O`KM_e3EgOw-^;4) zem{zFP!w>h8bF&B?=IWb^&^pR&$lueJ-DCD@!Pkk#7+S2#^x98JK zOcr@vxCj#yB!-*sv0mujY?D7*8+h9eG1iL3AtlK<`KTl9i22;dgFJ@^QIlCiLW|2N zBKi-XmdzeR+*dVD@l1`P)nh!W-U`^ieE(=kyC4Y(QuLRu9IHakobME$uxG+Erk~a> z2l4#Rd=5K>5IjHU(Vn(BxYXXMPRVX-oNaRpx`@rRurqb<97_r}Tc-QvgjC&(;|hKcm&YSLHERGcd2Q zJ&(!x8IrkFt@z+9W+rEuwu0-o2{?kp$Hr``_|Cn70lu>PJ#`HuA&bzHam zx~oh5M+*o>MaVK%R)gLuD|>lT32+w|>(H@PfbAd7$f8{}Fd5Wk6>m@tlNM2*Z_+k` zeR}l6dNS_k|2SF~Bh(71W-S*}&$j^csq^V?Rh!|SK!U|0?g#LMeKybgit8UcwZdw? 
zH-mN73DNY|by!y)7?Net0>%{IFN^)@2G7Pqg}t75Uc&q1In^E)LVJLv&+Gvou@7!Fr+lgZI0(*vWJtL8RX_=$*{b`0RpxKgZ46AM0a3>l7Dzdwe^3^HB64 z>G~KFH#gontTv0dX@~fa>Mx)e+tS{$4><3qR7k&bcL5!onVG-QHjC^c&NWT&O(0ux z2{fU_(eWyEF?s+}*C`*xuVm1v}2q-PyMM6ZuDpO&%i z+AJlsj&2BrdlBn1cVN9@1<%Gw^-8qKC3;a2>;HGUj>@j#xdi5mAyoIIi-2Bu)brOSYk@rtGE)7*xF5pSEBk!99>{g6OPS7IK<}&RQ*|c?Dd{{qcKcN(vK`1w zJR$r5&xECg_a^dSHaiS9ur9az1JN6-P4cEC$c_MNr zv1YeGAUMY=DEypChj1g)7T(D^5b_u=UE%766q02ul|VlX2P!D_+V){y6+wpl8lT5A z4>#2C{3a2ni8vALAEGJMFjT;PNqSYunFk9cV5@KANpPx!)L2=DFUOj|=fei$LB$q0 zn--rZr-nH+k3@P_tMU2&)YXumwH1sscCS75!~21+c0mfBqd;HIU^;ZL4P5JI{&tQzWm{AEo1ZK%?;@@ne$6l55!&LmU^a+>l7{reoqKh0KLQW ziWBT!km|c<-|?#@NJ9A7g9A%VXz7L4kB7`%NK2IdoSc0xBHIO=e`kSpCo-=Zxbb;f zJ6avImfMfQEj_CLT8^N5F~KEU(VOrKVK})ACuUB{#kH;=5qU4djIf1BNE zhNT`%7}nVLbPU7J=8pGzBG`X$?MV`!(E#?t)vTvayCd$CUIHJ5NT~kg$qSiw<7mY0 z_T{X;S(Hc?wsOYjy-scRM>@|%)GzQgAfIvp>+=u!OLNYm+T`wH9`}7s{eEi!Y=fM9Ry$p^IGDCpZ|fixxls zrl)d|x8~5{LPS+@mVC`hgjb~TKbKOvrZ-@qG z{tJ|ED(=6um<2D@I*g7ww?cIzqc8dWAXuKyX@|ugh@_4uhT{8daANhfG?_-ASvkU+ z5Y+}I0pb&U(jPx5}#D1AS-D zMb@qwCyTR72M^Mnc2b$;<6!lK_fW7o3=lE%S9tSaTM=#XLvc?0+FjrR;TUdBGCJmK5xd-1%6kn#_der2G^LzXNV!u3BnxyeOS0@z*?qB-SN z1&ItNXa=~kE?_zI0)=4>MBW!v<4SG?9*4xi%sw11e}8JZe#QEg;5|3G{cvCPr0KZ^ z+#fM#%(3R2#rcHZS^EruR#>2Ne4)1518FTSXP!)tf~(8vl`~FLkT)s3?HbnS)>^wY z3RnO8KD+ha|F_krYsZb@ghfzilI&`M1E6foBGNA za?ZfIU=u~FUR?i|*c|Y-JvsxdW=lUB@LXLc#>VSmRQUe7dl$>eDiYWqjG)*~#C4q0 zb8#%aiD)z5OU#e59eMN8A_MBmV10JXzZsC$8RH*7O7!~feH{bH^oM5jW9fbr z{Mt#&@j3PfT*w>$@sNlFCGJv6{!jk*$$M(a`xe$`)i5iUHNl&wC-v5chrma1RGG?T z3T7IJz3tfVq`#tGz=*kxQ9_(F)E^gspX2FMITOrNYmv&mC^-w3!vb9NA~SG^_wRIn z1D=apMRjY}J~D)SuD+=FX%0A58CYF^4dOaR(5yxrzW-0fy*NajkC2qt>ta2T z$kK@*w>@)6;uG@(f9wJ(QJC!A{df_PRxgRpx8S~C?sM@!l{3ipxX`!8_%Rf|qG!%a z>Oz`LZcJ945vb`ABeAEl5^T|0%raLlDoOlm{@$(~StA>+6YIT*ImmuTEY>L*oy*X) zDDOu3zTxNYK?|~VXX@qq5{1m4_nl&@Y6UfRV`8sy2fqJ)eHN@)0^SV^18=6RV0G6o zvyaqX$X49oVv}bEBpx@I46bd0st=`3zjFFupN#!2H$f7xe>?lh=K$uN2!>t=pKOHs z%GtbMaad1y-8?(QrWXvJeP(?;RSgu3DY}9?1CjqOZjB$jg~*v8{?`HfiDdYE2nDaO z52KQ+Zl(s`O9(e@-zZ`qU`58&irYQNrzbGJpcM1r;ve4XW3ENyJCP#aClirBx0Fmr zN+P0{IU-_+0zhzQpH!%26>z^z?yn|wfNP+Yss}3xT3mc*)oMCH=}z(@g+U{1{@yWn z^-whkJ~0+plCFh(zAxulZj?b&|MS@GGkIV;t&1nimqTjh!o{BL6_Dj&BmJVQ3`FWa zxw7H?QmKZ0;(j>R3GuCsP}(*@iKYElw~1EBk``ywaVLWOQ*jC5|KV2LeZP6SwH^0A zln{GLBSc;=sX2oAC#J>HN@fR};d|*2efaxsP?g%gQSLGd((ciGYj`fCxWonf_VfSt z|NrMXzjfaK=RV-<^nK?b-Zvy8Esg|W&Z5jmFu%2b9!x_oxa}311M)Pxa-%7pyR^qO z^wTLa)-PXY(WNFsiRP=v^CPnmu3FEw>pRvX+J(=5eKrQhJo^myo*Mw>0P6ak&DG%F ze7TpAEegqtcIka%twS+3G`D(pbs}=ms)@(;UX)reBwWWffb4G#+&&m?~L zA$FJzd?nY7Cgsle&~7K9)E{g+QZxz3pI9c|Smy?6@z(<{zQOO8^8LvogF#TfyK(J~ z>jYG}pI2_jd^i8Xq?E3`^RS%9cH3VH`}q4fPq6yq`N*9)#c^~vuNk`ki{|SbxCigL zvDXF9G5@JoHn!_*=l?R#|YdH%lpzBfbY8| z>NYm&s>7h88WXb{^XapnRMi(+$YI_<|54eD zQ5*+fTThD01|V;(h&_o^^&nDvk&#aQ^gtn#ep_OA+J4LpIen|ImLL1^ zXoD3=S6iUt@W(rHTqM}!*PN%+!W_ZxJ0G5}@dbL@M`4T88OV9`oZ#QWBqValxLtRb zJ2HB(`u**_8nk+OJ?S$E=VNyEO*~6oxLz8#CS^fHGORc8j5zFDB$8*1>##r3$YGJt zo`c9N2TM2>{qfw1-9V2jz`nM?HXF~0=pSzuWQ^-eMqF=+YGY#5}m;#uKMV0I82147ytYH2w1aK1jm6kB^5U zTD>p_sKu0BR<#9Az9F2Gd_IWxFuX_;34~#q>Y% zzZKvAE#9|2|8MJm{(nZN^U|miuuh13v^(C73^Fo@Ja7D-0eL;YV{Z@5;Q0}w4;HcB z-#jMpo^)?3Xi7T9m{|bW=AN|MA_bIAv;v=Y5_(;Q06xu0J()++MaNBDL&<<0~?iNXh3* zzL3jVP;)$eF07#us;SSdTfG>BgG@~YQrMT|-KKKuttpN7M(GV`!KTY8aj6~V;IMu&i1920hB26rZgc8pTCtZNnPJCuPSv{ zcz{MUc(Q1p+aXDW-_^~}*i}YhKpE9a2Fw8QhC~2w1)i7aW}KcPlZHgv+4Iu4dr@5G zhGW@FJU=Mz=+QIQbBHfij>hKw0%GOlt6pGPM0zR;it4-P5q%z&^GMn>k_y?u!!A3D ztb^YLYT-Hy`@_dW&0lc+n`ijNsf8BM8xA7|r2B!OLvY}Ul^Von*fP(Cbp|5NlvFSkEXIaKZc`o+~wG`F`Cb8!?Z6C8yJu 
zBOgqXqHe1~{sl1`9LI|A+^C#jF^)u_*l8-O=F$Xj^mbfE(kIM(};(?Y=)+=jwsg^O|8GH4!qBE;Hmm#m^_wmW1^Z#78-uM6f_gg=2eNMc&^Redv>%{dlK>?qey~z99*K^*lalilW$shI?vHqW<|CwubKhn%_A90rML0QPe(@X{D+pmpe zZ);Se80yEYj-P{&NQMhtwrd?+R7tYAbiN-hF;{we`i(vNCVkU7xp7a`|S&cT~s z0p0!TvvB{<$fplJvv525)EjH8`&*53ynJWJKXsF>@0-9`=BmI!hT@J7^W6IeVd>PI zr?*rE)_3jQkt|(;f|Y8;7^gZAv94K(I5vRzL-fw;JQ+dbXX-snzKx*%j`yn(4~7vt zhxI+4!`MeVeD0TaaWnG2c7o%6a1bIoT%WYs(EY4O7eWHw^#nto&sNi9{RH=M!w|COLZqw)o09!;ek zMI>YW^wT|m-cBRWHQl>1hNH-vRpx4#S_dMx6@B&pYzFkl#RUCGjS#+V1%!lCz%+87 zw7m}Q*Sodulr?QcrbNwM=~noj@`}wyHJO0eCA$6;Pnsg79U6XBQjHMQfHG;Z|AcCn z8jbPMcF1q?sy)P23`*axzEYm_gH$W}cZU=LKqiNDN&jRKNaPhs3N~SXtFp;R>i1r7 zI1X37;(PW^D=$OsV9e<&Ukc~PxRmE3*OZd7pZi_f9KP(waQe z$yA5vk7mX17|%uFk8WIflU{)+%=6S4UtoVCbB5>)eGxKWeb~^nBN?ncH2E96nqlY3 zpv>HncBmJOGxmGcfqgOD`l>zk5R+N9{`_YZFc)V&w*OlPYBRfn4pw1Z&+EemqcK$= zs1f?rub~b^41Y2A4Of9);kDN?3%I|T@g)24M_ezs7589cstJ^qCKO&@#JWGLtd1*~ zpAxlHtYcl*3FLvGm0g4`NEA*fWlO~z)?B9DyWAQ;o{N#w`#8=!$g2^Pd^q3gK6)$R z9p+G(#XaQ~$Mab??pzpA!{_i(3d(JU|Nr}cTXDX%ZoU6o>n2H0;!Wftq!=;o*l`8t z)7*o%&n4oyi!U~;uI!lxxi0mC;_8_E*Ly0$vuOZ?o=Z_?v<`xoZT$Y*8GXQaqW}JK z9M;V!x|@HV3`6~tB*sT?YLUEmgyJ8}yXic6*_()OakK{6Uq!dbbK}-+ggl@#%p& z_VvvAbv&Os@bcaE&{^2bCEjp(JqzOJjZ$3&XJOBX1d*k37S_h=4L)N1uf>PdKNVg7 z%wONSFICPFw?|knfYh&J@7eIV6M`#fazn zvM_f$T^L2ZTJr;A+i;(*)J*$%=?F@->&sfaJb-E`TihtlwWHL?{>#5R@)6(hH_aVJ z70^|3tu>jx4-z`;G=06spgG;_Q@KqK_y_zTTbab6&Dy!KQ-`_{E~Nza=wM#~Wy$(^ zMy&rqD%Sf1=MjIgtdT_MJnn~$zA5`ShxlFn@0F-bA-WwN-hVrX5%;T$I_xg3NKZ~& z;EQlBM0@MD)12#tOH{-)RXo2q+{BjJALRqzX(fq6EK$gN*SYtD#0=!kmhJYCArP^b zYhUjES^`6hHX=F>9neQ5rhWs@0jYm;D3)Q#NA zxK7v|UnuU6=YzgpyQO)nA6zZ{>qKdBJQOv|6S5j%iPPQPOdIoio{Y(+I<&(oLu$a$ zsaz1Ta(is3o`d>nR~Jm@%TfN6H)UIU9t!9B7A|nY45db0mok^mMg99NXJ#17G2iTE zxuj?oGH04PDnQDD+UbYQ+ds8J)pH3^@v?RZo%qeb(A)vG{FCSI#x;PsqmarDjVkai z>5tUusRO=ymA$`j<9X_nLSrY`Ye8vb*v}BpHS9ME?VOLvOJ_a*n|HbcH280yP+vYUz-p6 z4fs6y%-gb?q2+tk?(=Nza5yQWq09yxdGj49O7v^n=oqm$?4tJFz1z zc@9o^bsW-l$Nf{5vh5*pMDY2ZtMrHh_sh6VmgXP41^;6zKj|z>(4_K*irCI(#J(_F zysI1gw?mJO#XZLNe1|h(A43O`(YA_<5^h6CLZ4&f^dam&skkj~*4&FCwPMP1_I9Gg z4);hY<9ZasB>$&$HVO4Va2(XXQU&F&jA>KKdf}t3>G44MaVT2nd^`Sk4)7nh&)8rN z2nLf}moU%pFyHgNuM+;9Cv824t@tO!+;AI8nS<~bS(hSh24LjME1LG30RB7-vzHE6 zAd>oL?E|XVhx+Hl-XdenD|n?UGA)4lp9eJR|F(`H+ZTGuF5aVP)2iP#G#`HtPc{vX z%h-QAwf55KLm@H^&(n9p`J-1~jBmdP=0Vqq_zpBpfT)&9{inhqV6P2ucvApj5%s$v0|;UYm;f=9efCRQJ4`Yil*QXkKaD*p~+d7S0c5#iK5z z`24&D@u&l??G!8TR0(~JwL|R1t#DFO`xGQmq1;t4zYyP7yuW`u%AbMyyh;su6<+T_ z%h2M=>6COxok8C9nvTCqLLsM5B)4y&r$IH==#TdHqn| zM)ccnyQyaP6LlO0=RUT@c%MKI3+@S-1_*eww?Qzw2};K(2jNN)J&s^7c`Lcbf40CgYRF- zx9{gQ{oT*S>Cc>T|7SnvY;%tH=@{`OVE&-Xgix4dI2&58)>YN690aNQX1>nmK@hp# zered44dtzC-_>pF2W3-T-HweozLNW6Zw2;%QJA*f9@8FxIf=jDoa{h9K{;<>F~&ii z#_}kRPYAB9PLj;`=)eEvFw$XAPk3xgyLUUMg^1xf@hQ=!gOHy{mGpbxNrdz;j~-6z zAPV4Q|B?PQZIYs?8w2BRUt}hQ#jpB-j8BmVX)S zT1*8c&pI9r0Vcfl`MjyIf<;cdwM zJ=P4WV%%)Xu{0?EzSq|CeGw63cG|3?p^12WGQ#JYD3u62aeP5*NuL+U+?0+V|*a(#-?G6OS6_T5|nwKOIRG;|HJ1^K3q_G_xgTr8_4G;f7xi$ z16?`K+B#ahz{S8bHr*2aUq(qs+NC}cH9O)r(>huRWm9dNuk>!>h+es|b7~J^t&te` zwUUYYLLb#cZ}kvTc|wDxUERcNkDzUl7Hvd9@P|u|_b^^^p+DS>&w(t-x70qO9Tpfa zna{*Hi4rO6=gq}ba2-A)eohS6ZFeFhE2xcNo+Gi!^Dz2z$5-V2z&Z+Q!y)n4<}hG5 zJc_wj6YFb)Jkpr@)d{>`o+U1~sDX^z3+#irFs{gY*jcAN5BqzYv)!B$IHJft8y#H- zu@tMJ!#gRk>V6wn(6Mfa{|Sq;M=4-EQ|RUotiSFVZv8`7qYFGTRoYbT(Vz8+Te0Xd z6U6h*-10q!b!3x24J!Y{y6x|eNS5-n;ds5~adSl{jL)_D{qhU?4eqaDpQ*=n$i?7m zKjuEgyqu#aT+&{^==-Ecsfw9UZnftAM;~8;B|fIBAd*4wF09^Zqm@F$q$RwFmwW|N z7dlEav0h@u&0xL@aqS>W7dG*})DB5;ZCyc&>cD$Ko`3LH1@Kvz2r6De{ZjLZQP+hS z|HCDtaFUq~=kp4pHvj$zH8Wl{t=wJ?Qja2{wr#Hjf6)nQ3bh7WT(kqaC+p$K42|eX 
z^+uTSq-yD-lqOi?#J7`sOEXZ52S2^1wSZ!VRgpxe@&Fk4w4_fS) ziM=zLAb#ckI#-NuFTY#ocWZ@8TiZvB2fn{}JSJ7SAHG@!2j5=uPG+ZM|#^v->W0emjyV? z-`+x`uGR_vO?03=UD7p-+DTAVcmJNbvy)(Xi+l*V+(wja|EynotdZafwhyRhRbrf) zm*7RnBm_dDOtul3kR+7#EcISJ=*9)seOpRJf5QIzJ?8V zDr90x=dqy3Jy(AW*Z*2G{j$o_SWvTl{8R2xHXN^gwDkh!N$Lt6a!GvB4~ZsO{2^NH zAb;nkw8l%+Z`*BhZ%=A5F==qJU`ui{AukiE6#s-uNX+tk0}b{pt~1$VH^ z^`UN}x8>Hmwhq+&?95&-71T(~4nLJJupf1cLQSlj7iEB7kZ#8=t!B7V*S*Juw+qx} z-fs{pr9iag1G7}rqY_vK!LyFO1g>G3pGEbRgg{Qo+Im5ZkH~3^bvV>b%zHbB`k{zM z%)5SzT~mlUL#&IpzM^j8Xe*r3xQB7DJQ+c5sToAZx5bBCc9nsrOP8rEK?Ry(k%QY) z21LC|th#o92A8fimx>5e!0mUi*XD72|1EF~s%~!r=kNq$`<6E7>z!%0<#`u`-W##) zLY<_8qYSSXYnUJsbH}h8>vdKyd|NH?f(q*rFVNnP;Cg9+4Eo=xaL`xCd{=BYxCm}s zy?-0lW3XG<9*=by9@@Gv^w`}ndtT`%%a;mQs{(b!ZeiUH#hB5Am)$TqT=+_1DFc=# zuk^dTz8h}eTytMf3%~bKhdIm*T@aprq|DH$3+z{emfqre(D&SZ$a()>{NOn5E+jyCo7laF3fL8>kGTu2knYk#x-cx!85i+}LQS6nB26?>LEhJI~o-XzzH zT&!okX{go-l8yZ37``*hb51XMw zS8j$c1MNk#uLr(MXTZk8nnl*0EXY)K^H}Zu_i@V^znoStiy^PAItUI6)_A-e{P&;d z@Pl){zJ`v_t3=EbT%LD@=7GB4S~DXB)3IJDI4DQuFoE^7aD|a*H#}Z)_{oa4M#$Mp zEBa!L_K-s#{0thri1WVQEG73MqAj%NQxV$FRjb$Mv2h$(Gv8m|$c=ITJNcV)e_?&x zhK>uS1)T(6to`nFHmC#eg8Pe4WfRf3p>4v2T}_neAK&pw0^_>B9P^*pm;`~lG(CHT zYhmGbxg|FmC_s}NS1L#S0?zm5w157dKj%lBZ9S_sp>(YC?a@T_`Mk9j3Y>Irx|@eU zLBYbD+k>UV><-rYiv}%(M+T#%2FJfy)U2T{LtLN7mem?cztn zCBBL}^wr&ICe$U4Zl*0NC0u5y2Rj=_07L87-m9qZ#y;|5#`VnBC6YrRUYVvxRF@6+^G|WYkS)^SE9d(H#cc&5eu3k_H53# zLpzZDy00a+3`h<;aY4(94oY4t1D6{zz~96zEcpo5-|DC`y+70o!KdtEHKZ}`dR5zp zzvQrP&Dy}%`DIv#Tf~I_Ls<{Fao3$VTtG)V_C@8RialWS^Y(ryqp+ zHO5w*qr=<4M2dbq73?NG7eq8k0=K1J;!tx)M+zBJsG0vlhIm>&7u1%^TmDs}I%j%(3d*RE4o7u;(7 zzKkzDAeqywA+WLsxEO@$PHj5ux}Yzc$=40ipU=5zV!brxFX@aWR0=$f?>?`HcA}@T zsb?_eZbgM9yAN{+VvjnkeiG1cTPX&^yfeOKj-+L`#Ia3_Rr68_W#+2 zj+l!8eVQj4b>AnhE~mpC(Rpibh@l=>a-j{^@r?avylQ~dyhmbbP*nRQzB0u z?;;A`tu!{DfprW_8{a7hVjRz^*{d^Yp9tRAeS7BMdq`^kVTnOw%zM=eXsE2}2K|^# z#0et?Z259dja@^Bk`0V;9CY*EdKac?27{AXgjf`xt1`#G{@clXCqM} zpb>7hsgaO(8IqnCh5BfNw`I>x0`Yi%mAy3TT!eMo8@eerLUe46BmYPjSgu!&)5@m8 zyjrnYn>KU;LsR2HKjHuPn)N zS&4NpnG#K54L#r^bwzeL#;+WxF`hk}zXSRNbk{2lw}Jb2^$lruxb9!J%Q3!#23u8x zVA7-qMBmg5iOxWs>9_3-KR0%Pld5dCtqLpQ8HzCAgrg^G2$FNU3YPzPW08g<1F7DJ3!+mmf3Xx}*8t>!BK3C@es*o)Ec zM$x^g^7S;v{Z<%w$j@(w;tO?0qt{SiqPl4hWmhZIoH>3+f+*qf_LQh866zc(oMq8#E zXrK=D#4hy{bvD1nxL-g_zKGgWuRNmhe3;Fn*)^!o(h_6NkNH0yX3vkFM*Xk0 z=)ksZ9fYCrx-*Th+X)XI0f!ksS_rAh^Nq8XH4rfy9)IY3i+(h-#k*%!`ehdTEByY}09L+n4r+87DE&HH8qo63&*{ourKY*%{lNR}fg9bS2aYK_ zrrRnr;EeU4=hP3>2b%My{h37%Fsr|_+>YVE7OH+(hu|e4*>gpl;22ry3{Ad?F?(&`hQL z6_^#P<`y{Cg2IODXYOaAT}@5-a?I&&xM8$^9&d0LT&rGd*DH(TOR^+)SOVjZlIa(h z?|OwZ_&rY}oJt78{#Vfq>k8uQZTT{>R@BK{FTy3R{hmnj^vk(ilMO+;dji(tJf0}F z_A*?II)*uVUxtn8@c46osX#py+NYk%SERIo{XG%$8lwi7id~`Vn^^}DmS5hl4QYbD zQTlh&Y6>h`DM6*+^D#RAlQ#wPBt-AaUGlBMI(cuNM8;y=Ma~J2-bbjLb>Qal9X`OXJ8Pv!L6Yp*)wvWExZYo+gXuLT3JV?aH%7V|r9JiN?YU5@hy z!-dgVzBDL}bSk}ud2yLO*87IXx}mr4Otr^j23W=me&uf$%FII&djwT;QOj}u>9tCMKS>G&odKxXSg=P9g5SV1>@!c_)i*ph-b4fPe!BQro#98t-#V}sqD`C0$f`1f&&uhU*BLaW0;sjToFDS zJDa-7ep^4rx>o#uXMsnXx$kM*$E6}9lEP4|6HcWZx%R?xgQIf z=mn~0Ou~8BJ}5AIs_@I51(m^_vdcnQpm52oSMvlL&UBrlztb3mVaI@|tysq@yZlCW z$;@6DP|yv{LY?Y>Ede4KvwC5Lhnt%2Q5@F>aj$2@GeM5;$&6d#JrH_j?J~_xOd!O5 zuDid!A1)@cAhp?SNfBxK$QDdMt{1(P6QYL5?&q&UB%hM9|=g{J{dTTip<9 z_TDdPkO~q3Qj+@LusoLL?h( zZ8rzZ!{@@v=5jW9jQV*eXZmc!_#Mu;`ZFG_{A0?V@X$fv{%}?zVICWL|EhT|y&t*Y z9sZf$qz@8n9b+?e*uXiDa~;mO{j>dZ+;fiqpYHFp?BpTvebP{s7U?UC`kyNC*7u!T zz@lsE=3?$5i1DrMm9dK?_`I4v|5W-+SQGEgT?(%!zFyNkk|f!R@qnMYCgyh%XGUm@9t^@@wctMu@4#{#I{nR 
zl&2iH=9S$l-<$;2*PM(W?ZAB3;Zp63hdvTIzeJat^A{4(@St_8c6=Dp7a)?p$#+;B5-rFmY-D(z@OUjA)3``IZ%9DFpq3j)B?9#ShtYR>j85Bb=kVN&Ri$(6zz214Gw1ZbwJChQtsCJdazQo zwsYH74UE=czvu6&;V?!s-wz4tN%p9OTo0VUtXiB`$YD`Ocea4^~q_^y0~cO{eIvdf=9b%B^X?5N90tU0e{Z zt~~&QWorv}f9i*L)wx8{8?1BvA%SZ|ycZP12kc%?_QSC^CoQQh)AN<0d5!@=%LZYk z^_e42ux^%XW|@yyAJ*C0q_rAqSg>$l&j~S%qhtTIY*|{s0Ngs}OL>L!gpFR7cX__C zVPE7DJ4J3>kIoBdGk!S$89%#(*n0+H{YXm7^?7(*m7>TFp#cajEmCPO$2xvX4>dim zpT0lm`~P>_f8xNuyUvpHkr$)XFpl+e-Q9+~Hpp7FphP*Y3b=%-TMAF5081gO!?-w| zP-VtM=tWf!v!AefOA4``TYg~zb6Go~ICQYlz@0)!{O;Q_X+$N&PIJYnOH+xG(QE!Y z7GgeXK;!14rLDx{(E_3q^VJjX_DZ88j~HdOooFbHBr@KvDvQRvYwMf3{)2m(P&eCw z`{_I?9FXIxdoDpE7Rkh#w3l(gy7`@wgx(iGa z(&QrIyCK$B_lR9C1CCXVs%$yUhVc02wx9O~A&t){TK6Lbl0G-d#A$v8s&jeeXyy(O zdv*Q5muSqVll{oASX@Lz&q3Y|= z4!35EI&W?2f}letBb)CrKYxr&*5n((`-wED92&lOr?(B19s8C( z_o)RDcDmeg4SoQGTI#^RbvSgcL6P((` z?%a+3X4;H5#wD&axN7eBV@GNa*e7WY*FIxn9?08!2QK$Ok%{M%!#;ECgmfHuz3GG6Lr%``;Sbfi( z6}`~nUS#>Ykp{U-G&8DyVqL!D?{<%SQbAYtm-Ik1J_qN$MJE?f;KxXR*eW$DjEAe& zUeD=lO6|JJ?^0-w?8}g+B=$j{#Gv=TbYTA+51egIn^s_?;ek3j>D^zr zH^0Yvc~38Xn2Yf+t)F6k^(|#V*6n+=7Z_j4c`vV189KTJ*X_%k3w7l2_wspUU5&fM zf}Q!dpGKh{omuRWn%;$VDrO|F-@gs@=Tp0b))!63t8=cydB1bUC1;!Sy*MrBIA@#F z{#W;NKKD;8=X#v`^ZO*Zq+))(m~Z}p{mIS1HSz9H-km(GFV>Xd&V@Q`5nh6GAC(h) zX%EgXe22Qf`RAUUlSlhrOtRVDU<#4*`0F~wGgKnRs#5EM1eNee%Bw6)=_J1Lt2uBr zU>^II9TwYP)e;QXBPrj93khTD`V7gQIE0c&XinU zLP_}dT;%)?&hw4AZm+Ne*J)A(or{d9bhx>~Q{mvERILf~7lr6x#@BElo z3mt<97yGT2Z8uS8b`MV`l;)!DM3kiHlJ`V8%PM`ViWllQuaNcAC;*EIx6bV|F}|Dk zn%li)osf23b^g4XZm^MCxcE5gC0k!OotT=?4XMjxB>V7v8ffSzA+-zhJfpV_M(o7- zbnKnNo$5Wn7~k%6PoEBkYco$BzKiuj0yN?l&Sb#C7iZomc6Y(d%YyN9GpVqYu-)#k z0^g(8yz<#J zeRdtLulJj->KvoNOPRY3{Bd+pzbbzE)eQ!WkDi`ifc91I2D#mn!AuA$9gPe}eYpg> z_x(YvzcPpCv2#Nh1M-CUZY^cf;Y+){;Aj!rAA~DXQe;^mt#>2Kw-)EYhr}DJTX4Ll z-RR@NxP*l@?$Ud+8DP}2t_(9+#Z)!Rbewmb3r=w*7x~y+5z>=k}l9^WPoke16N=f!#G17!cFa9d~3M z>L=~EHpbXn2ctF<7FQl-LExzqr)<665;3*g7k3{0LQJmcbj_%5AQHxJRfueFBhsjI zuAjiVziIu_tFr3QpQYk$@z@9L=Ew70j2$`%iAO6QC?ugy;K-Z`#|DgtTdDc!*jtR_ z;ywB6QD7LTHfZnP7*-B*I(!us^jly&utK9^9@ZntG@US?!Gcbw1;6_rvmtheL+Zyv z17J`w)V=0^<1f8)xUeheD8@mZn|SekW*-FkJXThFh4ylgSASeq2T9eJbh+*FA#ji2 z=?UdPpe~AgqFxq9xMT;7ERKmLsB5nrUQvSYdktlC^}mWBoYK_Pe;o5l8$M3jrglQS z6VL5@AFPv1^?jVIDD0g1P~6BYfG}j)>U|Fxz@wYt&#m zE{vxnUsI!k4WV$^vPcL0_}(eOm7BUic+ksv_Vsq~m?h73(Z3bakN9+b;KTY2w2hjh z7|)S*?g!y|xfcSGCIzd+`hniCkLd31L0yXFse3TrYR9a`_gCE*;NoNZ`P>*2^e)#3 z-Nby<7R$IBzLZB`!Wo8-^ zzO7m=uIEIDEv(z01`BBLTXeCo$j%;EH0!V4TR;2Z)X=!W_A@L{lwdSw3^T!3`jDoB zc`wX+b!wn#(e(S7)1Nv0p3`#nbKVb}{r}W*KL1~BN1G;pn_kEDyVq)MYa2Gm8%?5h z6zgyF-J6`r$A+wEL-#59>G=rfzMSj-(>CY5%sKw=+JCy$uxN1nzgr53Xq?T%s3`E7KYXfAM8CsqvVP9v<~TDKv|aq7}^e z)t?R?ce1QzZbkpoku>Si>$uK4UB&)ad}4<9S>s3F`XIYZrt?i^AH?&VDizw-4|Dcc zt~`eCt7?_&mre2cvQ}Pj*_X%yecja>`L9qf*pso=^dTEUCKlbHI^(+VOHbCaYcw#- ze<>iT&;o({@5(!t;>+XHSN*N!>F`*tWrtj62$YKoJ>Px85yWI}E)s5f0oLuyj$0|` zgXf!HJiC&rAnmN(msZsKYlz?yKcLwOUy=*7+@rdomhy0j`3UvO4qUzwex?`JXXPcv zTK9pLTI0zPxjsmq8=O7t#DJX_-g$1g)DGdvjD{(V8sHV@PwIMt@#v|Vr=^`sAjmkK zb?Q0h&7D=^etxhSoT`Q!sodSbYnf>z9McE-15*Xp^9G=CoA%O&m=~c~eUl+B+zo#5 zYa0C6t?+gIhpJe51MpCbH!NF)acY*mpEk+0LF#~C!fTms$a}xG`Br%!goeW`ZwEH` z*~{xM$L}|1D57Q}u^0GalHHdnpuWPgQsr~)gK!`};CPM=zF%jw-#v->SP6!|jeJCU zp@iTXdUlNt(Us+gKXiA)n%j$x9!{XajOt&?W_uZ6v1>}ri_rr+7Y84($v_?FzgpUz zwy>apZz|=6DhsxV>G`uUF8#i`WkECgT8#bVhksD0q?&AiG2ORr_#TQ>gv{6EKw?AYYM zLg7Ie@OQZCg!X*-sbn8JjH|I}^)N0-VS$y1s|p3<)zse%ORPD>hJ$bWg7i8o-4#hn?Af2erF?gS#`RhV@NXN7 zmg@k+-~_d+6Sz(;v_Dsfbyqm|<9x5fwzp&#M_|12u5;JN^6B93O;KY=W1MX9#|?U( zbzod_&v%CQC&+lH{z_Ih8#3%vjyY}31g+00k%9_XXYTruFV8Tqc7*RG^W3aG`gR0>$#YYOfws;bEUlgcJG; 
z62gy%cz(m*FM2A)ynw}G`9D(lso4Lg{FR6< zu=ylvs~drF4Z|+y?rvv5pn zU0qxyz-)9a#t`aI|S z*Pa5aqYD4Ne&Ae>b3Ui#?EkanT<_2Qob&#v<$RuVKIe1)w9VQ7Pp!3Tx~8E36SiGG zIwvrh0vg<#g`8K{KqKp9dB=Cuo9@pS=qyhpyp6^yhc09M@6d^gT5pWwV?C)f=SKZ` zo2cgKtOmmP`PmQkS`CEy^=`*CKDER~)VXM=Dkofa=va>=0a0+`CRJ!p5ZF^c7NpyK zf-k&xZ3E9W!zQ;qPU_dXfv<4++6DN2`SW{n)~%f1$r;z2=kRCy=lPuT5%qFzRlJMJ0_)+44>ABmAx~l#;?mle;uqo$Di`nD9kDIexxhXF4XgB|Q>!Dl{ z6TEak6dy!CTpHzKg@ia0;~0y-UBtTX6Q(+PMc0^+6Mp;s(PTPs{StOmPs9B0gRgRW z$LJ8a!^Cn{B(4u8kJmP19T;<=(?6N$AHY|Kiwyeb&JW#KHG(`^6RLIch7=1n&Dmg; z9D?>&t@qr-Fxr1M|LoGmIC1M4{WjV8Y`Apw>{AnB0P}JM*zEtLv(+nJ<5wv=2qVuU z*PI+00OMuWNA{wA?4SLJ&!FgH#{?VZ@I7XISdMxW?z0$Y()&RI!(X#ePlL%t`BX33 z2fV?2T^qtMu6K2f)b@hEU+2&3{?j(6-~Q9_f7<`wJ+GzYN1XYxE+Cv4w)>Z%zp0|j z+J9~(Sf5MZ^!4R?C{VJd^gW6t0zIGI7~O#Nf0>W3?78rT;Ge&2yLw#}QNBU!bTh`S zjby!T%+anQl;aiL#~CGrK4Uh;IVh8mVr-EpHjKnNQmR&qlFGpGxY&xi1DLO3^v1|< zM(^M3QvdY3{@l+Q_kZrc_DpV$>TA>uS-t<>;dN|qQ@(%M9OuhwN=((VRlVTmlo)S} z^WD7LKVl;#Suh9!^U`six>THrFG>d74))U>a>97E58YvR(H{Qws%eWY`kA+0p?=+= zK0OcNaEWt&Gf|hv#pZ)x^)U3v5yij%?;F<>byS&ta9Eoi|09A9*DQi$YB3MIvYl0C zq|ShxtCt)WeVZP4fBwEd@5i|>r}czu!cv{nubvXA#Mzq{e;IL~}smqg@7gx;UFhj8x8X&3OF@D?r_087#LEN!s?pdI@0N|Cl_v1%!jGe zn6rtds6W3j)#pPv`!d;kA#x7B>r9DmpiZZ~CS<+rziqNVmt4@X>|4x6l6Uxs?K zyN(C~I|c%cefC;heo4es+iOIg&m#;!kAHG_SVWWycn{3B`9vg`OEkVbQbe%seK&ph z5bM(KI(@KDCYI3h4b9dNTCiak|Hfolfpr6v|M*f?~K=km%v2)OV}?6ofo(%Q0i zd>CW^qa`5dm;wzJiYs3|L+u6;PfwLN^mB_POuYCu7yGM?rIQ*z12XcqYxVBN_*N@H z?ZsGciqql=S@#Xilw^Y$y%QtKdCNQ`{=>mmR(fHTv-#>JJ@!jZ; z*Ne2%h{L){nf@=vC6O~F_D8b)F|Pl2=SnHmhtuC?H7B*H4+bWwd*&DSfUD3c%K^Tl0Y_F=>? z!I1*9_i!gZ^rL{sWP^bV#!nv9zuYuoO@{@un2{Y_7*{Lfaz&z)0RndE0d>`Mc%UdB zASZ+MVdpe>4Y9hR^G8_O5;sE4@gtS*FyH-7 zN2s(^KX{~{ePF(;A1aJQ;tP(mz&~S3QVin+SOSBVuMhV_-P`-&1nSFK9F?=rvqJx1 zkkS0~XBa;zx?k`n*59&ElzQ|{iVY9XEKS&r`FqKw4yC0S$A7VJ_RW>4)Aj@Ll9XDb zH`5&Aa7AC5{j=jg#T{@sz&Xy@{-18wy&DZ!q{jf!Q>SbyWEpVWQ7o)*PB$dGRw!xn zv_Q@Ghpo1#cRm{RyLAx{fLO2SrL_AGL15L(&F7y)5L~{tv%@aECX%M~p3j{Bk`PGy za3_(+m&nMr{24Kt0F#2ezx(nPNR=sl+g&pt%@_Q(eM)VFl*Xhzt>{ zw$+2i$@L*MO%1@b7#En4)&f?BfuD5KaUHNUOuoFA3c+0MTYi6UhJ<#zzzY#I7=NyP zV5}eOT;7}HRhlXTi4<3Xji?V_6J2~=Pq`M%r*zj(ZEA*6mDd9cE_TA5?rY=Y*%Xj? 
zd|{ceK^vGUFU1l@jgY0DarYs!9(L5s2#ajRxc$+b34@s}(EHJgd;DiB%vqCsaw@(F z)FfW!ZcAtYuZ4Q9vke+SdfSE-$pqj^q= z=pVn7U`6|ZdX9ru4@M~1s*TZ@ZtgS)-Ys-c591XMT0|#sFJyqj74!c7dK&zcn}W8f zZZH~7uQJDRzEp72nIl6?7-MA3_d`7vns;E%7XvzgwvL$dd#uyoTNKOErozfip~)*} zqn=G)L(4=u6Rxf0vMyG_`S79>Uychh;NpF1-c5|luo`-AG=cervwNJ9_p5b+8~>#T zr*%+oX63J3iI#R~QCU}0cDxgW&6aCCMgNN2e9a~%>JJF{DbMz(s)okT9T(3Tqpnuu zP-t6G1?rEFe;s~}`B&@qF4N!Jf;t`RPJj7Dfg3Wn{L@r14*SyGb4Lo%|GB_6Z6t>V zqdlteHRumzZB}q|+{J*Y(eG<+3G{;DdSYfA>R`pn>FikYlmV*GyZwJ+T=PImaoUb( z8Z0-sFD@b=SCzeHftcAgS0^ntG~Qmc%a(5d zc+-r;j?JCE59j-RPUU4@zl!-xCDCk+w^%Ps@hN?*68*6=68k^=paNs(Ic^(_L$^Q7 zFnv{k^-~T}s!%z&ih!D0*adO)kJH#4>a{mz;Ww>FPr z{@&C18MeH17>>IhqV*E%rB?qi%E15AgB|-VHiizWnbgdv6Fs2uDrs&d#+kV7Pwr7D zKzpmm)F+Au1HvDDF+YIyf0r7wZwBq}fX+a#+a2ajVDyXTwDoEYXi+vkw&N=Y!?@f% z59gpBglGxsg?$0#+a6Xm?NyMcq2YY@SRJsBg>4xgYy_JR<5Nm{EfC;zM0VhAD{v1e z2ESO-2EKtxPGOiwV`NVhuQs9rpLzqW=Q#sP*QhTn+dn-Y=h!VWx2?#C;6?w0fUnI^ z2m`Jjd!c<{1?Kawonv>UiUy$yI_#86IxG)esH|;61GmTfhWAovAbU?(CS^Vo`t0^O zxZ?cM<%zvn;uf4|`n{iW+KzS4#KR`1FyGEz=jHF`XlEbEiR1bpG5vpM)3e8=(SP23 zOhWdDTR)6{>KP4kWkPTI(WGtYe@~AXNXq(!dDpS?e^%l8ee87NFN^yFVE^=|k2Bh@ z5AQs%)cy>{zs@a>OuC9#;~3YMWOZIX z9XVPVJ{Emov>j3+l*GIjc7h4DfsfAL4f3P9d)Mw}K(^?>YIBobuzW3ZLKxr2NjJiD zPGP+9EN#;}bftc9R*~vf_=^6^uTOpMr%vlWar*I}E$4Z2#@$8_y2JOK)8}#4Q=IY1 z`Mhz@a=}`87Vt5f`iuMjUSIyt`*6|MNDh!3AUQyC zfaCzl0g?kG2S^T(93VMBa)9Ij$pMlBBnL|MNDh!3AUQyCfaCzl0g?kG2S^T(93VMBa)9Ij$pMlBBnL|MNDh!3AUQyCfaCzl0g?kG2S^T(93VMBa)9Ij z$pMlBBnL|MNDh!3AUQyCfaCzl z0g?kG2S^T(93VMBa)9Ij$pMlBBnL|MNDh!3AUQyCfaCzl0g?kG2S^T(93VMBa)9Ij$pMlBBnL|MNDh!3AUQyCfaCzl0g?kG2S^T(93VMBa)9Ij$pMlB zBnL|MNDh!3AUQyCfaCzl0g?kG z2S^T(93VMBa)9Ij$pMlBBnL|M zNDh!3AUQyCfaCzl0g?kG2S^T(93VMBa)9Ij$pMlBBnL|MNDh!3AUQyCfaCzl0g?kG2S^T(93VMBa)9Ij$pMlBBnSSd HIq<&#U!gAx diff --git a/sample1_2.npy b/sample1_2.npy deleted file mode 100644 index 10e4025517b4d898fd37457226e4073d18b7f9bc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 131200 zcmeF3c{G+?8|XDqL>e>~DMCeq20~jjNK_);fQj`}yO!);;a}zW4Ck*R=14dk-4#KRiLiQ^ZHf z?xee|o6?r0O4{}sN~@PD*}J%TxY;;cySUk%{MUXnxAQ0QZg)qUb0_hp^19Va*R9w1 z>rZ9%(#uQ#H+~W#fBpJ_kpHRyq8^BTK=cD*91t8JI6!cK-~hn^f&&Bx2o4Y&AUHs9 zfZzbZ0fGYr2M7)j93VJAaDdi^PzO$Nt%X$3nB{&wSN?|;d;gDz#4BBT#&G(u&1$LB##+!kirBJvC9R~ zi42fxElsQUq{Gvd(e%@Y=^&ZCAiTSq0fxnMu1WGZ|GBU5{YUZoSNx)n-G` zN+V^LGfb$xCi#doKnHX8^5V^^3`mill6q(<3%=hrdEAi6g?sM1-8L7Fw~z2Sg!K}1 zrJJAaz=3=3YF;lY^(81}lZOp@XHtf67H1#OrgRtI(?=9RG%2SWe44GxJ z;X*vTpES&XsWNXWgKjaw=|H1_Xg3GQSN+oy=>j0}MP|LD^C2dF(ab~v8$$LLi`yPz z!mQ~FE@~*Uz%j&qM~gA=FlnW5DwICvpKilWxW3g%bQ%LbEM}N7ovCFzNgy30{>qh z#!i;AApdOri6V#b`wQQ1lh*CN$xHcgchFn4ZVv~t18pjGS99R{!J4?ieH>^xN#{9e zaKYL)KBB>l2QDM7_t^0~*wFAou-=vr37Pd%53hfVfaDdo%KZrk z<{ip$pcQgJp=jGyBRw7jj9U9_*u;bHE0Y|fdvHDb%719y!-BEV*Dgo6Y*2`t{^LYC z7yQdCw7rvfu*6~crmYz~@R{;q*S0l$Fxf#r`OZTC+ZsMxW>-*PU5w`H{KGU@Y`Kl= zs>Fnm#Tx5vFb9O~E37A>9R2IS__!B7xA3^|I-wNq7rv)(JMrhcx;2IzIQi_xF_S9X zKTY>O(tk#Skl*5A>$9m)KJCtp|3zEnCm`%KBOP-a8@$*r2<4F2Exg|OT| zw(*i*VCvinx6WpNl>$5rA_^a!Fzv52D%}-gTeg0{8u#Rh=We z|Mc@amu$6%+VK9VYnIM?i`%zJRc>}P5B_|=Kkv&jFVs(4#)Rb|ySBL&_rVd-OIpLe zWGI+_-$Xl&0;->+{0xSeFmq%i<0^T)KFh8q=$cL8!<$`tqsE%!{ZCl_pY?d3ZGvnrF7S5ecZwYw_R@HAruCv951#%Y)Xgl z%b(I8>M`JWPQSe2C=*VzJ8x=p&hg=_>F6`7dM-p6FP(gR77y;~9O-xt&2zF_z;;}-@2hg0QJFV4CPw+Ajzt~UN%JlyQ|rY-|9v99{(gsjSeDVCj^kWbn@6ILmp&hR$a{e&4Ky4PbJLUIUw@> z%zn~i%-PdAo&HnC+vCsj!jwo3Mpt`)xl5|8OOgR2lFX*NL2UOJpSza&sAe?`O|LTYW82Yyq?iaqVkP}|- zW!EhA6+l$)t>lb&7M$W)$(uZ+!R9&c7KP1yu&KDr@vI#lCu{ZIF%`+6V(~V#sgn#H zU*-g-zM%kp-(*p9PZ}6l4w>9eqyts-+a78%9hOfCER^Y{K~7T3v{}<=AgvvKMZJ;= zw0@dpiYN_=Qf2!4A(5*=<+I 
[base85-encoded GIT binary patch data omitted]
z!6$txfR?tX%UjfG@GDDTYLQ{h*6{Q-T*zm`Xz7^Fro;6O9MeER98e9%0 z7u`Z!hSlg>qiM`9&wlzr zf}7H}1F}>QhkfPV1%pzIe=fGm8ZV&0#DGdt1I9y{+vMGy_R?T>;_Ld`{DWY)iI|&+ zJXi^`+PjxOp@GV?{($d@YaY%h&)OYFfmQm!QgVtE80X;VQ1wQ;hb-4F7+2QZ=2*YZ z7uPfQ-dAZgF~G8R_UD3mzvhSRe#;)m*!{YEgGJ&G?hy#DP;X~!#r*t595=+P|85^N zQ>7Qq8V`eP=@Xvw!c4HeBbN3J*Lm~wt!?^T=@7Z_1=9olFZ{!^CYP@df=^nh+;-U^ zDA>@vwknkgqGmhYoHbd{dS~fgJKka7$ubf>vEbKp*zfl-lZSre8s;-q#Ie+0qh6T8 z6Y69w#$)HC488kC2HKtroHmF1WI>2ts1)Pph#Wa<=%-VV4aBA zJ~q+{Ht(ip6?Hox(Ahv{_+BS)4l9H{U*83OL5j{1m`7veK$NdDA_K2l3uWLL4Jy~G zEvY*6Yd+5QTjcIsofFN2@d(Xk>=ie~j-nNbc;quz7j9MzJ3SXk6 zWkUh?$b(}CNEDE{FC?9@hz<<3bSlRpyRQ^S)Zl9 zo>OW#YJ9+M7!>k$>^zCMzBH?ga@lA{YojLP4o`GLMzo@+&yrp+A-C{S`T9X_Lg&DS zeKg1y8D3{@G6a%LdwyNVUQe<6C;NQkfmH&Ha!lZ*w+)pK{as)F_vf&$A6#|d=K~hT z6aM3Yzn;TB&c2Sl%`Vyd8y1B*hXz#lvC#}53l X1ApwmA3N~J4*anLf9$~jq#gKQ+!`+n diff --git a/sample2_1.npy b/sample2_1.npy deleted file mode 100644 index 4f3472a105f029f297dee00a4080237b5d4df45b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 131200 zcmd43c{r5s`#!F+B_b)3q9RL{vL)M{BoZwuvL%IxloXYe5=oXsDNBgTzQ;sd82i4@ zFk_j;n6aj03H5z`pX1x-bsW8q_wo6zzu#~Ea3A+Gk6E6%&+ER<^E$65^7vWPla{O4 zyx4rC9UMI`-H_Ilkk)q6kd~K_cDi=s<_&vyyK6Tb9RHu=N9^4^9kJt{&i28vpS}R$jta;(z|Z;~SmpsFcEl*@H!WlL@1+Z>`rg{6i)P)fS}@uZ_d+|9DKp z@_!%y%-R0q@fH4zuVXJsd0t~e!rC!U6_;Vq3v;7uJ|)0GzT!Be=uRk${bYJ%O9ybb zSBtVobb|5n*&De2Ua0P|xU6$&2v&btXJfpX4i4@cyBZB>K=jkoh!h-#YgYt=v_21j ze^rlxi|YWW551QTw;F`jTOG4c?FZnt-v!;7vm{7m-?_KCwhxAcjdCPdePA4MK{Y9! z1TWU+O)Q@rgnB2HpC7&tL%p!O)$$}2bh6CX*vK=$jPLeHd(Idv{q;TZ@3$)4Tssao zl#M>e*p7liLpo`M!2tH%BFRE#Oi-TuyoW+!Lb&{O>ONf-2$N>mwePavNpA^5dW;6Q zd8^s7_YZ=;#Rg?r<6h7>Wpe1JP%Aj;cq79)JfyI%)-?W63Mt5%${HUAD73WqM`WR`;0N*d=j>;w}b_|8Jl~ZvFoIjn6`OSEE7_S z4tYLY7>6CrcaQz59)k+UJN4f)si3mM?#kt8GK8-F{!($VAJ{49Uu1_4!pt3)7m;cd z=s#*DTyT^MR}9yxt!t*j2~yR3k;@3^QR7`iW*J}#MQra?S>RdPWTB_Pf(rXpH;?3x zLO|4tO6pZ6q$#&?Y|k8nsjXM-Z#zta$i4l97g-EQe7L$sR&@ZPm$TQhntFgW>}0kw z*998WZ@mSUtdm8r4P*g@AGBwq02p**yW9?>C3wrHAJ;2qSP(VS3pFM+IYAppkAJ1=O4IqOI>Kz+YaobXSQ6-?#JJb3Vxc#!_(R zdA{-g#Lpib{TWyO`+EE{|9kv*IQ;v(zvqAd-|P?3E4&A(aCS$?%wb$Bshel(~NL{OJ@#kyll2 zp{IdPs7Z4gvll47CkX9^{)CwBsY7CX-M%AW5wp#I>k=KZ!hpx*02M4wE>^v99)cYn!z<694MO#c zBXK2LDc}(kQTg454h32H0-jYP;BzNDVe&o$D&DMm%SU8_dY*#B+nW>c_wN@@d+Hv0 ze*|hW6M0<2i6CY=`C(U62Uy%`^I0(Kgt_yQc~2C&fz5Y|$rN`hNWR)~OJc4O{C{oV zVZ_-ELapDdoXvY6%fZ;=ei0dnL5wq~f(k441zcScG%(x}&o@FIhUK)xKx5e|1p?p9#YuK2a&=g2pdw^>U9*aJl!v&7RKJEI)rMFoiYItex@ka)%VWc*QRnS4hM7FFwas;+= zy3Jh>8HKdGyC*fH=uq2{@=Y(E3Ip3)uLWG9fEgH?Dq;2zA(ON z40P7AYtNh+fCOe;a-W3N&T})pbV$yT;0orZ!Y0ydHqL#TYGRz_ZS?L7-K@?``ZS`7AAn*2`^PbnEtnM;BUPCjpzT} zxz`x`-q_pykjf?{D}A{S4wukezXlQ^jbga)!h;ATkq`U({rh0H`^+|1*M8vtu|IE7 zhX{5mA&TZ&?cme#QIb|#2tM^n)p1&Rh+pMR(whVID8PL5B5kr0*>GnJD!CI7{!ZS( zz|4L`KI_qV&X9}(3UeJV`;n1ZM8ELpc@h$L({)^O>_Z7O@qsPey~uv1|5ebIP9(`3 z=dD|5K$M!aklWLMDCZM5Mw`AxY$x8-jy}i*9nq7s!v+noHZdpT$Xg=Btt#A3{7eTQ zw!H$auf}0RVY_d4KMOd@_;Gf_OmGQVDa!uF1hMtQ1r1-Y_47LOXwMI9JuCk>SL%e} zTi}W%NrMc=ilc&!ln$t0$1@r{*Y~J85}5?0`>Qeb&hPU z0E1iomvh&3KKzA}byiN?oA9c}CuCH|8+76^Z}uGo(Tzul@-MH5zb7oz+v48vqS9 zPC*+bB3Pst>YAqzVMF&di=y}e*m_1(((eroJlsTn3VX0XuCngqxkKadFlO+|-Of=U zt}jmZx;p~MP@dwOM+3#*n<;m5si1UujjR`7xKuwM$`~90J`1;HR_Q2+Xyza0wPk^y zK*f#KEG#d&v#)GaaRTz4D_XOAM?vh_Go5*UDu@Rw-HewQhS|OIc{Tf?;ltdMZNru$z!9ctf~R5eqMgiCv>k&Gi+O+9 z-Z3C0)umRWQ$k4^qty#LSQ`QOEdzmNZ!{~7uAQVD?{-3Was%V{4i|$c9h*fei(~;eS8Yh?I0dteocAY9Hj|GC@6?F zAdW7hXuGdHh`%$1vMqi9G0J54)~Zp^ny1*dKTbuZw6A$q)ii`VZmi?eKtsOPPdC}h z(2yYAY41lBDoW5T?CG-}MjjU_$Fx?+$fzww_S{(_a#|avdQ`9*3FS?GGa6|^*^>_i 
z?V>9XPl7>UY~veb^sA?Oqf`+DhZrwXtLuT>Vv(crt^yA!`<&PF^M+dd2Y#mY<&}?{3?;oi%161a&686n;OK^ z9N1i*)rFiMn9AT3h{$jE?oW8RKIDWK5WS{BK(Z!E1~2A1k&z8?pJ-GgiV@I0c<5~f znhV)Z{92ul!l%ZhE}l$BPEOD8n(xDq&85YcwWqQ`C$MXeV|69?sOK$cov4PG&Y1Ap z<}#QF(K3Q@Kt=8zc8r*KSjOeb?bUy>Qs4!OX z^o_ka6)t`;%7`b>fPR1G^IkI=JS})aTzvt1-t_17xSdqcvfe!Ci1D`COB++IVhQkA z!!G;5X#yxVp1%F#Sr_Q9z4`8jTL*lX_gX!s)eXo#EHb@-2ooPnG{z_UAaYl8;^+|) zRQiu7Zn#Q<))ME|D6auv`p#_Kjm5bXbG-w^Y6e8Vc_Na&jR^rZ>sWAP1jOgQlh`~* zV8g6ILpYTIVLy7av;*kyD!R|rQh*M)YO!_9aXKt*bl6tiO@SNlnS4EKhp^*43T+L; z5J6e#bX}%Ef?nub@m>lzi#Z%<*hqsglfCUz_z`H1g;N2BOmMjNi%->u3AZn87pgeH z1W)0KUg1+L=*qA(pUC|8`TZaK{ofw{#{1vz7jrsS0?my=|0oG}6edC8EtmMi#wl=r z-ug;3fdLM&AsyyC7;iq2AvdtG8v-;9E7>K=!06gU`yrJils3rd9?NV%DbsDN+cQ0= z^OlGFdgTFh0{5nAT`&c?$?(iwRHPx6+N-8@KWWIIhS(m3;h*)h__wzY6}54CukKJB zMuaGtUA~llBsuWIVOvHgh9Bay?#nfZO}?LaMKJ~8mU+IlmZd_r&RiOde*wQePVWb+ zYJlUekKnB@&Cv4k+_lF=ZQzo>mQLN*4t6FP$HKfiU~cKUtiw<@h}qc7_#EqpoKwds zmHA|7IY0Aw%cCX`w+Kpkb}kv2Zs}a!T-$m{ckd4}qwbTdC%6nMSnzfGzYDG>4HWtcz$;e${bNZ7ED$;P3_%$3c zf(ocUYAJLE(qi5_!Q;t5`wnH+*zTnxv6p%?#%qR={VL8zHWTx`fLWW(cIXr zHC7Aw=QjP~zbk-p?{TDFLKf2CE9k(r)gp?QZ`%#o7KFE1Oo)hTLKYn|K}pvukyCBQ z>c#;_u)n2F9=ELpM$$sD`u27(l@DN>Wvc)ufidRA#zZiBQ)Ii;6%9H`vTw7t;6SK% zQC)4N1uEznc|wy!unNcVjtcgGXeLKyj!QL!kFSwmv-K0ml6{hijzy4?e5?1XUO9wc zwiMRH(w2h!lDh+5so@<*xTq$eDi$c(sTp4s{g zIjLLXpL9RRKJO@fx;`4R@BN(J|- zAC@hz(&grekWD>iDy2yP?y#p7$0@zABKEjwG@%PDWB|v1z72#nTXwYnYzC+6`=70D zYXZjPht~$i(dOcAYhV(Tbig#eTgxofEZ6y^7SfeX} z5_UuIp6M66d~5)WY8KMFFAjlBvUtzsM^tzrNARf_9)WxQc#+1VEGUuUU!|w^e=ZOB zgKM104*y~PX;4hE*|WMG;}<8THFEPNz-?8jghlHl&l3>JCpF39;P7tU>Zb!9h0!VGzn8NKkP|Z7ZQh%=>xm5`9%s!%MuRIXs4p!O&=tc?og3B zW4%N6@GuI#7^`VvPeLwK%?zWv9f-2)czhy14%HvO6msut6DSM5j#JB}{Y#$2`DLr; zPRU95Vu#yAUi#YM>ZBiRa>GM zsDV$;N5`@u#>xV0xlwM~c6-1^BAwauLf=k<+EKQ0`lWYj5*3ipCOc~^XM z01YJB3l2QYrNB&u(}wC&OwY*Pm9Y+2ffQ}q_Z`wIE;WV{o;u$C{*{gx#Zo_>OZOw4)8+2Uzw;5R((ZGcbT9aQ z{W;<2(gw2Blx_p@JR}r&L}GJY8=6bvzh>3egQCu{-Q5(?gO(g##S;x%QHsr%+v@w@ zBYVnkB|=gQaQs}$_ISP*@ZIIRrQd%6qqF`ej-GvoCbr(Jn6@uOF?zZ@$93Ky9xmnH z)$SFrA}#%z(a{6y)%W~s**YNHK)RUJkPqS`GrFyI&yk1k`k@?GYb4$p#4HH;3?8yU z5tnA)LD{LvR|8vHkZDv1?>-J9nt5u)pr7nVW%zB<@l`#j%&}|h3F|r(@Rl!fMKK?# z1V2m;cd9~?;|3AM9hC@wRwS@J@eR`8J-X@j*E$H7oTCMN>4Ao%!@b$v1TdZ9-gjxs zAav-gq1+-2Lbv(&tuqt-U?I*Wxir@gJlvCCwfwpOA7N)mv~Pf=8n#o6rwt%v!Iz{Z z-U)&V3ji@>Z$_MKGZXT+~0DJDq7@T+{B>8ah13RYLh$?**% zd2WG{lv)bXcRqP>U@sMg=SmoISyGT+!_c)?A%iHvW<{Y;mVmr|Y`p(Ir2*;adYk^{ zO-34es*cIw&0uUS+w$IK04yfmq#aTjFy(km=G~8YdsmJLh7diyNhu&2y(1BB^o*cp+dI{4zn|%@HdIft3ZM9DMeO6 zKOXeeT&mOuY7viTp20>^KU%ZP@J;@#O&l!YmD zu#JsZJZT(}PvY7_Wf+Jj^3Kt4<{%5~4h6=&MS0U_lVhy6CaW`<<6VaUJ z9>-1Z2T_2d^}@BK0c5&De`grii-POU(3=bKC}sI!x3X6}GE2Sm`nzQ|@|YCgXBXgy zW`_52|8}YXajBrNk^?Q!xw>cQMtLjvU2`%x-`WYmpWpEw!FZln?3w2YoZTR#pP!y_ zv=jV8@4oQ-)Cq(Q#eMyyb+E*C80BPCg5R-vk$vmy!6&DBi{@A}bcPR};2vlM(X;nI zIA^y(3&;5tu}vMY60Rj=8rcq08Z{r(xA#EE<*<#10?82BoLKDI)(5`)+gW4t?O;>t zCc9du1-`#E;W%@t3mjhGxFdOX0FKZ{wMQ`>FgRwGlRUtJnzkOA3XoT>7Vod%>PG61|`RR%yJZt>^Q)_CSVj8p%#I*w-^wz zCO*RY8x`7gU-RvrC4pj}@rvTl77%iOWZv=UF%bH<`|b$GqnUtu`Gc#v5&2Ddvix%r zS~FrT>)}jLnRkZnH;8z0FKh{|B%leq^K(~@ zw;+PpJ=5I|pAcc(A{!~-Vd!>0(Qt$a&JPqKtba28B@g)X{VdooWUXLzsGNc#mK-k& z$g_>SckEc8`n}zjR)^Iud17eqYNo)LE@EHo!Gyfw1BQXC2@qg+ezEmM9PCivo8~^z zjsjk;Zqs-$h*HYK!e4jL(9qtxC)+ME5Lw-0_(3}Z*>{%TI3PEI1U;@;Mt`KBrJJd5 z%+-m=oP!vj&DqxmP*)gaVu?khy)(qKf9M7lfdo_ywW9g1D=)jY~#j6lpVMz zG;y^bCG5X}U**I=f+d>!YeFZ`fB!!JIZEC*Kf57*8r?p2i8D2J9Pw3XhG|?KK`}L> zM}Ft^p%m_5k-_7+Nc>d;I~l{jlXl|4(hHfW^xZAH`$ioIr;}uu=R`!Z2X-E5sP995 zcB_dS^?Q)0PwZpl$wSHUx>k7wEmS z8pEy7T>r*P>42}1x^_V~3anN)6&`MCM?R6HMD_wQB3#}cKlK>nPx9&Bmd^(fewKbN 
zys!rmx&w{FL+~j3278v8R0Uc}TnJS5szgG$JI^Irry~`+hwKNJ${=lQ6L*V6Go&2i zdAXav8M+rzww-bAgr!|aC-`3Wz?oi8E__r6NVe4RJviL~XO4fr&dluqopO4M&DJ__ zdhh!oETIx&*zEfzS^>-iJR``4$>34O`J6K)0Pr3yS5{XRfc@IJYc2`3P{y@KCGA@a z)bO75+5z35v+DF=jgCGrjdixP^6Ujsd6V>eGFW{!T<|zGqaEA~Z?ZAjG2SV8pug=n z0bFDg#FO?9fcP_Y(junIhV9(ehI`Hg`Ji(V<3|6A=RfQF|Mr;oM(W8nM~wfUFIYP3 ziq%a^zhwquIyroK-bqv&2mke{VOz8?-S->!;3?ZqFxrsVl^OdCD5u{)(y6OPW?@W! z_fOrZe%Fb#XPlUxeBcasAg%c!m_tFq_iLN{izukz!fe8sH^Zp2RII0+MMeyJ zZrhEl9+a?A;=bo%1H$c>K5}_aJ~G-KG`1iMP`2zQv-?mde2|kU++#2R-}J8>RE?#= z+WV27MOF-;k6z{VI>-R4lshs4*GJ&PuOrB1{T1Ie&uK_M*KzBn)InrXM<*QQ!E}lT>Qgl5cgT-_OJ~nQ2c*vQ z{45Tmz`4_!%E561VD;(@5%FXH9qkk_kYY5naL$mm*v*A|Cz z=oCIj*4WSkHtSki6}?JfPT|$f!C_3V3_jYuP8E;JI7RgQ(<+deNaUG2)CeT|@qv5Y z*;?Rm(W^Ju&!9#gF*c!EOb$`^{z@(8yg`oT7}? zQS&RUqNh55V~VT6|5Z1{Ieu_(Iot(NW-%*o?lu5E;}`y7TP@(ox7Ytj0E`dp`+520 zE6|y`yy$|Fe((8UH^1@8*9W|NH;{JpcEYwtlvx!C?%RH&>=~-eiG-`x|2VW~`30 zRJ;!v70GlSe&2?Cy7!B+ zl+ux;ifid6K0IjK#VNEZ3_@H`Y27MOI*6R?o*OtpgUWW2&Ts)%r*cTV@yU7wmVSH^ z3;Q+-LAXzfVVF>^d2vHI=ug9`s2z}6H3hRO6(U24lW-xhRf%zT9G%`W!9KrVWo~o6@V|0YyG=A#J0|uHR&xUhOk07=k z<(V7@X-KiTR^Ic<0D8g0m20@E9kGeHUhKsAL4{27+Uy)`x;!4oh_P5|G`?84f*MJHl7ZZv~VhRK-k#powXD|8Xd z)9Y%6Wp5QChIG6HPM{LuvVLFK=Td|y+c&H(h|dHb!gY(o+1NhUF2%j8wH?CqdP@&j zeFB|EwjDdmFTvcsDdTskv0zk}=zBHuGX$6Wg#LEMBU4d%TxV(@5?xPrNR*%a+lfk5UUG# z{yr;I1E$>?zYd{FPzlv}lU`p19GV4v+LoS zW!>z98QIuV+^TrM*?hVcMk_#bJCzRXz21V%UT=yK_((u$JRb`61_n^lWpA0;A#9(2 zWu0Pzt#iAgsuN2)C}{4xmGN5gAmWeW+cK)&huUP!Gz;|F5zn_wn*($C$fLymk?f1l z@OxwJpwnd{9Q$RT$&dLm^5S@dOvG4cNrzT*z3&q67?0WMkAMq#HUA=y_6>zts;v0@rAx6PTSh5-O6Flxz z8`f`t;F)x%dEQoJ_t0&_rBhTyk7?7n%4DG;dD8DgvHyzyw8u*AM%I%k7IZ*N-|n&4yI38NO~>Q& z`x2Cv>#TXtuo}gzD4ErzmLZjm0vD75U&6$NrzgGlV|w8yqn+cJerCo^VE!`s|P<=H4Y=u zL-9Y!Y9y4xT-Qu+Z$T472%L1Cgw~V)-7^}134YHy6aWC*Z%a%)v#pc{_K60@MEFSNL!Gt#8R}LXJEH{E* zvFd*NCWH0Ju-P8vS>b zLV{#W^2{8j7g7BSgg0WZtN(IpT(}tsS1+D+*w+LTCLeu2Uatqygi?08bSpIOeA35N z)&(owm_y(_!Ybk z893}>Yz04Id3}Ga!p~P@K*CLLY8adPx7X`WJoqyb2h8kwM<$^yzqjHbnF1Vr;%&zk zs^@URMN^~oEG1_KT1f5FXJ5}mTInHFpZ8SLSm}TM-b)3fqk^pDL7A*dzK=t`bD=Tvgk1kPO>XULW_SQb5Sn zDWiU%8F;9jl^1c%NRa2)F=aWd?m6D~etjtmg}UVMC7Jz$f43>NsPh@qs9(xVRP^Np za>&}XAiZT2Np4y1DPw@u8>Dme=VR-TPN`}?DI43DR@n&K1m=Qh3edMr)FT{7O>SE0 zMV;nn)VD~Jko_ayVaiL)r?Bc*#%=>lr*t~>@St`vN{eN^dhx6V$o=x>XWun~(D9|i zx2`-wPA%_3(n|0s_S7&!klu0i(Z9VGI_Ya8{I?+dMm(mY6Lf%_6_3XN=Vs# zbhGi7R7Cb|T-Ut65@i#_;F3T&a!Q%^Zh4!8X5MZ3#AV!o#6{FBSRY7ex83O*>pLmP zKd@>^RELTvgWFh=-58%@#`xHaVfD<%^J(m-J|XdE$FJ@bc!zkpa=9*;M1WbD<)fH= zcqp5`(6*7Y85B+3-39~ku=HrN(uRO)@Hp+Sv7FKdHfQ=pu1Iu1%1>>!1Hx@kR!6s} zJ=g(+;9E8K*VF+|-ONeWU@?S0%J`DHssJWBLSxD6{82Vez(zOV6PmMXUuCjA2k~^Y zI;^UC4^Ae%#D^{zKX@m5_we@`5M6aA$9kaw(jFM}Dx7Nq{?24~1AnY;7@=N&VXhwd zTO8DKvAD#T{}67U(gDKSaW59I`b>=#pP&|T5Oj_lF+LPS2eb7%<@cztK+mA%e!*(2 zZuK9J>Hi%6=z#y#@jvn6&v}35cxIef8EqOUi^kU)HKs8hZ1kW`X&UO?_rAilOkwNv zz!BP&DTvPBdqSCx`L$-)M_jgzgQ@CT@0Uv~SjI(uPspKxwaWc%ls3$d!Ss@Sup19^ zXAkIlClw-o|8*BIGL6VQlNQ@FFn^B2gEjm?gNRK~uEr8kP<^U^x%yfv5;psGZhIO9 zxjY^CvQc6f@hoKf&2A>4sIT^i?om3Ck6oShn`ad$+-#qPk=Q#hJup`=WY-1TrG_b* zm~UF?vCC>j9wzW{-_47xWPz>I!LWC}6JSJ|mOHv(8mr@RO5O6ChMI3~eyuT+utaHc z<8B!Tuk1X7MLo>7S}Lg4o5X@2`$tk=B@IK0aPr$2=L*Oc-_29-5dKV<_~p`lHJhQ^;@UnOQt>8r7R<&E2e-LTB8MoI9>Qj_fDJ4({1Eg3Q)# zsn~y{AGKVSy-pJUgv=BV1qh5{{RF!Etaa~(p`>@`HqevWk>p--j6zILYcP_HYXA#V8^N65c2F5uz$g7q1hWuC%~mc! 
zLK=cQtM*nR9yi1K8xQkQK+~7=wP}SQe63S^KA{~vk~=RrpD%*&NpZ!L(?uwp`suPi zTQ#CIG~}!#03sBJ3hFJsM@y>0<#)B)k>3%kGtw^xkiPNq>1h%LopHDz-Rwv~DsDa7 zOB_gu@Iyx4r?v^P`SBQ+cwYmG`pYj1%?aQm!0)Y{gaiGs)aWdFE0nehJ8f=n2hqf+ zjKm`99OQSP(FX zy-&~$jjdRopRLZ#`-P(gbk6r5^|fjRl_sr#cbLA&D%&SNalQlG4i-K2{@evI^L40+ z(giBPlbDP`0A>G^XCGV{h9bj)_Io%wD1G&5%G}L_Nc)FwEBgP`-~Pt&A3Xm({#WPO z+}Lx?%X$hpVv8mTCKF)0@G06+c@lU;dE)ai-27Ls*XUZXa_7SoT)ncByoC9(v#S-X z`H$Cw>>Ghdm*zY~2+n+P4tgcSGM&#L^u^3vO%_T4q@KyyvXmzm0+k+|5{eu?~Cs2|^4)^ed6 z#@>GwvpPkAlE7aX{1 zhdkq8%Pqed*T#a+-WTQgp46y&Vi|081GQjcfBQ*PeeH5Sj|24 zIVk&r)BKVW#&b8BSl1``pa8XQi8@=Xj?r6m@Xkmd64JaUdV0?Q;xHLz=dSNZeuN)g zAqAZ%<%8_x+H*xHz^2sIY_JYmjG6j)y_AxL&HeF!SH7+br3052{*b<_M#X*3TJtJ z6>@TK?%c357NyjdQ+#c+k<)eqojJd!NLEA5@0%GwLV~)<>e3ddU;difYuf}tpF(AZ zW7;A3fZHW+_Fm{*kQl2cb$~c4)Q05O2I{r^Em&0#TtXFG&b{sgKgS7Ce&se`YiMJZ z^f!Sl@6|{JuQF&+e3-dqAP-V5|Jbr|2*9Xitz&p`3uuHh&8F6OLQ)i6s!6>YLN9(> zTvgNs2dQ^X{(RXDZn7^01u);!gw2C{FMnhDVr5sG?%^Kb=vt+Ev!Vy$x_tOAzU>FD zdxYm#vA%+PQ+g*!YyQFe-#Gk#d+yJs5p88a6!Pf7EJ#$yWC(BnUsrk!td&O zca^~gPlvDelzvDYz5MRSuMxOrq?}Xh&Vp_06n_>SWC73qAd8`R7UaaMM&)KF31P|;1ltlKxHkuVRX;nd_L;+F~_ zC@rv%e_ty_^CcE9d{SMnd-fvyHdnRBA3~7_*<+)^n|25&Sn+0&!_dU=LGiW`5eB^oCu(e; zX4;SHj}S`8mkFpq?3tl>Ml%Yy)W-);qEU8^RXdwZJPK!={}fbKh{W%>kChiDAzWso z>i+3ch>9_vy#BNumbNTBxue$(El0ywSw89lGlit$+zBGgEUZn}n!$WHmyZ`FoW<&I zX&W4r>U+T6##+zuOB-lBJ5|$@SPyJ8r8~EN*TG!d?ekc*6e#tV*K%Z}f(?7jhhyXt z2w>ll-V@mhotqpwDVUCzD$F=!vyTY$M#2y{_dxwi-sjfZJz(1TG?FK#8yIdkc!zg) zLmo~kHa5E#=F^>8I_-#1yfoJBbZG#p_ld5$f%&B4w4SSlEVDrDc>~=X%ftW1`+v2s z|9$@d?p&Y!fULnTEKl9-JC)8p0y9KSk(wVPpw7h|ayOXE#J|0#ADlf3le(d9$8Qlq)`*LCvo#tKQ18W`KQX@m2QO~R z#SvuRpy55GJ&uaFB|qC>_?MM>72>gL8u{h)vh%u6A`6F(5*sHL@@cr=`U%TB!h6>_ zuj=c;d^o4qT3>vD+)S@L7_r81>pbro`zQkZ4EA{Tug3WLobP_QCQO%1j^8!$tsCiA zX4>6%Yey+6KNY2daLBCds$N-bB}iSTI=v3T{BnN%&9xHsFef7E-k9i%oJ!bEeP}2_ zI>B#JlmGp52KX2xGe{M07|*BA%StQ z3Q-RAd9FY<3a@`TS9C8AS`w5#uOIq^)Ym=sKX#%W=?m}BA5!Q>QHOKy$Vg$x^NCE~ z9|kD=PDxi52M%Ids|>g-t02bC{CVotGDtYG`TO->H!&l@TN|)lo*jl9Pd2H|BN*TF4)~d~ZvdhKxi{C|!u;!B zh8j+&Vt$_eOMoL{y618Bpv=n+(DLiL-`m1!Ab&Ea?{&oZd>21ghe9uO9}-hyDVgAC@{lA+Z4uXoCI$Jvz2`Y+nKHO#R2kxFTPk&%_ep~tT1dHE8 zkl~$&kCvo?va3woLJJc_70Pfrn2+aw7q|bX{$HNGP{bQuD!Pe4y67yb9@d23mm4xXsuWweUt*eD>~=ec69^A zX1im-xsRx`gR&@is2df1F(1&$8Aj1%NvXt2ItAOvP~+#EKt?eg zkvBRx>P!uzaF39hd?T#iXM572`F4}#c_u_h(C!+6{b=79>iqJKT!)`P1rIpctmY?BIPQjr z0%;7z?=oL(UOU!?bZqHw6|P}?AnNPc?-{iyd;SC3cKbfc)-_IAh4EttdWMTd#vmBo z{k{-?ALH}!8$!T0`A_tKvq1aZ{Kw7wrH<` zm|IT+pXa|qvXG9mB2^&MMrFbs^(xFSI6jl>m4(a>W$z`(VZPraYNOKoWZ)?7{rT9&(+)xcgABfY1SJ>blat z>?FX(u2`GTP@GplYs7Z`GPZs%R<4OFV$ygkcE|Ms|!a#%emM- z;pr$WJ>`>-XrMyD8q*sm4aiXH3WrQ_eQ-{B@XebA5)?g6CHNbXfnAL6>MZ8x`($}J z$Z#zMK87aV+fhpewd~KX_a9K8^w*gpsxbu?-rZ5rY`{_g;*)*eSpV|wA4{Kq>rr6o zWx)IMsT2sgP*c5GiUxD{12=_zrU8GujovqD1~mMA9|v=O8-t{~ zIZe{9C$Rey5Oj_9VD~dfQ&C7wC4>IZ9dpUbdeC8P`W6uyjiQ>)g|N>xA$(Nd(^2VO zRDX8c?C~Ws+IRZ?)98R<6k~oMF77Or{|kJw@vX$_WQtQGsvQGp;&s^KM;-#grFS0Q z-Q9>LT;}G*i!uL7z0afAQ7nIYaPDpucE5?V%r2IR0uxBAg&OM6B)Fd}9;!|m2jfqg zNjAd&Ji6m4x|BM&xJ57RgAFxsb+{Kt(R%p_tT<@$3^e+{kPMx#`Jmm6D>P zY~!aVi_c;8444r=bD^U0K_%wPOgg>` zQfXHf;WZiXRZec1UKk&CRXPyvUjPKpquW1-`#`{1$sLb=CxOS-`K`Y8cqj;uv@&Mv zfi5_A#d#=5S9M9ppZOPm1SB1?5DTd+WX>?LavQUDV*R!nc9SiLXBH z-}51xCiFeIs1zm8`)*2G)*)O~$@J$9cqIP$_R&7;2sHOuIrd3#BanFtT<;S|U=`;c zrF3lwvVBT74eq31eVwDt0TEQlYczM;okNAL29Y0)<}}ck)~juOK!^4-y2`hku={M7 z-!56i`c>_QWM-ec55o?)v$1F^4H~RQA4LD4!iJo~<5vu5@NxgCE6+F4KuhhUcD>pF z;CFi)BxMjmUurb`_j(faWiAcU@mQaToM2|F)hN{7tD3xn?Kf){<9p5wkAlHlf1#pC z2K?;uE}6mN4maniW(8XseA83yT%Aq>%6r~iKE4rH8+z=p+!H1w(kG4yv;VVx|KRWc 
z>oI4sD&zA$8mOgi%6v3SfupuAETem55IP~Fcfqh7#0}W1PgduEPT#9Ldz1ksG%x+E z`q7CzEFbADJ7GNTgv5UDvshiPsIuyNgz>fv-1al~P#r2@CSc8|8C1dy zH7`ELy9QeLZ@!#&#OlqpnX>afE#M?_ZFuuc3lz-kqfM`B#_rE>*X``?M&P+%H6=b> z2RJor!q!9Okdhm}UHe8f@`>OooqUS<{8xCM(-eA;QF@NjIjp}}{;J8~qznbI_CAy% z{i2|n`~8N528^%lGH26&hUGO4R!5JnYDOswyyxq*Qo(KcS=G}`&G2DfqVQx(C+LeU z-M+@x2kslQHih3ILY5V+*osU56;l$UO{gC_r7RVh8_2M>^8SAw|7 zYC#+y=GW5-+j(VQC&JfVie~TcLG|Mk61s_9XpZQ0^Jr*2rbkD7+0y8W*dj!P?J=Kw zhvnw2hFD%Z&7-vCbsDz5H>_d1YuN#^&6j>J3>86(_Tw2gv)h=Dtz*rPx=7%e&DkvR z6(H_8dpRSn8~DGDmhfY7R%`X!-wIc$kQQoxmv=WA0*;){+BDt?PPgUX-HB`f)AY}~ z`I_q>=&e9rgC^!{ITLB!hxy!(ZBpZyQl&uX_@@A$re2`5Nfwrvwt&iXVXXCwMsV0^ z;j+)P6XIS+8P*u}gIn(*i8Y*9U-kU`X`AbDAaruuFIBMuL~+~_Yqlj1`5a-2b3+Pf zi2qE$v$a8ZO}ApV#{lr_Z&Af?Ofcq7 zX?l8T0*n`r6w@RBHGlVPA!S(OCSg~b+UZW#IEchv@VYZR0Uq1KKX|+RbN+wD@Bcji z_xSJP#Gmv2tLOhd@6Y_nfn7oMw}zqOSn)vS%OR+pf6>7nO#&lhC60xJ7R(Q9I3uqT z4?bDiwQ-%*NcOm|+xrHr|J?p!QKl)@hjM98vU3jwiCuqFlG8>*`#4t`UCXdMde_S5 zv`jiuf3av`nM6auD*uhQH;;z;egDS^C6S1TLS;{~L{hqy6d{F@Qc@C%OkX zGA$=tj`_gf?PZQd4I(vZt`=5?ZWMR6TB-O_8&YggIihnO=Yx)3460i(-h+R9?M2B@ zWT3T5wIMwYnzA&qA?YLJZ{iJq_2Cm}m0wvi&;-CzxA4Y)u^c>8G=FeJU_P*qjx4uY z4UC>rbzz;u`&y9+#)}b+uv984DZ8f`OsfcECype7;+@A~(O$KviRQhN*Mx}tMk9J` zP7k2|`RU2Y!a=me^Ii7V_I@m0cpPk5)`K3atMFR@jaCL{litTRu7QvpmBE^>M5yc4_hIN9f*FM~e65LCH%$Ecn_U?974$wk-XowD)X007t+Z2s z;mo#t1-AkeRxfzw%&r;~meTFJoQmrSW08`oUb!f8sr{O=NG>c#Tt9UopcA5-ghr2+ zU_LtSm0!ok5t!f0roS}{>wcZ)TiJ4~53-uiE;%J*T%&TN$6cpvyl;|mVO#qFh`TQO zw!SUF>sF>LqedI-ku_UuHbw#lJPUZhI8Y=byjvW{U|SmBuJg)N7(F_0{5Za@`@Lqc zXWsyDHBBDd9!Unv^IYCNU#OsQO4@p~Xc)Mb_>xbK^nlFqHB+J~ZD21kzqoo^D^OF? zvIRDD!k#eo)5Bk|u8)^@hu`IXNRm}4^k>KW45oumBhAWztu5Zdn714_`Qomqy=#UE z#-C0>zP&K1`mUf(sUO$|vqHR6x*@ueuT5;Y4?@MIZ~55{L0aDv85fUXm}K35D9UvR zsAg9v(NR<=)nJNXW*vv4H^zTG#yD2Tklp+nJSV}o;pWT3F_;I^$Maz9It_|{Ycg60 zO~Z%zp@x2u85q^KX{%|R0Y(V*_$4w8Sqf?YCEF zP595k>n|r(y?*qk|4+aEe|!8s+^>^-fCOmgq)31U*8R$QU-exc<7A4qvdMcBz?8Ij zyYx^z<~f8ICEu+^VXle&k%x$ADSGuC&!2$}qu8Tswt{FQ{))fBz4+egfrF@DHRWBhu>PKTWw%qq z7z8}wd|bL4*8%?5zdyQ!^y7x=a)~A1A+Y}~^Kgh%0V?~(J-Fga(Odxc2KVcoNKe~l zi<0C3I==Bs=8xU~+Q*zGzY8FtpmhQ9M=N`g-eoDK7l#N)_@!Xf7T}gwi{=Evu z)47X#kCNfG`(Blea`^u}c;3jEZxWcSTNxs@jo?0S)mRIzqwuB1D~deBJW`qX)O!05 zAVt09+g6*6M2EP`s&-XiJ|4yMiyt0y?wvW+lZWReC8c*)BSG}ls!99SI(SijYKQ9# zt}n4X+J2g06zd$lvOJ9IwS6nu+w}&!L8bG2;tF#;Xf3ck>9jA#JaxmU`+jetuKvpD9tnze0T8$bpg(%$;ybNyghC1hra@%38Iv?^bGsRwi63VoieX7Feq z{4%rH1$@$Bk9uE`VZ2}4lEs(;S6llOvZP6%Hd~-9uFwr6QbAUZ{f{}8*?$;J=Rcn`+A;)Nw=MZb`wm0>Bq`?c@e#1;F!IX6`9bZ*;$gQd zBe1N=q4jxs9D?-)x&x-C;K)G}9+7=C*tlkk(0n=#wpQHypy+{q(5KncHSG2{0wZ1*(n|$^=G|5&X9L0Sa=pRO%A!S6l0yBxexC9 zZE0{NioIL<;0%aXQ+6=A&ft2^?Ae+NfA$A|zd!wR{vQ9n@9$&!{ePdQAD4N-mRDUnM`V*y3LHae~KtMVhpZ;(9pgg2u!`69W-&K~3>^qSA# zii{riGgMD#4Wos(YxV4BsYv*9$}_3ExL&>{?YaB_6@{VV=pEaKv3^cSQ=cjsg@tgd zb7piSHbRuw+unMV_UIbZ+C~q=;LvS`ZdO6%kNo;Km`@$qZg-s0PK7Wzrz!XLKmC6C z-|e?|_DEOH46sgwJ$^qq1uAIoyN`RPz>!PXqG21x{YbmmAC{N^KkvmCKMvz{GqgUp zRUEHBFAvg=K8itZH5S*79%?~)aUW9Bxq4B@ueR@hr4oWVEvw;b+=0fz$=0{vU>~ePgaxLZDS|k zxD#*bi=Sh_sCVzhCag2(9@1iPUz!51f*XZ~w-CXg%JyrEax<=vsq%S7Re)u8h#e71 zfpAutVFyme<=j?g^$35bT5(hpVS03q)BeUeQDupDrMO0Ra9UE00VLxO_&I(^xX z>M-x)fBm4d(8Yf6P}$vCD3sHpfO?W)NMQ}QGzu$e_}9i4z}s+Shl z1XDo%!Nry29bGWt;lQ}}3g*SDr%4I>wt?n_rcch+c%SfgDdwxxFwBnS@pyh62F1F2 z{DN3l^n%afUtZUHq4LH+Wb_gd&L^m}s;c(GB**)hOG|iv&)>SCYs(m@Ol)S`l`#hW z{E`J$toBuil z`5an5SS&{%b=%y?Rjk(+d5gy-&U74BdxRNn=$?S?4y)^i2c}{5>+=B#7PD|5M#gjF z#u+%Td)nvC&1vvUF7^E7It{^1#f)a3r$PVzQ`wbB}fAZJQw8|E?lwCrAW=W`6^eWc~RKcx)+6I)Ql_FkkQ2F#yx>RD^M6Zg-jbY`Q!rJ-7~kbw#2Luu=&J07_;08x8SJN%Fu2ZOWW zDFbd3pj9xR_9%K3{JbuO#|Mvq!%u%^8!gORWGMC66gC3A=O%Y6H;+KL-qGzVXUU)} 
zT6^gT)>}+-?$PgdCqhU=f%z5o0cdJ}Z==dXg^-kXu!JF4I%!~tR4$9tXwVD~qO!3*qPcx1CmHJiI;0=w_HX)ky;u5k zNq=AZNMFDH9_iQrX{6u(PuKrX&l^mYFSZ<=fQ@?&O|DZP0wd+PBVL4dFl`W`u5HYQ zCOzvHSNe+3+)p!n4HXeHj=ef!G=f_6630&8 zA3+OOsksimR3v|X>%NaxL&!fV8w*BA$thUhOQjwdxrIGk6%BWUV+zz>0Ii^g<;?)u5n4q83i@P+_)!rJ)$0a z_)$8O2BFSRS1t5RVm#-W#a{uq@1ve;$dB{AJvm-3-sedWqLZO z>$b`->}o@{kwKKGlwM>kXDDd?2=^~#V(ohV75Be)KmKly=LHL=BfbtDNKZCH$Tp=E zNiptG+qAzJGK`bOwnp~B>rl5@K9dP}IC5{~m)k77;#;>Tsbc~r#V>t6J39h?*YY`f z*zh{Y?Z>#fZveEiR-SLZ+YYixjnMBB)S_HaNGs0E`cQ_|6!Daqd~_ z4n^#3a6Vw$_v;D$;G#0satP~7Czi<^Wr&`E_6^2K2hwN3BJEn)-MLBdIh_9K&7%pd zU$^kIUh5B^gg!6QpP%Eao4F0J{v^_tOQq$GL3DPhS(g(TJdS8+yM_?Kh{0J?)2ahZ z%NEu*QoF$P*h6zo*8w1zhQH;Crhx2=&>0CvypB6%h7I!fK>HK>fwDo2dx|@;U#hbo zhP01!OgtW>><3|Hx4%{qU66gOhDJC=2h93Cm>E+ zPjYwDB+RZ8YCR;3f2WkX*Flm0J`bRc+KoJgvry!xSJsd)0~7T#eKfN_>+}tuyOia; zS&;EHPcptT1BH#R>$V*IbN#=K#~J6N3{J0Sptw3zvRrHi)MvJtY}4A)m!xLFvY*)w;P`#5djh`^&cCVC4j6Sx%*3I1)^-#)|_?fKt2Vw@%cfRbn&ae9{^YUdXa@x$`68K~UnH_alv#e_Br-E$xPygUltbeImy=B8lDv}!S2h-!@{teIxhoC+jOw#S`|nY@Sru;+4(I5|4EtLIs^K z%nZZv2+A{?uQL!j(Ce4=a0cQw3D|vpk9CH(z(}av2>iGaETo2?YbJLeli+#+uy9^4 zuS>@K)%IuJRabG}U6gt{?pG`{(PlY^qO*Z5;>@eDgBYJr$V^ia>V~+rr^Sc0he1zs zm9=I4zw*!Cd$VR!5JBa23?|?__ngG>O=WoBn{&fT!eI#B-KF)K8&ToVEZe;zg^fPhCxu-bgk$-2{ujz?0&06f?1lY)L_RD@U5Djn8)>$ z+hX%O&Ua&6OsOl+v9JUB-^4dZp6>(0unndr zYbfxx=ZNZtSHqCoz?-}Lf(qGJUNr8;JhW`7*g4T{190D6J0~Hl9}M$Nf7zU%z#f#U zwK@{(YtLIOFJnF8hlVdN&ihkA`abv1rtRavshM*v?ZhNpRn87RF*^yGxtrxj%cmjz zNr0SI{4BH;RBwGIPs4qIaO)gAPncJ~(ofhk0oAHhmOiW>ZR@>NBQp=@>Ae)KeCKIM ze5b;-Eqw;OF#Z^M+d=x8(F3p|7+;g``Z=Qd7Bg)>J_n0oq~M6<*_6% z75y+sC@aVP`?uk0xPL8r`&p4~(*Pox$gVQN`hE$=qn2GxP*LjL+z9oXBk1Xur5@p1 zBPe6~{ocOQRFwPZGf(#BA*3d%DbVROfWo4L7@v)IV4lFNHGd4&pDedpaeK^xt0a9!0?Ub(PBM-{{vKJF= zsDG#RK=WJ7kI~d`44fZCgxl&mB5Wk2_;%B2Kb1ZdtLSLB{#HBcUr1>miY!GIBCKb>bA2AAb`6)>UD6Hf%{p%# zP9($*Lh6v`T6abwXj~$k^Lo+&xlRmgqG&`o^hPq}0|OcN?*?dI?N~2RJV*Gf-Y{IS z1IB{AWAG&KCPQ8DpWh>a94@Yi>;LIl0nVBCNl=wDD5Krl1yQqEhn}H!sA4oMd%TSC zPUckYFv~s=lnk&;HpV_4!FN_n{BZuH{_NlfMG}xwvK0(j7B()%Yn zp!>I+F8a|2N?S%NSFrAaAg{zun#Tm(Nbp-EKF2;74P0gtTzG%O^l>@+Eyj;|m9aF6 zPD1|A_i-1sGOxO}L5j0+dCr?gFkT8_<=jL7k|uxkE1rIE{r)Mi zR)GqS^~Y$ez9ZnePTfX@lM0-HT^*;EhalE{&LE{}01S66C5J~60huhd%;pfGAjBl0 z@8@*AUZ(d(g51R%3D#@;;k{P_7wOCtX zeG2;L{-2J2$Nhi%KL7N(>59+nud%O9gll5D+`3C*{`6DVDl4X8(V%$u6JYn+-jPnsw$ls3jFnd37N%Lc#+R`ee9S(;ve#VJ? 
zJqPB?$s}LS;K1{Qxnd@`e8#%}1EdczzU_!1Y@$!@dNxwqza=SrO9O=3+8W+{PXfxX zhX)=OO#<<7XDn5A3e?PF(!yi@JoohP%c69n>&0H2$I0CFd}}fS3DM=!>K((7`rTy9 z@9YtbTg+Wkk9|H?q!}mXo(;f)`E1*yXCW9=>}r`0sYEnA4bxksP9z+uDW}ze|E{!- zR5L0GX)r%$P*lcwz=ki^8-sh1jD+Cbi&xuFn&an^+Zt&ovQt)-#i0>wk#l^|W&RYf=+lzW-b;a8;uhPL??WIy(fXF*Jr!CQb9yI3 zsBqqYW7n7k1tJ^<=Q41A-P@RTkCOKwoK*~H=D>UvyK{lkV>;u|acW({B22-lb>aHf z(c{2Gh~$iXJpw+Qsv9TnjsV+48u&MiLUQiJrk8^hsC)Kx{L~F1a8`7uY#(Zd$O96e zy@KkYUSd_!nW|P`v9Tht4|GHJAy!T?ClcsKzWIDKauE8rc7OJlAVU$4hnLH>Avjzq z>EdyA7%cm^o~de4z^Si@`4-08Rj=mq^msiCM%f~14xgyN{E?g!VowFKdj3JhE2E&g zYY)dc%RhKK`tu)k)?jdeWd^jEJFaVNp8)wrr^M~JF8K73)bHb!lW^OO?YEJ|G{jOD zx*P{5fiu9O)`dC>kK<0ax*ouI07>iJu5noBJI(OknWNbM$FsL2tr+(Q>&Y_Bcpazz zzth)u`sdO|`gQt9KTjX&=jr1=ou^;_r}3X&_xJgY5Xv96ANx~;JEb38z&wc|k+{!G zwNPPlQFVjMOYpg5?-Z!$dOPswQ>9Y-m=9)4!$d66KGVVb zHjnU;>znrvLL9&2mEXnKAE@I&-r1!AxG4L0Q-Kl%nCsK#FSAjhKz`mjJy}U|$|-1Y&oc`t-zQ3IyDf zk1}ObA#THlU41*TKO0NtttSfu5WmM&AY}~W#r=_9*7ykMlb7EW-lW2MCh2b#4Fh1l zuQkF5?+1=gQXgF)41!nn-Q`uMMxd4<|Hr$EF<7XVu+ym;0=;{Ng;lsuAQN7FO_)Xo z1!bFk**PRICRq*gxMH8TtmosY<$X}`@tEEvO$zWm*|3o2G6r={n-Bk7`cwA^-ap|c zhxg-LMlmlwss8i28{8|(NZLOQA%_cYBwoTwC}YPTWMF@@Qrf+ELFWlz_5J)&_ueG1 zn}zU=7va1|%E`uLY8*DHuPLVGiXy?+&9dtU z5CwF%=4H&aVmzdCf<;7o4J3rU*7=+hkIZkd?|A6lh!*q@P)-G59*fAmq>A-;opgCx zd|}lPnoGTt;-fi?<~Ms-+S*|rUDO1z;NcKzO0!x%=toAycI?v(LVZXpb5bg}z8(1) zEa;!ptwIJ5^XzU^VH_A6fAKkicc?;hX=Qpc1}VlrcWVw0K>0#6qC&p{5QpV=u}fYC zS`MGFdCN0!>-V>oW{d#Snh|XQjY#lW`FZjjF#^xOk))6P=HMf2eXX`M84+)2zm?1? zz&Pbog%x)zkead+m!y0R8ojf>@~J`tk}BFZR$EL&=S+^cmz>17KdtfN*nuJBLRe*{ zoPzrrS1&xIV7>o`UP&hdtH?;pJ@3wwnm(klI!fa~GOmXdt^4tNS3P1#zaGZwnT_nF zw%Z6gq=3Et)8vt<0-$Ys-1FvrA{uRLQ65Rby4EL%m)#5qNX;krZP;`NDxO!o^f9>| z350bi?fB4$gymO7hUe!aH))$|o0b#6Aj{f2jHM1(q~@;%Zfu9*=%}3EsTDvB4Y%bW zKY|4B5d&NE3?zK|@cm6Kn1?HDV!<94hZyX1BY8+UkR;(j&NFL+S9d;KFRSPWqlG3n zS=Rt{D*cLk1R$Jr zAiU4U_{yLq7EwQdNV$$r*HAGCv-oBj590O0nc_pW>H@#bY}X%i5h2zruvqIvJy3p^ z-u`wUz*tM;V8r$^V4G1|88U4GJFcUw-^aSZ;_WY~FPnNH^59Tq(3c)~K}c+RG}w>( z8eaRs0rR!af8soeaf|9S@q-1ATVa9kk*q@)0S<6?X`(CL(B^4!Mf_JEm~&ODuEq1C z6Z3hpg2*U181fH?)!;nR=}67m!WnQ{nK9>Zn1R^?QP+uEXkd5Gm`d)Sg0{rAMrUgp zB$me8d*ZrCVnP$Q^T9Eg@9FLeV;X_67ST?%yHr?UAZ}N|^;dOnSw4?<*hfm9Z8F7n z0%W-}<+?K_;d@Khcaw0O4{Q?dXJYshpMS?O{g|E~(8vE2r~m2t|LOaT{&p2>8zzC6 zY>D>w(xlaOXOY3#16G;=8b~8WD7_o~XWxajNHD+Xqkd zp~r>C1Gid}F;DZO4`(O^30u@_GX+pk#$k8=D~HIKzn!JPXWfUg9wh0{8?~d_gBp9@ ze<(%vWEK@C!7^}JyC+Kf2@ff&nM~*30d{RiLl}-IS&d5hJU6gew@Ji&|QSmE}W;H zy)N-}z8!dSr_>)zd!yXVTBR4++Ys@%>@zE$ev~gJw62Z`>wU*HCNBIMMuqd|__Q=J zZlPAXAhHGHneE<}3AEw9h579fztbIv?S9Cu6E_Qy)N$+5{iEgJ;ZP@cb1M;6S2qoA zxHAAOG52;F8{+)_cKDrRa&h33=>KwWO%tkhkWE~=+K(pg-->;lhUb^y)t+2k7$+(B z1(fG7zi2^VbM6MLKftl^5`4fqD*MjuBp$_nLvn(i&(@})td+$H-PQ&$YDw%o6@c~S z3JtvubY+8Ov(N5RR+)$)WIU0du?!Kp1f#y(EkMFzlhMo`ZX+#xmcH9l`t)3 zm5TXR{PSTOJ|JRk6m!q_k7%LNMKfdi6`1p>*2>+(`+@Tp&0Uy?5T_eDa6c6H8x>mQ z2CiY=ou#i&qeBNMnw~nY$X*Lt1Lj4e7{|;R#F#Q4UI{D?4rXgM;O9g1nSO^>I|Nf3K8h;a?iX-8SLasM>2 zVb=58*9j<`6F9NnbprZ!^{n_`8iPrHW8o)Bcz>}_p?f!x46*Wq{f)C^h<4d^$WI;P zkiw<Np+ly0WZ;ynhT#Ti3Jb`He%xhS+ykVn|8)JI&eLBZ;c#YUfqriI8?_bC&A!wWG^Wb>uO3> z6sqnT250MH+deAJf8Wl=h#6xX2nWS*WlnHL? 
zKLgm)YfeLKR&us{-xGSm$doS46TK zRkVD19ZIc6TF&gNV}EC0orx($Z(YpqZJ>TR{uY1U_KU*4^&=p*>-F~A8z@j~7}BOz zQ3?6-4xg@hRU@D2AgYK^KT;_?d1UviVPto6SAf~!QIv8*<))|IDB|0ttn+Ip70t=p zyPms>b-u*pKdJLzUy+NeiK)S+sF%(2!w=mKupHgqY4E)V(jLWHZIP}3hN9rxMN+XS z=(=Ogy1EjS5XRkQ(p!Sm6yi4u-j7E<8^d4a>;v2vQPwdMBZ8G#ptiXZ#v^|lb03N) zfSS)a!;@|$U?A(fN`deegaa&2#m8lVRDy>5QO`PvTWksa*Sb8>aL?q`5fVJ6uWXGQCD1Zdd%hUn&?!Js;;dZh+(0 zwc#7_^B8CerJByZ#QqV}mG`P82O)4}ljP6ceXwZ{%Z=c?K`5*oF}!sT*HgauNWWLY zeEDZs)~aa?L}xaoH0e!30u0@#Q^vkKwc($HaNTzG`4@eP*0@fvc8lyjTwl1k8iOpnL^cBHTO|Ly(%6#rr&Y<+4=eem?c^U6rf`w6s}Yi#!?g7F5`(cQv0f4lXe zF(2!4#YP+q6uwo8h->ttRes?0)1uAr^wDmVRhe_*#f|}FD%);N^22rWH#e21-;z+j z-?3Br&e+%Cd9rWGOgB>Z>GFR~#Cjp88Y))5DMgktazU5NKS4Kh%>di>UU>CRL4VPB z4Cvp7{`VSY4t1CGkAWKdB=3$r!yvs^CdAd10^8rWZe%@AfeePh;}pseNY#EfDpDQ= zGtOfLTUv14u=RVirOhOG&JMlQIEDMh=j+c+?Zi6uCq4_9lvZJ0d?RnwSt3dcsFj~8 zB_XQ#t{TBPtW)@$Y4JYu5SrgTJI|;@MxsvpNzv2Y$W6ceH_fyW8L%=WPyP%#$e2;BhXXYshFu~mi+&k! zVl8!j3V~8uiSjrRnmj`mER0oeY=p)9IL^u)%9q> zr9gYFt~d5$>b_sblg4(CP;DIH81i>@k>nn?a zlX=*)FSritj5Fr0>A|{OITasCF%96C$Ri+pum!eegx_v$B7$S!7O!hMxSz>EVfcD| z6a)sHO_Y7c;N7)5VO9ELkoQoen89}pnoh55ev~u@o?=XdFHc5+ur14(1J`${%4IFQ z+sI%#`Kh&KZ~%PYaXanLB?GPblG;1xVQ{QoEXa2rfk4B#g#8HLd?kEk}BC!i+&)>qt|Rd`=-dm($RHdKr# z>JKDVf$7jpsWT(S9c_@^7}wYj2bsdon&N(BXG!|TWpN7jk)AhHJBsymhXwpB(y))U zerkNw#UZFGh?shEe;6_j{@%`T1pC5kF0!s4!Sl$>LEh)*CV}6_nA|>r=Z8D0O8!_c zi)Y>9i)#*!C8K57yv73x6#HywhWH3)jWZiDYUi^K8y+cQN zh{(3aUU;UY8U=;OPWUE&0L$8!W%q`=;On~v>jmsj9VHu9SAzG4=@MB&0?TBeRBJE^ zKCJ?^y$vg23)mmz*Cc6;V=r=3c`CmrYY3H&&6V6dF@lsU{KaLiVcew35lik>!-!J0 z|D{->9Y3PBvFY8vo7P@rS;PNlCtBw!yEDb$fC!@{JJmuMXYhSwar zraCf=P2-oIGL~WpndW>EnMJxBqnie>zXU zP9LA1JgpTPF$y1leo~I(rGk9+6}H;rxG!AnvQ-QF(wlmZ3OsfF1o^up@6N_!-S2H7 zO;19bk@>9tEXPk=2YC9ZibJUn8OWO@woeYArdQn|@kWDaG+<-t+_ip`b|uhLdS^E> z)w%e&B_~3eHA(7tf`Tb zqp;&^Pu|&`RB)9S`0cj^`!4X4lwE#SgWAqPJE1d$C_(bm+5nyolz)E54<{|`!?!=v z%!dW{Gpc*ubzGpJ{NHO%@n9drEEm5GAJV(Azwqxu^rI2co?Z;G33-L=HJfV&OB-S1 za!lUgDl+WNly5Y;jCB;0b{|aj#C4)!jq6=+u|I~6RT@h!UYE0aTBrT-eCey_bI1_u zBHf7K2(zT3Z4I1*@!zl>nA($rf;_{B!TZ_1>YoFMLwv8Rlv5ir)sH{7%Jd$dk8&PU zIPiMSceP}tco6Dao%MAtU_6uB$boBY84!8wR4;GE8!?qm>-ASZFy|1> zh-}4q$WaBs!BQgBrb-Q_&62=g_jaQgGuA2CW9r0{Q4LL9X^(R8KFqRdd#B2^a$s6# zaz?4C70!C^d{S254@~SaN-w{Wf%B`lME#Er2ud#2K7;xC3lds8&+M&**pG#g-UO^i zINWh=`AH@?1tmQ)m2E&v@8s>i<#i)liRruhab82X_}n651NMQ>XAXP)qZKXK7}Vz# zyhm=7p(9SbnLxX0F0sOdeVoNCqSo-^eE%t^TUSYXQ!e54u)~y+dMw5 z`wjClj`*+E}Ed=2|l{>-`D-WD|qujY^Y4RB4s0g8j_051*d zkCd|)kIjH#k#r&F{|k5UcbwDX@xLAazrLRSIqnDa9eY1Hcb;DrI1xmD6S0StHg!X$sZ6wA$_IbY!i}Nl!_I4iTzy4=4sN)m=CZxo^zjw z_W{H;q*NP>qnTZBoXYJ-6!xF0X7&HtKXbOUrVtQC&Ngp-P8EuLxk>l2s6Lv@B57zRRLoY|j1HAdbbIfa?!>w)!wZMcgw+ngELZ=!t`k(KMgW(tzu5zcdCF9lK73R*2-f4YAC5v$elU5H0Cc=fw` zRY)shTgmyT<=q}@9#>D97Zj<2UXIOFis*{(sOqw&J%3? 
zh3?igAmQC?g=!P2V7>lR;NEduU)O_EZ(ossFNvmT|Dpqm_g|}m++UpTmpEYau&$S0G z;=XGwSy?O+@AtPA@p&9+0MlHtw=(C7A*kST^YGm&WS&I6{+_oBEhwzwmO{PAe3)~a z?H$bjC>MO0?cRu_+_l@1`~1;D&6n<{!`Wb(ec5x%P&Oo(-Idt4rxJ+K>vkH3*Ff4a zj3BDd#yYURCFJ;VV@~x9uIumrkT{xyaZI@q@{v4CQ*cEck(DuCOxq-??&9W2Pc>izj z?`B;*Pu-3A)XwGeD{Yuxd)RfW!4wG=1eVJOj0unyw~IoMD1iK|r87?yGO>S)u#81Z z6B53+uaY$x>u6ZJjkc`AdYG|$!d@*7AU9Lry|qb$$nTZ4!B#GO{Lw?UUabd7`5SA` zZfHkt_urLIO@0E9I=9zzcr?wzs@ws9){x z$ka=MYhC;w)>V?hb$+kzzkJ_yORX-NhhhKa1A&{|jIlrci`S=*nGeCo((_rjvCfMN zW2UM(c^DG%IHFWi2EZ~%V`Q=&`%-tLyqgaJhKNO@K@#W_GN~Pe%N_qb%Yyfh-qZj)}M!@4y z0;~ViUKmx)yH)$M4i+jo&tDzIzO2%g+pi{Ng4DHntNm5wkj7pu^5iM@q1~myd*lTL zY>LW#`*6Q}kAW}mEzGNM=H=$tYt{`*VtfH7@>?OS_nT^@FP;xF`iI)FP7ulO#%A}( zeu$KmByG{}2hJbsqS&#nZtb}*%&dOgmrj- z#9RzX0tOpO#a4k*pa>7dy{p8!-HH#IOQ-XJ?ZRyqIWepgEv(GFa+_uIgZ z=MW#ws}m-eZWbsJh|nY*aP!sJAbi}fT>S*&qONXA3us@5^|)-wp5{Iyp!ag3Te)EZ z0-fUy?cFy8T@Nz&m1HI%v&vHcE$;7&$crEgnQ`F1tW&Z-U=-#|w2TgWW1W!s+tWKP zj=)tu&4V>!6R>|aiJx18hUZ=LiUOx;m~~Ll8Z4LrZSvXb=Q^{1nA_#VvCe1ud5eY< zeKSCdCPT{dzt;!;DenJopU*-K?PCU*&l&03AAsu|+M8pl&V3q$0D+h4ydy*yrR=J- z8^tKKkP|ih^{WT?|Gn6%z%r&C6{i^AFxSMoHb-gCtzu$OSXHFrugOxdC48M$p|t>` z+e688Bb>K=Gb-OHhIQR^(`CJrD9~=_@{98)1zzs_a{D}q0_xi|_PI0RIL|)o?2dV( zQLGZoikRoUE!ZjGD8}0v6Ghh=nNNa7U1(I-Rs5S5R{m-ossV;(a{5495wd)^ml)KC zeGC1LOn$>$`a4<*5ZA&*s=PsU=rAg$=3RM7{ z>9+a=DUx4I>@eRZsMTc48_XlF^W@&S_HYlxuC3|4?1Afmy%%{^+;c(EB&KtYAr%;Q zxu@@QD}a>mLNx^sn;^LHR<6qKe$dk?9}~syU_1~yK6u(rqb~O z)?Zml`*P-EcO7u{wWv?t#(qTS*O0G#E=OVZl(IZL&r+r(-Cx@Ep#F;t@7H2~lG+QL zr~7RiQIpW7Lp&0%vF?A$>e<4p;4>v3&c|qqg48y=P)p2#gtok2Jh*?!Q!E+%tiBk0 zboSL=qGdzv;0>bCo(eF&SBci$Ylqbvn&cFfdf}>vsgE{)Kir?wub|$h0NcmKW0zlz z0tmbp_q;a&@lH)ww>06ty@f>M&+c*X+8lT*JY*bDk*wMwoL@$4>a0n`I3LSji!WDd z$HC&&2JHiQU;o1L^D)!Q_;+*sEHkz|3jFh)?zxuZAlaSc8WJ!G%-%tDnwW3w$C|gXni@tO4B<$pRZmU~my zV%?Usbti^X1u!2U1v0lX^n)$&IGN>GDX?X%U1SWdKtTir&as>>L{!(FJ47L&kj;mF zt}z)x$|hHGS_?5>`MAzJw*mzj3s-RS8ucN0^8#aH3daAM?(v_nxJV~ z)t9f244G@9??*?C!7}3=pOqbBU^)BR#7Y|b&F{KYtMB;%c!X%q+R9Bxv0be0t#BVo zN@lu#lSD=?Pl^WLyu&_t<^tg*%OoT``zUKss2k1kn~RvI*WiBSuN zO6NAzm*Xr zPGLOe{Ifhe8f+2N>`NY@L9j}m?H(S?C(%rD5dAR_0X0 zF~1;i)}CoC#!ai=@X9O0_{%4AUq^_c6QKBWo3qo$Daf|yxx+xf{Gx~CYG2bCP~g#X zB^cp+pjt^j5c3Ho+h;g}I%gq&aL)PBI$AvP9#+7c-SIB z^VsbN*k3Z?P=dFA9Raa1I$Sas#`}6sZ1mkbfY>~wx`#EeJ}$4SAf#BTqteL{2sJeIohU*h%HvCKBw{7Wy$)?0R6!u9PiZ3m4LMde5>iv9FK^Dg8$ zYxJ=34GFP`Iy!dab^obyNU`=C>;rb2g__8ZeN5&z>Ll*OI{F%hCbU$nr<*Z$?;`ab zB7D5hwMVo8lA5ZUxy;Dm*}r6ZnKT7u*L91{dM075ni1~|#;>Klak?9;oedgI7Ijw$ zb;y>bz)$&D4+>iAxJz>c`=M}a;%zL(8FY)V6zB~i;cXeO^Iv2CF2$l8pUrsvuRh$2 z;`3q8{_VD8jB}cHeth5OIOf}sODi8-z&O~$;#RbUCNN<~m%@I>q}bY+(?hua z#`s}IJG}!u19rIN7F2?8YN+;XWeF;dF`C{`OF&^S_B(mEb|KRYKek!+4wS`^xjZOW zjZ9w=n4U{S01-PdM_w%ih8OG#A4nyj_bplKK3fCC>YOhopK1Yu)cz- zSO7dzAAc1|)BvHYo2B^`-tXV2v(zf3K$p)FgEQ`LWS+RvSE)J%1RL(Fuke09p-G_6 z?-SPVB%iI+a=|<$wW0iMCd>!i;NoPiFbSqQvGu`)8 zI^QY4hy9#<8SST=cZ_0vrAARH%L%ZZBwb3!xabH4iUm(F4W1nO=|zc}1~1e6`6sw8 z{dZi_kLe>lzUiM!KmT|9($CY!zpww(@qfDC;Kc_$;R-4UZ2Yv=chfkC@;1vjB~yXn zA=@u8Xa_Z8u}HN-?297Vxp4h44Q2Hvzv527z7i96mun;j(c)RY<-t@k zGAgOwc+xEP-i&JoibT$?p7W8@|$x4SU=g)i`TWIyBoEfJt`9^*or7B&rT~yB%}T(Ekah8 zn_$z~HWwp*GGx6nwz#}^3^uZFeO{nB1WvymWq%>S$v&j9a;+}c`Nz4zy}Kf`ss-xFlZ zK~4AOM|+bdXd67eqT*F6Y%!VTb3KX;yixm}Mx=DY(s)_ZNStpAQ!;a@yoKN6Nvg5m zBD>+`sK9mO`U z{ND2%_|B7g@EYp>#)$qn*f6ydmhmjhm;BI)KELVHXF7L7zsIGsu4QBIBmG|fQ$75j zZbxtbe_LLh9=bpY`^Q16%=l+0p&q(vyh!`9VM1F!g!7TOe2Sv3k=C)AnueE94BLc2uZc;&mhQytnAY@#h~3a$=mgQ zDHzT=eI*ZG3^z)PJ!?s?`t#3UcQqq@Wq`WugZNaV>vUozndaeC#HUb2)x zURE8&Y2RRa#H)!?->zIRO|6;Y&aBWA#{7nz5oM8Umef-&`{cSAW$5=;Py#2`zoBZ3 
zK1jcMf%9Zfn~R2VT&y|E!qE!9%O9N*Wkvw~9GS0Xnt;RAKbyj0cK`w%o1mDTEu zltPpO?B?NLWby!w{wDC(z)4ZA!-U&She?28vJwm`(u ziSGjMFu`XuXE9eF^*eU8?hn8`2Q^nb5C29M|%r5G)hfg)Mcn+zxv~EJ@9Jeb zpTl;uCm8?h`_m^!q!&bgK9K8{9oz5Ua^iQ#|DUesSPMT@$mGC!B~$8!3J1D5$6}24 zw!rgTr;EP?nt;)}JGNi24i;CNX(p%RbLeoAir@)>n!`aCdmXZ<820mtyFD?kx7s^e zWKtt#c#`je#pz}$zpQh1Z5oRT%ol6&>%_bOsr!wo>ZliR_f4XwGZW+0^&GCetfyiQ z9pd`dqhEcJUf}xrV)WPHdh*w&Q(VjaA2oH|snFmog}0qaV0HAmQ0*O@hhSLEq}q}p zM(w5-J2W1on4$-CroRM3eR1KKr4b+&y4U^N+3qayK@Tx-A%9ka^<+Ws`dGM&sr(N;c=$s}B82ig%@o zwDYW}^`tEXZ70{UM~;$(loe$Vj85k@qno?kXCjsG=x?~(2mJzN0S^ezn@M1MM7 z&guG@HQu>%gPN4NT}iK4W;9Of4Lo!9MOTTLCNGw^w`_Er;3t>O(W7F`jR@ zPru{}69#uZocPcl^LLjtZ%mlq4wF|bl?^s)g_Hb;@9e|)76$j}U*+r;=%3>Sf&*;W zw0cfl0OnPvb!tX;g?B*5lB)HM^E#l_=<6Uio&)lht5mZ*QJ+t1o37qr)R8$}eyj2# z`o=$1^yP^L7>a&vvHN@;$f_#b<;S>en}tCtFYeXBRyAo`?Q_*IF-vmAWsMr}%2X?y zyT1vdmShPep>FHoXKqDfSu?ba@Q5xLXo3dOZx^mwp$>Uwa!8pN2kdTV_>^AhguJ=x zr=C8*_Px`L*Gr6gpnu|Q-w6-K>es)mALHxcY8zk`N;Od#-A&ZJ>+e@^u>EgjB7S&+FZzIFPx{DNhy7jZ(#fin21+sg zN3f$@HMJ&5XH?;MF?G#XI@#0oJ$3E-{v{v7!@5KM#5=-^0zq^3k>H*#2(4XLEWP0~C+kYw$geakH5vY~kV* zSmVezRp<~5QvEpwD?f*U=a2nEyB+{Axl3czY)ZjwUFo^B=?pli?_ia#*$#(2OgDXM z>wt>`IdZa9YzRB^>8~p-ZSbgO)7&|gED+x0@MuUJ^ZAAT)SM9C2quZv>qIP>pnLoD zaOidxWE_f&*@3z{3*(mjGQs}y!(nNGpTVt==Ceh0A%6#0TpDz|@V)21`^9uWl`f^+ z*lIqG-N5wR{zDey>zsKane&7>5UoA@AOPDbvhPa;D{o<5&-=BS;TG7Bw`|*-FI5P) zho&UXuPT5XlyJ(pjd{mC8k3eUsRb4@T`6^P1L#YXl)4C^9+CV{`GsaUf7iAmT={V; zG|kyBG=%eb0zugxj5Tdg!%TlHZ`=V4htZqKpSe&J{*iq_bnJf9*J<}4Fz0E19~@3P zqqR$=7iuGN0?cdveLbb0BV7;2FK;x@xMt8>UoquL;b*$O4I`kOi$W# zwwCf&Sj;jNY^20=_GE1hWl*PN^lKvLF{la2`IWa0*HgSBeMi>pK>xZn=NGe83aN_$ z7yUgPUQqrSHl04%nV`a%$)AMrKfE)$=Goq;f}$16g{ph1z$`tsYUk%_jN29V6FE}{ zXV?_OR-6fLvrJFk%|*Qn-^Qyh67BF(ZFt$smUhVAApG+W1Jt+9);VUorv<*g5bOMK zg$2z^#f24Pn?dC;{UXBwN?-+d3AX zbkuMDGdZ7LI5lYVK>1uJ*s`O3RGdeheWy$p9!2!2h?5F`x0?xyVxDy(&D9XxvSVax zOc~Uls<3nR#d5-h^1DgZa9&t)TN3K~yfb_m5{l2~^wH(n!81|!TFln?63&NySs3tf z`fSwM?unAOsc(YBiUot7sN)_!HI0)~gn3cn)>gS+aQv!cudvRQ2GH=AUyXxH_r?c`- zM07g@Tu~O@T*iihrS(M~SI|dtzK|cohz%D52W}~29LDn4IT{Li4ZwfT>!re-YPc7` z7B|(chQ2$oe<~Q&LPDX2expnycz4u8%68rGQ9?9CRB5bzK-cTs#>LNdU3=j# zj7#nN$b~>-2OXDeF8T$y-F((LrteSJvn)AQ5Eb7Gze*wPef-~#M<18op1%L%OZs=Z zq<^QE#~+7YPjB}>EyEPQi`YNLe2AS+%KZ3!)h{vdz4;#X+Bh?E!orh*%L)>d)P6|~ z#NLs6n4U}3AIEqAJCBX> zr)wzxh{YjBt1yr4MvA58!yKySjBD?CK0iumUiz2w&vSvJ{nEuIt`4Tub#|W8cwhr^KgE$95Or%a}H!4$QNxUaWip^DU)nOh>Go zxnQ+OcwSaaFYKT0-=-HZE}xSN0ZX^|1?oSme;FDY~d41N^#`LdVw&?f0c^*xm71%r~HEB&Ee^&Ys+e~%t8O?3s3BVF&^&K zIR}Xp2h^F2+`{@R1LMB@C34p>JHW-L%F5FN$DJ%igsLUmA?K`(!I1>i?JW5^Ytmcn zmmU;<`O~%oq8tWJE22M)bd}%PTkUvV!+t||P(RPxT6(fHvk@XEa@|E0@jLJH<>=9* zX4Idm+a`|nS*-7HskkTV9f^17>e_a~!|iriYglYJYwK#aaxn|i_12#5L*J`V%Z!N+ zF#a)L#m2#DZaZu-od4m{dW=u}IlE^5s~*spdr)l-=BwN5ii_LxVt(4BmirBvWAje* zdsIEuNch{)ZbJHx)I5*^tEY z@)NBPyMsrs26eiZiL^eGT3QQ%_p;mHilIJmUW{~}4C;nY^52xoL0!+%@2)7x53?gg zM5`vEUTVyj77>IUBDs?K1y0aq3n2D4bPs58>BI(;a&q!lDj z9XlqD<8xxsdG6A?I>9_`;m8|4%=-^F4%?aD1wL^~Db=qrZ?u``mQ3Z}$K~kyM7I;< zcqiH|?dt^*9d$O-uMY&A3vQgM>4VrFY3+!meXxel&DJQh7w%a(6nB=6-7mTx(&f;9 z{utx!`|l+Ty^%iHyUTuui_6%4^!e%i=w-U3p96h;^!oA3^!~pq>EqBPz1`CLNe51@ zYJpImd;Xs%qK>>pPTexbN67jy`K$EzC!nr-{_&P~sg$hz)`BSG0?Kgm=V=jFD=5pS zCFc`ss;SWFmu+`EtEC!;@7Efd)l%h0V;5>?;dg$KbI6QksINDwc*&qAixMl|ZCs{% zm{JsWuY8d80Zy&*Tr+85Bdl2Oe0WP>3nVMqKDnpS0)|t$-R_C4P-r}D#@9ABT-Avj z+`J3*SghY}3~cL!6(MOWr>S6^=9@~79+OU}=NG!uiaISr8|*S>HKHH5jyZ2vO*`y7 z=!jZDZO~)xE67{If_OE`>xV4^?Dss{YbJuaopl1Ke5D^ypKet5$ea{N5j-Js(k~no zyNu&Y&WEA@*?hqooM*NivQl{w@(~`^RUH*gtpe}L=Am11m=E6X-?dMl4PtIrms-Vi zz;)H8K?~JxaAR5S`n8G!W*!#H#N=AQRp3qi?NTO46sv}ClrZo1a_76V9WAia^+k;E 
zFU&WY8{&Pbz8xfkix+=8*#d4A5kHHLHNkb=l@mNBVji^G1{pUECP?!bJ{s|B0=v6| zp`0lUn6Jo<3Zl?G|eIA68UBk7Iy*mo;yp3j^hWsGO3KePYwB^>8ARMBg4>3}wc zy#_6)ul0J}?dqw2;W*LvP`AI?rk@#uf12us_(=27e?UXi|TVddK054=u5% z0L!#vJ&umW;430gA&Y)xtu2WU=Xs<4*F{J7d1@?}R59C4->n^1ESzA*{(|FCd%FFn zV?QnHlK+(puNg3Uj?bvBiUB)6nT6@1u9;1Db+r&L2Rt}$6B@O;;q2(QHB+|!eZFn{ z`%W*9FU4bJR>+w3K$wd$GYRzo@@BtRy8ak-$1RN}w3c>4q?gZd^&)I1HOW})aOeY} z!Ne%>{9Z6o@V?&Y(F?1;^_H^|$NC-g@#*_bFaO)}|8&29dwhDkWye)wJ;@J`};JE3^Y1X;?6TcUpRg8q zdrET-1NE#9js(jcPepw!XV0Bwvs%DRc68wAG|acM@4Z;_2z|A#$GrNJVqtt@{@f-x z2JB+yuaeVf0F(avgKIE;=}1H8)lO?B%&9ZmJe0u#>pt_pPMWp=OV%k~Ri+iZH;cvk zKW>9P62IgtP_JgwBZ(KCPuig2fz6^u7rY*wxVNSl@0!Rq@YRj(hCg}hw-#glzQp?@ zF>mTvKiIWIpvca$56lkxan(%F_wLuMGY-C}vk$cawY9x^yzVu|f2T{9z+lR*=031{ zw)XT@wO;rGO^+>Fxiir1$%$?fz=;FRa7wh0wMHk3=c- zDP>gb8pRmpw6hBSYCp@Ms6ou?$L>_P79O24ck&Zzt6675wR8q`yVc!sYwkyk?^%}{ z%qYZuz;xzlMj@qgee>y|T^}ivfOv%k_uf-68|zh}F__}_Vlf#%k{~9gW|e1lAq0Fa zH=a7N9vb!4H3#;!f>znZcI7plu*H3?f=qB1+`JIN`jXHNea7>TsLHd!PRO9;rCTc) z@0GlsaRJ}^rORzy7ZkGhnXkoupl`_KfSM_& z&p&BGAp_@qg&mZfd@pdppyg^of_4WOyGs>|cbGXmv0A$nLn?;%SHXD8bZ zGzrJ&pEsTmy>R6B`;9-&_+|gxvR}SyP?WoiQ~H8XO=LSEzsdO`@Y0p4m95PleGKBfq|H#o8P8m zUd}=19ZCTm826~RtT3|^jAO)9!%t%TzLbvV74-ADePi3eNd8zmhW@{}p6n-iExlmb zFE3%|(F3iMyBT~}#;(_R_2!M|KI8b{wye8b?*4s$`Z#pyZpvDBSf&qF2d|a*^9~mr zrP-O&`@29|W;k}%KsV$_H`+gQ`{(1#YrUgmAlL`3KTj8Z8|i@<)Da$Z8@n$0e7`II zsh_KU5#+bu*Pxw5W;{DM_QOX2vKwm?TmI(}F|%^dC|aI{ZYp z&rd=f5j9DJmO>~zm?b)bdK{i3Mg!p*90>bkvtc2Qi%#k}DfOcRbrs+2t1MP#14uoN zl)?V1!&@r_E7a4OWYT9T_<{*q)5@BSaIu7RZn0)Nc2Z2&RJL-GM$EErfOzugLbe1zQ#4AmxaKvDEv{Yq{0 zb&uY=J6gB{E+kdN1decF{QVyI&x$@6{NLqdO@~)$JY1OaL3dXQvmF#?xr;v5Q;LA9L%hpk4icwFB>M`@O45 z&>!<`@1!mq$9rpcj#2G5*3P2eGy4BnoS?B=1=|CK2M#=!pM>*X&k8o&%0qvXl?f~3 z(H{V8YX@a-pueWGtIC!Iy%2x3$hSVS2Nd^sZk*%P1*uWjO0!S5gKyv3sud3HP}x_w ziy!Olo{wL3%TRA;7yq}7ipx3R&{P}OgYyoSZ+0yf24&qpf#gyJ#?~DZcv4-|I1bf22Ax z4_u$t2L`q;=6~7Cg)EN}f1h7m=zZlJ9*jO<^mg=l>E(nx-rYhn==--bSgZe553s74 zT3)Z@2s1{?WB6PnK=qz+wl!OZwvYeh3mEPZ`UVd+{yZ&tGYM+usyI)zwa2fJ6FGbb4L}& zWy|*m@*Ohng_~NfCuGpiWQMY1$+JtC*KOJy+g92Eff43Y_3AKQ*yUaFLY&|9zp3r@ zYddlxkNsp{WF5HOxTo}!RSh=_49nUU)Iy(l>Z7NN8K^@*#kSR>pT;kSS&mRUWK1kz ziE=RhYsR4|66GCWry%Xg_2_~#J>A*PSGpm0(emi~M>^q1YWuG?^eIwrEG$WDX2Wa; z&D_mcy2s^xnY$Y~lwDPop3?>gm7eS^Lf^2rrH$uSJnMx&W+t1Dm|H?jEUqxjx zUU2LD2h&IGdZEmGLFjqIzt`vS$D_|jbBbQ3%ipc1e<$UC-GRUR{gV4a?hAP@NWCB& zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ zARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZ rARHhZARHhZARHhZARHhZARHhZARHhZARHhZARHhZARPGrodf?1V9rMp diff --git a/sample2_2.npy b/sample2_2.npy deleted file mode 100644 index 187d061a432d6ce6548ad403cc932d13279611a8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 131200 zcmeF(c{G&q-!OiaEfGnP6ct&r6p=`uB#~%Qku51iq@<#xlt{89N?Agw?E5kiZ;XB4 zXE2tTVT?VJCDe00_qn^j=kz__b3ga}JkRgjf?2b&Aq@9iVzz|L{+N2C&s|MLLy4h+|!;ruU-&;*c4e+jat&f2eA$D7MuMSu<(-M0>X&!YnS;od2cEB$cHOqd+VPXRNT6+^uO 
z-4Hf99wDXQ18Jk1o_Ia&2aCQ<90e`?aDRulgsVLjz6NEzR5(M2o)1~Q)H{Qa|KRYe z_?1lX;dm|deETRgyl;N-d1(}itMlG}Od0|9r-55K@`qt3V`GOM_ByG@By8Le!GyTn z!!D2JM`2stz2m>iMxfZ{Zq@f>8YpeEym}>!0s$+(zmi|*0#@pUm#G20Fm>1dWr#8r zx{jF&W*wu!Rh`w!YwBolvb%IH*M1N*X;Jnu<+7>cb{+&Y>$ z1m2;`O7Yj2kf7Mewl#SKCN^KQaq5JztFH;y0^Jrzc^xhr_TS{F$T-y%A{kDe7 zv#p>y`F6>fvje*7f7P94c7w$o z7<)$Gj>4l0Nu)tIB{#XGPoRMw-ADIeB^6Zah{6pYslZ!QzIac84&S$O-M2f%0LG$U z@&%qz_&bjN>@WZO@%U%|@9n>e!@uwM_x|6%H}gaID)%88oZIFELKflce)(a? zL8oZ)ZTWua;PZ-2F6{$*;_I2a2YbPi$2^VssT&@~ypFaB>;&V;+;1YYZD2rm;<6E| z18J|-j%=^;AudL2k3#%IAV}F*)+ME&vCCYS4%g)(!pp6(HqF_{HhJy%g)?!8D!W&) zejouf0`%(=m}L-cy))MQMIE@O?-p)3)(nPD4#kIf$pGKStfcv95ciaNcdyU@U>v^1YxCj&qyz$&{(c%5pIRt=Yu5+cocotwJnx0F zmq#P>H&ekmJhh-W;NYXcUKP5Kj@4Iut{(`~WY8u0qH zb(=1G69_a+o7x$*LyEPY^MhOpkbN0vQ85jct+F^;#ps~3DT-&1(ho}sF+O_IeV{*A zFCG)q16PdmyFbL~ho_+F}0jn7C)gr|MpsFuj*c37d zo7o*_FA5DoLdLyQYM}#A(Hu9e8AXGhtqs?`FVjFS-NEm)Hx0C@ZQO1SG$^@ul>c3D zKj=$!1Y{oVh3E%eYVWr6gF@p{sP6|F*!zJ1H^$+9zJ`9)^Kqc|C#GH7&4g+DpmW?_gTQ&9cT?M41_)T3 z7UCUdfc+DgniCrWYl~QKi6;zL^Rg?1!*u{wY3A$h8lc0Y@^3|*Od5E&9Js~pLxWnr z%SDe}=uq`A!rgl%1FTd({VLC8!ahf(NS3R^;C%2T|3~f-7&u{+aDSW$7U!;BlP?+s z#=`H>ZucL@$%=tiQB2W=K@I#D!Tv}Y4%2eO*#dhNTZ z1&K39xht2e5w$$Q-)#~Q^+L@0FoSo9<>cFnp@-?9A$)44U%MJs$D}14eMg4K71>+K zUk1RPWj9~L>rq&j-Q>~MH4JQpyadaBCfNHg=cZ0GL1b-zR`plx_<56jto;XeJS+Y< zUto*HH{Vs0ZdD5C$q(_@P@AD@4cCzGcroZM`aR{@9tT8=qblpKc|od$iJy;n98^8f z(>S`h7_@J9T}fZl3=RF^lX_<b(1c7AgOG8o6{95jj}!@9QX#<@{Fu=%X8xaV6sI6Dgc6m%X2nUc!S=MRs8FY~Uy^(q^odyb5R!QFhEG|_p1TcCBfydZ!X}Dkrgw!&R za9a$6CtvZ+mBZM&=WI}hhxpXKsVYewfZf#E*LvcTks@<4WE{A1(gDQ{6}}Ou+l+u z`@Qch_GYl>2Lca`CM`4~;u7r&?wZLdU%-* zH%E_XaA4~vFD82$W6K~s4DsJ4*fIz^_fglBObo!M*{56jO$LE2P)(mm$Gm8wFy$>q zz~6YzOS)qO$eZPfn$e>WS3EfJjb{uF32Yd;SvCs3tRHTD+&>Dp%o;zvR%U`}RIY;F zD+aU`I)y?26AB8ATbakl|7E=YSL69#`SADcfA;@u|NNb4>DL-Xwb<+3Z8EGW)DJpa z=BlJ+`eAsqkMriEK8Q98W9+!v2TxA5Txccs!TPsganuMU z2n5T?30EVwR^2emZ|#V;C62l!ss}L&rFd2=Q_-qt*lB;1h6?E4GEB?ph;Twr!@Zi0 zJj|YLu#lo7{sG(FpOt7R`e1f@r&&L8zC=B)zDz;7jp5Sg&ykVs>cG9n_}h>`#`v`E zU@b}=f5dMYR*bl!wS6LL-Xh&!?PcpFa>37EZ-G`;1(bT@G^O{=pf_s3|9-Izx;9_5 zJavr>Ytj4teA~LgusFK4Otce7K66usPg}9L`&}p^)(BiLOjwVvt%bN>`H~sIA3(V1 z0DG2sIpV7GsVhopMYazOqzH0k%u4?-JZwq#lAHo&o}LYTR1w<9fzut4IS-BT&#SUOTPtW{rDD0 zRj5Qn^?|)kqOmA6K(z+d62WcyGkse@l|#u8r#RpLQokHL-8Uq%U`bPJ^+n=8r}04}z^V zVrB26!ASA5w^l|pxb#&wDT+jgfd^Axb{o>+S=LkX%8S^~4SwE;+)e{^vyHtrSl(7~ zd40TT1PPv~TBbfcLjw7lGj2bgw}RH{Tkmf=Hp6$>J1a-j+W=Vwh9qW@VeFH>>gaeU zgzTt`89LexC0>K_>#lV}L%v-@=$#&5dQ5HJi8&W%q`9A5#(=Q5PlXb`XI*7)Az#knc2fPR1by%l^3Euz^%0$*MM+abWz4ex|HY(iw$mD5X z-G|-YF4tJy55d&s7KbG&L~91T6YZdaorv}R>h*L8)Zg7SK^%m-2srJn!vyQ=zj*e# zGr{fhR)OM^OmGn#>kvFW46R8fMq|nUdVT*V{{C-o|Bm;+|6cf+bTKqL1YJYjgkvxc za_=}qAJt5N(~E}J!qE({j__|b;==OglSwi?>)XIvO{auatPpgsk2M`uibV;%jJA>F zY7{rwIP5mnj#_Rz%dS=IK_>}sYuETuk)ssX>?L_R8diQ?yXGeyX_u3mLb3QC{+ajN z)t!bK*dB_ZwIFKj8~6PD2V}6RWoctYJqow@;&3L6jG~V(#FcUM zBR0Dgu0dC6XgCp)TJ7jacN@=OOa~2ZyLBd1>{UNn6upxW`K%L}EBBd+T&Y94a((I( z#9$y=wy15FY=*p}nv1`pNMKrL@+DZJ6{NdN+sv+hghllkt6<*-WUFm%thk$koa8np zK24$_RR^(O{lSAMi{`EzH^4yZ%=;&~To`E2;pB3Q-2+JEmFAS*s(xg(La(tPuN8@| z8J)T@lZ4FG*0-sTQ~>dXd6($#VxZoC5~3NMf>e33nhA{+hl(A_%&~<5_5B3$rOQ2NJhdAgi8tUICw)(V_L1C_Zh?NR^i=Z0d%sA0g$)wCx4<{|*gIR)s-oWKhvUb}%jKLzUM0&Yk@KTY2^uv1`uc-yD@sU74*(Ne*CKrdz|2Jcb+QsL*lAWdE2nM zgv=J!0}4u1Fhf^;#4P*ZBhxct>39$5md_`)UFrj=52Ec?9@F5Z49UH?e-Q3_5rt}w z4MV;J?+Q)j|8whrKXFaa-{#fNI|=f!=DSulVfn==N!9d>F>qW_AZFY!4w?ScuJ4f( zVECfbM@fGY+PK%}*M<*6oLNb5xiNYIYxG%=^A%lmOBXbK>l#n9m 
zt=dFGej7fCE8L|a6~6*s2@Z)6|l5?=>UpjuTNayaZHr;G4(ul=a@0@OHxLR$OgYLw%``6{51@w@)>=A3i^^gQXa`ACd8xwoXm+*i6Ol!5Ab?v9!rVjvbZ;`JXa1BkHtr0@ZQJ~X>_{A5L43nJW^X3>fa zLUBoz3PXY&phejcRp(6yan`K;kJ70yRcyPitN^QLr0z&sLnub_7EOCj9PB}ENv@2M zg+Zir!0$@@;0W>~(TrG?$B}@@hfwC!7@C^6TXsTX1Vulyar^#l05S3;e!P(ELKg@N zv~E6WR*Agd@q{ePXCfuPM<0T0OOg1fc5q&E2_l{o@@a~Bi&VLfZFuvo5`x5M>E2)4 zp*r?RM`{}h45m2uT;9|R%^Ityw@JOwW;A;H>{u5Vi*krB&UOJ8=lC~u&sHD?Tk4Rl zs$sF5$&A#CY%A(jUU6mNT#e4+`cBlXkL zb1jfHu#@o1qZ8;t9vzKw-Jss9PxR|*hd9ldR2Khc2o>sS5IWQfzIsgK6P8_ID)F$g zs+kJHJ9*TSvf`P9Ez7J2Xgeo>BAL4X%dvmd7yf?S z`{KGAe-8a)o${y7|Hl)wvuSSS5(DrfVCF4#TZB= zSWLwH%n-8t_Sv`d5d#HG*#&&1(vhkukLuIPUQ}qLp*fA!Idtdf8B=jd$oAXhxnqrW zkUI3RLM5{Ys{HMlPb6qi&&l#NIldUGCSt|t4_c7$DH{trWjeBtJP`8z_b}39je4GX zcmmmeI>Az&2B|Tvue9+Hhkky*<%X|8CCTw#6VS;TxF0zaCmvw)fpEY5>pcw%QjOTEOqi zd+wuHo+lD<_C+*%8whA+CMF$g0k432FI{q4fV3{Jv#X#I7Fmvxeqpx%?5JvAVF?!hO!)JCXhyRa;>p&6Eg)CCMenqWe;{FBPocJRLvxc;yY z1w!g#@*EmF!Gm|}@W@;fm=`!ouav5X@9*^4&R%W>>o+&=il6I&qXR<+hOj!IU-%3= zrDqt*8{4H@cqaa%PW&e>|E?SVxt;RaAet+D5>`~oTE$HNBcA`n>7V=k+5cbNGN@@z zBPK&|blZN`Ro+9u2r%}sxXl3nRZ+oa(==#2_=acaOgG4P>MhIvtOo(7$41SMp8%V{xbIpH!`|;r7W7K2f5{b2e_uWUdILgMdCpA8 z=-;p7vyuefmKPQpUPi(;mE8$WV@=5W)yhWIhrKASC@|n-M`HRwu)jjHLH(KU!VYi99V=lGkgsBVl*Hb%mQN5XoqBc0hL~c-$`bm=eL( zE3Iai_Xk%(_&Vj9V{2m&!HGvPnhQJMzPw$Qr&NNxPg@XN&J`^eTmbbHC$8 zJDPz|gZx@k^1&Pq7(JcwN1`A3I_|E-;#Od`YyIU!AeKv9zj!bdOjp)sAE|9Z?jhYV ztXUL9y0SHD;t7^N$tJp*yy!*5nSt{`+3kqb=A#$nPeiFVSyPlHiqT@sypQ6Y5+snm z{d|mBB2uz^#CmA45E4cG%UQb?Sln@JjOSH5ob7PoAci)B zczq?$!!ylr_Qdxa%=Bi^C>p3Y-&_f{A3Z(=MwdW1i&f`X1Arl)OE5*}12`A5zhIB^ z2BLHQ)s-b#V6}Sox_xv76mslRN|>&Pa_)0?wm}&;y*!4Y63@{TP#dgEbkQG-_>}61oqO=qOtpWK=ipv_X1Xz4cxw^k??{EvcBho zM|J->p8p)*|F^dpZzY~?vBC2H3t5Zj9I(D=!E~|@RwswAFIvdTqu{kRK5&ydR`;Fe z>^*JK0=nyBT9YH51NF?u#~PJo$S{!U<&@Kgs&<@Ac+QU1^KS+Y#@_5l#oeoZ@TXCc z--C);uUsn1x;PVk_H93EDG+II8m1tI6{p4e;dT_gUhIL(LNy}nlRSE5S0>Wk>N_$o z4N$n`D7Eu&3w)9h%ig8k1Jhbp5ABVh!RiMgF1e-*7#O<7ediDZOyllK@!c4NPgjcd z+6)FEZR4`~%vlC3AK&mIR)Gm)yYoLCa2)~Fxx6nS>xMw3VeG!@U@M3&n6IeIjzYpe z^oc&Bt?1K7@5Q}Z{m3w>qRam101{3s*H78aK-Pt1KJ;x6*{{9ovFZgKX{Fn2-Voo5 zj4KC7hq$mh#Y2?|y4`!^$-AkgeZCpur`mt!1ybSs88t<}$R03#eUv@e4}0B?+uf7T zD?v)jJb}(!{itSKV)rW-CNeH53aO)y|3}>aall+=KK)YB1Y!jL*td&g1cfQ2I`QqN zBV)TEn{o9v6z+T@A$=ko7RyZL)GAAX<%q(@@|VTP&uQq^u3c@&@S!D(2(c3_N)prL z0*EPsO~pv}di0%5)jPiKviWNYg8!7#RwMoV`m6M$(@jI#r&l05*HgD(!VGz)~W?^}Sz6*68e{_9;zC1Oc$+`ZtQjp7bG+JqViU{(Ke z)1q%VOubc)8uY4!opQ<#>I|EqhO;CuP7&*)W|o+SO*8}B1V^>k>o$nA`DAT>q!mI9 z!+1%x6|nwnhuPA|7p1XyGsea{2Fb7G3&B{`x1*|D4DFZ2!LfU+w>W``^F+ z=jVTK6V}e;S6h$3(#DdwmRrLh=k%7GxDo5)EIRKm$NDSLCWr1k5yAR?pR6RivMPZX ztub}?-AAOgzsG8IXf-0q&#t#u=|D>IPIq6CD9G6+(2wNTk36!cT|@1ui1Yox$@aBW zL}cH4+@-Y_jr}fq{YE=qc9QoLY+}rjE4=W@haV3X>4Ln3RU>PabAlD0#z6F&l zga<(ARNHLNNjj7?b!+rzVSOs=n439fgRuA`MK8>X25$ z==Vv*QV=z%B-pDK0!iRy#C3Bba$Hk#{6|z9a{Rg@gm+6P^0Pl8*4jZrmanGRGzm?J zSUR_Ejz0q#AJlrcGq)PLj(*hnfX%(={v1{hSisilmF4}?x3dvLGD?iVSAqyBzc22w z&qdU&>sDq(B?A}fhVhY9>^#>b!MUTM34$^@3ig}kfJP0=w(UiiVfOxn-uw6n(5;N| zxR(3{{EFNIe%ld|fv_y0CB730uccVWNKjGo!Lgz!3Ke;4zdRin+l#6U{U81gYekH! z>Ft!x5~R9D*>(HrGGv}SOWd0Th!{}nB(>oY))#R3eNLbp4BAwG9Y!Uf6rk}ov8oi< z)UrAcm^1*zOZH2wWg`$vc3opTSq;($qDwAy)`9L1Zwvaz2H=%+_@#Kg0$7fB6>WAP zg0q(wr)=71pvv@oY})YwktWM-gwa9~m*$n8-mh5@UTx;6{hSB_-9C?apJR0b{%)sj zk~P5lD7K4YQv+iU*KF%GYJy>2*Ho9pCg@V|d9xsm^^<5tT0588;mP9{U*FD173Y8?N$|DUbnH6F7dGzq%uynd#! 
zB=Zt2OzZB#V--BfQi zr5Evr@@yJX=|qiEhH6=wO^9nc*?j+OCUVZVdMy3&3;bSR(QA8!499<2CG%o)85xn> zellwR_<#RAzdzf5_5c6*`Jb)(ntj8@5124OT=C&GcAoI=UUg2dBOA?%l(6qrBO&!m z`K>`ESbak2>$QTQK_tz;J?*#l5OO>ro9f@gK%rc|#?JNwNS8eEbZIyC{9d`2doQ)3 z@QCmoZy#qO-e}8fH?B1R!7VhZ`UDNa#%u(CoRhTTGik;m1sN1-GD3~Ij*~W znuZ3#8#S&mhf%I<_wU0I{~Z4bPZXMT&BjsOHTJ#AtW4xI;&o6pp&v=_`^hH6T89Lj zc=J~e7XwSmm?ii2Vo>6Cesi+66wR9bs>=*$LslPH7tU&QBBg*^gk)YaN?u1hTFlps z3>70CLf&Sfs?F2@F40O5xV4Zm49yVUxpO4q9@YnB(Qy9qF&`zQ+o|2xDMR7Q3Wk;O zg-B^V-$ezVS1@+**(uk3SiLYucl#(-KQrVcvHmdg0fB6R31NxTMIX8-S!EZ>=?zvO`=aJaowivVf4YqsD7T0Yr<3f0&xZ>P0lKEW!2I z<7&N{7!|Ao(zQ!xtoPKynEq#v&o`<-IJ$s!K(Ya9wm{clX%4}N^y7Cry(w^UYPp*e#fuO5I4GOSXhFkFGVC(v>3b~)J zN1&T9xuJSw;$OaAe?0iJCHELwaSx6|V`fM3Aqo}PIz=0g&zE7x!O#ny52eV@9Ij;d9&BN*bCNsv1_XW1#u?PA%58Or*Y+a`jcp5DJZFv3PTF5Y@PP@?UqNV)e>Q zwn17)Am1X*{up|A>Fb36{_cNt38c!cbWF)flo)*)cDx@3*cy{`d*6N^V~Uww>h>)-n)5yhisP2Ii{ zlu8nT%X~%1Hg3+f{#`7ZdcPruL$4Z%3Mm^8f9gg%EzjIs+d@TNKBbGo8Z<=h-7+lR zhUHVtaCa*ate^R0E`jxQ4ibHS;@Wn;_lT=Co#Uc@Fc=n^JPzMOgu=;-jqBO#K;FQ~ zvDb$Pi;p)dtn)4d=QCcaOL2{0ezr^Ks#r6`{XD?3U$7AhD+i1#4mAVG?{@ivRh7V1 zIdy8dHxGgyCw+}ykp*MT0TGlnUMQ8oXMWH-2hEx`t|v5V=gh~J_T9D zHG|-R$d~h2f2Q1&hhLrC3mQj{>KzUr0K>K0Wp^nJgQj-`l6w)Vwx}bNxMs*U)gLNNPDojF^)9%-V`U&hf-G7vRbppaNcb`-o zz~;54SO@L5jDo@5)vm7=hhd2j@;y3@4rWRZwon_fc?{+q>4!UsFnezQLD$%9#Ot-@ zB9=@e3fH*t7FBFM$NJ$aUf*8C!Y@;9f~cq}p3g{SH4OqlJk zsh%?%yHV&jtHbxHEy&%n((LW?ViaV!$5>b7Js9kt&FZskg{>0(R5fgFTH%TPN_j3O z@NnMC2q_r`3)@41?>)vqw|i3N*t$uqkH;=?+jA1iryV^T!pC8eTISRB!g3azBT54(dg`av2Gu?kx3EI*{EucNz-8cC0&mc= zi5Jydx(zfJTU@oRhaf`NdWDhDAQb*yLp` zDBz09<15A!$aDMI86tTSRq3nG-YTC!XPu6oKcO;;tj0wS?bn#W7qMN|^L(4_G3%L+9E-D{)CKmlp$($b=qdDBf4qj!sbsd#*xY9V$1r&JVt>c?rs`&F zou8`0$^DhB9yBg=9rG}40Hs=W@Ap`Jaky}g?AV27a6FXz%=JqvgwIu?T52mO`Hf>` z6cQ+UojUjMYCq)aWHsF<41mHn_uAy0ObD@h-rtD z31ExJ9Vh9JfyI1In2q8%a0zooWnyvjpFLiUL)P-$PZMzM>UPQ^HkX}RCTGTbq6(zn z@`c#fWgwDY^24h?u|978b~QO|GLoO>zE2Op@_R3jvM+nFx?g*ik-{KW_uKN~;5Mud zz!=p!D0;gO$?vwBQTln2KMJsL04ZvCMR+8r@h}f3XclK7JE1 zJxzstpI=E{gdqsZd?~_zcm!P3Gb@8#{y85Ad{RIfcbNd8%Ny+swvK`C!in3DxJJQ( zQ+6YvaTvaQubUoi>c`$oA30Yk0B6C+-~7`nk@U09j|**_P*8(f3#TKAKYK$U92#UG9?+^&2=vAjcG%i*>}CK(a*BGh(OrJ>Y| zwsVULSf0B<->fRy9eFFaiB(!)eT5SxBKD`!;~@+AFe_0MWS zai65eSD(*C-sS}khP{E5~l0CIKaHyvk_D(c^fBD)u5ZR?)W9wH;ZEblG#SpCoNO0|ASY&y~ix_8jE zwGh_1Sbwvkc0tV0mG?h>4Z?L@#k2~iVc4=p{%7u?Vc^>5Yupz#3~5n&L(@|*n{%Aw zx>t-qs?o{4XJ{i3HurU>>)J73x9AG~@OvCcLbX{Je~&=@{mUFI4|{>De%9vm!Cc5P zp`J-tu0f6!TRA>9Q&6(VQ~#nzbkw%P<~G5Sfy(ZX#PT_1^Z_j zQd)URFY$IaA`IG#XuPULRQ|VXI~(HQYv6F0awG}vx!E3N!LBn|>eKm^hRus=SO_lm zaOQz?fXCod(RP&Ep-ld2K7hiQD=TE0N07U1Vx5}P1j@bTnc{YH5(#oqt51)QBVGw_ zlEVBj^6G4$s=dOzaF2Jm;nIPKTO5?1dfn~YtvCfowE34U(;=!CX{L^tt zK6tZkOKcBmfR+u`EmW+I7ca;-Y`%vK12v>Ruxp2^SKKem4zzjy>Q0Y|MAku=BC83N{}==n4o?4jALu}cP3!m`+W{;IFXVXI zz5t6-qy43=#i)3_hw$v8jTNJSK*}J5#eB4dQZCzyws#bNKI53KGImN%2WAQI7@!H>c$0YJh>tN+}8b`*~&H3iG!^pk*K|>C< z?g;8wW4EHS9h<{Bv)b&^OXO&9_2Hly7PoeD4iS%o!BcyeOV>&)U!V2ZCsT{nB|k*% z7@KZGS|!Pr4;-6NoYGHu310#-Y`vygSWyBJH)ytR{IPjC&#t-(u_~Aq5_hVJ@j$lu zET=zJ=OYb2neL&}`G`wWID|}homxEN+Jn=eyvI%JkZqw?MYeS(&((g)1 zV3Fq@lDIDrP|)4{))Y1Zgg2CGbC{GuxTVpH_-lm_eRSjZ8#^kXAZe>z%0cWrthJj} zAf*{>t%P2kKZ4~y)yGcdKEnF`uGCC@!)|zI4i-x@{gCFuBC&B0%lBNpf2Qr}flwdL zjTN`C`Sq`T)hG92^E~?&fk4LUo+ns+ldn`m{jVFI@3PB)l4CTm+Xl<$TX{K}XNo8$T4jobfIzb{LF7IC?d0U16S$CVYZ z_?Ha#GYp{vw|Z*Ou_0{TKgpYQxU?0hk{4DC#%F^0XitSxQ7O_eD0zPz%O}IroTg$N zyRbP=QPIIGeQ28p|Di3v`Vs%T3xNWwsVJ4cq+K=IhYYP{OpE=yk-&pTVjef!5f^*P zOwZ+NWH?)_Y<}b;((SvqVd+o-h+LRV?e=VkCE3n--*eddd7<*iJ2C9K+>Codr2W|S z-Yu+-)=cs#U{~xsiebe20Oh_+!N5YfK2&5Wnx#F#t5jm@2*_G#IK7$sQP@ 
zg2%GP`Nkb>KsDcLlXdkNxnGTXG}8J}SYd2DxnuyDvNt;!r8Cg%k2h6s zQW&V_OPUWOZvau&Tia+%^rIkW|ME;-?0TQA0b9Z}OA)dEL6FD&B1j&+@l0F|J1&x5 zr}c$0fvhPpI$BEx!+4R08}_CkmXKuiZ6es*?gv?}G2ub9vz%`q+B1S$zP_ha630;1 zLv|L^xiJ((xalm{J%R@Gnd8EDvHv5`h(9c-4>@+fx^R4{4%s?>(>ojb6=qlIRn3OB z0qJ>+C+U0=XvtcCS!Q7G%UqsO#YiL4uo!qJcOAYNS=RTn=ZVyoELA}@& zSbl6hkYsP1)C;=zzRyQJ!1DR1b^hmwiQwK5&_QlWL@ZTJw;P?4k@><-xnpNyfN+0~ zFC_}AZ{KogGj1w}@Y~OPUSz&T(vV0n?Jh+Rid9 zz_|B_Ka121t14R)zHTSM`VVeYw+m;LQJb z*8P9{2zl|uk9~RsV(+EZO1>GxKA(VePufC|hIT$1lI%cL=eEq8SfrpmXC6EY^X^CCM*AZp&tdC-z8rIp60A=qKQXws zxd)BC30(NhMM8wcmLogcYS5Vd?3`#GHosEk{y1U?TR%NKf3Fn#yorS5)?s})CUg(a zm(%*j!Rb_9Us>EJ=;f%znhXA8y>v_GyCZM)1jxu=m*~d&DA7OJgEZ*ab=+MFDdH~~ zaQ2>;u|p&c98LQugE95MLMu3%Ao>A$Z9A6T8jH=D49d=oEKra+d=R}5s$Wc>fvxLL|O4WOID(^hj&wwe>vlqLNQs$mrBAXh~*rpHl?CPON_<-5t z*`|81Og_vNiOrWsJ520rIMj|^f5S#(?JNbuF0$*6H#sQi^5lAh?QMwR_}-FDrXNwt z_8J{Oi_IBr?tC5=z(7=SiuiHN9g^HeyY)!}$os8qjPgC~I-g@-OP@)#A(GZF*?mK) z$c=AdJckFHGqs4ay_eYvTx3qCBbduU>?OB(w-L}Ct*(yT?GAg?SSxg6X2Nz@&@&xvn) z{5uw$H_vVMup&ZMP>87>OFOI@T9(!5#^zsadlNpVmBB3GX^pkaXK;^mIVGnU1GDd( zHYMyY0)b1a9HzuZP&)a|-RXNKq|ybxf55g>Q@64t}KTKq;4>cbP83t-Uq4f1si&|Q?cthhw8k8X^>H4^cz{{^SPJA*i@t zI(`>B->h1QYCqdQ1lsSs1adFv&mgQ0 zIDSOtDHCD_#*PcJ{^R)l6Mz42Z_^e^lfLYsgL3?aJ(eq_A!g!Zx+1sSNsMbpl-AyGSJ2ZuoH{JG}MT4Hbv2(YYr z%6L}@j{BIqXx7Cr<-Kai5K3T*mJyZaS`PKRw_eTJVEyKbWa&BgdaxC`-oJ6G9Vs zx`}BD=dtUHWv}V?j!RL|@a{(v-M^@){6UuvsT#{yb{Mf}J;&BH)uzXet*AqB^V}CI z)#Jf&>3Qk14R!EoPAvOWeG6!bEV^Ci=>(_sDI0?Bk|D*Eo@YuSfs#Qtqfwv>S|m*5 znd>O9I{!q)ye=7dKm3q!H7Ws7{gwO#9&BDuJ#hQgJuQe>c{z-=s~uI1j)@(NX+^W- zJGYJnRAKe#;IEr%91u&ekf0Sd=iY3xakCD#uASsk*z_g=JHFSgV!3D14AOO%f6w>j zLj8d!Q!IvV*c@B)svnghz%`S$QS2K)}3sDZ#TBzW6%PTZ$fp-b-Tc^+9xd)TxHzGmc z)Rtd+MY0gpW>bXWrVQkMlqt#yaiA*tGn&ZK2tnm-@@>vNz^k=MC0VTxGTokTF5&Ni z`v*>hKJ)GYd!Lzd>#6Sl!P}chQ{BCP;}j*4h!jPGImw)qwvr;GP*O@tGNq`Hp(s=+ znUXn~OXevd^Qj*{PJZqij{;lqHuh0E`e(U-5&tC80oa3C{*WTB@ zuGawa{mW$+9`^y~mF;}LnaN=K_*2C9D6IP+f7st45Z8yCTUA?hFoAmQzD=&nQ?Q@x zt8w7UB(y2&howeLf~kW0fZo{&nE!p;I5L6?4!qx6l`wDl*wD!7bay;o9Pd)$=O2ZZ z(t*p#zThjMLCoJA)EZMz*-nBFpsWY#R# zTbJNGdiV0jlvFBG4E$ke6g!N3lWVnE|!CPiuw=sP|OGR(vsp*se#cm%1$hEcwf6?g7I=_BP^8)iAl*fgK-sM?9{OY zkh}LZILf0IHPJlx@E8%1&uD0m)!6~mKR-P=Su}{2xW7x?-qnxhKD)2q@Us)SGlj{f znl&K*jn{YUUCl%Bswc-T&%}c{kIt_|i5dvpT^XS6OoX~#T`z{tA()Xp$J?5Kb;Crg z-|oe@FaP&Bac+LCphA{svd~NchI2a$WL*kTa6SLIb9-x0a7wrLaw@JTjD?FSd*q?; zrS=<&JMv&T)cVXNzfOp15*R)H3G>lu5k4K8MqqwFtM2w}towDAcV*j&KFDr9zvPgF zagB=MZucE>@V-gXiFMrvAnv{F-J1LfuUlDCjB0HlFJ-dMWQ+t1couMjaiEBWdbZpd zgPm!-doLiH_V=2>o&y8G*)(}#S2!8WFLHYJe4&Ec841hL;$h%i;!Qd| z(gTtw*G>tgw1J)I{NkFOtw2pl%i-JD3G%_JXNSLFT^|q64xg+2kSL{8u_X*0wk^L!NTr;Elbe@~#;s7=JqWd-uYm^1H%1g??Zi%ntNS>4vCA-ZtUkJ_r(# zxb0&z1ZjQGB%R!bVUp$Gkx1tupqgBxL`71eRBd%A6U#Upzd8QvDaNth3Ea13qx&Ry zH{1$48jX1%ecX@6tZ7j4TbZKm4>k1fn1NAUtG1fP8DNAUw_iJ^A-iGych*H3 z)b-RCFrgWER$%^)_;eaJ-M{ME-9iINnsYb@#_bl@uj+UFNCV-*t&F~wQ_z0HFOVr} z3gUQ=3YXeU|8HK;KgA{eyxp$q>i^TP|KA?J5BF;&9U=kRGr7Z0 z4eNeozpwf(gK;v&+gW8i31CcGyjyxC4)YuW4U+CxqhRNR{_rD2v=p`Gp8L;1^!WS> z){K-PBtuHAE}3CQ#HxPS^72tmpbBotWQsbpMgz1;^8r{4B%tIa_|LtJH5dn+&>kK+Et2amZA&YkJW$Mcec z!n^C?AQZ7`(yp}*UYDQQ?L33)OUzGpon;usI!6)aM{&KjZzX4!&R{nvbzV$ZVX6m> z1=eSsb|sjnt{?euD?o}C%j+3p19aTGs;%GL15D+^%097F@Zna~Ax`1G$nk)K8AnGT z)Y#n5Z>AsOHCzcj+q*z9_))2!cqdr#Ir%e*V4kZ<+pcwfWZ)+4uTQ$r59U<@CgvDl zukli&^7WT`FcqxO<<4#fxAws=GmBloD-rypH-Ze~{hH>?h7`Eo+9#VWK?0T8LPZhT zZXl5gv!nPs;NbOv(@*bog2$y3BDd0dfsuGrIl!(T&X_O?Z|Nd|^?v=*1IKWFdj7DK zcmFW7mG|21D93t+?6OPmUkm|p!{OWyb5vaaV~Sz&9R}n1&!-Kx4Z-%EOWsl5!%#m- zihg=>1gts?JhE|qP`jyQ*yY*?EUU9?e4ZYM0A0Rrzv(GBcG!q}#{n8_TDwhPKAi^J 
zD{g&sU7Ussht*x%Ud+PHbNdi49z66A6yUG(BN7mTermF84#+b>}GVH!S$Tk^EH?L+8_M+{`AlJ zbNutZKac76|9PH%T)KnFlcYKb<6nPe?wIL=nTr7|&a(s%-*H7@%&!<&N2fJ@Rc2uP z2FXo6q#32j6iYXD_8i5(CZlC?L$GUoEfMp{1>HEnkP(u6PW$d4VJDh@M4LdRJ zN5aYOsOSXvcrL#FaTKqcLG^j9B6$4?J4`$NBpSKYnB6#jqy_23en?5@>_r{F+PcfQ zF)sClbe_Xz3aZbl60jB`BdP2wom|GfXlI^ngHjge%_?g2g?MBjK3?X>dvj}GgS`Lx zyPpQYBa}F@ZyWAUR+HOpVkY3E15fGepJTwN^WgPntTX2t*rNAPf&vi%jRM2lh@e+x z^R-2>8P~^@dEFx`z`Q%qmI$RlIIqaCo3jRLBV+1)p03 zd_$1cEad3hzkRke0>=q8x0kTqsmGw#2JdGV@g;)337pnj?8qoXC> zCnPULe-$5w*|B_X_piesSNC8GKh_n!v~KL$HU>%)TUqyJjzRwxu|kV-DrlLe^2IIVI)GN`nlX*20eNo&;1?vwvi`weUe;tAXc8wp*<|B~0b8h51*6Rzu&FvIxJPvEzf(Jp zoLZgba-ez&)bErrTbTTX+oQ*sO9k6Xu}P zM@mo|<}rJ<>|1s3b2R4VZ9>Zu&1k`KQPA767X@e5j4RfV(cGo5dbXfp6h0&qaceWy zv4}nqJ#`P)w>L|7Xs1(A;80o2{)QoBF8iC$=@|(bGpUHq#}bk8tNaj`H(2L|zd?5Q zw#VQj|7>-aQw^vE-kE!}xeth|96yBIAA;w0YD3xFW1z3zZ2pRC66Sq0XD;F2k^bD$ zagu)?=Oj1>&NS2D;JYZMn-&9LoO1Kd{M|}mNZkMJ;Xo1M6FoL$fcbC>QER`u#Pp+* z>9ee5)nqim^|Q_43h85MV}2&?6%g*9o7n zJ>+L=hcvAe3uytY9bH=Mi|hP1BHMPQ0qgC)CtP+l zAk|*~VxG_q`-k3KyMpz`u5TK()xJLpv#K%|PD+f!%(qVI29;4@yuVoc<}S|ncSL0F z!oOdzrpLv_5X|%B?aZCVeOJ5u#eIh@$DqR3UUT;II9RRx*kG7G22x`Oowo|(^H~W$ zczh<{%Bv8s)82orxAb@;dn~`J)W>-79p;w5alFquBs8*j(m)?6T@|6xpcx)T=3sq9 zQ@c(MGS&gKPe01#+w^z6SNd~Fe_#4YU%&nw>DT{hq~HHf*Z)t?>rIs}wj7^;P4Y)3 z*Q*YJfnw}24?;T_HwaMIHReE*j^*oXeZ^?*I7x%&GR}Xq?-O1=>_iinvfB90l1pcb8kv9k|H(86_Up1n5}$z0riAj4t^IY^Y({FK5x?(KeyU#hYBIhOS0Wcs{!axvcwLyzqh>4& z!xrLNr^MV*P?5`veTLT~>WRl05?M3|a(upOp=T1~InOQr^22=})ja(zINy`c^>FgM zNCJP&!zT>#8^A6t=Y*J421>2lF0-(=4cUbIQzBD(k)gCczv&a)zm$xz?fFmK|K9iX zyB(ev%p8t++jk%xsZ0Tzlu{(lD6g{lUk0h1zEKA)c*0iPQM>^-b_9pv(1T+=rI8rdtaw%%_Csq0<0xl*cuvbA+YZCx=` zC`IXASW^gwhd+F048%D1Y*qVWwl=uvxAVJod_OoTO|=}s`qBwylE)dMW}tneVdA0m zSujhxQFecB61$u-SfdRTuFX-cQk^2Q)4r_`j& zfedcP)HIy~iD1CssIG3&0mfwu8ycxyV0_}Ssk-w3kc>l;Iin~b^*U%qlo7Av4q3s2 zTY8}VncYCyAjUn#o;oPr*$+dSC)p>T3;~&%wxt=@DZgAhwfBx95mYi>D2qnoIwfiG zH@9XB5HGwBmL6>ZmGg@hXI^!}y0RCZD;0zAaAl3{H*_o9#`S~lnwCd41gu{w zvzALuuoq|_j;YvV9ZfTTqt^>?mZ|JL~g1E^?;q3P#AUoJY2sBnas&dUhD&-|^eckKu>_@#eZu4NT%e%7@(j zC948awri@-I&>hf!jkvHjDyJhip)E2HeBb|ewlUkDit|wWpH}&Yy_Dcw_h|589~9f ziZ?Vqr=mNl9+93_LrBL}Q*-6=AZi+NSC-t-g>|fUsyy$iL|(pM*{n||0^gnA?s_L1 z!8LD~bon3&l;CF8?V(Yu>p`{^$)5s?SH>S$UQWS_r@NZFF@93r72aK0GX>-wvY+CF zaNkmP>($e^p7TOX;i_K63|JZ!4}KlSc<0o5%P-bckShP_8<2|iFO{peZ5&BO;^Tch zYIoZZ-^F_(FGG3}t<|EIRg{EGvo21VhGBgFK}JCi$$rFP^xHSN5%cXEx%NHVP>Cv@ zv9;xJr@}Um93JL4tOF6m?|5lu7*2*#URr&ffuMn2pX|po5WAVr_VatJGrSE(f}}^_ z$ISo%75rSUcJ(p}s3!n3hjn>f66UYAzx1rSj{ELH)U&a_VxWmO%P|y{1FWIvBE}A5 zd_Ey7O-Y~|V%MD&8P*&I9raa~=Jo%{KYJg{noL0`mFG?X0q40FL{Dxm!~5Rcn--$> zL-6iCt=H6m3P)yn=ZYVV1OINPxs}J$@OAodqgyT3du#vg#1=XX{EEiwgyu=GY07V3 zvH}TaY0lz<9YesoYIn_G|*kzo)lDCxtj`qz@`iChH-lkopqTLH~A2LgOG2hb8!mzNd595Xoz1S1E)C|E2 zUHNy4I-vh;TyyxvKF|-|Xl%5W0?9qcls85ULtX<<-tucIQfigmw)UWk$=VA^n-3v_{-4v=vrweqpyoZdc37$9Z}$MWeuR8WP?qaW1)H9<8=e>ZSo2R1RIb^L+U4`vCfUOCRZRM?a?L z2RqHKzSZrTfwCq4m<_jQp!B(andgVU;{VV4>Bsc(&-bMt|J%`?-|uF&F~vAwrLtbL?fwH23X%O{$%8` z()W<4Z9Rqmjq zW@{Hj&SoEZiQ1uxQNQfzGR8ZZQZ<9k`+#4}&pgQx`*;N0TQTy%`IG9)!yn{GKuXE6 zUs&4(ElwO~*AfZfMypBhpX`9{-_qLXM;|C`8?9Wyx(oa~qI+m=6L2%$XOZ{{`(QM1 znuv1Z{mtr(<(y=UAM+?ONzz<3f$b@GRcPUDn9D?%W0`j)vvAZyUjI zDUgL@GXY5ITdE_t`@#A9#}~D-RCua8Mq}|F0q6CqR!SUH;CRv1ab|f4VqE9+Qkn)p zfA3OKNH`IY(NfE7E)fa?jpF-$4gmMs9wQ-)vu3i;dlrrR`d4Zc)eJLnozL}!%=^1o zf7{1@O+r2m*aa&1Q{Uq{zf?T&Bd&AM6&n0Z*7_W5I0O7<7lwu%W+CqF zn=7ipv!Gig*0uC$2Cru=mgZQWg8sSxr{kY-|8L*tpI$dz@tN%n_O%IhPDqztn1G;1 z+i(1Oj_bP)s;NpjpP

Gq}8e&_AE&YUj5PvYz)OnANFR*3OdH~us0W&-@H~(j^v}oV}rD4s6rYo ztx7xUs}Ui%)xI?LGvW<#c1hh55Ak+^-1}cvKzx6boZY4>n0run!gI0?Rw5&p+1OUF zsmqtM(d>l{7Dsj*5Tw9#U!3{THG^7=qds(mKnI5jc4`*n0fw zFg%g*5OsVr1eB)@6J=Hu$T8EXV|?xb^LD}mHBvi}YNpiJW)q>r!AGNcsu}h;sR~G? zH-me3km4t)R_OU|X(KV#1;Jq) z?hWwzUv4#=gtY6y@2l&^A+a)-)Jz?Rg~T$mn=+F?nbOYto;wK*E14_Pj$)6Kli&{b zoP>B^?T#X!e;iqk|NVlBlQ$A4!Qy3q%pdIYZCTl7Ua3QdEB0?TK2s*cRko2S%3d-w zb*}y(x19_!p9^Tp*!Y9*^yFMV87LlI9d08Om~FlMQF)*bw3ZyVw!W$dkuo<`@q4de zQCLdg_U=-omzup0{-hqMJTmnA=|)6T#q)X+s%>b;+6Cx=R>W?SE6JoaqX?=a+w;@4 zXwJWMm5S*{6eFfO`jwUphJgV>RZ(?74gA3I*|8J6`H86;_6-7yHj9Ij1or)=hLj)U z>4rGt}{ep44@KGHf;@SnJY&zMNR|i_ZrX+`z;FYw|dZoDqNHc8$|innnj~s zMvy@QcgXD(RMe?>$1gl)7NOo0^rve`V@p5>meP4yme>gSI zW*1>UNAE^W71J2-G`@B1ICnYnf5S>X&w+WRzhbjbY6Bw3uDZE-LoFiw5WDAkvlxY% z1y^egWdO(Ar7bd>Tj_PRpK) zO@+Bjn~h@5rbDM-U&t*+BAQ$5TbyanLA?DJwYR(pLTN7zeGRP3!Iox!qGJZD^8|cN z{qnjEyi@fBevyb!b3My$X;T+O+}P=;gVkYJNU4P%-AS-l=voXHdp9^QJZ~VJ!t##2 z>jVw^DNwOudr_h35EP&NFmOom zHJ0zVDX_MkW5MDteJ3@#7|X|hpK^K^gkA6BLXMWR#h?g-hnib+CFNFZq4jLxHOWKrqn?J(Vjvf9ivKj=36>pGSeIGTz4 z*^QMf_m=~Ef4=i}t6nhN4|XbZBQUUERMf180p9w1YN}g?q3MbIj}uQQKrj6MN^>(A zv`LZznkr=AaPLce{+t46cNuYA`w*;dH+!sLLWLy}%kb(@Dn!?R;(T_D1}ZeZkXtDv z2#r$MtzcIGxfS2lvi4RZ>nFOgOzUn`X#Oy>bUy{j3Tk~$jvhiwbEccsN`}zN{a5y} zq+oGq!sng3sTaw#JS%&ss@_B<*Eg7p`Ir)lR*E0e9jma+Cx!*(Rq+P$8eGX*R zlgxHgDuEH<7|O=k4u^EQ?^JSjfmL2x@`knDQ2XFk+s;FM@cCuU$!Xzk5YT=~5l_Y( z$$NG4p?Dkkb_^cn>uQ7~cpH_TM*!m>Mv=648Th{_)fmjo1LpbfJoRhBke-|Hitgs8 z$jLiZ#WN`fd>Q>E9qL%#wU_DjB%~f9oGaMHg0Q;o$!(mDPgRqD2`1CwCpV14>)97t%DH1;DA)DOQ*0Fc>2LI2?H`50vki=0=1j=? zynTUmiV3nUROiDM3|Q468TYR`WaiY5pyOCRmsRaS4tzfb((N&nmD*zvb3}uCSd;c& zb;)Hvmc97hFDUgDk~h~V9`Xoj=hVC~p{e!j(j{~(jF@V`f;^_pRgwseTohyBOybV5wp9ld}S zKk&4=`_OAfdg59f1o&D650N$!Z%F2@&(oNAgmU-loVn!=)jl7j?z^ zA{!amo+B)UeQZUAoSpGb(=U;wda2Nm77^?s-F@6&_d)zzV7RX*893u^J!>o*0K&ed zj}AU4Tb^mu&>>Y3X0XU?92Dr@_vKk_jA`CmAb7h_KX5G7J$3e#G{NJ_6 z);4$j>W4(6GL^3YS?;Atir{9 zY@LH3r!4sdtCKoWgU`r&)&g&s#gvm^BM1lH*e>1Q32|)X@9zu9z-7(;J@0J;7WYTE zx=D6WsA4QId<2`%bCl2jx&TP8AVMX*GYv`4@#OD3l#ix-osT$cwW5-@LeJPeNXV>G zHEQ3*eiW0XDEQ=VH}e14-gd;X9#KqR>;Ikqyhxesp4pSaTAOoA>V!KuW#3o7#xlEPMX zfggPz?{>cPP3bI z;2$(-UF^ZGYwr6Gyhx*iwCFij-K0@SGWn*fqB0Kg5%xwuT*pDy%`{qHXAC0mT(o_S z%>(u_l2R-s#(~jrdGJ%|7?>qSu`gCJK%XN#XhxX|#}{RJ`EHUyO;zbe<9stvj;uLT z+?fN`E><}|)JqY|Cb8xyCoE6>oxEZM$qOrgJ|(| z^2vAXgUIvvarx_fmDASc(eHZ9jaqE6!T!nYQmfo1XAxSU`c zuq(KWod4Dhd2$D;#O27KPw$U-gRN&-K8sgU-cJFK@cO|7y+M#D+hm~5G6McQ#0g~| zI_&VL>?WL{L)D3g@0xxLf&E5RQ$CWWAyniU^2b`9gN~N=Mu1M>Z+`hb zi~<8X$~?+Q$UW>?f0R`X3Kh7X_;Vo&3>W53X6GgWug@f-_*@1eJ~~-nys{2`8ryb% z=Zh9(nfcmJ=XEn8-r4UIe5C^M+V{&ob9)HJUD>Nb-Zg?snBQSLT05{vmuGT#;CI|6gV{2*@isL05iYUj(eV0f|-P$$rlC@*o0VI3P_BI6tf=~Ydi{o?blh|NcAeX%~o|?uA>#q&k4#c zZ0-alDR)w19uW%pcQy#GZiSkOSr2*RPOvSRk+(4KfycMwlT6rq;a-+a_ixE=Sgcw} z|De-5Um@a!?{;6t$T#XJ-cE2WY7xZvbyjc!~ng+f`Vg*33aPbM}{RFu_ z^9}7-eTCu}SWfeV%Jy?JYr^%T~KaY%n3CAfu|6w|Kv3hu?Bw_W* z{on4cbj2xwp6xJ8_WyOola*bfB_<0madPZusq=3w{IYD zz;kaKeHelDR>c-(k$4i_Fz--5dw+?pMD6smg;j4p9 zi1=&pp7vZ9+H@f0!!H3cs`=KwEsipXgbwXxbJQC~S!PD8A+f_KCLkv)IDZIf9F!XU zZAC%UgXNDUxAda;v^T|HM2X1LdatNg|2vdq_(7Vzvl_-b6V6KQ?t#3c*Hm|}>4i<& zt-&RNy}+M&?xRU*FC03@C2xYQM}#J=u;lkbYrS|O<$P8@$!TANt zvjR-aYR$0u!Eogcc#e7YWxQxd5(Q$;^L_pFx(EcmY6lL5G@=-?#d_Yke#D$PsXRc z9>?-}tEG3;`;~~OAU-O>s6@t__HC3lAt1_?de{5A$`IkI{9x)-8{+*fk;Er3h?4vb z#T(Ys5pU|3NN>k6l;N=@dObfA=}lfy-NinNR<^3i*g1_Lk?NcFYqs_wck8hfd7B1g z&DU_Oo?eQWU|--blYsPg&wOy>dJl8o8aKP@J_f>$cW2tT0Ocl|FPtP*AkUyJ*+$ok zkoAg6;}1{X0`*eF*qPx{2vjdUc{ZpN%xVZh8?5tyC3VT2k@OmI7`RP$w&o)iugm8S zmSiD+nIjqDf?2TRAg6AESpzHzJt+-*MFfuGshV5O70?jleZ;sf0X*rtk~Lmoh%nA` 
zE-c^^a(`&%9wu6kWHqOK%SSp8i$&bY3yJ+mW3-SI^puRu?pNrqjq5|Q+ymVA_qL;$ z%{SEDy{eFt=SH6Q9@x6)EA7g6X5q*)kNDz4PZk7jyD#%)Yb`9rzr4ax-3EX7T&9($ zNWffMpJn)(3=UPfkv5?NP-fJhzHLJfyo3zvr&u4zOw{hp_FLM38RGbO=ZYpUYv-ZX zG*^QNTc>lT=SMILwbA#5I3ho^R-zL; zGcHauHM@Z=#!G1iyN;=UY49-lJ_3g>jD$vtG2!p|_HtatzFV|CczqI@RM~>J28;tS zF!$`u4kl=w);(DmJ`U_R-YVzSje+vBlzKKH2HaWsQb;?J4!oQ}j{X-IaQEuK8-3GJ z2vOnXKDTKM!g+dp1JxPOCYtarWt;)ox8L*ETw{XCP8QKFUW_6p_nJD5`_~R(cA{ROI=DmXqZkt%)gg{46%&iFAGKxkN?ziWhQDeHuYJtkL1YO+!c`qQK$;R#(&V@){`_??y2~ z_f)tZ)gx(LMW3dJwym;qduMKRjT3k?{$f_>Wf_YDZjH7MZ}V)A(bk z;lIB>U7M5~Vb2NJYnE{2$}c9|o_%1tvVem99&G2Hrd7aP?nGZxH399=Sa@ypy&Lg9 z_qDjCF@$smvaWS>&`{~fdUwflqsa8-pblrvUr+a8 zr9d)@_utFHcA*J{24#z#FfIZc=XV_H9c|DV>Rk8Y?HdR@Z$U4ATZg#HBBn$}+mUAv zN6Ou(E=0{Nn67JXM|ye^gw~v|XwLES53920NV;W@(IqU;vRp_IIm^)uq15Kx%8xQ3 z)UoEcY=3R3P0B`u4QF(kMb|0h$Yy&q)e(sIljvH9VX_>C`phGECrhCdzHy5W0?q!%8MgJABcS1?*2j5#hOl}^Qpe2E;w}{@lO?49zgthh~t593usJv%G}sl56}9# z>F2qK(6al&erNq&*yp=%P5Hq=Fe!PN%@a)jPrUyf_sezvJ$6Y)@~}Q1amp=Y9tS3< zTb|dkC?13LWrTF&w@lc;@8+6eH4fx8x#4ZPOfc**Tp^Rs1TOg#$J0hO-Fm0a@BP9jUlZWg9~Ts z#!#HVkcffxDB@e)F=MlWhIr*?-=|S1$mwyl^l#yI#M^VmroSZt38|)3UhZfC2LI;Y z?9*7?X_eKExzDYjaq+|XZ3!R2(^Nit%dJ)CS z(6bn;c%EX4Y$1w%skU5Z2uJ1$&R@YRAm_3N)Vrt_pd873dm{+&n zx))gq(r>y3BgTo4v`3_KmwzEN%nD@RNz6bRjFgR`XRtmeN4Hvy?pQ>qw7K-F{S(Yx zmpzr7nu~H@)VnI!5fSOv3!4u!*!S+TEzUx<3q@=^OwUek(o#`x^v97g}jhux;$v zFH?YCfN0A0!}dxQL>vv8w7OacCzHvJhu&cQiwCwxdIxkv z!y?5=_+AGntx%4PF=+;kE1Z$}YR!=0rus!9v;)#>qlWGGkg@*HtDBF`}(zf{SO`JIrQ!1IN0d8UtAYA3U@{9daWl$fy&zSLGSK3 zaOgPb)?j@F6vElJ#}db3)I)cWf#vbO5u;^R^ifbbVI9bi)d%Apr3EV3C*bq3S0yv& znb6ZFrGBsHpLNG&cb4Dx-qGgI%>^{5yQ#MOj(Ruv|LoIbuwlRFucx=1dh;2@KYrW! z{8k&1eY7ppoY0S^x_|e?sSF{JRtY*kD-|iZuRE=Cfrd6su|G9^K|^c>$(sh!si
l0S+gre0ewAo0wFMM9F0Vd!g$SEwBMw<%d8tjg|G`!L z*m}{Q6-w?y%@C7cp{j7D4m=%R3@%uff_u#L6jucSP2I8G{X?!7iBvI^0=5jJ+;y8D za>Qc!zRvJ(hp|y)uHUwFOL+_>5nl56CNogB9dr1W8Wm+cQ~w%ro`RA%$Ng4+Y)8xw zM-oPNeL8_Lh$GzeB$ zJnI*3*m#eQD3>i;BB*1iZRcgA_?C%&%y4|JWsD-RVA)H~Kd30{_V?X6b6CBxseaFu z3as7|v_;kaQaUo6O_#`jo&~&F?Uqxmz9{0>Cu4<~TC~{Q5>9VwLtMu%uR1e;)h9+{ z6<@fuqABrjMb@-RWLRiiG@kMTvd9Uq6K{Nlh&UpT1WzUStGS@OEgryO^KcjU80KT$ z&_Y;C9unCpx?Sk0C+y%`&Gvx30t}9&Uaq}a2OI;+ya5+p0tb)eMVXF5MCjW1mGfLF zDx7^M{C(#~#O$^A=YRPIn?LyF9WTg0%&=cm12UD!V87My{t+y1>RohA9I8ZSv9x%B z?+J)`_3Rym`2vvsHJvznAqaREIC5X;KF1t+TPr-51S&=+vZFyd)M*%o-+Y&Pf%V^=-LBX;qr?E^TPMp;hzx_B_NjHvgdQk4Y9#5A*8~bc z-MFTv0%H1~ocjE<1Uwbu59Fs}{UN%K#o3u3z@O8+(C;eNr>1>6a`Ji;5Nex8zV~4L zW-~g!Of|ZIza%^$@mM#^G+bi0cOZg76JgE%h#HuB$P5fTSp!j*tnWrwW7q%doafnl z`$5#IL}YE%2vpwS;2jV6?|5IH2Q0_o^6~#PF30KeD553goKZ3f3Zyt&4Qw4x=3c!e z>C8Xzz5F;sr==Hqtdk(4G@4j_VDdlxQOo^E_i~+do;1*4mU~YbOE4L1#UHtnMw`Km z%X9nJ(=I4Y_d&f_YYhslC~9PX-i281i9M~u>R@v$0;lD#3?n7uJc3Ft7473TanVSk zp{bFOnLUPegJ~^oL1QTs~c(}A4|Qz)CKNhdg<}z-LPN!+TMfM`arUO z-9uh6GTi;Mm{65F2>S-!S(W+*P;x%?K(Ss}^iN zMM0#_qOs@c9f(le#PcAz0CAj8Tz4p?4h+;I88WA;fWT_=?3!sR_!rI6n<|nJ^G%eA zWK22IpcxoOK__C#TPLfjKZpznzSsPwsc7cfPN~U*SSzV5J?r^;2FjNa&Z#t}BHwZ7 zq-#@9{Cu{l>9tm57W#9NseTTeemPahjS|5{W6PGk8;CHcOH2L6nGXJ@A5)LH6{1ic z6Ze~Zm8gc>uK4iUa^zIB$hF@u3i(gC|9WUs4N8_8?--g6F=u`~{(krzX6)&cQLNddFX zSRZ1yS}v=3A(*ka-aH|X)fG;bYuc`Cf(GrJA{UOYU^X<6c;7P$V!ogCEi%qUl&Mgj z?=_e!mCt>ie7FNKKi3}O)~W!V_nyaC?lyzLXvt$VS`7_>VX2gTH4u8)-f_nO7H4#x z5P6P%NSfVUxckK*=&9YE-F15iL>2_ad|aros^$LG)jBjF+uz;j1;apB?Xz69aR?p= zuXeo6K>`2Gb=o;v*glSk5NQ@`tgp4G=1yID8ze+4a}^9X!`Z;b4~@}8FneSwdZz~K zFHzpky%l?2_!SY~SF*#ft0iu1VFeA+%r}?t{}}eOW}7Kb2iuJ)w)MBjP~uVgJNyW?zC&_1 zcKM!)D4gq_iv>5LzxI9)L$cc&QZ_bM zL2;}Z+2ynD|qAmHf}0JxUgOV?DH=FJjb=PyfdfKdOYM-1&R$cg`hN@8vYaypk6@ka%# zN3}(6jKw}*eChUK5FbJ$4=E>!Lli^_pE+j4-ixGVlh3UjYeLLpDU4F{G_)g4@3hAv z0W|zh%Xvh0f|P%edaD@LFJINZt*~|&tN++3UhSkpZDth1B)%7%oM_?e9G)SO?k!6z zYFiP9FyOY9zwT8vY$TS#qudijJrHvH_9C_J})9(jT9Cg zctlSW0%NoLp>mU6=&*^k8Bres_OMg^;q#rq{N=VKwz(YqTeAr}BI3Y0gm2S>VL!0; zl5+n<&cpW0u)O4+YXJ4D9B!*gB%sU_OjTEofXW|1>)o>bFykI2nVm}14rKitGw}W)0)CEn& zR_JR_F>)8^1YbLvzL6oe4$3daW%Roj5`_kfgfYK!yR~dqo3{hs2URhT@ea_Gv7!k~ zcSF|x`6oZ7`r*ue(kbPeB!E9RBSb^HpzBiFW9F$Ia9=8o8PKOdidMoQ6JZ(%wp=tI z#bNuC%)hh$u~=_rJf+vD`Hd3DW$$fGjPCSd(FpB1G-6HozX?iUqd`vg4q zQ0k;;AeK^Im#g$Y@6)j-ED@&ej>79l4O;Tg=g(~u;|th+iMcPO(oWd8sgNcr{EP-0@;5sBWBZg`zPQC48XE(H1If1?r~j#AEuXjl zZ^!?uc>nwLYu!F>T>LQ!F$YIn&nhrs?%=jd&e%SQN~V%-q9`4VDd!H^VD(hb8)9vq zx?jQbLJvVm9<$<(>t+sfWm2wr@RA*lxlz4ZdC_%y_)Uy&n!WO zamRFr4ZpyRF5S1I64<(Su4Nbl+c)@ni`#noeky2s_1+W4>iEQ%-FcsGGN4fTi12q6 z?EBPdNl2KR0RC4;Zg2R_fLzHG^}NGWh+4bEr+#+`#09P8XwoEz;rjkX1}Z`0vcaW@ zeFey3Az3ZPnurX3md1y)_8A}5V2 z=HX(t5d1>LYwc(&#GFdz=<~<+jTHZSrSJxO-*zsI%Fkn9yYpMJsT6iTvfbJDWyJ_Q zjC))DXIBqcQ>U&4h?juY(~#0#xmf+)wD_|2R4ZbOI4$d_(Szc5zh}G^>qWMsb~>G9 z9Y{;d$W>#m4mo{ayRGoP|G)N6X3X3qLh7|S7d74i2zVN#z5OB?+&SxGg!5X#dfi0r z1L-QrI(9UcRj(9$SLtT<$6|f7hrXp)6t+Td(d_89%w$Jmz-D~qjmP1g& zrKIJhqO2 zl<>}r~X=z5;NVhY4EJ+vMI$7+n;k?Wxk;dtB>uu z?{Oq#0#+t9r^|~n;aR@@dxLZuB=PsV?vBIyM}A?e4rj6Tq59X8hG+yFoVJTBkAJpO(E z^6}sC{rBL#qG`^&6GVC7cN_oPGE`k1HF@Ad=OKGq^H zYF%+Y7{EnW3w6U#v8Mu9HGQ6*ZkCa(%9 z40AnAYhMPD zp({-BqrW%&Z-R$rIiHeLt0GLLuk@RA{7DeDb+i~*w;s&=cY(O{dv>R)x(RATni z+->R3K`_&?R5GQufpywP{^HBoz;Szqxi0lHvdyFzt}3cSzQOvck!cM`rFS!Lu{5^6 zPn|sBu(JRuI3;!)5HhiSZ9xm;!p*S0+%wf|D;bI>jx5f6AB5z)nVt?Nolx^JB|`Rl zH4qAWt6n@T2LI%{Qvy++fwWpqL_4b%X691lGY|Z0|IM?v9CiKBwZ=gP9uB}J(O$nF zyuFZCU0jf_-3mKCDeO|LZvu{6;%*XrL{N#i?^5B{4F^jb!nuqGfu2hiamMm04u?hQ z$c*U=4OEcv_5b$g;|Lh1 
zg`Zy|jMekE2yHYO>W7-1C_(dve%OQx#P?(MbuPUXt~pO=zzDRnc#f@(a=9(8%Z(j} z3HzRu-c#eSlJmj!d8`jEtXAQJtlt>qu>BUxh#Uirr5E;CuZ3(_Rn{;8^)tsW>69`-Gn{PWoP+IC{zSMSEHe&j zd1>YGNB(&p|D@^K_-m5@4_>`5K_hjeGlyv>Py{Z1Hq^=m zf&2|&B428d`NU-wK_C4_+#Zb}AL z^)iqI?eM6~Pdf6wQ9QO{ii#8x*0fGqV0A)ouP>}}>pMAEKCq55OdXt7$m zf<>qabob0B=1pSzNk>lV+WyAYAxPdR_UJemTKi1}?#1?}85@-h}nUd61iltqMXg#F-ZEHXBRc@TNT{nQTgzPTuiSI=vH(0;V?`=c=nj_U0 zk74`u?2HZzHURt@V-aT&>j$AxP+;mYL5aSo+uS<}OmBq^)P`YD7Bm*wM#uX9VgiC` zb0o+O@?wylcR-eo$Y7!US8#vrq7b264iVo7+_PnsP!j)e#{#nz{0(;5g}=r6t^a-d z2FvxC<>TewV~5NswUr%MU%R*FQxB}aI5f^SY5%5b&?}J^%jW$8o$G(yxF3S`Q;+6$ zCmAx3Ud3<+6+Q+ zuyBFU3NLlbtA)c{FtZYF>HviiY=qCmX-E$*Nu$ z`96>lWj}zePgyw)tM&nvCo)<*rX6-nOJs-+U_MB`ad539g8x0;y{Stb(DNnLK;&IN ze2Fae-t0_)o~_D?WIGD9s@R4c;2DBV3b}zkx3Rju-j+L@*t!&9*8nFEVFZ|(sXp&- z(x9;Hp@HAVF~}8YDXz9+fZ)sA`D@tUFIfGdto8u5pLx8QzwZbct7khIn)qyDVEdR% z;@tAaKqCE{$+`OskS8uWTz@_U{yi~AhQujw*Gni+jt}e4o4q`=VR0B7qk-WrY*fOf9NR6Y)@bXk&3({`b|wm zhLH4=!Xp!kBvf*G#RTFYqK3~7Vjrwxkk;&1tNZWkp|B^Nn%_#s;=t2TagqwmT1@?L#*c7BQ1lJ8yW%lG#;liu*hwm`Amw(<` z(!!zgDhgO+sZpPd)PacbhS1|4xrpOS%I)CwMC3k|G5NK(2U%R-TpN=&fQSWK0^RnJ z5r@cH;iJ5LC@sZaI_6Xd;*z7MPPSGfmfJg@5~Rbx^hIgY_PZ^xukYR26DKIJn0FxW z(D)Ek@Ox#fmK}l@Cv+d#Tpfal+Yldv{T&tJ`az4E7y4m1MthFazZF6H?Vt+X0o*5O2 z6SHJjTqc9*=>RKV`!*nH@ow8F@)g+1Ju(AsVz0X)EOdnc5%`Ku6|8^M2lek;Qfp+W zaQWguM(#@*)DDYSe6|{fR^8Z=om;Va-_L=d`(gvot@_-(4C|NN5xekVZw|JvEj%Uq z2eyCC{Ayvk&w~-LV2df+@D1Dl%YDVd1VV^_$Q0Eg2v0b%UDF@r-zwf+X zS7U##Bjw=3I?;Ed@Pjb9YkJ!lP}0_F^9eAa_gYd#uQ1JN=b>W zq)>Oq-}QOcI^Vvl_xh~Q?|FXD zAHTo$x<~hY+WRt_TAz5^LLo%F|TKy|M&jCk2BBvPy7Gf@rs^SgZmX-ke$Ke zwUdb&TIpK0H`*0pqFMMcid?PN7DBqq-(1o#n;Bq5JS^lBoxS6Jl~Rbz$8hH|qdfR{cY!LEtp(zvSYHlx z^x*e0PTf0<$RKW6AsTXBh>Ie1`TeJ=hCr0)8Jw z9B5$Dx{k(=`Je@Jt2UHG_5h<_fn|wtIp&Yo*xZ~+Ax-RpK~8D4#6e}Bn7SS2Gwsh_ zZ5{0*j`=owuL*RM&%tmUX*gJ6XegFwSQKC{8dx0g+-~Qz!fZ@smxvi8wC<;en?k?!~=_d z<-N%3^WAK4U$F;7C(OU!fqr0jD-D;c>ILQ5>(^3FwSab=t-a%1Eu8e)#+F=F3mx&3 z2iSy~;X`!g1#91zoSrhL)5`X%Dx9u@9J|`MI%qtdPw7H9P;i; zdK~1!dZF(7qKL57J+LD?<4Q_NC%8Jf{TS?Ng@P0HPRDH9Am>#}9=~e`@^OpbWUZ%w z!{&fd`{;l2Q<&d7v!$lbUY$}OhD*FO%EQGzTH{Xjek3L= zhd{Ke<@AQaksj>1(dVs&2=@rh}> z%^Pcp^(n4tt*mz9_HcveF&)IqM2$(L6w`?AR5s6d!2v>fTiMXDX^@*hw zD(^ViNhdDzs(O=d(@4e!p7j2x9>o1MTZ>UzNtS;Z%j~XdqHUl5mD?^5Y!dB_B(bhV z{VH`;R~qN}Zbg-~P6Kc@K%q*qml%RC?0Ru#dYNM8zDr&Zn7Gb zKMx<*{ZtKA72gAYBhJ=NPj+Qrc?U?`N;1z1>x1A5ttdO~KXGg*cwatrc?3A+Ura1R z+==%3+=Iu@3vr`(o{@bhhf|ahPD%9hcVuw_{~%mz6iF-|><4;%rylPT^_BMvie1DnZ==uAJgUov^NFBHFEt2K*geXQj>!L*RjLH$pEX4><5T^@Bkl@L5R@SnBiu z@0H{9gQne}CjDTA>~I(8<{s2e5~aYf=ibfdkM&~y?eWLHW9`V63ug>H&4px}9jk4x zWN9m;encR#r1k!gzbynYv0~ZqC(kYXqVYaBz@FA@*r=Gt;hbp z@ZMY^u4iYwZD~5JF}~DTm(T=0--js{Y<;lm_2|jNnKan?GHKENd3~^?OYl9L0u{ux z9m=XjdSU07&f6fIPpu!GJTrB&1O(p(MI3sP1m-5bIxi>uAtN|PF7a3d$UMLN>gCQ) zP;g<){^*@*$PH2ZIf?ms*{49=IYk4HTdfAF%|lROX0TKLO&>%|2Ks5GbwgzBS%=8k z4rm`(aIy;Xks`M%uTyz@LHJIht&4Hd2Rg9nxWAspxVdu0KyRuu0}Q_0nfy?)7PoVN9f?&Kh(>fPK8eb7ke(Fevm#*!ii-u1{ExUU_mN|j0}yH&X}?-v`2XZbnEj3!w>Mi94YKaXb$HHH^=j1h`~9nJUmE87 z*8jToNQ#E@y1aLP;cwInifa?qLVdgIcOx2k>HXk3{Q49R&X2RZ_l)P+4uH#RU*T2D z7$8z#_GR_`5tzqz`HM!wAN|z*Qu>9k%$I-~2^mZ!xVbfg%lg;jWQnw!xYsm|{#{0{NoqMC^>37J_)NrqwSV$p0 z+vP5LS5Qg4iK(D9;`~^iE#;hUrW3BIY)ROQ@xCmP7l}e%3y;I`$#LYb@84_t!VvK_ zoCjAakZz*wJ$m8$&PF1%H-tU(LkV$E6Zl$l>or4xJ#Latd~q$n_poM3FxFRW z^X9GYD{Fv)MXGWQFR&i&xgbJ7paxF9@86q2uZDybdzP}CZiL>{R|j$|yJ38r=pZ>n zgAqxNOI29cO!-#+@ZMlMtlzUL*R!w*PV9jYTdhqr$Ltn7uoN0wb@@N3utLLp-B0`6>AIYDVJ;D2mMchwU>wy)gdQGKJZ&ZF>tb8Z_zuc|t zQ-^p_*Rya+unh4DrJcD+Tg|%Q?Bel9%TRYmcJzDmi})4@>ELv^ySEJvRme9SMPAJQ zaFOx0sUDDXjbE^s(FZK2MP(E7Ffae4GU#^RAm~yY3%<1C`kzN>!HLN~b$e!iWA+1& 
z$(*#yPe$N{vZ0gCt6@lpjEe2tI1J+Nr%i-b4MWNmXBG1a24wYnbyP_)fJ^$D)^6n4 zvUv!`IpR8jR-rQGSWv2?`8e@|X}(i3%}WQ49hXh|aculEnDdDal;7n(2zfr|;!O`H(1^_jB#^ zPUG0yTbWky2RHTi@9_8Uq^Gfc|AtRPkQF<-VUYy|%2u`XONb+%-O@f&Ehr8O%whv% zXEI5}tsir)h9xBA>)P$-oxc!04pyOkA;{BtVW)hq33)6#eY{E>GT@foKr4As2MfOP z34Jf^fYV8vAM@6B!@OO*Sy6et;QMli_eoxyx3W7A{0nyyJSOh{;m!a^UgMpt*^d3= zeZcDg)LHYsJioDZbtRZ6+^T1dD}tzDCyqd)R)Q@95g@|h?v|BP*h;=T8M%BeH zprKtk|9xmZ#8y@=^20iv2~T{|2S3cW-FC@cyBG5wH~5`>_}hR(nm>p4P&3@}=svR4 zwG~!!Fe-oH^Gyox*}Y^0^ZXeKk^8TpE}d<_5`z8n)a8)$58ho6@3q5psI(oLyr!tN zsMi-Y6k8yT^^&6>&7SLH++9^5eE!7of1k%Z zpShpeGWY-8{-@)ApU>Q8Ugw-2_+b2c9Z=FQ@x6#CLH>wmuz}|rFmIbDyRJQ($h#H3 z^meKyB~M3u{d1a0(r(Ut~dG`;xAgkb=G3(s3qliOgaJ!eW&U z=ePE!rSGZ*^8C{tHRN^7zbC<7kGu`L{jXzk@OvEy45mF!r$PJWsjO$}bXcc-w`*H| zKl~1P>k{#s2CqA^LY~lRz%~)&r^VI}LGzl~ENpRpyF5Rfqq7w#9p)Zk3Z)RWP@54{ z6$97aB-y`Cj36N$ZLjl#DV<2KB}>uIst0S0it)-HWxX7?LlT})uFid2n!p{vIf#J*gVV&F87=7+P&!03Yd-5gLrE{YN*GHpn<Xv?r*1XzPD6&p4it9Z;JJ;^H2AI>qxM^bvfcHQrsKkK2agUGCtLFJry=C zJ#Up8*ar?}ueKOU(?QMS-B7^oe$cv=@LSul4}NZ{ypnep>&|Y^nr4GAZu_m}7=b#} zR+U}cbJ7EFsapJVhvoq2HF+mb=F{PICQr66;`@Tdq$1T_2SH%Zirux$W#5_iKN9}*L+16&wp}l6 zmOXG3oZYGPE&rEZC-b;aw)*Ef&Qb8HU38Ae{omh*dHz$0$SwPlMxo4p-G)^wM&OcF zigpwIpX0`{!eWQ$1(qfVaR_z<-2=TR|*v5 z9;%9d^qz#NSvQGoEF&f#$48ZGvA+fc?vAo-BQ&||e&;WDlk-a)ulkHph}wml3l3iG zA-Zb%*~9r1B5r;@vv^GxiMhsVE6~?Ol6U?*W~X0D1iP>Du*`b{&f7-2Dx#5hRVlEX zC8rER0=e8uC(D5v7hHH<1a+BqB@RbPo<^Xk z{JgsOHrD&=?>*By_yueZufvj50aPfcKMUNI36odz?lc_E0OuaPM}a2`z;dOWz|uE0 z5G3otam=^_+^B19tloD6UB4yx6zVnHYZw)n-%9}=`}?}p_xfOJMrwg=_yANrINoys zb#L{l>6^xpe^7Th>$-GgCwR_o99)Sw#^BYXp^{B)u>ZW{@B0qjkiAsTkHc^`I|o~44@%>`X@eSL6EUapO?dH|Xg zRy}NE8vv`1_jLOe`ynKK`>9r}BMOB*U;H(a3Qju=9oj5VMDBdg9X z8i7Ww8=9G%BcM$C;Bay6C@}wC_V0V&wxCX(ti;2{{jJ0BA(?S}ENlqO7CN&3xHky* z?tcBi>p2K@R>z_ru3&(6bqXOXM_}RbUjKocqhNbF_xaiHf8qpY|6#Vw{=czaia&g;FJvHHNlH%a7}xpMO?Yym#U7?m$eXBb=YJzUz9ev&kn~h1 zF{fF5Ov3fE%JrTw9?2S#;2zd@S2~9@9nw(V|1lD1`tLqvN|qpw|8WAVd_BmPU#QO* zYy_1f+run3q2BNlbH3Ns4RCAY>)WS78sSlN#)%(qnjvqK7=!atJ6JRX#a`6u2Bpn` zEf4SYK(d0fgx36C2$;enXbw= zcA;+ku!Zn``i?!Ig_bKlG7T5+bAC@_G>+R+Wn0 zt1+R2UcmzXYkV}A_cb#2^jI%cFOCv;cnSR-0=?1_rl_#|?G+B^I4U>@aIa@IreQv! z{H*gB9d)WF=8IY5d|y4G{LQZ)UZ1u1-uRpj)XkiVoPGFr6g|syC{!pedDMEZqZdy0 zj+)taQ{j!IjJ_G-Y|JXBGQK+xfc11vPI57>8%&l{B2)W;+d%egNK+puAMTTJ#pk-d zCdY2=A}Y*p|1!1iI0Y8UXYR9{-vfsCpCoVy@!vvxoEjti!-WUPTe-n6Y<6=9dWBn}R~;P$!LFR$l0OE(p^~B`)Q>!Qy{@;Z zZX>YMO~7&egHbqfgf^^$alyCR4;$L)18`-_?;oo1}vm5b3jLwXKixD`wWOV&ac7xf#sgi z<(#Fcr?vDGX^BCeZR29aU#cTe)z32U_0BMWr~`jt$_R*m|0O(&IXF%IBt{~~qKjSrX{17>* zp7sjkt=Ky*qPtMHm_>Q|t=D0kC!gPJ6T}Ai z(W%9CYk{w#mLB$(M01k#?tKT0|5S0AK)va8uY|KtWMiFAxsui7DDuq=oK@M!DezDs z`u^T(e7d9~vT4zVEMf}~3{O8F}8{s_5UelPC4p^HM(k$|b z3KdzegQMlJZjHhwx~q_{@#5*!zIljeGn(A)v~>V9<>tDlI|pFuoP06+GR%7&v{3%W z_pk5H*NLt;0BZ#npN^YC9_pjyf{hrDcN=7h2dN;QE$Yy>jJZGa74thun^WF)HW+cN z8fy-Jhat$_uJSI2iuk*bO`F0up)ba%)~JE(K1i2{cUz9Siiwv$_bq>h^@IBKDK+m*EW0RvgdI=-U)? 
z^uWl~l==0@@0;63J#sDsaX~&JqRkfg+=sK4YscezNut3%LvZ3m{7gFL3Gyym-n#i~7%a-dC0{sVKJk`9 zd=>KN61H!cUj2jtxh^~fQC>3wrg_wOOVmG}g02}S-c*UnNB*;GLeVS)Enc-vG^;4{wO7v5pqe__LNx0xQI%=g$mPy_Wc_vkI&@TiB}qWB;0>+L2q?q5n)vF#*g z?_+^IddRbDj0EMdwRasfycN;{A0wk zdo*O#I37YC!VTBTrG9l#wxot7Eua=;%s$1pt*%9VwaKEL6^J{3zm;c!F!Iy_`y*=A zQ3pFnQpA6{0eGzklQq0sK)z3y_tkhURCL;HD9I=Uo7?Qq9+ZSbLd|ubADiBx&c%X< zgV!>McgM>$`$9hw?MRN$Bb$)7pXbc|b?hsnZj$E_?;hEk~WocCV+;S|l1k^Zvf-lI4ic ziA&G7srv+iSLDJD{rm(KH5FaF{Y7BYTYa~*tr+h9=zgP3K7)CBR&S%!7f`;>wf9I3 z>RldDo_em@2@y}I9ixu6t5c`XL@wHe$K@K`F~VHtJmu-2bvxcD@M(7Gwpe zo$5w?^|VSK2VX z(NU-D+`hmYbf~@h`24NY{VYRL1v6Ul*YjTop(_p*Wphbz{MMPfCK9(TnVlTJ(%w?aGal4iujR;(Y}7aRMdl6 z8x`b2jfG_Hjq;KAQQtRj_CX0-J+XWKY5BvmZNx$7F!#-(ZjwO%c!V0#LumCmZ{m*j zl3QXEWbdM0Ql>W%{Wy+7CLj0a96}zQ-Zs6}p1Dm#@<(8Nf%+HH6n1>4d{O{WQLx@s z_o)c>$IfTJ@Vo^E^gjjFr!;|cr}J^M2;|AQUXrcuuY2X89oiv6(b0=!F6YCDGs(jsg zqW5r}?!u8gV$&V|TCE@*G#~Y;H+a^9RJRA;ElM-$5Q-^i&vt`~1;>=uhZYD@%@W(Z zy9fkdjj;8U1QIr@&sU4yBme)Nh*t59GIDoiKp-!plxVxJQA=8sONfp`poQWifI^b6s^$bRS`{dV+$zxk9pth$eYe9c0 zu$@?Z)nZ{0#GF=l?Y7B>Yv6q3CFV(IzORq5r`H2{c>Pd$RX50IU7(-i>w}FmL;XjT zdg1z=>33_KdZ4;ZMPhJaFT{VlB4EMjg<#H2B0EdacVXWr)hl{E;2E^^_OT)gXnU+4 zQFKMzfs)qS)n^d@$noOdk7VyvzR!PpKjwM=-8N_)^^W$oQINc1cU`UT&v+A@ zOptmaH=Q z+D?r8mZ# zsgg7;*=b8B@-7+A2K;Hn=auSsxIizVv{a1D_h~0=ht!JiHB^y=r+k`fZqG=``WMm% z6Dq-X#YV2t_1)m0vHZvq6U66l@9mxDtOlE?B(3s=NrbWE{JRqL^U=F7Ry42>m&<7e8vSNzR85Uy4)6Cuc(0EG|5_- z)YLlQz9iBYpts@$b|wWMI}|_7nv0cI-K!CKhuew zZLBLnkqr_MiNtlZ#=UP_)6hTC z?(Cf|5UM(&fde>VybcA3_VFwm##L{qEjm0`&3Xv$;#K?m-@rIeH_ zRCs&+;EhTP%&%UTmD(qXxTMY%?(ADTpmdo^zD{&AFcjp+KBL}%%i`YeNAo-3jgHj3 zLR_}!DRNT zkHN_yXr$fIdspy>H_YtE%<+r4&1{+b_gm))#37D^x&QxbyRf$5&Q0Va<;E!TN4-Sf zx9R;KlphU%r~IxFQH*nHXRaxpKHUy{*(YyEi6TBtM1CenEsEIO64g_jsw6T`7y%k3 zO~iZ6#WBagPI7JAMDC6K6cTgrJ*OM$S;}0Oy>k-l0W9ggYZP}QKiEFV$bSps>Lt(G zF6pZy?Iilr@0IyPeDOy1n#1oQtL>ohLW^pMAH1OOFTY>a7Z?4SGlqcq{>=APeH;1X z#CgQiIv5}BJ%hM_&r;*_pJP7R@%8;d{_TKkqS! 
zx1?zR-?>4uc7+cAf+!k^>QBAMxPm$$TP!t8R-mrtTfg(Vo*zg7@56$W7uDdiK0v@A zrWZDbCT`ViMIH@XVD*|KB@omgx?}+8-5p1|JzN)&71mzcB%bl$u*WL*)E0}#x#I-g-v0@3(LJyws&uRn~GEd^y?JQ`T zuw1uJ$a**VGZXxSw-?;~_LflM4rp$%N1Z>7(CPs87-I8?|GevlG^iaLslgr(r;?2+ zi{5p@)8sOCDaB3*x%)m`D5L|-I;h5J!Kf?MX1(1Fc^%1BmYKOExkXBsZAM)$k>YHfQuOnzxy<>pYXolV z71#c7#rgH>)6H}9hM-naMK~x8>-V7(_Fqv4xBbB~`jq7Wypg8XS=G|uUDcC$pSL1k zY-8;*ng#m6$aPq>3f4l5v@l(GVFftff3`R#tp<1xTU}V!*#atu?B&b?Iw7a6jl(7a z`GK8Wb7rU~`9!(0u9}SkgX{X#))gbZ`NgteGb_wHJB+j&`}9Bu|EgvAKdHc~YW8j& z>grN-Zi$Uf{d4~R`+H|@Gh1dq@D;Q1-smw3-T_xf&D#Imhk4$=Ym+x0vl1Laf5cmv zac|~^fHQ5Ga|ZgEc60iuCgb?4R&jSycN6Bt%lFSDE1-_4ykgP57xC!x)A3ON@vhg_ zHjn6juO%^~scU!tY9Suc+fSOtbdj*dLC2P&FV*>Buj02|6k>TMYIVg%)UAn*Rq(3m zBox-_ZC<|-@6(ukIr1>-xw#ATdC?BLE3 z(sldy?TN+1L}I`5<>!5<_ebdxrPHXSeVf#bLP0y36m|~OUsys|R<3Zhsmp{1YKcAZ z!7Xs}PDcOdzFs&xk5kYQeRgFoaOn$t^d!2}9QK zmhU%h@%yBUm6^fokZY=g-f z0Sf^X~v zpOI&@v|scX&Z9{Wi;mlxp?_WgJ*6%d$C2e<`=dBc=Ez#Tdfb9}#EpUL zZXmvv83!}J<9}B-WA+E;`OIzRb${=#Nq8k6FE9e8#~0?mx!MnV)m&Crz3730Zbq#k z)|bYQP6qFns)nZH8ochd6(I7|iezEFNj8>?vV3(jSYH1i_ih;VkLW{7)?%Jn#4lq` zf>*_4u>K)HE6hF8S5aP)f%-L%uTO z*ANIeSM^2&jsUMqEW}2P!qGiXPqf;O!oz`MrM#>E%=gUqVZPtr?WA#!jpNBt7!z(c zUsdzxx|lZa8AHUOj@77otJPyYw)UR3lPVn?>z2g?SJL35c}bW&_Wzum-J^yVdq8J^@{Gj;tLe+Rz$0{7MPXF6SCGuf5s~f2xtZ$oYg1Ryr-({RGf7}5n931s|KU!eUqX(=Lz)UES@&v(LDVK|Wv?ft@T54QAfm7Z#&-Iw z7F^c^S=V%}#(Zlbjk`YoY)l(GJ9LQwqGI7SdwZ~6_Fo<15AL4X&;Q=f>^J=B86MKbqd+&D z3_i195N69dUDz<6m=TmFR`RI_R#C35WR>a!(O7R%R7GgP%D`15_BJ$Lfqxqp@EAx`k+^vH|UdKAFMr{>AZrw4-}4l593+a z58L(n)trwH!oZ`|1?vNcL3vO(dc)u_h+ZzH@gh%<`P|Isbkz!HJ-rff3Dy-=5&xGi zC-eCJz-M1fgGM26F|Dcq?-Lc*wry9$FlZOZ?O1c>Prv_rd@yI!rGLZxWBb!{Ur+T8 zLSDwGxZODV9DiP;Q6WwThX=Y|WR?z*t^+kUjSyd!P!udBhB!IK>feR|Pl(;K$YUU+#Ci@k_h&Nv{2)vhD{mz^Hf}Vv?XjfgV_8qXV}&hKw|U)&-PVa@FGQm zI$eu86dguW=Mk5rmhT?jy2clTLT)sk_RJ?SZu{b7#LLKxMT6I+h$7BhFMt;vS0`=#+rW9PX^A0Q8`udIcO=Sw2CjEI@663S2l0VpZ&uz3AS}9ujidFE zprks+br{!u$zR!DwWzj$a)&~2r~uYmx?XtM%Y6hTM?=+|rJ<<%F0lQmb|f^<#jX zkB&$u*6qE+Lt>XBUTbKx)4WR<$8Wmx`rRpxPM{q)bS}6Ybt;Nkl% z5I(luPlACy6c=RKpP&w?ap{dlkGpgbGGZZ38K?tzp3`npUXd`G_Q%E zt}o*qzYtqE>W(kiU(q+*3vD@5np-eW)AaRP(y&ky2r8yKS1IQ}RA+dgXTkMsL761RMh)@oO{pvFzVxYI*vWZ{GK+ifP;4og|OCY9}|$KkS1fUA1gN^4p8&i zh}>tK?`o1(A4lGB$(btAlI%Rp100Ay)bJjv;uHD%F~9A&Z*cyxEnQF;QcNKhf9ko+ z@sas{%x(Tg3+{`E2Yn=+K0G?o3n$Ef*4(S3K(wdptHhhA|FfLRHy(ldyYjb+Bjr95 z78A#t+Rg1mm-6i@A02(WL}#yVo*X1Xr_JkSxJQUd3lIA{j!_a+UJ<*LXM_|zSs0vx zdf!UHZ{^S1p#RsgbVFlW7dg3B%UXPQHOZh#8n1tfb&c}(Pxad38u!+=O`@s2RUjr$4 z^k}!de>n+B^(^0h^(|rO8qL)l!11>uQt|HmZpgbhVI>#eW%I1?Bv$JdmM2NAU!n+;?=qe+3*HJx*=_dxSt;P7InLSTKeUNp(K5t1uc ziWxnu110KR6}W~v*xPUqBM-_t!5H;pdR4a zg<9J$yLEvbWs_W@H{vC&q!eY6s9=!boj#r43%4x8Q}(*xJX-eT^}@5r1F$M@VN1mG zqT&O0jbnXJ{C2-ec0cG$uve6~4}zJJ=H}JPP_vQ$x^vid(gVlWquN6s*Py<^u&CsFj=nbJMeFjg zu{Hsydi<_+Y=VWmk}Q^B9>bE$H=rk!3Nmk-4*$Ts#QDwpk4|CTZKKjV@5*_&ZVf)- zH@FFPQBA4Fj9)YuEL?Zy9o897De1I78ybZ8mc@L=xc+CxE6nF-ZZlive&%~ITV{V? 
z9%r`9{meGq?Y_3+y-^7Jxn|NH>nL1CigT~?F>crrx`oltfU^;nfu)jNFio#&RQZm0 z6N^uG`2F%gslRSxs8ST+dTOjAdbxxssrB^uMc1P*A1{CA(sm-jo1PJ}x0}qJFsZjf z{NMTTlEgWd9#T*LQ1k=)ztP;H;*^L^k|Ewn^YU*dnr6Zo4DD*-uas_h>q7>yUcASp zRU;S9WUk&Tj_az}V~yF_`1^-(Db>%+XTY9`;q7UDqfm1`rD+4=w>^aC_fs8_x4uiu zZ*VgeT#w~TEKu(O+XpA~$B$7US&$aL>^%iIEM3^=6}CeL&&t`vgk;dZQ6Q4PtBPDR ztSfjz?;^UM&g!qKkf*f!kAv+K)9LZs0<1)F5JHn6g}@JbzA(XG{2&uoWtS;RL5 z@w?}jD94W=e=n;0lU`Oy@P`e=GDeoKCVCedCeu8uC)8oz~u=SMqOuZ*2^FhJZw6?gj4%SmMB zc+)`x_Y$Jg-S(Qpw45-O>^RuhR!GFvmhdEwhLD7dm1XTd#i)lE|DZU$4ekyRx#U+h zV17GH%{e_8`31M1oviXF8NW_GnPhb#f=bE`9PA&VL~ibLbU+=LzG?J7zk~uR<=0j& zHb7kh(2;oi4C|^RQkg^fbzmg@d+w573$PS5@?M$kf+z7j@my@((6nMEQI)R^x)hv5 zIlG#mD#nSjcdP{*7jS&q>emf%iSDlZP{+0Dcs8H<+d)|S%*D!}WC$KS@E6^bj_Xs~ zBLM=(uK=LbB&`n>bKP!7id5IQ!!^G3uU-XHUOXSSj1cSO6h4THBD z^?5qh-FnT`bCbA-AWg=0a>18=m=@C)QfWf|C`Xz8v$TGwy!f=R8S$YzFH}qS)S`Zj zjjzSmb1jhUSi};OSO;DAju@nFZa|#++~s1ob|^1ATjzm#MpY)<2IW)8Gu05)ShlVY zRx~>a?AuQR`JYzX*fKFrU3_}enN~VXxtru>UqF4L59b&5*rG1rn}7u;Gm&RxaQbb* z$q^7yn!Un6+%$c2NtsjXe?LB8zW0A>L(a;OJs*dmkllf1@OBu~2GuULst>|h*-H=h zWOjpg()t^n0qEy2x^IU7XDCs7RpII^QAU&^cPNQX;djfcPF;AigN!uY3oL2sCTb=v z5&S~f?+1-RYkLnlzhm*|`-&7|GFPoVfPStFR*86p!6xDy81<3UyOQ_|f9O?OkW3`s zji2C`E(D&(?oE~G8*dz8wDj%S9-wN~Un{ji{D-xx1a+bZE||$oYohMky4{N{R-mqz z|K3ZLZ-aVKx9+0&8aw2(FC5$I>DUV;V*07a*{GnuTk9CNH2P7~EgQvL--ASiyFimZ z;+L(0L+_%Fmm|B$E%ix$GVy)DwkASEUb=)|ZD)+2S~m)oFh4m>Yu(EKjUm zfqG^By>D_hSoV?vdI`sx;U;2}&b>wcQ96kjVO9Ns{N&wL^zH*(SjUbOYgsCU`Rg>V z&*k~(N6%s&nYO14C=b3ZJ0qGz?(e>N>eAzCl3=Lt^7s+t7gxs)$~rOXw&H-@XV`V?X|jzpH`Ej_-{P^7E+^Gls$F8(NXBU7V5f0cL`_MP44y zB{B_*@s1Bfu==*oGQ&g?b@&zE;lM~R&(f8;CSMLLFF!f#RA>e7 zEUw~L38(|?#Xczr`M`B3Ki;e-2gE0%h6as_p-VA6eO*KYybynE{JMw&^{GTf_-dh9Kb#j2pRm|Ifpzp3zjmEFK!dc9iI9vC)Dygu&7l^C z`T-w`lUk^M>buPE`or3ygO8~HT<1;~Zx#B9`kEcC60KzJpU0WkGuuK{A+rauST7Wg zoS0vPK3r!`xhvuK^N_M@0UyTsD*SYgc-ab&mwCT!i{EG9yToSY7*L76KbB{d##Rm)v+4R7St!~`u!+Frw{dCqN%$E5+f49uzf44?61~GAIfA|h%hSL`wRO5WraB%)= z{wAPdIhVfyb#eNp1-=(0k!$gcXN9jSNC@jho`7Z(@wmNl`@| ziILm=O}1fO#3p;q;SDY{BJ=8y`ht={qQzM)A1*db%G|fk-a0o-JXFRW)zl7=vI8Ac z=WftZ@B5^V%>G_-cm1N3c5W>s?Agavp@oI0b1-w8Gc*U5&)PMoyle&SgOl1}KByDD z{hp?dOqI2a(KCye>E<09_K0L2kIbOw}->;PQ=-7fblKgqzlgh)5 zB!p7zlI>hdnidV!lxRK#%l7wYT_wS-szS z3&`}Zv)gJlstM~z?2Jk#`eq%`OO8KUjQ)a8u7}9IAjxK|{hISjA&{|TBM0hw3F$6) zkUN5WV~ryU;kk~4C6c{~mXnPB-?kljOVM}rymoDYd=kd_+6^MIHQ-41A98+;c%m&Y z+)5)0z&s{qL;5T9;oI6CEBfFK>Xm)63PRsM!I95GpAiS8Kb|D=S)vo>>nGgXbEg}G zZyC(E#CAeMWDITAtsC{)o=a}JiQ~DmF~t=12;4gw5BuLiUSgo)()GdUFDF))D07(# zt$SE*t-VeI=U4fHjNPa&lJs`h0pxu?I(9Aa;C|G7)e919@%S_UGr#kH*AJAvHr%%B zVn9bzzWXMO1K2icJb0lt1k0ng7_FP3!*5VU9~YJNXUjlI7G(RIjK8qbApD z&F@P=xg}v-sI?mG?R&o5x!($nJ`FDGG<#rWRMHGo(n)I9op9Q1O(8Qj)ZTF09^#=xX;(1mA=clw6bp-DeSFw)LZ7vh*zD9$w>^v3 z#Vl-IlTbmlQ`QM?@%ccs?>f}~Tv-6Ri9Qadoz37ik8Y_kLIK)&x+ewKg>xTy&b*Gt zI{LmsgRO<=+Z5P27gdM-n;d)*vwIM-c%R)}J~<4`_q!H+jMB(~dC_`lO^>8@U|C%_ z@@`cmXde+hJ)T0+93H|TRzMVj_FD^};W64gtxM^^H86At-G8^=Xci6etKfn45I1CHR zcDjX<1fKtgwl|Na^6mb|357CbNJ(j+(0~Y$E=7?cg`!fD(4aCUB|@o)NTduILP%tu z(l$iMJkK1%F;j^|`0nSk*7@~YJ?p)m=lffqU;pfNU*~qteNOjvU;EnoHSk<6qbnr` zMVh^D2-Fb4ZK>>w^HsQyXMV`>qJTJ*onJRvkG{33rt#~nYC)e_A|9X-3e9hnhzDW6 z&xhkfD1S2XyUr0~cO#FeF6lj+z5ZxecJw6-fWHO z;xz6fQ-5Cmb#ojtihOUgqi$EZj}wzQ@?*SOxAihK4g%K`;p>B_5A{L#!j4wWCNN(8 zZgdp;Qi`(gd2KB}Lsf%akF-b)oU?k!`0+$LL{@*f@*bZTn}4N9Bs@aC(r4?sIMhLH z{k48moD<^cewCda-HAQ~cWW8b@&}DOcV=KRgbPz0R@1)?N@F>ne$*V&9|Zde`Oj9IP|- z{$|L`B2{O$z(Phf@l%BA-8s#ulXGCWM+Nn;b>t_1Ud2AYAK#1LnmvSaV7-|3O2q$u zQ`L)#?j}qduiE#$$G+d3iqmrc22vYvmuaD)ga|hLewE_+k}$a~-{0X;24ZxDM?{_4 zq3mp`?AP1q&t#%9uHuM1gojM=`w@3t<$Zs2&A<3+b9pH(>riKwdX1+Xd2-urQ$rT5 
[GIT binary patch data omitted: base85-encoded binary file contents, not human-readable]

diff --git a/train_formant.py b/train_formant.py
index 1a71b796..132a0960 100644
--- a/train_formant.py
+++ b/train_formant.py
@@ -152,11 +152,12 @@ def train(cfg, logger, local_rank, world_size, distributed):
                               save=local_rank == 0)
     # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=True,file_name='./training_artifacts/ecog_residual_cycle/model_tmp_lod4.pth')
-    extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/formantsyth_NY742_constraintonFB_Bconstrainrefined_absfreq/model_epoch29.pth')
+    extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/formantsythv2wide_NY742_constraintonFB_Bconstrainrefined_absfreq_3formants/model_epoch1.pth')
     logger.info("Starting from epoch: %d" % (scheduler.start_epoch()))
     arguments.update(extra_checkpoint_data)
+
     with open('train_param.json','r') as rfile:
         param = json.load(rfile)
     # data_param, train_param, test_param = param['Data'], param['Train'], param['Test']
@@ -187,6 +188,7 @@ def train(cfg, logger, local_rank, world_size, distributed):
     sample_dict_test = next(iter(dataset_test.iterator))
     # sample_dict_test = concate_batch(sample_dict_test)
     sample_spec_test = sample_dict_test['spkr_re_batch_all'].to('cuda').float()
+    sample_label_test = sample_dict_test['label_batch_all']
     if cfg.MODEL.ECOG:
         ecog_test = [sample_dict_test['ecog_re_batch_all'][i].to('cuda').float() for i in range(len(sample_dict_test['ecog_re_batch_all']))]
         mask_prior_test = [sample_dict_test['mask_all'][i].to('cuda').float() for i in range(len(sample_dict_test['mask_all']))]
@@ -215,6 +217,7 @@ def train(cfg, logger, local_rank, world_size, distributed):
             # import pdb;pdb.set_trace()
             words = 
sample_dict_train['word_batch_all'].to('cuda').long() words = words.view(words.shape[0]*words.shape[1]) + labels = sample_dict_train['label_batch_all'] if cfg.MODEL.ECOG: ecog = [sample_dict_train['ecog_re_batch_all'][j].to('cuda').float() for j in range(len(sample_dict_train['ecog_re_batch_all']))] mask_prior = [sample_dict_train['mask_all'][j].to('cuda').float() for j in range(len(sample_dict_train['mask_all']))] @@ -255,8 +258,8 @@ def train(cfg, logger, local_rank, world_size, distributed): if local_rank == 0: print(3*torch.sigmoid(model.encoder.formant_bandwitdh_ratio)) checkpointer.save("model_epoch%d" % epoch) - save_sample(sample_spec_test,ecog_test,mask_prior_test,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,epoch=epoch,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker) - save_sample(x,ecog,mask_prior,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,epoch=epoch,mode='train',path=cfg.OUTPUT_DIR,tracker = tracker) + save_sample(sample_spec_test,ecog_test,mask_prior_test,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,epoch=epoch,label=sample_label_test,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker) + save_sample(x,ecog,mask_prior,mni_coordinate,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,epoch=epoch,label=labels,mode='train',path=cfg.OUTPUT_DIR,tracker = tracker) if __name__ == "__main__": From fc940b62cb19c4b4c07c28e9316fa2a62115ae00 Mon Sep 17 00:00:00 2001 From: Ran Wang Date: Fri, 23 Oct 2020 20:07:45 -0400 Subject: [PATCH 13/14] linearmel --- ECoGDataSet.py | 199 +++++- configs/ecog_style2.yaml | 32 +- dataloader_ecog.py | 1 + defaults.py | 15 +- formant_systh.py | 315 +++++++-- launcher.py | 5 +- lreq.py | 5 +- model_formant.py | 804 +++++++++++++++++++++-- net_formant.py | 887 ++++++++++++++++++++----- net_formant_masknormed.py | 1154 ++++++++++++++++++++++++++++++++ net_formant_wave2specbased.py | 1161 +++++++++++++++++++++++++++++++++ tracker.py | 6 +- train_formant.py | 113 +++- 13 files changed, 4397 insertions(+), 300 deletions(-) create mode 100644 net_formant_masknormed.py create mode 100644 net_formant_wave2specbased.py diff --git a/ECoGDataSet.py b/ECoGDataSet.py index db5b21f2..5ae44d41 100644 --- a/ECoGDataSet.py +++ b/ECoGDataSet.py @@ -10,9 +10,13 @@ import pandas from torch.utils.data import Dataset from defaults import get_cfg_defaults +from net_formant import wave2spec cfg = get_cfg_defaults() cfg.merge_from_file('configs/ecog_style2.yaml') BCTS = cfg.DATASET.BCTS +if not cfg.MODEL.POWER_SYNTH: + cfg.MODEL.NOISE_DB = cfg.MODEL.NOISE_DB_AMP + cfg.MODEL.MAX_DB = cfg.MODEL.MAX_DB_AMP class ECoGDataset(Dataset): """docstring for ECoGDataset""" @@ -136,6 +140,7 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor [self.SelectRegion.extend(self.cortex[area]) for area in train_param["SelectRegion"]] self.BlockRegion = [] [self.BlockRegion.extend(self.cortex[area]) for area in train_param["BlockRegion"]] + self.wavebased = cfg.MODEL.WAVE_BASED self.ReshapeAsGrid = False if 'Transformer' in cfg.MODEL.MAPPING_FROM_ECOG else True self.Prod,self.UseGridOnly,self.SeqLen = train_param['Prod'],\ train_param['UseGridOnly'],\ @@ -145,6 +150,7 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor self.DOWN_TF_FS = train_param['DOWN_TF_FS'] self.DOWN_ECOG_FS = train_param['DOWN_ECOG_FS'] self.TestNum_cum=np.array([],dtype=np.int32) + self.Wipenoise = False datapath = [] analysispath = [] @@ -173,6 +179,13 @@ def 
__init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor
     label_alldataset = []
     wave_alldataset = []
     wave_re_alldataset = []
+    wave_re_spec_alldataset = []
+    wave_re_spec_amp_alldataset = []
+    wave_re_denoise_alldataset = []
+    wave_re_spec_denoise_alldataset = []
+    wave_spec_alldataset = []
+    noisesample_re_alldataset = []
+    noisesample_alldataset = []
     bad_samples_alldataset = []
     baseline_alldataset = []
     mni_coordinate_alldateset = []
@@ -234,8 +247,15 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor
         end_ind_re_wave_down_valid_test_ =[]
         spkr_=[]
         wave_=[]
+        wave_spec_=[]
         spkr_re_=[]
         wave_re_=[]
+        noisesample_re_=[]
+        noisesample_=[]
+        wave_re_spec_=[]
+        wave_re_spec_amp_=[]
+        wave_re_denoise_=[]
+        wave_re_spec_denoise_=[]
         word_train=[]
         labels_train=[]
         word_test=[]
@@ -313,6 +333,8 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor
             ecog = signal.resample_poly(ecog,self.DOWN_ECOG_FS*10000,30517625,axis=0) if HD else signal.resample_poly(ecog,self.DOWN_ECOG_FS,self.ORG_ECOG_FS_NY,axis=0) # resample to 125 hz
             baseline_ind = np.concatenate([np.arange(start_ind_valid[i]-self.DOWN_ECOG_FS//4,start_ind_valid[i]-self.DOWN_ECOG_FS//20) \
                                            for i in range(len(start_ind_valid))]) #baseline: 1/4 s - 1/20 s before stimulus onset
+            baseline_ind_spec = np.concatenate([np.arange((start_ind_valid[i]*1.0/self.DOWN_ECOG_FS*self.DOWN_TF_FS-self.DOWN_TF_FS//4).astype(np.int64),(start_ind_valid[i]*1.0/self.DOWN_ECOG_FS*self.DOWN_TF_FS-self.DOWN_TF_FS//8).astype(np.int64)) \
+                                           for i in range(len(start_ind_valid))]) #baseline: 1/4 s - 1/8 s before stimulus onset
             baseline = ecog[baseline_ind]
             statics_ecog = baseline.mean(axis=0,keepdims=True)+1E-10, np.sqrt(baseline.var(axis=0, keepdims=True))+1E-10
@@ -393,15 +415,16 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor
                 if xx==0:
                     statics_spkr = samples_for_statics.mean(axis=0,keepdims=True)+1E-10, np.sqrt(samples_for_statics.var(axis=0, keepdims=True))+1E-10
                 # print(statics_spkr)
-                for samples in range(start_ind.shape[0]):
-                    if not np.isnan(start_ind[samples]):
-                        if samples ==0:
-                            spkr[:start_ind[samples]] = 0
-                        else:
-                            spkr[end_ind[samples-1]:start_ind[samples]] = 0
-                        if samples ==start_ind.shape[0]-1:
-                            spkr[end_ind[samples]:] = 0
-                spkr = (np.clip(spkr,0.,50.)-25.)/25.
+                if self.Wipenoise:
+                    for samples in range(start_ind.shape[0]):
+                        if not np.isnan(start_ind[samples]):
+                            if samples ==0:
+                                spkr[:start_ind[samples]] = 0
+                            else:
+                                spkr[end_ind[samples-1]:start_ind[samples]] = 0
+                            if samples ==start_ind.shape[0]-1:
+                                spkr[end_ind[samples]:] = 0
+                spkr = (np.clip(spkr,0.,70.)-35.)/35.
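# Illustrative sketch (editor's addition, not part of the patch): the clip-and-scale
# above maps a 0..70 dB spectrogram onto [-1, 1]; the wave-based branch applies the
# same mapping with cfg.MODEL.NOISE_DB / cfg.MODEL.MAX_DB, and the noise floor is
# later recovered via 10**(db/10). A minimal round-trip under those assumptions
# (the function names here are hypothetical, for illustration only):
#
#     import numpy as np
#
#     def db_to_unit(s_db, noise_db=-50.0, max_db=22.5):
#         # clip to the representable dB range, then rescale to [-1, 1]
#         return 2.0 * (np.clip(s_db, noise_db, max_db) - noise_db) / (max_db - noise_db) - 1.0
#
#     def unit_to_db(s, noise_db=-50.0, max_db=22.5):
#         # inverse mapping back to dB (exact inside the clipped range)
#         return (s + 1.0) / 2.0 * (max_db - noise_db) + noise_db
#
#     def unit_to_power(s, noise_db=-50.0, max_db=22.5):
#         # normalized spectrogram -> linear power, as in the noisesample_re handling
#         return 10.0 ** (unit_to_db(s, noise_db, max_db) / 10.0)
#
#     assert np.allclose(unit_to_db(db_to_unit(10.0)), 10.0)
#
# With noise_db=0 and max_db=70 this reduces to (np.clip(s_db,0.,70.)-35.)/35.,
# the expression used for spkr above.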
# spkr = (spkr - statics_spkr[0])/statics_spkr[1] spkr_trim = np.zeros([int(ecog.shape[0]*1.0/self.DOWN_ECOG_FS*self.DOWN_TF_FS),spkr.shape[1]]) if spkr.shape[0]>spkr_trim.shape[0]: @@ -413,7 +436,7 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor spkr_+=[spkr] if not self.Prod: - wavedata = wavedata = h5py.File(os.path.join(datapath_task,'spkr_16k.mat'),'r') + wavedata = h5py.File(os.path.join(datapath_task,'spkr_16k.mat'),'r') wavearray = np.asarray(wavedata['spkr']) wave_trim = np.zeros([int(ecog.shape[0]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS),wavearray.shape[1]]) else: @@ -427,11 +450,27 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor wave_trim[:wavearray.shape[0]] = wavearray wavearray = wave_trim wave_+=[wavearray] - + if cfg.MODEL.WAVE_BASED: + wave_spec = wave2spec(torch.tensor(wavearray.T),n_fft=cfg.MODEL.N_FFT,noise_db=cfg.MODEL.NOISE_DB,max_db=cfg.MODEL.MAX_DB,power=2 if cfg.MODEL.POWER_SYNTH else 1)[0].detach().cpu().numpy() + # wave_spec_amp = wave2spec(torch.tensor(wavearray.T),n_fft=cfg.MODEL.N_FFT,noise_db=cfg.MODEL.NOISE_DB,max_db=cfg.MODEL.MAX_DB,to_db=False,power=2 if cfg.MODEL.POWER_SYNTH else 1)[0].detach().cpu().numpy() + noisesample = wave_spec[baseline_ind_spec] + for samples in range(start_ind.shape[0]): + if not np.isnan(start_ind[samples]): + if samples ==0: + wave_spec[:start_ind[samples]] = -1 + else: + wave_spec[end_ind[samples-1]:start_ind[samples]] = -1 + if samples ==start_ind.shape[0]-1: + wave_spec[end_ind[samples]:] = -1 + wave_spec_ +=[wave_spec] + else: + noisesample = spkr[...,baseline_ind_spec] + noisesample_ += [noisesample] if self.Prod: - spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_denoise_16k_wide.mat'),'r') + # spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_denoise_16k_wide.mat'),'r') + spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_denoise_16k_lownoisedb.mat'),'r') # spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_denoise_16k.mat'),'r') - # spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_16k.mat'),'r') + # spkr_redata = h5py.File(os.path.join(datapath_task,'TFzoom'+str(self.SpecBands)+'_16k_log10.mat'),'r') spkr_re = np.asarray(spkr_redata['TFlog']) spkr_re = signal.resample(spkr_re,int(1.0*spkr_re.shape[0]/self.ORG_TF_FS*self.DOWN_TF_FS),axis=0) if HD: @@ -442,16 +481,18 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor if xx==0: statics_spkr_re = samples_for_statics_re.mean(axis=0,keepdims=True)+1E-10, np.sqrt(samples_for_statics_re.var(axis=0, keepdims=True))+1E-10 # print(statics_spkr_re) - if subj is not "NY717" or (task_to_use is not 'VisRead' and task_to_use is not 'PicN'): - for samples in range(start_ind_re.shape[0]): - if not np.isnan(start_ind_re[samples]): - if samples ==0: - spkr_re[:start_ind_re[samples]] = 0 - else: - spkr_re[end_ind_re[samples-1]:start_ind_re[samples]] = 0 - if samples ==start_ind_re.shape[0]-1: - spkr_re[end_ind_re[samples]:] = 0 - spkr_re = (np.clip(spkr_re,0.,50.)-25.)/25. 
+ if self.Wipenoise: + if subj is not "NY717" or (task_to_use is not 'VisRead' and task_to_use is not 'PicN'): + for samples in range(start_ind_re.shape[0]): + if not np.isnan(start_ind_re[samples]): + if samples ==0: + spkr_re[:start_ind_re[samples]] = 0 + else: + spkr_re[end_ind_re[samples-1]:start_ind_re[samples]] = 0 + if samples ==start_ind_re.shape[0]-1: + spkr_re[end_ind_re[samples]:] = 0 + spkr_re = (np.clip(spkr_re,0.,70.)-35.)/35. + # spkr_re = (np.clip(spkr_re,0.,50.)-25.)/25. # spkr_re = (spkr_re - statics_spkr_re[0])/statics_spkr_re[1] spkr_re_trim = np.zeros([int(ecog.shape[0]*1.0/self.DOWN_ECOG_FS*self.DOWN_TF_FS),spkr_re.shape[1]]) if spkr_re.shape[0]>spkr_re_trim.shape[0]: @@ -462,19 +503,59 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor spkr_re = spkr_re_trim spkr_re_+=[spkr_re] - wave_redata = h5py.File(os.path.join(datapath_task,'zoom_denoise_16k.mat'),'r') - # wave_redata = h5py.File(os.path.join(datapath_task,'zoom_16k.mat'),'r') + + # wave_redata = h5py.File(os.path.join(datapath_task,'zoom_denoise_16k.mat'),'r') + wave_redata = h5py.File(os.path.join(datapath_task,'zoom_16k.mat'),'r') wave_rearray = np.asarray(wave_redata['zoom']) wave_rearray = wave_rearray.T wave_re_trim = np.zeros([int(ecog.shape[0]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS),wave_rearray.shape[1]]) + wave_redata_denoise = h5py.File(os.path.join(datapath_task,'zoom_denoise_16k.mat'),'r') + wave_rearray_denoise = np.asarray(wave_redata_denoise['zoom']) + wave_rearray_denoise = wave_rearray_denoise.T + wave_re_trim_denoise = np.zeros([int(ecog.shape[0]*1.0/self.DOWN_ECOG_FS*self.DOWN_WAVE_FS),wave_rearray_denoise.shape[1]]) if wave_rearray.shape[0]>wave_re_trim.shape[0]: wave_re_trim = wave_rearray[:wave_re_trim.shape[0]] wave_rearray = wave_re_trim + wave_re_trim_denoise = wave_rearray_denoise[:wave_re_trim_denoise.shape[0]] + wave_rearray_denoise = wave_re_trim_denoise + else: wave_re_trim[:wave_rearray.shape[0]] = wave_rearray wave_rearray = wave_re_trim + wave_re_trim_denoise[:wave_rearray_denoise.shape[0]] = wave_rearray_denoise + wave_rearray_denoise = wave_re_trim_denoise wave_re_+=[wave_rearray] + wave_re_denoise_+=[wave_rearray_denoise] + + if cfg.MODEL.WAVE_BASED: + wave_re_spec = wave2spec(torch.tensor(wave_rearray.T),n_fft=cfg.MODEL.N_FFT,noise_db=cfg.MODEL.NOISE_DB,max_db=cfg.MODEL.MAX_DB,power=2 if cfg.MODEL.POWER_SYNTH else 1)[0].detach().cpu().numpy() + wave_re_spec_amp = wave2spec(torch.tensor(wave_rearray.T),n_fft=cfg.MODEL.N_FFT,noise_db=cfg.MODEL.NOISE_DB,max_db=cfg.MODEL.MAX_DB,to_db=False,power=2 if cfg.MODEL.POWER_SYNTH else 1)[0].detach().cpu().numpy() + wave_re_spec_denoise = wave2spec(torch.tensor(wave_rearray_denoise.T),n_fft=cfg.MODEL.N_FFT,noise_db=cfg.MODEL.NOISE_DB,max_db=cfg.MODEL.MAX_DB,power=2 if cfg.MODEL.POWER_SYNTH else 1)[0].detach().cpu().numpy() + noisesample_re = wave_re_spec[baseline_ind_spec] + if self.Wipenoise: + if subj is not "NY717" or (task_to_use is not 'VisRead' and task_to_use is not 'PicN'): + for samples in range(start_ind_re.shape[0]): + if not np.isnan(start_ind_re[samples]): + if samples ==0: + wave_re_spec[:start_ind_re[samples]] = -1 + wave_re_spec_amp[:start_ind_re[samples]] = 0 + wave_re_spec_denoise[:start_ind_re[samples]] = -1 + else: + wave_re_spec[end_ind_re[samples-1]:start_ind_re[samples]] = -1 + wave_re_spec_amp[end_ind_re[samples-1]:start_ind_re[samples]] = 0 + wave_re_spec_denoise[end_ind_re[samples-1]:start_ind_re[samples]] = -1 + if samples ==start_ind_re.shape[0]-1: + 
wave_re_spec[end_ind_re[samples]:] = -1 + wave_re_spec_amp[end_ind_re[samples]:] = 0 + wave_re_spec_denoise[end_ind_re[samples]:] = -1 + wave_re_spec_ +=[wave_re_spec] + wave_re_spec_amp_ +=[wave_re_spec_amp] + wave_re_spec_denoise_ +=[wave_re_spec_denoise] + else: + noisesample_re = spkr_re[...,baseline_ind_spec] + noisesample_re_ += [noisesample_re] + if HD: label_mat = scipy.io.loadmat(os.path.join(analysispath_task,'Events.mat'))['Events']['word'][0][:event_range] @@ -571,6 +652,8 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor mask_prior_alldataset += [mask] ecog_alldataset+= [ecog_] spkr_alldataset +=[np.concatenate(spkr_,axis=0)] + if self.wavebased: + wave_spec_alldataset +=[np.concatenate(wave_spec_,axis=0)] wave_alldataset +=[np.concatenate(wave_,axis=0)] start_ind_alldataset += [np.concatenate([np.concatenate(start_ind_train_,axis=0),np.concatenate(start_ind_test_,axis=0)])] start_ind_valid_alldataset += [np.concatenate([np.concatenate(start_ind_valid_train_,axis=0),np.concatenate(start_ind_valid_test_,axis=0)])] @@ -581,9 +664,17 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor end_ind_wave_alldataset += [np.concatenate([np.concatenate(end_ind_wave_down_train_,axis=0),np.concatenate(end_ind_wave_down_test_,axis=0)])] end_ind_wave_valid_alldataset += [np.concatenate([np.concatenate(end_ind_wave_down_valid_train_,axis=0),np.concatenate(end_ind_wave_down_valid_test_,axis=0)])] spkr_static_alldataset +=[statics_spkr] + noisesample_alldataset +=[np.concatenate(noisesample_,axis=0)] if self.Prod: spkr_re_alldataset +=[np.concatenate(spkr_re_,axis=0)] + if self.wavebased: + wave_re_spec_alldataset +=[np.concatenate(wave_re_spec_,axis=0)] + wave_re_spec_amp_alldataset +=[np.concatenate(wave_re_spec_amp_,axis=0)] + wave_re_spec_denoise_alldataset +=[np.concatenate(wave_re_spec_denoise_,axis=0)] + noise = 10**(((np.concatenate(noisesample_re_,axis=0).mean(0)+1)/2*(cfg.MODEL.MAX_DB-cfg.MODEL.NOISE_DB)+cfg.MODEL.NOISE_DB)/10) + noisesample_re_alldataset +=[noise] wave_re_alldataset +=[np.concatenate(wave_re_,axis=0)] + wave_re_denoise_alldataset +=[np.concatenate(wave_re_denoise_,axis=0)] start_ind_re_alldataset += [np.concatenate([np.concatenate(start_ind_re_train_,axis=0),np.concatenate(start_ind_re_test_,axis=0)])] start_ind_re_valid_alldataset += [np.concatenate([np.concatenate(start_ind_re_valid_train_,axis=0),np.concatenate(start_ind_re_valid_test_,axis=0)])] start_ind_re_wave_alldataset += [np.concatenate([np.concatenate(start_ind_re_wave_down_train_,axis=0),np.concatenate(start_ind_re_wave_down_test_,axis=0)])] @@ -602,12 +693,17 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor self.meta_data = {'ecog_alldataset':ecog_alldataset, 'spkr_alldataset':spkr_alldataset, 'wave_alldataset':wave_alldataset, + 'wave_spec_alldataset':wave_spec_alldataset, 'start_ind_alldataset':start_ind_alldataset, 'start_ind_wave_alldataset': start_ind_wave_alldataset, 'start_ind_valid_alldataset':start_ind_valid_alldataset, 'start_ind_wave_valid_alldataset': start_ind_wave_valid_alldataset, 'spkr_re_alldataset':spkr_re_alldataset, 'wave_re_alldataset':wave_re_alldataset, + 'wave_re_spec_alldataset':wave_re_spec_alldataset, + 'wave_re_spec_amp_alldataset':wave_re_spec_amp_alldataset, + 'wave_re_denoise_alldataset':wave_re_denoise_alldataset, + 'wave_re_spec_denoise_alldataset':wave_re_spec_denoise_alldataset, 'start_ind_re_alldataset':start_ind_re_alldataset, 'start_ind_re_wave_alldataset': 
start_ind_re_wave_alldataset, 'start_ind_re_valid_alldataset':start_ind_re_valid_alldataset, @@ -633,6 +729,7 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor 'spkr_static_alldataset': spkr_static_alldataset, 'spkr_re_static_alldataset': spkr_re_static_alldataset, 'word_alldataset':word_alldataset, + 'noisesample_re_alldataset':noisesample_re_alldataset, } @@ -661,6 +758,7 @@ def __getitem__(self, idx): start_ind_valid_alldataset = self.meta_data['start_ind_valid_alldataset'] end_ind_valid_alldataset = self.meta_data['end_ind_valid_alldataset'] wave_alldataset = self.meta_data['wave_alldataset'] + wave_spec_alldataset = self.meta_data['wave_spec_alldataset'] spkr_static_alldataset = self.meta_data['spkr_static_alldataset'] if self.Prod: spkr_re_alldataset = self.meta_data['spkr_re_alldataset'] @@ -669,7 +767,11 @@ def __getitem__(self, idx): end_ind_re_valid_alldataset = self.meta_data['end_ind_re_valid_alldataset'] start_ind_re_wave_alldataset = self.meta_data['start_ind_re_wave_alldataset'] end_ind_re_alldataset = self.meta_data['end_ind_re_alldataset'] + wave_spec_re_alldataset = self.meta_data['wave_re_spec_alldataset'] + wave_spec_re_amp_alldataset = self.meta_data['wave_re_spec_amp_alldataset'] wave_re_alldataset = self.meta_data['wave_re_alldataset'] + wave_spec_re_denoise_alldataset = self.meta_data['wave_re_spec_denoise_alldataset'] + wave_re_denoise_alldataset = self.meta_data['wave_re_denoise_alldataset'] spkr_re_static_alldataset = self.meta_data['spkr_re_static_alldataset'] if not self.Prod: n_delay_1 = -16#28 # samples @@ -686,13 +788,20 @@ def __getitem__(self, idx): ecog_batch_all = [] spkr_batch_all = [] wave_batch_all = [] + wave_spec_batch_all = [] ecog_re_batch_all = [] spkr_re_batch_all = [] wave_re_batch_all = [] + wave_spec_re_batch_all = [] + wave_spec_re_amp_batch_all = [] + wave_re_denoise_batch_all = [] + wave_spec_re_denoise_batch_all = [] label_batch_all = [] word_batch_all = [] on_stage_batch_all = [] on_stage_re_batch_all = [] + on_stage_wider_batch_all = [] + on_stage_wider_re_batch_all = [] self.SeqLenSpkr = self.SeqLen*int(self.DOWN_TF_FS*1.0/self.DOWN_ECOG_FS) imagesize = 2**self.current_lod for i in range(num_dataset): @@ -713,6 +822,8 @@ def __getitem__(self, idx): # ecog_batch = np.zeros((self.SeqLen ,ecog_alldataset[i].shape[-1])) spkr_batch = np.zeros(( self.SeqLenSpkr,spkr_alldataset[i].shape[-1])) wave_batch = np.zeros(( (self.SeqLen*int(self.DOWN_WAVE_FS*1.0/self.DOWN_ECOG_FS)),wave_alldataset[i].shape[-1])) + if self.wavebased: + wave_spec_batch = np.zeros(( self.SeqLen, wave_spec_alldataset[i].shape[-1])) if self.Prod: start_indx_re = start_ind_re_valid_alldataset[i][rand_ind] end_indx_re = end_ind_re_valid_alldataset[i][rand_ind] @@ -720,6 +831,8 @@ def __getitem__(self, idx): # ecog_batch_re = np.zeros((self.SeqLen ,ecog_alldataset[i].shape[-1])) spkr_batch_re = np.zeros(( self.SeqLenSpkr,spkr_alldataset[i].shape[-1])) wave_batch_re = np.zeros(( (self.SeqLen*int(self.DOWN_WAVE_FS*1.0/self.DOWN_ECOG_FS)),wave_alldataset[i].shape[-1])) + if self.wavebased: + wave_spec_batch_re = np.zeros(( self.SeqLen, wave_spec_alldataset[i].shape[-1])) if self.mode =='train': # indx = np.maximum(indx+np.random.choice(np.arange(np.minimum(-(self.SeqLenSpkr-(end_indx-indx)),-1),np.maximum(-(self.SeqLenSpkr-(end_indx-indx)),0)),1)[0],0) @@ -741,16 +854,27 @@ def __getitem__(self, idx): # ecog_batch = ecog_alldataset[i][indx+n_delay_1:indx+self.SeqLen+n_delay_1] on_stage_batch = np.zeros([1,self.SeqLenSpkr]) 
on_stage_batch[:,np.maximum(start_indx-indx,0): np.minimum(end_indx-indx,self.SeqLenSpkr-1)] = 1.0 + on_stage_wider_batch = np.zeros([1,self.SeqLenSpkr]) + on_stage_wider_batch[:,np.maximum(start_indx-indx-5,0): np.minimum(end_indx-indx+5,self.SeqLenSpkr-1)] = 1.0 spkr_batch = spkr_alldataset[i][indx:indx+self.SeqLenSpkr] + if self.wavebased: + wave_spec_batch = wave_spec_alldataset[i][indx:indx+self.SeqLen] wave_batch = wave_alldataset[i][(indx*int(self.DOWN_WAVE_FS*1.0/self.DOWN_ECOG_FS)):((indx+self.SeqLen)*int(self.DOWN_WAVE_FS*1.0/self.DOWN_ECOG_FS))] if self.Prod: # indx_re = indx_re.item() on_stage_re_batch = np.zeros([1,self.SeqLenSpkr]) on_stage_re_batch[:,np.maximum(start_indx_re-indx_re,0): np.minimum(end_indx_re-indx_re,self.SeqLenSpkr-1)] = 1.0 + on_stage_wider_re_batch = np.zeros([1,self.SeqLenSpkr]) + on_stage_wider_re_batch[:,np.maximum(start_indx_re-indx_re-5,0): np.minimum(end_indx_re-indx_re+5,self.SeqLenSpkr-1)] = 1.0 ecog_batch_re = ecog_alldataset[i][indx_re+n_delay_1:indx_re+self.SeqLen+n_delay_2] # ecog_batch_re = ecog_alldataset[i][indx_re+n_delay_1:indx_re+self.SeqLen+n_delay_1] spkr_batch_re = spkr_re_alldataset[i][indx_re:indx_re+self.SeqLenSpkr] + if self.wavebased: + wave_spec_batch_re = wave_spec_re_alldataset[i][indx_re:indx_re+self.SeqLen] + wave_spec_batch_amp_re = wave_spec_re_amp_alldataset[i][indx_re:indx_re+self.SeqLen] + wave_spec_batch_re_denoise = wave_spec_re_denoise_alldataset[i][indx_re:indx_re+self.SeqLen] wave_batch_re = wave_re_alldataset[i][(indx_re*int(self.DOWN_WAVE_FS*1.0/self.DOWN_ECOG_FS)):((indx_re+self.SeqLen)*int(self.DOWN_WAVE_FS*1.0/self.DOWN_ECOG_FS))] + wave_batch_re_denoise = wave_re_denoise_alldataset[i][(indx_re*int(self.DOWN_WAVE_FS*1.0/self.DOWN_ECOG_FS)):((indx_re+self.SeqLen)*int(self.DOWN_WAVE_FS*1.0/self.DOWN_ECOG_FS))] mni_batch = self.meta_data['mni_coordinate_alldateset'][i] # ecog_batch = ecog_batch[np.newaxis,:,:] @@ -764,13 +888,22 @@ def __getitem__(self, idx): ecog_batch_all += [ecog_batch] spkr_batch_all += [spkr_batch[np.newaxis,...]] + if self.wavebased: + wave_spec_batch_all += [wave_spec_batch[np.newaxis,...]] wave_batch_all += [wave_batch.swapaxes(-2,-1)] on_stage_batch_all += [on_stage_batch] + on_stage_wider_batch_all += [on_stage_wider_batch] if self.Prod: ecog_re_batch_all += [ecog_batch_re] spkr_re_batch_all += [spkr_batch_re[np.newaxis,...]] + if self.wavebased: + wave_spec_re_batch_all += [wave_spec_batch_re[np.newaxis,...]] + wave_spec_re_amp_batch_all += [wave_spec_batch_amp_re[np.newaxis,...]] + wave_spec_re_denoise_batch_all += [wave_spec_batch_re_denoise[np.newaxis,...]] wave_re_batch_all += [wave_batch_re.swapaxes(-2,-1)] + wave_re_denoise_batch_all += [wave_batch_re_denoise.swapaxes(-2,-1)] on_stage_re_batch_all += [on_stage_re_batch] + on_stage_wider_re_batch_all += [on_stage_wider_re_batch] label_batch_all +=[label] word_batch_all +=[word] mni_coordinate_all +=[mni_batch.swapaxes(-2,-1)] @@ -791,11 +924,20 @@ def __getitem__(self, idx): spkr_batch_all = np.concatenate(spkr_batch_all,axis=0) wave_batch_all = np.concatenate(wave_batch_all,axis=0) + if self.wavebased: + wave_spec_batch_all = np.concatenate(wave_spec_batch_all,axis=0) on_stage_batch_all = np.concatenate(on_stage_batch_all,axis=0) + on_stage_wider_batch_all = np.concatenate(on_stage_wider_batch_all,axis=0) if self.Prod: spkr_re_batch_all = np.concatenate(spkr_re_batch_all,axis=0) + if self.wavebased: + wave_spec_re_batch_all = np.concatenate(wave_spec_re_batch_all,axis=0) + wave_spec_re_amp_batch_all = 
np.concatenate(wave_spec_re_amp_batch_all,axis=0) + wave_spec_re_denoise_batch_all = np.concatenate(wave_spec_re_denoise_batch_all,axis=0) wave_re_batch_all = np.concatenate(wave_re_batch_all,axis=0) + wave_re_denoise_batch_all = np.concatenate(wave_re_denoise_batch_all,axis=0) on_stage_re_batch_all = np.concatenate(on_stage_re_batch_all,axis=0) + on_stage_wider_re_batch_all = np.concatenate(on_stage_wider_re_batch_all,axis=0) label_batch_all = np.concatenate(label_batch_all,axis=0).tolist() word_batch_all = np.array(word_batch_all) baseline_batch_all = np.concatenate(self.meta_data['baseline_alldataset'],axis=0) @@ -805,9 +947,14 @@ def __getitem__(self, idx): return {'ecog_batch_all':ecog_batch_all, 'spkr_batch_all':spkr_batch_all, 'wave_batch_all':wave_batch_all, + 'wave_spec_batch_all':wave_spec_batch_all, 'ecog_re_batch_all':ecog_re_batch_all, 'spkr_re_batch_all':spkr_re_batch_all, 'wave_re_batch_all':wave_re_batch_all, + 'wave_spec_re_batch_all':wave_spec_re_batch_all, + 'wave_spec_re_amp_batch_all':wave_spec_re_amp_batch_all, + 'wave_re_denoise_batch_all':wave_re_denoise_batch_all, + 'wave_spec_re_denoise_batch_all':wave_spec_re_denoise_batch_all, # 'baseline_batch_all':baseline_batch_all, 'label_batch_all':label_batch_all, 'dataset_names':dataset_names, @@ -817,6 +964,8 @@ def __getitem__(self, idx): 'word_batch_all':word_batch_all, 'on_stage_batch_all':on_stage_batch_all, 'on_stage_re_batch_all':on_stage_re_batch_all, + 'on_stage_wider_batch_all':on_stage_wider_batch_all, + 'on_stage_wider_re_batch_all':on_stage_wider_re_batch_all, } diff --git a/configs/ecog_style2.yaml b/configs/ecog_style2.yaml index a7ebdb10..4080f348 100644 --- a/configs/ecog_style2.yaml +++ b/configs/ecog_style2.yaml @@ -21,7 +21,19 @@ DATASET: SUBJECT: ['NY742'] MODEL: #####TAKE OFF CHECKLIST!!!######## - N_FORMANTS: 3 + N_FORMANTS: 6 + N_FORMANTS_NOISE: 1 + N_FORMANTS_ECOG: 2 + WAVE_BASED : True + DO_MEL_GUIDE : False + BGNOISE_FROMDATA: True + N_FFT : 256 + NOISE_DB : -50 #-50 + MAX_DB : 22.5 #probablity 28 is better + NOISE_DB_AMP : -25 + MAX_DB_AMP : 14 + POWER_SYNTH: True + LESS_TEMPORAL_FEATURE: True LATENT_SPACE_SIZE: 128 LAYER_COUNT: 6 @@ -32,7 +44,8 @@ MODEL: TRUNCATIOM_CUTOFF: 5 CHANNELS: 1 UNIQ_WORDS: 50 - MAPPING_FROM_ECOG: "ECoGMappingTransformer" + MAPPING_FROM_ECOG: "ECoGMappingBottleneck" + # MAPPING_FROM_ECOG: "ECoGMappingTransformer" ECOG: False #will be overloaded if FINETUNE SUPLOSS_ON_ECOGF: False # will be overloaded to FIX_GEN if FINETUNE,spec supervise loss only apply to ecog encoder W_SUP: False @@ -50,8 +63,8 @@ MODEL: #T 4 8 16 32 64 128 ATTENTION: [False, False, False, False, False, False] HEADS: 1 - APPLY_PPL: True - APPLY_PPL_D: True + APPLY_PPL: False + APPLY_PPL_D: False PPL_WEIGHT: 100 PPL_GLOBAL_WEIGHT: 0 PPLD_WEIGHT: 1 @@ -65,8 +78,13 @@ MODEL: N_HEADS : 4 NON_LOCAL: True # ATTENTION: [] -# OUTPUT_DIR: training_artifacts/debug -OUTPUT_DIR: training_artifacts/formantsythv2wide_NY742_constraintonFB_Bconstrainrefined_absfreq_3formants +OUTPUT_DIR: training_artifacts/debug_ +# OUTPUT_DIR: training_artifacts/loudnesscomp_han5_ampamploss +# OUTPUT_DIR: training_artifacts/loudnesscomp_han5_ampsynth_masknormed +# OUTPUT_DIR: training_artifacts/debug_f1f2linearmel +# OUTPUT_DIR: training_artifacts/ecog_finetune_3ecogformants_han5_specsup_guidance_hamonicformantsemph +# OUTPUT_DIR: training_artifacts/ecog_finetune_3ecogformants_han5_specsup_guidance_hamonicnoiseformantsemphmore +# OUTPUT_DIR: 
training_artifacts/formantsythv2_wavebased_NY742_constraintonFB_Bconstrainrefined_absfreq_4formants_1noiseformants_bgnoise_noisemapping_freqconv_duomask
# OUTPUT_DIR: training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_noprogressive_HBw_ppl_ppld_localreg_ecogf_w_spec_sup
# OUTPUT_DIR: training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_ppl_ppld
# OUTPUT_DIR: training_artifacts/ecog_residual_cycle_attention3264wStyleIN_specchan64_more_attentfeatures_heads4
@@ -75,7 +93,7 @@ FINETUNE:
   FINETUNE: False
   FIX_GEN: True
   ENCODER_GUIDE: True
-  SPECSUP: False
+  SPECSUP: True
 #####################################
 TRAIN:
diff --git a/dataloader_ecog.py b/dataloader_ecog.py
index 21bdbcf4..2ffac8fc 100644
--- a/dataloader_ecog.py
+++ b/dataloader_ecog.py
@@ -35,6 +35,7 @@ class TFRecordsDataset:
     def __init__(self, cfg, logger, rank=0, world_size=1, buffer_size_mb=200, channels=3, seed=None, train=True, needs_labels=False,param=None):
         self.param = param
         self.dataset = ECoGDataset(cfg.DATASET.SUBJECT,mode='train' if train else 'test',world_size=world_size)
+        self.noise_dist = self.dataset.meta_data['noisesample_re_alldataset'][0]
         self.cfg = cfg
         self.logger = logger
         self.rank = rank
diff --git a/defaults.py b/defaults.py
index 801f8899..d0127df8 100644
--- a/defaults.py
+++ b/defaults.py
@@ -44,7 +44,19 @@
 _C.MODEL = CN()
-_C.MODEL.N_FORMANTS = 2
+_C.MODEL.N_FORMANTS = 4
+_C.MODEL.N_FORMANTS_NOISE = 2
+_C.MODEL.N_FORMANTS_ECOG = 3
+_C.MODEL.WAVE_BASED = False
+_C.MODEL.DO_MEL_GUIDE = True
+_C.MODEL.BGNOISE_FROMDATA = False
+_C.MODEL.N_FFT = 256
+_C.MODEL.NOISE_DB = -50
+_C.MODEL.MAX_DB = 22.5
+_C.MODEL.NOISE_DB_AMP = -25
+_C.MODEL.MAX_DB_AMP = 14
+_C.MODEL.POWER_SYNTH = True
+
 _C.MODEL.LAYER_COUNT = 6
 _C.MODEL.START_CHANNEL_COUNT = 64
 _C.MODEL.MAX_CHANNEL_COUNT = 512
@@ -85,6 +97,7 @@
 _C.MODEL.COMMON_Z = True
 _C.MODEL.GAN = True
+
 _C.MODEL.TRANSFORMER = CN()
 _C.MODEL.TRANSFORMER.HIDDEN_DIM = 256
 _C.MODEL.TRANSFORMER.DIM_FEEDFORWARD = 256
diff --git a/formant_systh.py b/formant_systh.py
index f981e83a..80c0d3cd 100755
--- a/formant_systh.py
+++ b/formant_systh.py
@@ -1,3 +1,4 @@
+import pdb
 import torch
 from torch import nn
 # from torch.nn import functional as F
@@ -12,10 +13,94 @@
 from torch.nn.parameter import Parameter
 from custom_adam import LREQAdam
 from ECoGDataSet import ECoGDataset
-from net_formant import mel_scale
+from net_formant import mel_scale, hz2ind
 import matplotlib.pyplot as plt
 # from matplotlib.pyplot import ion; ion()
+import scipy.signal
+import scipy.io.wavfile
+import math
+from net_formant import amplitude
+import torchaudio
+def spsi(msgram, fftsize, hop_length) :
+    """
+    Takes a 2D spectrogram ([freqs,frames]), the fft length (= window length) and the hop size (both in units of samples).
+    Returns an audio signal.
+    """
+    msgram = np.sqrt(msgram)
+    numBins, numFrames = msgram.shape
+    y_out=np.zeros(numFrames*hop_length+fftsize-hop_length)
+
+    m_phase=np.zeros(numBins);
+    m_win=scipy.signal.hanning(fftsize, sym=True)  # assumption here that hann was used to create the frames of the spectrogram
+
+    #processes one frame of audio at a time
+    for i in range(numFrames) :
+        m_mag=msgram[:, i]
+        for j in range(1,numBins-1) :
+            if(m_mag[j]>m_mag[j-1] and m_mag[j]>m_mag[j+1]) : #if j is a peak
+                alpha=m_mag[j-1];
+                beta=m_mag[j];
+                gamma=m_mag[j+1];
+                denom=alpha-2*beta+gamma;
+
+                if(denom!=0) :
+                    p=0.5*(alpha-gamma)/denom;
+                else :
+                    p=0;
+
+                #phaseRate=2*math.pi*(j-1+p)/fftsize;    #adjusted phase rate
+                phaseRate=2*math.pi*(j+p)/fftsize;    #adjusted phase rate
+                m_phase[j]= m_phase[j] + hop_length*phaseRate; #phase accumulator for this peak bin
+                peakPhase=m_phase[j];
+
+                # If actual peak is to the right of the bin freq
+                if (p>0) :
+                    # First bin to right has pi shift
+                    bin=j+1;
+                    m_phase[bin]=peakPhase+math.pi;
+
+                    # Bins to left have shift of pi
+                    bin=j-1;
+                    while((bin>1) and (m_mag[bin]<m_mag[bin+1])) : # until you reach the trough
+                        m_phase[bin]=peakPhase+math.pi;
+                        bin=bin-1;
+
+                    # Bins to the right have shift of pi
+                    bin=j+2;
+                    while((bin<numBins) and (m_mag[bin]<m_mag[bin-1])) :
+                        m_phase[bin]=peakPhase+math.pi;
+                        bin=bin+1;
+
+                # If actual peak is to the left of the bin freq
+                if(p<0) :
+                    # First bin to left has pi shift
+                    bin=j-1;
+                    m_phase[bin]=peakPhase+math.pi;
+
+                    # and bins to the right have shift of pi
+                    bin=j+2;
+                    while((bin<numBins) and (m_mag[bin]<m_mag[bin-1])) :
+                        m_phase[bin]=peakPhase+math.pi;
+                        bin=bin+1;
+
+                    # and bins to the left, down to the trough
+                    bin=j-2;
+                    while((bin>1) and (m_mag[bin]<m_mag[bin+1])) :
+                        m_phase[bin]=peakPhase+math.pi;
+                        bin=bin-1;
+
+        #construct the frame, taking into account window effect
+        m_recon=np.concatenate([m_mag,np.flip(m_mag[1:-1],0)])*np.exp(1j*np.concatenate([m_phase,-np.flip(m_phase[1:-1],0)]))
+        m_recon=np.real(np.fft.ifft(m_recon))*m_win
+
+        #overlap add
+        y_out[i*hop_length:i*hop_length+fftsize]+=m_recon
+
+    return y_out
[...span garbled in the source: the remainder of the formant_systh.py diff and the beginning of the model_formant.py diff are unrecoverable here...]
+def _expand_binary_labels(labels, label_weights, label_channels):
+    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
+    inds = torch.nonzero(labels >= 1).squeeze()
+    if inds.numel() > 0:
+        bin_labels[inds, labels[inds] - 1] = 1
+    bin_label_weights = label_weights.view(-1, 1).expand(
+        label_weights.size(0), label_channels)
+    return bin_labels, bin_label_weights
+
+class GHMC(nn.Module):
+    def __init__(
+            self,
+            bins=30,
+            momentum=0,
+            use_sigmoid=True,
+            loss_weight=1.0):
+        super(GHMC, self).__init__()
+        self.bins = bins
+        self.momentum = momentum
+        self.edges = [float(x) / bins for x in range(bins+1)]
+        self.edges[-1] += 1e-6
+        if momentum > 0:
+            self.acc_sum = [0.0 for _ in range(bins)]
+        self.use_sigmoid = use_sigmoid
+        self.loss_weight = loss_weight
+
+    def forward(self, pred, target, label_weight, *args, **kwargs):
+        """ Args:
+        pred [batch_num, class_num]:
+            The direct prediction of classification fc layer.
+        target [batch_num, class_num]:
+            Binary class target for each sample.
+        label_weight [batch_num, class_num]:
+            the value is 1 if the sample is valid and 0 if ignored.
+ """ + if not self.use_sigmoid: + raise NotImplementedError + # the target should be binary class label + if pred.dim() != target.dim(): + target, label_weight = _expand_binary_labels(target, label_weight, pred.size(-1)) + target, label_weight = target.float(), label_weight.float() + edges = self.edges + mmt = self.momentum + weights = torch.zeros_like(pred) + + # gradient length + g = torch.abs(pred.sigmoid().detach() - target) + + valid = label_weight > 0 + tot = max(valid.float().sum().item(), 1.0) + n = 0 # n valid bins + for i in range(self.bins): + inds = (g >= edges[i]) & (g < edges[i+1]) & valid + num_in_bin = inds.sum().item() + if num_in_bin > 0: + if mmt > 0: + self.acc_sum[i] = mmt * self.acc_sum[i] \ + + (1 - mmt) * num_in_bin + weights[inds] = tot / self.acc_sum[i] + else: + weights[inds] = tot / num_in_bin + n += 1 + if n > 0: + weights = weights / n + + loss = F.binary_cross_entropy_with_logits( + pred, target, weights, reduction='sum') / tot + return loss * self.loss_weight + +class GHMR(nn.Module): + def __init__( + self, + mu=0.02, + bins=30, + momentum=0, + loss_weight=1.0): + super(GHMR, self).__init__() + self.mu = mu + self.bins = bins + self.edges = [float(x) / bins for x in range(bins+1)] + self.edges[-1] = 1e3 + self.momentum = momentum + if momentum > 0: + self.acc_sum = [0.0 for _ in range(bins)] + self.loss_weight = loss_weight + + def forward(self, pred, target, label_weight, avg_factor=None,reweight=1): + """ Args: + pred [batch_num, 4 (* class_num)]: + The prediction of box regression layer. Channel number can be 4 or + (4 * class_num) depending on whether it is class-agnostic. + target [batch_num, 4 (* class_num)]: + The target regression values with the same size of pred. + label_weight [batch_num, 4 (* class_num)]: + The weight of each sample, 0 if ignored. 
+ """ + mu = self.mu + edges = self.edges + mmt = self.momentum + + # ASL1 loss + diff = pred - target + loss = torch.sqrt(diff * diff + mu * mu) - mu + + # gradient length + g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach() + weights = torch.zeros_like(g) + + valid = label_weight > 0 + tot = max(label_weight.float().sum().item(), 1.0) + n = 0 # n: valid bins + for i in range(self.bins): + inds = (g >= edges[i]) & (g < edges[i+1]) & valid + num_in_bin = inds.sum().item() + if num_in_bin > 0: + n += 1 + if mmt > 0: + self.acc_sum[i] = mmt * self.acc_sum[i] \ + + (1 - mmt) * num_in_bin + weights[inds] = tot / self.acc_sum[i] + else: + weights[inds] = tot / num_in_bin + if n > 0: + weights /= n + + loss = loss * weights + loss = (loss*reweight).sum() / tot + return loss * self.loss_weight + +class LAE(nn.Module): + def __init__(self,mu=0.02, + bins=30, + momentum=0.75, + loss_weight=1.0,db=True,amp=True,noise_db=-50,max_db=22.5): + super(LAE, self).__init__() + self.db=db + self.amp = amp + self.noise_db = noise_db + self.max_db = max_db + if db: + self.ghm_db = GHMR(mu,bins,momentum,loss_weight) + if amp: + self.ghm_amp = GHMR(mu,bins,momentum,loss_weight) + + def forward(self, rec, spec, tracker=None,reweight=1): + if self.db: + loss_db = self.ghm_db(rec,spec,torch.ones(spec.shape),reweight=reweight) + if tracker is not None: + tracker.update(dict(Lae_db=loss_db)) + else: + loss_db = torch.tensor(0.0) + if self.amp: + spec_amp = amplitude(spec,noise_db=self.noise_db,max_db=self.max_db) + rec_amp = amplitude(rec,noise_db=self.noise_db,max_db=self.max_db) + loss_a = self.ghm_amp(rec_amp,spec_amp,torch.ones(spec_amp.shape),reweight=reweight) + if tracker is not None: + tracker.update(dict(Lae_a=loss_a)) + else: + loss_a = torch.tensor(0.0) + return loss_db+loss_a + + class Model(nn.Module): def __init__(self, generator="", encoder="", ecog_encoder_name="", - spec_chans = 128, n_formants=2, with_ecog = False, - hidden_dim=256,dim_feedforward=256,encoder_only=True,attentional_mask=False,n_heads=1,non_local=False): + spec_chans = 128, n_formants=2, n_formants_noise=2, n_formants_ecog=2, n_fft=256, noise_db=-50, max_db=22.5, wavebased = False, + with_ecog = False, ghm_loss=True,power_synth=True, + hidden_dim=256,dim_feedforward=256,encoder_only=True,attentional_mask=False,n_heads=1,non_local=False,do_mel_guide = True,noise_from_data=False,specsup=True): super(Model, self).__init__() self.spec_chans = spec_chans self.with_ecog = with_ecog self.ecog_encoder_name = ecog_encoder_name + self.n_formants_ecog = n_formants_ecog + self.wavebased = wavebased + self.n_fft = n_fft + self.n_mels = spec_chans + self.do_mel_guide = do_mel_guide + self.noise_db = noise_db + self.spec_sup = specsup + self.max_db = max_db + self.n_formants_noise = n_formants_noise + self.power_synth =power_synth self.decoder = GENERATORS[generator]( n_mels = spec_chans, - k = 30, + k = 40, + wavebased = wavebased, + n_fft = n_fft, + noise_db = noise_db, + max_db = max_db, + noise_from_data = noise_from_data, + return_wave = False, + power_synth=power_synth, ) + if do_mel_guide: + self.decoder_mel = GENERATORS[generator]( + n_mels = spec_chans, + k = 40, + wavebased = False, + n_fft = n_fft, + noise_db = noise_db, + max_db = max_db, + add_bgnoise = False, + ) self.encoder = ENCODERS[encoder]( n_mels = spec_chans, n_formants = n_formants, + n_formants_noise = n_formants_noise, + wavebased = wavebased, + hop_length = 128, + n_fft = n_fft, + noise_db = noise_db, + max_db = max_db, + power_synth = power_synth, ) if 
with_ecog: if 'Transformer' in ecog_encoder_name: self.ecog_encoder = ECOG_ENCODER[ecog_encoder_name]( - n_mels = spec_chans,n_formants = n_formants, + n_mels = spec_chans,n_formants = n_formants_ecog, hidden_dim=hidden_dim,dim_feedforward=dim_feedforward,n_heads=n_heads, encoder_only=encoder_only,attentional_mask=attentional_mask,non_local=non_local, ) else: self.ecog_encoder = ECOG_ENCODER[ecog_encoder_name]( - n_mels = spec_chans,n_formants = n_formants, + n_mels = spec_chans,n_formants = n_formants_ecog, ) + self.ghm_loss = ghm_loss + self.lae1 = LAE(noise_db=self.noise_db,max_db=self.max_db) + self.lae2 = LAE(amp=False) + self.lae3 = LAE(amp=False) + self.lae4 = LAE(amp=False) + self.lae5 = LAE(amp=False) + self.lae6 = LAE(amp=False) + self.lae7 = LAE(amp=False) + self.lae8 = LAE(amp=False) + + def noise_dist_init(self,dist): + with torch.no_grad(): + self.decoder.noise_dist = dist.reshape([1,1,1,dist.shape[0]]) def generate_fromecog(self, ecog = None, mask_prior = None, mni=None,return_components=False): components = self.ecog_encoder(ecog, mask_prior,mni) @@ -38,72 +258,576 @@ def generate_fromecog(self, ecog = None, mask_prior = None, mni=None,return_comp else: return rec - def generate_fromspec(self, spec, return_components=False): - components = self.encoder(spec) + def generate_fromspec(self, spec, return_components=False,x_denoise=None,duomask=False): + components = self.encoder(spec,x_denoise=x_denoise,duomask=duomask) rec = self.decoder.forward(components) if return_components: return rec, components else: return rec - def encode(self, spec): - components = self.encoder(spec) + def encode(self, spec,x_denoise=None,duomask=False,noise_level = None,x_amp=None): + components = self.encoder(spec,x_denoise=x_denoise,duomask=duomask,noise_level=noise_level,x_amp=x_amp) return components - - def forward(self, spec, ecog, mask_prior, on_stage, ae, tracker, encoder_guide, mni=None): + + def lae(self,spec,rec,db=True,amp=True,tracker=None,GHM=False): + if amp: + spec_amp = amplitude(spec,noise_db=self.noise_db,max_db=self.max_db) + rec_amp = amplitude(rec,noise_db=self.noise_db,max_db=self.max_db) + if self.power_synth: + spec_amp_ = spec_amp**0.5 + rec_amp_ = rec_amp**0.5 + else: + spec_amp_ = spec_amp + rec_amp_ = rec_amp + # spec_amp_ = spec_amp + # rec_amp_ = rec_amp + if GHM: + Lae_a = self.ghm_loss(rec_amp_,spec_amp_,torch.ones(spec_amp_))#*150 + Lae_a_l2 = torch.tensor([0.]) + else: + Lae_a = (spec_amp_-rec_amp_).abs().mean()#*150 + Lae_a_l2 = torch.sqrt((spec_amp_-rec_amp_)**2+1E-6).mean()#*150 + else: + Lae_a = torch.tensor(0.) + Lae_a_l2 = torch.tensor(0.) + if tracker is not None: + tracker.update(dict(Lae_a=Lae_a,Lae_a_l2=Lae_a_l2)) + if db: + if GHM: + Lae_db = self.ghm_loss(rec,spec,torch.ones(spec))#*150 + Lae_db_l2 = torch.tensor([0.]) + else: + Lae_db = (spec-rec).abs().mean() + Lae_db_l2 = torch.sqrt((spec-rec)**2+1E-6).mean() + else: + Lae_db = torch.tensor(0.) + Lae_db_l2 = torch.tensor(0.) + if tracker is not None: + tracker.update(dict(Lae_db=Lae_db,Lae_db_l2=Lae_db_l2)) + # return (Lae_a + Lae_a_l2)/2. + (Lae_db+Lae_db_l2)/2. + return Lae_a + Lae_db/2. 
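# Illustrative usage sketch (editor's addition, not part of the patch): GHMR above
# is a gradient-harmonizing regression loss. With d = pred - target it computes the
# "gradient length" g = |d| / sqrt(d**2 + mu**2), histograms the valid entries into
# `bins` intervals of g, and weights each entry by tot / num_in_bin (or by a
# momentum-smoothed running count), so dense, easy examples are down-weighted.
# Assuming the GHMR and LAE classes defined above (tensor shapes here are arbitrary):
#
#     import torch
#
#     ghmr = GHMR(mu=0.02, bins=30, momentum=0.75)
#     pred = torch.randn(4, 1, 16, 128)                    # e.g. reconstructed spectrograms
#     target = torch.randn(4, 1, 16, 128)                  # e.g. reference spectrograms
#     loss_r = ghmr(pred, target, torch.ones_like(target)) # all entries marked valid
#
#     # LAE applies GHMR in the normalized-dB domain and, optionally, the amplitude domain:
#     lae = LAE(noise_db=-50, max_db=22.5)
#     loss = lae(pred, target)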
+ + def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker, encoder_guide, x_mel=None,x_denoise=None, pitch_aug=False, duomask=False, mni=None,debug=False,x_amp=None,hamonic_bias=False): if ae: self.encoder.requires_grad_(True) # rec = self.generate_fromspec(spec) - components = self.encoder(spec) - + components = self.encoder(spec,x_denoise = x_denoise,duomask=duomask,noise_level = F.softplus(self.decoder.bgnoise_amp)*self.decoder.noise_dist.mean(),x_amp=x_amp) rec = self.decoder.forward(components) - Lae = torch.mean((rec - spec).abs()) - if components['freq_formants_hamon'].shape[1] > 1: - for formant in range(components['freq_formants_hamon'].shape[1]-1,0,-1): - components_copy = components - components_copy['freq_formants_hamon'] = components['freq_formants_hamon'][:,:formant] - components_copy['freq_formants_hamon_hz'] = components['freq_formants_hamon_hz'][:,:formant] - components_copy['bandwidth_formants_hamon'] = components['bandwidth_formants_hamon'][:,:formant] - components_copy['bandwidth_formants_hamon_hz'] = components['bandwidth_formants_hamon_hz'][:,:formant] - components_copy['amplitude_formants_hamon'] = components['amplitude_formants_hamon'][:,:formant] - rec = self.decoder.forward(components_copy) - Lae += torch.mean((rec - spec).abs()) - tracker.update(dict(Lae=Lae)) + freq_cord = torch.arange(self.spec_chans).reshape([1,1,1,self.spec_chans])/(1.0*self.spec_chans) + freq_cord2 = torch.arange(self.spec_chans+1).reshape([1,1,1,self.spec_chans+1])/(1.0*self.spec_chans) + freq_linear_reweighting = 1 if self.wavebased else (inverse_mel_scale(freq_cord2[...,1:])-inverse_mel_scale(freq_cord2[...,:-1]))/440*7 + # freq_linear_reweighting = 1 + # Lae = 4*self.lae((rec*freq_linear_reweighting)[...,128:],(spec*freq_linear_reweighting)[...,128:],tracker=tracker)#torch.mean((rec - spec).abs()*freq_linear_reweighting) + Lae = 4*self.lae(rec*freq_linear_reweighting,spec*freq_linear_reweighting,tracker=tracker)#torch.mean((rec - spec).abs()*freq_linear_reweighting) + if self.wavebased: + spec_amp = amplitude(spec,self.noise_db,self.max_db).transpose(-2,-1) + rec_amp = amplitude(rec,self.noise_db,self.max_db).transpose(-2,-1) + freq_cord2 = torch.arange(128+1).reshape([1,1,1,128+1])/(1.0*128) + freq_linear_reweighting2 = (inverse_mel_scale(freq_cord2[...,1:])-inverse_mel_scale(freq_cord2[...,:-1]))/440*7 + spec_mel = to_db(torchaudio.transforms.MelScale(f_max=8000,n_stft=self.n_fft)(spec_amp).transpose(-2,-1),self.noise_db,self.max_db) + rec_mel = to_db(torchaudio.transforms.MelScale(f_max=8000,n_stft=self.n_fft)(rec_amp).transpose(-2,-1),self.noise_db,self.max_db) + Lae += 4*self.lae(rec_mel*freq_linear_reweighting2,spec_mel*freq_linear_reweighting2,tracker=tracker) + + # hann_win = torch.hann_window(21,periodic=False).reshape([1,1,21,1]) + # spec_broud = to_db(F.conv2d(spec_amp,hann_win,padding=[10,0]).transpose(-2,-1),self.noise_db,self.max_db) + # rec_broud = to_db(F.conv2d(rec_amp,hann_win,padding=[10,0]).transpose(-2,-1),self.noise_db,self.max_db) + # Lae += 4*self.lae(rec_broud,spec_broud,tracker=tracker) + + if self.do_mel_guide: + rec_mel = self.decoder_mel.forward(components) + freq_linear_reweighting_mel = (inverse_mel_scale(freq_cord2[...,1:])-inverse_mel_scale(freq_cord2[...,:-1]))/440*7 + Lae_mel = 4*self.lae(rec_mel*freq_linear_reweighting_mel,x_mel*freq_linear_reweighting_mel,tracker=None) + tracker.update(dict(Lae_mel=Lae_mel)) + Lae+=Lae_mel + + # rec_denoise = 
self.decoder.forward(components,enable_hamon_excitation=False,enable_noise_excitation=False) + # Lae_noise = (50. if self.wavebased else 50.)*self.lae(rec_noise*(1-on_stage_wider.unsqueeze(-1)),spec*(1-on_stage_wider.unsqueeze(-1))) + # tracker.update(dict(Lae_noise=Lae_noise)) + # Lae += Lae_noise + + if self.wavebased: + if self.power_synth: + Lloudness = 10**6*(components['loudness']*(1-on_stage_wider)).mean() + else: + # Lloudness = 10**3*(components['loudness']*(1-on_stage_wider)).mean() + Lloudness = 10**6*(components['loudness']*(1-on_stage_wider)).mean() + # Lloudness = 10.**6*((components['loudness'])**2*(1-on_stage_wider)).mean() + tracker.update(dict(Lloudness=Lloudness)) + Lae += Lloudness + + if self.wavebased and x_denoise is not None: + thres = int(hz2ind(4000,self.n_fft)) if self.wavebased else mel_scale(self.spec_chans,4000,pt=False).astype(np.int32) + explosive=(torch.mean((spec*freq_linear_reweighting)[...,thres:],dim=-1)>torch.mean((spec*freq_linear_reweighting)[...,:thres],dim=-1)).to(torch.float32).unsqueeze(-1) + rec_denoise = self.decoder.forward(components,enable_hamon_excitation=True,enable_noise_excitation=True,enable_bgnoise=False) + Lae_denoise = 20*self.lae(rec_denoise*freq_linear_reweighting*explosive,x_denoise*freq_linear_reweighting*explosive) + tracker.update(dict(Lae_denoise=Lae_denoise)) + Lae += Lae_denoise + # import pdb;pdb.set_trace() + # if components['freq_formants_hamon'].shape[1] > 2: + freq_limit = self.encoder.formant_freq_limits_abs.squeeze() from net_formant import mel_scale - thres = mel_scale(self.spec_chans,4000,pt=False).astype(np.int32) - explosive=torch.sign(torch.mean(spec[...,thres:],dim=-1)-torch.mean(spec[...,:thres],dim=-1))*0.5+0.5 - Lexp = torch.mean((components['amplitudes'][:,0:1]-components['amplitudes'][:,1:2])*explosive) - return Lae + Lexp + freq_limit = hz2ind(freq_limit,self.n_fft).long() if self.wavebased else mel_scale(self.spec_chans,freq_limit).long() + if debug: + import pdb;pdb.set_trace() + + + # if True: + if not self.wavebased: + n_formant_noise = components['freq_formants_noise'].shape[1]-components['freq_formants_hamon'].shape[1] + for formant in range(components['freq_formants_hamon'].shape[1]-1,1,-1): + components_copy = {i:j.clone() for i,j in components.items()} + components_copy['freq_formants_hamon'] = components_copy['freq_formants_hamon'][:,:formant] + components_copy['freq_formants_hamon_hz'] = components_copy['freq_formants_hamon_hz'][:,:formant] + components_copy['bandwidth_formants_hamon'] = components_copy['bandwidth_formants_hamon'][:,:formant] + components_copy['bandwidth_formants_hamon_hz'] = components_copy['bandwidth_formants_hamon_hz'][:,:formant] + components_copy['amplitude_formants_hamon'] = components_copy['amplitude_formants_hamon'][:,:formant] + + if duomask: + # components_copy['freq_formants_noise'] = components_copy['freq_formants_noise'][:,:formant] + # components_copy['freq_formants_noise_hz'] = components_copy['freq_formants_noise_hz'][:,:formant] + # components_copy['bandwidth_formants_noise'] = components_copy['bandwidth_formants_noise'][:,:formant] + # components_copy['bandwidth_formants_noise_hz'] = components_copy['bandwidth_formants_noise_hz'][:,:formant] + # components_copy['amplitude_formants_noise'] = components_copy['amplitude_formants_noise'][:,:formant] + components_copy['freq_formants_noise'] = torch.cat([components_copy['freq_formants_noise'][:,:formant],components_copy['freq_formants_noise'][:,-n_formant_noise:]],dim=1) + components_copy['freq_formants_noise_hz'] = 
torch.cat([components_copy['freq_formants_noise_hz'][:,:formant],components_copy['freq_formants_noise_hz'][:,-n_formant_noise:]],dim=1)
+                    components_copy['bandwidth_formants_noise'] = torch.cat([components_copy['bandwidth_formants_noise'][:,:formant],components_copy['bandwidth_formants_noise'][:,-n_formant_noise:]],dim=1)
+                    components_copy['bandwidth_formants_noise_hz'] = torch.cat([components_copy['bandwidth_formants_noise_hz'][:,:formant],components_copy['bandwidth_formants_noise_hz'][:,-n_formant_noise:]],dim=1)
+                    components_copy['amplitude_formants_noise'] = torch.cat([components_copy['amplitude_formants_noise'][:,:formant],components_copy['amplitude_formants_noise'][:,-n_formant_noise:]],dim=1)
+                rec = self.decoder.forward(components_copy,enable_noise_excitation=True)
+                Lae += 1*self.lae((rec*freq_linear_reweighting),(spec*freq_linear_reweighting),tracker=tracker)
+        else:
+            Lamp = 10*torch.mean(F.relu(-components['amplitude_formants_hamon'][:,0:3]+components['amplitude_formants_hamon'][:,1:4])*(components['amplitudes'][:,0:1]>components['amplitudes'][:,1:2]).float())
+            tracker.update(dict(Lamp=Lamp))
+            Lae+=Lamp
+        tracker.update(dict(Lae=Lae))
+        if debug:
+            import pdb;pdb.set_trace()
+
+        thres = int(hz2ind(4000,self.n_fft)) if self.wavebased else mel_scale(self.spec_chans,4000,pt=False).astype(np.int32)
+        explosive = torch.sign(torch.mean((spec*freq_linear_reweighting)[...,thres:],dim=-1)-torch.mean((spec*freq_linear_reweighting)[...,:thres],dim=-1))*0.5+0.5
+        Lexp = torch.mean((components['amplitudes'][:,0:1]-components['amplitudes'][:,1:2])*explosive)*100
+        tracker.update(dict(Lexp=Lexp))
+        Lae += Lexp
+
+        if hamonic_bias:
+            hamonic_loss = 1000*torch.mean((1-components['amplitudes'][:,0])*on_stage)
+            Lae += hamonic_loss
+
+        if pitch_aug:
+            pitch_shift = (2**(-1.5+3*torch.rand([components['f0_hz'].shape[0]]).to(torch.float32)).reshape([components['f0_hz'].shape[0],1,1])) # uniform shift in +/- 1.5 octaves
+            components['f0_hz'] = (components['f0_hz']*pitch_shift).clamp(min=88,max=300)
+            rec_shift = self.decoder.forward(components)
+            components_enc = self.encoder(rec_shift,duomask=duomask,x_denoise=x_denoise,noise_level = F.softplus(self.decoder.bgnoise_amp)*self.decoder.noise_dist.mean(),x_amp=x_amp)
+            Lf0 = torch.mean((components_enc['f0_hz']/200-components['f0_hz']/200)**2)
+            rec_cycle = self.decoder.forward(components_enc)
+            Lae +=
self.lae(rec_shift*freq_linear_reweighting,rec_cycle*freq_linear_reweighting,tracker=tracker)#torch.mean((rec_shift-rec_cycle).abs()*freq_linear_reweighting) + # import pdb;pdb.set_trace() + else: + # Lf0 = torch.mean((F.relu(160 - components['f0_hz']) + F.relu(components['f0_hz']-420))/10) + Lf0 = torch.tensor([0.]) + # Lf0 = torch.tensor([0.]) + tracker.update(dict(Lf0=Lf0)) + + spec = spec.squeeze(dim=1).permute(0,2,1) #B * f * T + loudness = torch.mean(spec*0.5+0.5,dim=1,keepdim=True) + # import pdb;pdb.set_trace() + if self.wavebased: + # hamonic_components_diff = compdiffd2(components['f0_hz']*2) + compdiff(components['amplitudes'])*750.# + compdiff(components['amplitude_formants_hamon'])*750. + ((components['loudness']*components['amplitudes'][:,1:]/0.0001)**0.125).mean()*50 + compdiff(components['amplitude_formants_noise'])*750. + if self.power_synth: + hamonic_components_diff = compdiffd2(components['freq_formants_hamon_hz']*1.5) + compdiffd2(components['f0_hz']*2) + compdiff(components['bandwidth_formants_noise_hz'][:,components['freq_formants_hamon_hz'].shape[1]:]/5) + compdiff(components['freq_formants_noise_hz'][:,components['freq_formants_hamon_hz'].shape[1]:]/5)+ compdiff(components['amplitudes'])*750. + compdiffd2(components['amplitude_formants_hamon'])*1500.+ compdiffd2(components['amplitude_formants_noise'])*1500.# + ((components['loudness']*components['amplitudes'][:,1:]/0.0001)**0.125).mean()*50 + else: + # hamonic_components_diff = compdiffd2(components['freq_formants_hamon_hz']*1.5) + compdiffd2(components['f0_hz']*2) + compdiff(components['amplitudes'])*750. + compdiffd2(components['amplitude_formants_hamon'])*1500.+ compdiffd2(components['amplitude_formants_noise'])*1500.# + ((components['loudness']*components['amplitudes'][:,1:]/0.0001)**0.125).mean()*50 + hamonic_components_diff = compdiffd2(components['freq_formants_hamon_hz']*1.5) + compdiffd2(components['f0_hz']*2) + compdiff(components['bandwidth_formants_noise_hz'][:,components['freq_formants_hamon_hz'].shape[1]:]/5) + compdiff(components['freq_formants_noise_hz'][:,components['freq_formants_hamon_hz'].shape[1]:]/5)+ compdiff(components['amplitudes'])*750. + compdiffd2(components['amplitude_formants_hamon'])*1500.+ compdiffd2(components['amplitude_formants_noise'])*1500.# + ((components['loudness']*components['amplitudes'][:,1:]/0.0001)**0.125).mean()*50 + + # hamonic_components_diff = compdiffd2(components['freq_formants_hamon_hz']*1.5) + compdiffd2(components['f0_hz']*2) + compdiff(components['bandwidth_formants_noise_hz'][:,components['freq_formants_hamon_hz'].shape[1]:]/5) + compdiff(components['freq_formants_noise_hz'][:,components['freq_formants_hamon_hz'].shape[1]:]/5)+ compdiff(components['amplitudes'])*750.# + compdiff(components['amplitude_formants_hamon'])*750. + ((components['loudness']*components['amplitudes'][:,1:]/0.0001)**0.125).mean()*50 + compdiff(components['amplitude_formants_noise'])*750. + # hamonic_components_diff = compdiffd2(components['freq_formants_hamon_hz']*1.5) + compdiffd2(components['f0_hz']*8) + compdiff(components['bandwidth_formants_noise_hz'][:,components['freq_formants_hamon_hz'].shape[1]:]/5) + compdiff(components['freq_formants_noise_hz'][:,components['freq_formants_hamon_hz'].shape[1]:]/5)+ compdiff(components['amplitudes'])*750.# + compdiff(components['amplitude_formants_hamon'])*750. + ((components['loudness']*components['amplitudes'][:,1:]/0.0001)**0.125).mean()*50 + compdiff(components['amplitude_formants_noise'])*750. 
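+            # NOTE: compdiff/compdiffd2 are the temporal-smoothness penalties applied to the decoded
+            # component trajectories (their definitions are not shown in this hunk). A minimal sketch
+            # of what such first- and second-order difference penalties look like, as an illustration
+            # only, not necessarily the exact definitions used here:
+            #   def compdiff(x):    # x: B x C x T; penalize frame-to-frame change
+            #       return (x[..., 1:] - x[..., :-1]).abs().mean()
+            #   def compdiffd2(x):  # penalize curvature of the trajectory
+            #       d = x[..., 1:] - x[..., :-1]
+            #       return (d[..., 1:] - d[..., :-1]).abs().mean()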
+ # hamonic_components_diff = compdiffd2(components['freq_formants_hamon_hz']*2) + compdiffd2(components['f0_hz']/10) + compdiff(components['amplitude_formants_hamon'])*750. + compdiff(components['amplitude_formants_noise'])*750. + compdiffd2(components['freq_formants_noise_hz'][:,components['freq_formants_hamon_hz'].shape[1]:]/10) + compdiff(components['bandwidth_formants_noise_hz'][:,components['freq_formants_hamon_hz'].shape[1]:]/10) + # hamonic_components_diff = compdiffd2(components['freq_formants_hamon_hz'])+100*compdiffd2(components['f0_hz']*3) + compdiff(components['amplitude_formants_hamon'])*750. + compdiff(components['amplitude_formants_noise'])*750. #+ compdiff(components['freq_formants_noise_hz']*(1-on_stage_wider)) + else: + hamonic_components_diff = compdiff(components['freq_formants_hamon_hz']*(1-on_stage_wider))+compdiff(components['f0_hz']*(1-on_stage_wider)) + compdiff(components['amplitude_formants_hamon'])*750. + compdiff(components['amplitude_formants_noise'])*750. #+ compdiff(components['freq_formants_noise_hz']*(1-on_stage_wider)) + # hamonic_components_diff = compdiff(components['freq_formants_hamon_hz'])+compdiff(components['f0_hz']) + compdiff(components['amplitude_formants_hamon']*(1-on_stage_wider))*1500. + compdiff(components['amplitude_formants_noise']*(1-on_stage_wider))*1500. + compdiff(components['freq_formants_noise_hz']) + Ldiff = torch.mean(hamonic_components_diff)/2000. + # Ldiff = torch.mean(components['freq_formants_hamon'].var()+components['freq_formants_noise'].var())*10 + tracker.update(dict(Ldiff=Ldiff)) + Lae += Ldiff + # Ldiff = 0 + Lfreqorder = torch.mean(F.relu(components['freq_formants_hamon_hz'][:,:-1]-components['freq_formants_hamon_hz'][:,1:])) #+ (torch.mean(F.relu(components['freq_formants_noise_hz'][:,:-1]-components['freq_formants_noise_hz'][:,1:])) if components['freq_formants_noise_hz'].shape[1]>1 else 0) + + return Lae + Lf0 + Lfreqorder else: self.encoder.requires_grad_(False) rec,components_ecog = self.generate_fromecog(ecog,mask_prior,mni=mni,return_components=True) - Lrec = torch.mean((rec - spec)**2) + + ###### + if self.spec_sup: + if False:#self.ghm_loss: + Lrec = 0.3*self.lae1(rec,spec,tracker=tracker) + else: + Lrec = self.lae(rec,spec,tracker=tracker)#torch.mean((rec - spec)**2) + # Lamp = 10*torch.mean(F.relu(-components_ecog['amplitude_formants_hamon'][:,0:min(3,self.n_formants_ecog-1)]+components_ecog['amplitude_formants_hamon'][:,1:min(4,self.n_formants_ecog)])*(components_ecog['amplitudes'][:,0:1]>components_ecog['amplitudes'][:,1:2]).float()) + # tracker.update(dict(Lamp=Lamp)) + # Lrec+=Lamp + else: + Lrec = torch.tensor([0.0])# # Lrec = torch.mean((rec - spec).abs()) tracker.update(dict(Lrec=Lrec)) Lcomp = 0 if encoder_guide: - components_guide = self.encode(spec) + components_guide = self.encode(spec,x_denoise=x_denoise,duomask=duomask,noise_level = F.softplus(self.decoder.bgnoise_amp)*self.decoder.noise_dist.mean(),x_amp=x_amp) consonant_weight = 1#100*(torch.sign(components_guide['amplitudes'][:,1:]-0.5)*0.5+0.5) - for key in components_guide.keys(): + if self.power_synth: + loudness_db = torchaudio.transforms.AmplitudeToDB()(components_guide['loudness']) + loudness_db_norm = (loudness_db.clamp(min=-70)+70)/50 + else: + loudness_db = torchaudio.transforms.AmplitudeToDB()(components_guide['loudness']) + # loudness_db_norm = (loudness_db.clamp(min=-35)+35)/25 + loudness_db_norm = (loudness_db.clamp(min=-70)+70)/50 + # loudness_db = torchaudio.transforms.AmplitudeToDB()(components_guide['loudness']**2) + 
#loudness_db_norm = (loudness_db.clamp(min=-70)+70)/50
+            for key in ['loudness','f0_hz','amplitudes','amplitude_formants_hamon','freq_formants_hamon_hz','amplitude_formants_noise','freq_formants_noise_hz','bandwidth_formants_noise_hz']:
                 if key == 'loudness':
-                    diff = torch.mean((components_guide[key] - components_ecog[key])**2) #+ torch.mean((components_guide[key] - components_ecog[key])**2 * on_stage * consonant_weight)
-                elif key in ['freq_formants', 'bandwidth_formants', 'amplitude_formants']:
-                    diff = torch.mean((components_guide[key] - components_ecog[key])**2 * on_stage * consonant_weight)
-                else:
-                    diff = torch.mean((components_guide[key] - components_ecog[key])**2 * on_stage)
+                    # same dB normalization in both power_synth modes
+                    loudness_db_norm_ecog = (torchaudio.transforms.AmplitudeToDB()(components_ecog[key])+70)/50
+                    diff = 3*torch.mean((loudness_db_norm - loudness_db_norm_ecog)**2)
+                if key == 'f0_hz':
+                    diff = 0.3*torch.mean((components_guide[key]/200*5 - components_ecog[key]/200*5)**2 * on_stage_wider * loudness_db_norm)
+                if key in ['amplitudes']:
+                    weight = on_stage_wider * loudness_db_norm
+                    if self.ghm_loss:
+                        diff = 30*self.lae3(components_guide[key], components_ecog[key],reweight=weight)
+                    else:
+                        diff = 10*torch.mean((components_guide[key] - components_ecog[key])**2 *weight)
+                if key in ['amplitude_formants_hamon']:
+                    weight = components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm
+                    diff = 40*torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * weight)
+                if key in ['freq_formants_hamon_hz']:
+                    weight = components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm
+                    diff = 100*torch.mean((components_guide[key][:,:self.n_formants_ecog]/2000*5 - components_ecog[key][:,:self.n_formants_ecog]/2000*5)**2 * weight)
+                if key in ['amplitude_formants_noise']:
+                    weight = components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight * loudness_db_norm
+                    diff = 40*torch.mean((torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1) - components_ecog[key])**2 *weight)
+                if key in ['freq_formants_noise_hz']:
+                    weight = components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight * loudness_db_norm
+                    diff = 3*torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
+                if key in ['bandwidth_formants_noise_hz']:
+                    weight = components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight * loudness_db_norm
+                    diff = torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
                 tracker.update({key : diff})
                 Lcomp += diff
         Loss = Lrec+Lcomp
+
+        hamonic_components_diff = compdiffd2(components_ecog['freq_formants_hamon_hz']*1.5) + compdiffd2(components_ecog['f0_hz']*2) + compdiff(components_ecog['bandwidth_formants_noise_hz'][:,components_ecog['freq_formants_hamon_hz'].shape[1]:]/5) + compdiff(components_ecog['freq_formants_noise_hz'][:,components_ecog['freq_formants_hamon_hz'].shape[1]:]/5)+ compdiff(components_ecog['amplitudes'])*750.
+        Ldiff = torch.mean(hamonic_components_diff)/2000.
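+        # As in the audio-only branch above, Ldiff penalizes temporal roughness of the
+        # ECoG-decoded trajectories: second-order differences for the f0 and formant-frequency
+        # tracks, first-order differences for the noise-formant parameters and the mixing
+        # amplitudes.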
+        tracker.update(dict(Ldiff=Ldiff))
+        Loss += Ldiff
+
+        freq_linear_reweighting = 1
+        thres = int(hz2ind(4000,self.n_fft)) if self.wavebased else mel_scale(self.spec_chans,4000,pt=False).astype(np.int32)
+        explosive = torch.sign(torch.mean((spec*freq_linear_reweighting)[...,thres:],dim=-1)-torch.mean((spec*freq_linear_reweighting)[...,:thres],dim=-1))*0.5+0.5
+        Lexp = torch.mean((components_ecog['amplitudes'][:,0:1]-components_ecog['amplitudes'][:,1:2])*explosive)*100
+        tracker.update(dict(Lexp=Lexp))
+        Loss += Lexp
+
+        Lfreqorder = torch.mean(F.relu(components_ecog['freq_formants_hamon_hz'][:,:-1]-components_ecog['freq_formants_hamon_hz'][:,1:]))
+        Loss += Lfreqorder
+        # (earlier experiments at this point also tried a "balanced" reweighting of the component
+        # losses, and fits restricted to F1/F2 frequencies or loudness alone)
+        return Loss
 
     def lerp(self, other, betta,w_classifier=False):
         if hasattr(other, 'module'):
             other = other.module
         with torch.no_grad():
-            params = list(self.decoder.parameters()) + list(self.encoder.parameters()) + (list(self.ecog_encoder.parameters()) if self.with_ecog else [])
-            other_param = list(other.decoder.parameters()) + list(other.encoder.parameters()) + (list(other.ecog_encoder.parameters()) if self.with_ecog else [])
+            params = list(self.decoder.parameters()) + list(self.encoder.parameters()) + (list(self.ecog_encoder.parameters()) if self.with_ecog else []) + (list(self.decoder_mel.parameters()) if self.do_mel_guide else [])
+            other_param = list(other.decoder.parameters()) + list(other.encoder.parameters()) + (list(other.ecog_encoder.parameters()) if self.with_ecog else []) + (list(other.decoder_mel.parameters()) if self.do_mel_guide else [])
             for p, p_other in zip(params, other_param):
                 p.data.lerp_(p_other.data, 1.0 - betta)
-
+#
\ No newline at end of file
diff --git a/net_formant.py b/net_formant.py
index 9aa43e77..0fa4e58c 100644
--- a/net_formant.py
+++ b/net_formant.py
@@ -1,5 +1,8 @@
 import os
+import pdb
 import torch
+import torchaudio
 from torch import nn
 from torch.nn import functional as F
 from torch.nn import Parameter as P
@@ -13,8 +16,39 @@ from transformer_models.transformer import Transformer as TransformerTS
 from transformer_models.transformer_nonlocal import Transformer as TransformerNL
 
-# def mel_scale(n_mels,hz,min_octave=-31.,max_octave=95.):
-def mel_scale(n_mels,hz,min_octave=-58.,max_octave=100.,pt=True):
+def db(x,noise = -80, slope =35, powerdb=True):
+    if powerdb:
+        return ((2*torchaudio.transforms.AmplitudeToDB()(x)).clamp(min=noise)-slope-noise)/slope
+    else:
+        return ((torchaudio.transforms.AmplitudeToDB()(x)).clamp(min=noise)-slope-noise)/slope
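+# The two helpers below convert between linear magnitude and the [-1, 1]-normalized dB scale
+# used throughout this file: to_db maps power in [noise_db, max_db] (dB) onto [-1, 1], and
+# amplitude() is its inverse. A quick numeric check with the defaults noise_db=-60, max_db=35
+# (torchaudio's AmplitudeToDB defaults to power dB, i.e. 10*log10):
+#   amplitude(1.0)  == 10**3.5   (the +35 dB ceiling)
+#   amplitude(-1.0) == 10**-6.0  (the -60 dB noise floor)
+#   to_db(torch.tensor(10**3.5)) -> 1.0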
+
+def amplitude(x,noise_db=-60,max_db=35,trim_noise=False):
+    if trim_noise:
+        x_db = (x+1)/2*(max_db-noise_db)+noise_db
+        if type(x) is np.ndarray:
+            return 10**(x_db/10)*(np.sign(x_db-noise_db)*0.5+0.5)
+        else:
+            return 10**(x_db/10)*((x_db-noise_db).sign()*0.5+0.5)
+    else:
+        return 10**(((x+1)/2*(max_db-noise_db)+noise_db)/10)
+
+def to_db(x,noise_db=-60,max_db=35):
+    return (torchaudio.transforms.AmplitudeToDB()(x)-noise_db)/(max_db-noise_db)*2-1
+
+def wave2spec(wave,n_fft=256,wave_fr=16000,spec_fr=125,noise_db=-60,max_db=22.5,to_db=True,power=2):
+    if to_db:
+        return (torchaudio.transforms.AmplitudeToDB()(torchaudio.transforms.Spectrogram(n_fft*2-1,win_length=n_fft*2-1,hop_length=int(wave_fr/spec_fr),power=power)(wave)).clamp(min=noise_db,max=max_db).transpose(-2,-1)-noise_db)/(max_db-noise_db)*2-1
+    else:
+        return torchaudio.transforms.Spectrogram(n_fft*2-1,win_length=n_fft*2-1,hop_length=int(wave_fr/spec_fr),power=power)(wave).transpose(-2,-1)
+
+
+def mel_scale(n_mels,hz,min_octave=-31.,max_octave=102.,pt=True):
     #take absolute hz, return abs mel
     # return (torch.log2(hz/440)+31/24)*24*n_mels/126
     if pt:
@@ -23,25 +57,79 @@
     return (np.log2(hz/440.)-min_octave/24.)*24*n_mels/(max_octave-min_octave)
 
 # def inverse_mel_scale(mel,min_octave=-31.,max_octave=95.):
-def inverse_mel_scale(mel,min_octave=-58.,max_octave=100.):
+def inverse_mel_scale(mel,min_octave=-31.,max_octave=102.,n_mels=None):
+    # n_mels is unused here; it is accepted so callers that pass it (e.g. the log10 branch of
+    # FormantSysth.voicing below) do not raise a TypeError
     #take normalized mel, return absolute hz
     # return 440*2**(mel*126/24-31/24)
     return 440*2**(mel*(max_octave-min_octave)/24.+min_octave/24.)
 
+def ind2hz(ind,n_fft,max_freq=8000.):
+    #input abs ind, output abs hz
+    return ind/(1.0*n_fft)*max_freq
+
+def hz2ind(hz,n_fft,max_freq=8000.):
+    # input abs hz, output abs ind
+    return hz/(1.0*max_freq)*n_fft
+
 def bandwidth_mel(freqs_hz,bandwidth_hz,n_mels):
-    # input hz bandwidth, output abs bandwidth on mel
-    bandwidth_upper = freqs_hz+bandwidth_hz/2.
-    bandwidth_lower = torch.clamp(freqs_hz-bandwidth_hz/2.,min=1)
-    bandwidth = mel_scale(n_mels,bandwidth_upper) - mel_scale(n_mels,bandwidth_lower)
-    return bandwidth
+    # input hz bandwidth, output abs bandwidth on mel
+    bandwidth_upper = freqs_hz+bandwidth_hz/2.
+ bandwidth_lower = torch.clamp(freqs_hz-bandwidth_hz/2.,min=1) + bandwidth = mel_scale(n_mels,bandwidth_upper) - mel_scale(n_mels,bandwidth_lower) + return bandwidth +def torch_P2R(radii, angles): + return radii * torch.cos(angles),radii * torch.sin(angles) +def inverse_spec_to_audio(spec,n_fft = 511,win_length = 511,hop_length = 128,power_synth=True): + ''' + generate random phase, then use istft to inverse spec to audio + ''' + window = torch.hann_window(win_length) + angles = torch.randn_like(spec).uniform_(0, np.pi*2)#torch.zeros_like(spec)#torch.randn_like(spec).uniform_(0, np.pi*2) + spec = spec**0.5 if power_synth else spec + spec_complex = torch.stack(torch_P2R(spec, angles),dim=-1) #real and image in same dim + return torchaudio.functional.istft(spec_complex, n_fft=n_fft, window=window, center=True, win_length=win_length, hop_length=hop_length) @GENERATORS.register("GeneratorFormant") class FormantSysth(nn.Module): - def __init__(self, n_mels=64, k=30): + def __init__(self, n_mels=64, k=100, wavebased=False,n_fft=256,noise_db=-50,max_db=22.5,dbbased=False,add_bgnoise=True,log10=False,noise_from_data=False,return_wave=False,power_synth=False): super(FormantSysth, self).__init__() + self.wave_fr = 16e3 + self.spec_fr = 125 + self.n_fft = n_fft + self.noise_db=noise_db + self.max_db = max_db self.n_mels = n_mels self.k = k + self.dbbased=dbbased + self.log10 = log10 + self.add_bgnoise = add_bgnoise + self.wavebased=wavebased + self.noise_from_data = noise_from_data + self.linear_scale = wavebased + self.return_wave = return_wave + self.power_synth = power_synth self.timbre = Parameter(torch.Tensor(1,1,n_mels)) self.timbre_mapping = nn.Sequential( ln.Conv1d(1,128,1), @@ -49,73 +137,206 @@ def __init__(self, n_mels=64, k=30): ln.Conv1d(128,128,1), nn.LeakyReLU(0.2), ln.Conv1d(128,2,1), - nn.Sigmoid(), + # nn.Sigmoid(), + ) + self.bgnoise_mapping = nn.Sequential( + ln.Conv2d(2,2,[1,5],padding=[0,2],gain=1,bias=False), + # nn.Sigmoid(), + ) + self.noise_mapping = nn.Sequential( + ln.Conv2d(2,2,[1,5],padding=[0,2],gain=1,bias=False), + # nn.Sigmoid(), + ) + + self.bgnoise_mapping2 = nn.Sequential( + ln.Conv1d(1,128,1,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,128,1,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,1,1,gain=1,bias=False), + # nn.Sigmoid(), + ) + self.noise_mapping2 = nn.Sequential( + ln.Conv1d(1,128,1,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,128,1,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,1,1,1,gain=1,bias=False), + # nn.Sigmoid(), ) self.prior_exp = np.array([0.4963,0.0745,1.9018]) self.timbre_parameter = Parameter(torch.Tensor(2)) + self.wave_noise_amplifier = Parameter(torch.Tensor(1)) + self.wave_hamon_amplifier = Parameter(torch.Tensor(1)) + + if noise_from_data: + self.bgnoise_amp = Parameter(torch.Tensor(1)) + with torch.no_grad(): + nn.init.constant_(self.bgnoise_amp,1) + else: + self.bgnoise_dist = Parameter(torch.Tensor(1,1,1,self.n_fft if self.wavebased else self.n_mels)) + with torch.no_grad(): + nn.init.constant_(self.bgnoise_dist,1.0) # self.silient = Parameter(torch.Tensor(1,1,n_mels)) self.silient = -1 with torch.no_grad(): nn.init.constant_(self.timbre,1.0) nn.init.constant_(self.timbre_parameter[0],7) nn.init.constant_(self.timbre_parameter[1],0.004) + nn.init.constant_(self.wave_noise_amplifier,1) + nn.init.constant_(self.wave_hamon_amplifier,4.) 
+ # nn.init.constant_(self.silient,-1.0) - def formant_mask(self,freq,bandwith,amplitude): - # freq, bandwith, amplitude: B*formants*time - freq_cord = torch.arange(self.n_mels) - time_cord = torch.arange(freq.shape[2]) - grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) - grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 - grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 - freq = freq.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants - bandwith = bandwith.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants - amplitude = amplitude.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants - # masks = amplitude*torch.exp(-0.693*(grid_freq-freq)**2/(2*(bandwith+0.001)**2)) #B,time,freqchans, formants - masks = amplitude*torch.exp(-(grid_freq-freq)**2/(2*(bandwith/np.sqrt(2*np.log(2))+0.001)**2)) #B,time,freqchans, formants - masks = masks.unsqueeze(dim=1) #B,1,time,freqchans, formants - return masks +# def formant_mask(self,freq,bandwith,amplitude): +# # freq, bandwith, amplitude: B*formants*time +# freq_cord = torch.arange(self.n_mels) +# time_cord = torch.arange(freq.shape[2]) +# grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) +# grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 +# grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 +# freq = freq.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants +# bandwith = bandwith.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants +# amplitude = amplitude.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants +# # masks = amplitude*torch.exp(-0.693*(grid_freq-freq)**2/(2*(bandwith+0.001)**2)) #B,time,freqchans, formants +# masks = amplitude*torch.exp(-(grid_freq-freq)**2/(2*(bandwith/np.sqrt(2*np.log(2))+0.001)**2)) #B,time,freqchans, formants +# masks = masks.unsqueeze(dim=1) #B,1,time,freqchans, formants +# return masks - def formant_mask_hz2mel(self,freq_hz,bandwith_hz,amplitude): + def formant_mask(self,freq_hz,bandwith_hz,amplitude,linear=False, triangle_mask = False,duomask=True, n_formant_noise=1,f0_hz=None): # freq, bandwith, amplitude: B*formants*time - freq_cord = torch.arange(self.n_mels) + freq_cord = torch.arange(self.n_fft if linear else self.n_mels) time_cord = torch.arange(freq_hz.shape[2]) grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 - grid_freq_hz = inverse_mel_scale(grid_freq/(self.n_mels*1.0)) + grid_freq_hz = ind2hz(grid_freq,self.n_fft,self.wave_fr/2) if linear else inverse_mel_scale(grid_freq/(self.n_mels*1.0)) freq_hz = freq_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants bandwith_hz = bandwith_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants amplitude = amplitude.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants - masks = amplitude*torch.exp(-0.693*(grid_freq_hz-freq_hz)**2/(2*(bandwith_hz+0.01)**2)) #B,time,freqchans, formants + if self.power_synth: + amplitude = amplitude + alpha = (2*np.sqrt(2*np.log(np.sqrt(2)))) + if self.return_wave: + t = torch.arange(int(f0_hz.shape[2]/self.spec_fr*self.wave_fr))/(1.0*self.wave_fr) #in second + t = t.unsqueeze(dim=0).unsqueeze(dim=0) #1, 1, time + k = (torch.arange(self.k)+1).reshape([1,self.k,1]) + # f0_hz_interp = F.interpolate(f0_hz,t.shape[-1],mode='linear',align_corners=False) #Bx1xT + # bandwith_hz_interp = 
F.interpolate(bandwith_hz.permute(0,2,3,1),[bandwith_hz.shape[-1],t.shape[-1]],mode='bilinear',align_corners=False).permute(0,3,1,2) #Bx1xT + # freq_hz_interp = F.interpolate(freq_hz.permute(0,2,3,1),[freq_hz.shape[-1],t.shape[-1]],mode='bilinear',align_corners=False).permute(0,3,1,2) #Bx1xT + k_f0 = k*f0_hz #BxkxT + k_f0 = k_f0.permute([0,2,1]).unsqueeze(-1) #BxTxkx1 + # masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2)) if self.wavebased else amplitude*torch.exp(-(0.693*(grid_freq_hz-freq_hz))**2/(2*(bandwith_hz+0.01)**2)) #B,time,freqchans, formants + # amplitude_interp = F.interpolate(amplitude.permute(0,2,3,1),[amplitude.shape[-1],t.shape[-1]],mode='bilinear',align_corners=False).permute(0,3,1,2) #Bx1xT + hamonic_dist = (amplitude*torch.exp(-((k_f0-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2))).sqrt().sum(-1).permute([0,2,1]) #BxkxT + # hamonic_dist = (amplitude*torch.exp(-((k_f0-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2))).sum(-1).permute([0,2,1]) #BxkxT + hamonic_dist = F.interpolate(hamonic_dist,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr),mode = 'linear',align_corners=False) + # if self.wavebased: + if triangle_mask: + if duomask: + # masks_hamon = amplitude[...,:-n_formant_noise]*torch.exp(-(0.693*(grid_freq_hz-freq_hz[...,:-n_formant_noise]))**2/(2*(bandwith_hz[...,:-n_formant_noise]+0.01)**2)) + masks_hamon = amplitude[...,:-n_formant_noise]*torch.exp(-((grid_freq_hz-freq_hz[...,:-n_formant_noise]))**2/(2*(bandwith_hz[...,:-n_formant_noise]/alpha+0.01)**2)) + bw = bandwith_hz[...,-n_formant_noise:] + masks_noise = F.relu(amplitude[...,-n_formant_noise:] * (1 - (1-1/np.sqrt(2))*2/(bw+0.01)*(grid_freq_hz-freq_hz[...,-n_formant_noise:]).abs())) + # masks_noise = amplitude[...,-n_formant_noise:] * (1 - 2/(bw+0.01)*(grid_freq_hz-freq_hz[...,-n_formant_noise:]).abs())*(-torch.sign(torch.abs(grid_freq_hz-freq_hz[...,-n_formant_noise:])/(bw+0.01)-0.5)*0.5+0.5) + masks = torch.cat([masks_hamon,masks_noise],dim=-1) + else: + masks = F.relu(amplitude * (1 - (1-1/np.sqrt(2))*2/(bandwith_hz+0.01)*(grid_freq_hz-freq_hz).abs())) + else: + # masks = amplitude*torch.exp(-(0.693*(grid_freq_hz-freq_hz))**2/(2*(bandwith_hz+0.01)**2)) + if self.power_synth: + masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2)) + else: + # masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2)) + masks = amplitude*(torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2))+1E-6).sqrt() + # else: + # if triangle_mask: + # if duomask: + # # masks_hamon = amplitude[...,:-n_formant_noise]*torch.exp(-(0.693*(grid_freq_hz-freq_hz[...,:-n_formant_noise]))**2/(2*(bandwith_hz[...,:-n_formant_noise]+0.01)**2)) + # masks_hamon = amplitude[...,:-n_formant_noise]*torch.exp(-((grid_freq_hz-freq_hz[...,:-n_formant_noise]))**2/(2*(bandwith_hz[...,:-n_formant_noise]/alpha+0.01)**2)) + # bw = bandwith_hz[...,-n_formant_noise:] + # masks_noise = F.relu(amplitude[...,-n_formant_noise:] * (1 - (1-1/np.sqrt(2))*2/(bw+0.01)*(grid_freq_hz-freq_hz[...,-n_formant_noise:]).abs())) + # # masks_noise = amplitude[...,-n_formant_noise:] * (1 - 2/(bw+0.01)*(grid_freq_hz-freq_hz[...,-n_formant_noise:]).abs())*(-torch.sign(torch.abs(grid_freq_hz-freq_hz[...,-n_formant_noise:])/(bw+0.01)-0.5)*0.5+0.5) + # masks = torch.cat([masks_hamon,masks_noise],dim=-1) + # else: + # masks = F.relu(amplitude * (1 - (1-1/np.sqrt(2))*2/(bandwith_hz+0.01)*(grid_freq_hz-freq_hz).abs())) + # # masks = amplitude * (1 - 
2/(bandwith_hz+0.01)*(grid_freq_hz-freq_hz).abs())*(-torch.sign(torch.abs(grid_freq_hz-freq_hz)/(bandwith_hz+0.01)-0.5)*0.5+0.5) + # else: + # # masks = amplitude*torch.exp(-(0.693*(grid_freq_hz-freq_hz))**2/(2*(bandwith_hz+0.01)**2)) + # masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2)) + # masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/(2*np.sqrt(2*np.log(2)))+0.01)**2)) #B,time,freqchans, formants + # masks = amplitude*torch.exp(-(0.693*(grid_freq_hz-freq_hz))**2/(2*(bandwith_hz+0.01)**2)) #B,time,freqchans, formants masks = masks.unsqueeze(dim=1) #B,1,time,freqchans, formants - return masks + if self.return_wave: + return masks, hamonic_dist#B,1,time,freqchans + else: + return masks - def voicing(self,f0_hz): + def voicing_wavebased(self,f0_hz): #f0: B*1*time, hz - freq_cord = torch.arange(self.n_mels) + t = torch.arange(int(f0_hz.shape[2]/self.spec_fr*self.wave_fr))/(1.0*self.wave_fr) #in second + t = t.unsqueeze(dim=0).unsqueeze(dim=0) #1, 1, time + k = (torch.arange(self.k)+1).reshape([1,self.k,1]) + f0_hz_interp = F.interpolate(f0_hz,t.shape[-1],mode='linear',align_corners=False) + k_f0 = k*f0_hz_interp + k_f0_sum = 2*np.pi*torch.cumsum(k_f0,-1)/(1.0*self.wave_fr) + wave_k = np.sqrt(2)*torch.sin(k_f0_sum) * (-torch.sign(k_f0-7800)*0.5+0.5) + # wave = 0.12*torch.sin(2*np.pi*k_f0*t) * (-torch.sign(k_f0-6000)*0.5+0.5) + # wave = 0.09*torch.sin(2*np.pi*k_f0*t) * (-torch.sign(k_f0-self.wave_fr/2)*0.5+0.5) + # wave = 0.09*torch.sigmoid(self.wave_hamon_amplifier) * torch.sin(2*np.pi*k_f0*t) * (-torch.sign(k_f0-self.wave_fr/2)*0.5+0.5) + wave = wave_k.sum(dim=1,keepdim=True) + # wave = F.softplus(self.wave_hamon_amplifier) * wave.sum(dim=1,keepdim=True) + spec = wave2spec(wave,self.n_fft,self.wave_fr,self.spec_fr,self.noise_db,self.max_db,to_db=self.dbbased,power=2. if self.power_synth else 1.) + if self.return_wave: + return spec,wave_k + else: + return spec + + def unvoicing_wavebased(self,f0_hz,bg=False,mapping=True): + # return torch.ones([1,1,f0_hz.shape[2],512]) + # noise = 0.3*torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # noise = 0.03*torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + if bg: + noise = torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + if mapping: + noise = self.bgnoise_mapping2(noise) + else: + noise = np.sqrt(3.)*(2*torch.rand([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)])-1) + if mapping: + noise = self.noise_mapping2(noise) + # noise = 0.3 * torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # noise = 0.3 * F.softplus(self.wave_noise_amplifier) * torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + return wave2spec(noise,self.n_fft,self.wave_fr,self.spec_fr,self.noise_db,self.max_db,to_db=self.dbbased,power=2. if self.power_synth else 1.) 
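+    # voicing_wavebased (above) synthesizes the harmonic source in the time domain: the k-th
+    # partial's instantaneous frequency is k*f0(t), so its phase is accumulated as
+    # 2*pi*cumsum(k*f0)/fs rather than 2*pi*k*f0*t, which keeps the phase continuous while f0
+    # varies; partials with k*f0 above 7800 Hz (just under the 8 kHz Nyquist limit at
+    # wave_fr=16 kHz) are zeroed out to avoid aliasing. The same trick in isolation:
+    #   fs = 16000
+    #   f0 = torch.linspace(100.0, 200.0, fs).reshape(1, 1, -1)   # a 1 s upward glide
+    #   phase = 2 * np.pi * torch.cumsum(f0, dim=-1) / fs         # integrated phase
+    #   wave = torch.sin(phase)                                   # alias-free fundamental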
+ + # def unvoicing_wavebased(self,f0_hz): + # # return torch.ones([1,1,f0_hz.shape[2],512]) + # # noise = 0.3*torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # noise = 0.1*torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # # noise = 0.3 * torch.sigmoid(self.wave_noise_amplifier) * torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # return wave2spec(noise,self.n_fft,self.wave_fr,self.spec_fr,self.noise_db,self.max_db,to_db=False) + + def voicing_linear(self,f0_hz,bandwith=2.5): + #f0: B*1*time, hz + freq_cord = torch.arange(self.n_fft) time_cord = torch.arange(f0_hz.shape[2]) grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 - grid_freq_hz = inverse_mel_scale(grid_freq/(self.n_mels*1.0)) f0_hz = f0_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, 1 f0_hz = f0_hz.repeat([1,1,1,self.k]) #B,time,1, self.k f0_hz = f0_hz*(torch.arange(self.k)+1).reshape([1,1,1,self.k]) - bandwith_hz = 24.7*(f0_hz*4.37/1000+1) - bandwith = bandwidth_mel(f0_hz,bandwith_hz,self.n_mels) + # bandwith=4 # bandwith_lower = torch.clamp(f0-bandwith/2,min=1) # bandwith_upper = f0+bandwith/2 # bandwith = mel_scale(self.n_mels,bandwith_upper) - mel_scale(self.n_mels,bandwith_lower) - f0 = mel_scale(self.n_mels,f0_hz) - # sigma = bandwith/(np.sqrt(2*np.log(2))); - sigma = bandwith/(2*np.sqrt(2*np.log(2))); + f0 = hz2ind(f0_hz,self.n_fft) + # hamonics = torch.exp(-(grid_freq-f0)**2/(2*sigma**2)) #gaussian # hamonics = (1-((grid_freq_hz-f0_hz)/(2*bandwith_hz/2))**2)*(-torch.sign(torch.abs(grid_freq_hz-f0_hz)/(2*bandwith_hz)-0.5)*0.5+0.5) #welch - switch = mel_scale(self.n_mels,torch.abs(self.timbre_parameter[0])*f0_hz[...,0]).unsqueeze(1) - slop = (torch.abs(self.timbre_parameter[1])*f0_hz[...,0]).unsqueeze(1) - freq_cord_reshape = freq_cord.reshape([1,1,1,self.n_mels]) - hamonics = (1-((grid_freq-f0)/(2.5*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(2.5*bandwith)-0.5)*0.5+0.5) #welch + freq_cord_reshape = freq_cord.reshape([1,1,1,self.n_fft]) + hamonics = (1 - 2/bandwith*(grid_freq-f0).abs())*(-torch.sign(torch.abs(grid_freq-f0)/(bandwith)-0.5)*0.5+0.5) #triangular + # hamonics = (1-((grid_freq-f0)/(bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(bandwith)-0.5)*0.5+0.5) #welch + # hamonics = (1-((grid_freq-f0)/(2.5*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(2.5*bandwith)-0.5)*0.5+0.5) #welch # hamonics = (1-((grid_freq-f0)/(3*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(3*bandwith)-0.5)*0.5+0.5) #welch # hamonics = torch.cos(np.pi*torch.abs(grid_freq-f0)/(4*bandwith))**2*(-torch.sign(torch.abs(grid_freq-f0)/(4*bandwith)-0.5)*0.5+0.5) #hanning # hamonics = (hamonics.sum(dim=-1)).unsqueeze(dim=1) # B,1,T,F @@ -123,74 +344,246 @@ def voicing(self,f0_hz): # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-slop*(freq_cord_reshape-switch)*condition)-1)*condition) # B,1,T,F # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)-torch.abs(self.prior_exp_parameter[2])) * torch.exp(-torch.abs(self.prior_exp_parameter[1])*freq_cord.reshape([1,1,1,self.n_mels])) + torch.abs(self.prior_exp_parameter[2]) # B,1,T,F + # timbre_parameter = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]).unsqueeze(1) + # condition = (torch.sign(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*0.5+0.5) + # hamonics = 
((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*torch.sigmoid(timbre_parameter[...,1:2])*(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*condition)-1)*condition) # B,1,T,F + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*torch.sigmoid(timbre_parameter[...,1:2])*(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*condition)-1)*condition) * F.softplus(timbre_parameter[...,2:3]) + timbre_parameter[...,3:4] # B,1,T,F + # timbre = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]) + # hamonics = (hamonics.sum(dim=-1)*timbre).unsqueeze(dim=1) # B,1,T,F + # hamonics = (hamonics.sum(dim=-1)*self.timbre).unsqueeze(dim=1) # B,1,T,F + + hamonics = (hamonics.sum(dim=-1)).unsqueeze(dim=1) + # hamonics = 180*F.softplus(self.wave_hamon_amplifier)*(hamonics.sum(dim=-1)).unsqueeze(dim=1) + + return hamonics + + def voicing(self,f0_hz): + #f0: B*1*time, hz + freq_cord = torch.arange(self.n_mels) + time_cord = torch.arange(f0_hz.shape[2]) + grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) + grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq_hz = inverse_mel_scale(grid_freq/(self.n_mels*1.0)) + f0_hz = f0_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, 1 + f0_hz = f0_hz.repeat([1,1,1,self.k]) #B,time,1, self.k + f0_hz = f0_hz*(torch.arange(self.k)+1).reshape([1,1,1,self.k]) + if self.log10: + f0_mel = mel_scale(self.n_mels,f0_hz) + band_low_hz = inverse_mel_scale((f0_mel-1)/(self.n_mels*1.0),n_mels = self.n_mels) + band_up_hz = inverse_mel_scale((f0_mel+1)/(self.n_mels*1.0),n_mels = self.n_mels) + bandwith_hz = band_up_hz-band_low_hz + band_low_mel = mel_scale(self.n_mels,band_low_hz) + band_up_mel = mel_scale(self.n_mels,band_up_hz) + bandwith = band_up_mel-band_low_mel + else: + bandwith_hz = 24.7*(f0_hz*4.37/1000+1) + bandwith = bandwidth_mel(f0_hz,bandwith_hz,self.n_mels) + # bandwith_lower = torch.clamp(f0-bandwith/2,min=1) + # bandwith_upper = f0+bandwith/2 + # bandwith = mel_scale(self.n_mels,bandwith_upper) - mel_scale(self.n_mels,bandwith_lower) + f0 = mel_scale(self.n_mels,f0_hz) + switch = mel_scale(self.n_mels,torch.abs(self.timbre_parameter[0])*f0_hz[...,0]).unsqueeze(1) + slop = (torch.abs(self.timbre_parameter[1])*f0_hz[...,0]).unsqueeze(1) + freq_cord_reshape = freq_cord.reshape([1,1,1,self.n_mels]) + if not self.dbbased: + # sigma = bandwith/(np.sqrt(2*np.log(2))); + sigma = bandwith/(2*np.sqrt(2*np.log(2))); + hamonics = torch.exp(-(grid_freq-f0)**2/(2*sigma**2)) #gaussian + # hamonics = (1-((grid_freq_hz-f0_hz)/(2*bandwith_hz/2))**2)*(-torch.sign(torch.abs(grid_freq_hz-f0_hz)/(2*bandwith_hz)-0.5)*0.5+0.5) #welch + else: + # # hamonics = (1-((grid_freq-f0)/(1.75*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(1.75*bandwith)-0.5)*0.5+0.5) #welch + hamonics = (1-((grid_freq-f0)/(2.5*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(2.5*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = (1-((grid_freq-f0)/(3*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(3*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = torch.cos(np.pi*torch.abs(grid_freq-f0)/(4*bandwith))**2*(-torch.sign(torch.abs(grid_freq-f0)/(4*bandwith)-0.5)*0.5+0.5) #hanning + # hamonics = (hamonics.sum(dim=-1)).unsqueeze(dim=1) # B,1,T,F + # condition = (torch.sign(freq_cord_reshape-switch)*0.5+0.5) + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ 
(torch.exp(-slop*(freq_cord_reshape-switch)*condition)-1)*condition) # B,1,T,F + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)-torch.abs(self.prior_exp_parameter[2])) * torch.exp(-torch.abs(self.prior_exp_parameter[1])*freq_cord.reshape([1,1,1,self.n_mels])) + torch.abs(self.prior_exp_parameter[2]) # B,1,T,F + timbre_parameter = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]).unsqueeze(1) - condition = (torch.sign(freq_cord_reshape-timbre_parameter[...,0:1]*self.n_mels)*0.5+0.5) - hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*timbre_parameter[...,1:2]*(freq_cord_reshape-timbre_parameter[...,0:1]*self.n_mels)*condition)-1)*condition) # B,1,T,F + condition = (torch.sign(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*0.5+0.5) + amp = F.softplus(self.wave_hamon_amplifier) if self.dbbased else 180*F.softplus(self.wave_hamon_amplifier) + hamonics = amp * ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*torch.sigmoid(timbre_parameter[...,1:2])*(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*condition)-1)*condition) # B,1,T,F + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*torch.sigmoid(timbre_parameter[...,1:2])*(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*condition)-1)*condition) * F.softplus(timbre_parameter[...,2:3]) + timbre_parameter[...,3:4] # B,1,T,F # timbre = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]) # hamonics = (hamonics.sum(dim=-1)*timbre).unsqueeze(dim=1) # B,1,T,F # hamonics = (hamonics.sum(dim=-1)*self.timbre).unsqueeze(dim=1) # B,1,T,F + # return F.softplus(self.wave_hamon_amplifier)*hamonics return hamonics - def unvoicing(self,f0): - return torch.ones([f0.shape[0],1,f0.shape[2],self.n_mels]) + def unvoicing(self,f0,bg=False,mapping=True): + # return (0.25*torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels]))+1 + rnd = torch.randn([f0.shape[0],2,f0.shape[2],self.n_fft if self.wavebased else self.n_mels]) + if mapping: + rnd = self.bgnoise_mapping(rnd) if bg else self.noise_mapping(rnd) + real = rnd[:,0:1] + img = rnd[:,1:2] + if self.dbbased: + return (2*torchaudio.transforms.AmplitudeToDB()(torch.sqrt(real**2 + img**2+1E-10))+80).clamp(min=0)/35 + # return (2*torchaudio.transforms.AmplitudeToDB()(F.softplus(self.wave_noise_amplifier)*torch.sqrt(torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels])**2 + torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels])**2))+80).clamp(min=0)/35 + else: + # return torch.ones([f0.shape[0],1,f0.shape[2],self.n_mels]) + return 180*F.softplus(self.wave_noise_amplifier) * torch.sqrt(real**2 + img**2+1E-10) + # return F.softplus(self.wave_noise_amplifier)*torch.sqrt(torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels])**2 + torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels])**2) - def forward(self,components): + # return (F.softplus(self.wave_noise_amplifier)) * (0.25*torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels]))+1 + # return torch.ones([f0.shape[0],1,f0.shape[2],self.n_mels]) + + def forward(self,components,enable_hamon_excitation=True,enable_noise_excitation=True,enable_bgnoise=True): # f0: B*1*T, amplitudes: B*2(voicing,unvoicing)*T, freq_formants,bandwidth_formants,amplitude_formants: B*formants*T amplitudes = components['amplitudes'].unsqueeze(dim=-1) amplitudes_h = components['amplitudes_h'].unsqueeze(dim=-1) loudness = components['loudness'].unsqueeze(dim=-1) - f0_hz = inverse_mel_scale(components['f0']) + f0_hz = components['f0_hz'] # 
import pdb;pdb.set_trace() - self.hamonics = self.voicing(f0_hz) - self.noise = self.unvoicing(f0_hz) + if self.wavebased: + # self.hamonics = 1800*F.softplus(self.wave_hamon_amplifier)*self.voicing_linear(f0_hz) + # self.noise = 180*self.unvoicing(f0_hz,bg=False,mapping=False) + # self.bgnoise = 18*self.unvoicing(f0_hz,bg=True,mapping=False) + # import pdb;pdb.set_trace() + self.hamonics = self.voicing_wavebased(f0_hz) + self.noise = self.unvoicing_wavebased(f0_hz,bg=False,mapping=False) + self.bgnoise = self.unvoicing_wavebased(f0_hz,bg=True) + else: + self.hamonics = self.voicing(f0_hz) + self.noise = self.unvoicing(f0_hz,bg=False) + self.bgnoise = self.unvoicing(f0_hz,bg=True) # freq_formants = components['freq_formants']*self.n_mels # bandwidth_formants = components['bandwidth_formants']*self.n_mels # excitation = amplitudes[:,0:1]*hamonics # excitation = loudness*(amplitudes[:,0:1]*hamonics) - self.excitation_hamon = loudness*amplitudes[:,0:1]*(amplitudes_h[:,0:1]*self.hamonics + amplitudes_h[:,-1:]*self.noise) - # self.excitation_hamon = loudness*amplitudes[:,0:1]*self.hamonics - self.excitation_noise = loudness*amplitudes[:,-1:]*self.noise - self.mask_hamon = self.formant_mask_hz2mel(components['freq_formants_hamon_hz'],components['bandwidth_formants_hamon_hz'],components['amplitude_formants_hamon']) - self.mask_noise = self.formant_mask_hz2mel(components['freq_formants_noise_hz'],components['bandwidth_formants_noise_hz'],components['amplitude_formants_noise']) + # self.noise = self.noise + self.excitation_noise = loudness*(amplitudes[:,-1:])*self.noise if self.power_synth else loudness*amplitudes[:,-1:]*self.noise + duomask = components['freq_formants_noise_hz'].shape[1]>components['freq_formants_hamon_hz'].shape[1] + n_formant_noise = (components['freq_formants_noise_hz'].shape[1]-components['freq_formants_hamon_hz'].shape[1]) if duomask else components['freq_formants_noise_hz'].shape[1] + self.mask_hamon = self.formant_mask(components['freq_formants_hamon_hz'],components['bandwidth_formants_hamon_hz'],components['amplitude_formants_hamon'],linear = self.linear_scale,f0_hz = f0_hz) + self.mask_noise = self.formant_mask(components['freq_formants_noise_hz'],components['bandwidth_formants_noise_hz'],components['amplitude_formants_noise'],linear = self.linear_scale,triangle_mask=False if self.wavebased else True,duomask=duomask,n_formant_noise=n_formant_noise,f0_hz = f0_hz) # self.mask_hamon = self.formant_mask(components['freq_formants_hamon']*self.n_mels,components['bandwidth_formants_hamon'],components['amplitude_formants_hamon']) # self.mask_noise = self.formant_mask(components['freq_formants_noise']*self.n_mels,components['bandwidth_formants_noise'],components['amplitude_formants_noise']) + if self.return_wave: + self.hamonics,self.hamonics_wave = self.hamonics + self.mask_hamon, self.hamonic_dist = self.mask_hamon + self.mask_noise, self.mask_noise_only = self.mask_noise + if self.power_synth: + self.excitation_hamon_wave = F.interpolate((loudness[...,-1]*amplitudes[:,0:1][...,-1]).sqrt(),self.hamonics_wave.shape[-1],mode='linear',align_corners=False)*self.hamonics_wave + else: + self.excitation_hamon_wave = F.interpolate(loudness[...,-1]*amplitudes[:,0:1][...,-1],self.hamonics_wave.shape[-1],mode='linear',align_corners=False)*self.hamonics_wave + self.hamonics_wave_ = (self.excitation_hamon_wave*self.hamonic_dist).sum(1,keepdim=True) + self.mask_hamon_sum = self.mask_hamon.sum(dim=-1) self.mask_noise_sum = self.mask_noise.sum(dim=-1) - speech = 
self.excitation_hamon*self.mask_hamon_sum + self.excitation_noise*self.mask_noise_sum + self.silient*torch.ones(self.mask_hamon_sum.shape) - return speech + bgdist = F.softplus(self.bgnoise_amp)*self.noise_dist if self.noise_from_data else F.softplus(self.bgnoise_dist) + if self.power_synth: + self.excitation_hamon = loudness*(amplitudes[:,0:1])*self.hamonics + else: + self.excitation_hamon = loudness*amplitudes[:,0:1]*self.hamonics + # import pdb;pdb.set_trace() + self.noise_excitation = self.excitation_noise*self.mask_noise_sum + if self.return_wave: + self.noise_excitation_wave = 2*inverse_spec_to_audio(self.noise_excitation.squeeze(1).permute(0,2,1),n_fft=self.n_fft*2-1,power_synth=self.power_synth) + self.noise_excitation_wave = F.pad(self.noise_excitation_wave,[0,self.hamonics_wave_.shape[2]-self.noise_excitation_wave.shape[1]]) + self.noise_excitation_wave = self.noise_excitation_wave.unsqueeze(1) + self.rec_wave = self.noise_excitation_wave+self.hamonics_wave_ + if self.wavebased: + # import pdb; pdb.set_trace() + bgn = bgdist*self.bgnoise*0.0003 if (self.add_bgnoise and enable_bgnoise) else 0 + speech = ((self.excitation_hamon*self.mask_hamon_sum) if enable_hamon_excitation else torch.zeros(self.excitation_hamon.shape)) + (self.noise_excitation if enable_noise_excitation else 0) + bgn + # speech = ((self.excitation_hamon*self.mask_hamon_sum) if enable_hamon_excitation else torch.zeros(self.excitation_hamon.shape)) + (self.excitation_noise*self.mask_noise_sum if enable_noise_excitation else 0) + (((bgdist*self.bgnoise*0.0003) if not self.dbbased else (2*torchaudio.transforms.AmplitudeToDB()(bgdist*0.0003)/35. + self.bgnoise)) if (self.add_bgnoise and enable_bgnoise) else 0) + # speech = speech if self.power_synth else speech**2 + speech = (torchaudio.transforms.AmplitudeToDB()(speech).clamp(min=self.noise_db)-self.noise_db)/(self.max_db-self.noise_db)*2-1 + else: + # speech = ((self.excitation_hamon*self.mask_hamon_sum) if enable_hamon_excitation else torch.zeros(self.excitation_hamon.shape)) + (((bgdist*self.bgnoise*0.0003) if not self.dbbased else (2*torchaudio.transforms.AmplitudeToDB()(bgdist*0.0003)/35. + self.bgnoise)) if (self.add_bgnoise and enable_bgnoise) else 0) + (self.silient*torch.ones(self.mask_hamon_sum.shape) if self.dbbased else 0) + speech = ((self.excitation_hamon*self.mask_hamon_sum) if enable_hamon_excitation else torch.zeros(self.excitation_hamon.shape)) + (self.noise_excitation if enable_noise_excitation else 0) + (((bgdist*self.bgnoise*0.0003) if not self.dbbased else (2*torchaudio.transforms.AmplitudeToDB()(bgdist*0.0003)/35. 
+ self.bgnoise)) if (self.add_bgnoise and enable_bgnoise) else 0) + (self.silient*torch.ones(self.mask_hamon_sum.shape) if self.dbbased else 0) + # speech = self.excitation_hamon*self.mask_hamon_sum + (self.excitation_noise*self.mask_noise_sum if enable_noise_excitation else 0) + self.silient*torch.ones(self.mask_hamon_sum.shape) + if not self.dbbased: + speech = db(speech) + + + # import pdb;pdb.set_trace() + if self.return_wave: + return speech,self.rec_wave + else: + return speech @ENCODERS.register("EncoderFormant") class FormantEncoder(nn.Module): - def __init__(self, n_mels=64, n_formants=4,min_octave=-31,max_octave=96): + def __init__(self, n_mels=64, n_formants=4,n_formants_noise=2,min_octave=-31,max_octave=96,wavebased=False,n_fft=256,noise_db=-50,max_db=22.5,broud=True,power_synth=False,hop_length=128): super(FormantEncoder, self).__init__() + self.wavebased = wavebased self.n_mels = n_mels self.n_formants = n_formants + self.n_formants_noise = n_formants_noise self.min_octave = min_octave self.max_octave = max_octave - + self.noise_db = noise_db + self.max_db = max_db + self.broud = broud + self.n_fft = n_fft + self.power_synth=power_synth self.formant_freq_limits_diff = torch.tensor([950.,2450.,2100.]).reshape([1,3,1]) #freq difference self.formant_freq_limits_diff_low = torch.tensor([300.,300.,0.]).reshape([1,3,1]) #freq difference + # self.formant_freq_limits_abs = torch.tensor([950.,2800.,3400.,4700.]).reshape([1,4,1]) #freq difference # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2800.,3400]).reshape([1,4,1]) #freq difference - self.formant_freq_limits_abs = torch.tensor([950.,3300.,3600.,4700.]).reshape([1,4,1]) #freq difference - self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.,3400]).reshape([1,4,1]) #freq difference - self.formant_freq_limits_abs_noise = torch.tensor([7000.]).reshape([1,1,1]) #freq difference - self.formant_freq_limits_abs_noise_low = torch.tensor([4000.]).reshape([1,1,1]) #freq difference - self.formant_bandwitdh_ratio = Parameter(torch.Tensor(1)) + # self.formant_freq_limits_abs = torch.tensor([950.,3300.,3600.,4700.]).reshape([1,4,1]) #freq difference + # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.,3400]).reshape([1,4,1]) #freq difference + + self.formant_freq_limits_abs = torch.tensor([950.,3400.,3800.,5000.,6000.,7000.]).reshape([1,6,1]) #freq difference + self.formant_freq_limits_abs_low = torch.tensor([300.,700.,1800.,3400,5000.,6000.]).reshape([1,6,1]) #freq difference + # self.formant_freq_limits_abs_low = torch.tensor([300.,700.,2700.,3400]).reshape([1,4,1]) #freq difference + + # self.formant_freq_limits_abs_noise = torch.tensor([7000.]).reshape([1,1,1]) #freq difference + self.formant_freq_limits_abs_noise = torch.tensor([8000.,7000.,7000.]).reshape([1,3,1]) #freq difference + self.formant_freq_limits_abs_noise_low = torch.tensor([4000.,3000.,3000.]).reshape([1,3,1]) #freq difference + # self.formant_freq_limits_abs_noise_low = torch.tensor([4000.,500.,500.]).reshape([1,3,1]) #freq difference + + self.formant_bandwitdh_bias = Parameter(torch.Tensor(1)) self.formant_bandwitdh_slop = Parameter(torch.Tensor(1)) + self.formant_bandwitdh_thres = Parameter(torch.Tensor(1)) with torch.no_grad(): - nn.init.constant_(self.formant_bandwitdh_ratio,0) + nn.init.constant_(self.formant_bandwitdh_bias,0) nn.init.constant_(self.formant_bandwitdh_slop,0) + nn.init.constant_(self.formant_bandwitdh_thres,0) # self.formant_freq_limits = torch.cumsum(self.formant_freq_limits_diff,dim=0) # 
self.formant_freq_limits_mel = torch.cat([torch.tensor([0.]),mel_scale(n_mels,self.formant_freq_limits)/n_mels]) # self.formant_freq_limits_mel_diff = torch.reshape(self.formant_freq_limits_mel[1:]-self.formant_freq_limits_mel[:-1],[1,3,1]) + if broud: + if wavebased: + self.conv1_narrow = ln.Conv1d(n_fft,64,3,1,1) + self.conv1_mel = ln.Conv1d(128,64,3,1,1) + self.norm1_mel = nn.GroupNorm(32,64) + self.conv2_mel = ln.Conv1d(64,128,3,1,1) + self.norm2_mel = nn.GroupNorm(32,128) + self.conv_fundementals_mel = ln.Conv1d(128,128,3,1,1) + self.norm_fundementals_mel = nn.GroupNorm(32,128) + self.f0_drop_mel = nn.Dropout() + else: + self.conv1_narrow = ln.Conv1d(n_mels,64,3,1,1) + self.norm1_narrow = nn.GroupNorm(32,64) + self.conv2_narrow = ln.Conv1d(64,128,3,1,1) + self.norm2_narrow = nn.GroupNorm(32,128) - self.conv1 = ln.Conv1d(n_mels,64,3,1,1) + self.conv_fundementals_narrow = ln.Conv1d(128,128,3,1,1) + self.norm_fundementals_narrow = nn.GroupNorm(32,128) + self.f0_drop_narrow = nn.Dropout() + if wavebased: + self.conv_f0_narrow = ln.Conv1d(256,1,1,1,0) + else: + self.conv_f0_narrow = ln.Conv1d(128,1,1,1,0) + + self.conv_amplitudes_narrow = ln.Conv1d(128,2,1,1,0) + self.conv_amplitudes_h_narrow = ln.Conv1d(128,2,1,1,0) + + if wavebased: + self.conv1 = ln.Conv1d(n_fft,64,3,1,1) + else: + self.conv1 = ln.Conv1d(n_mels,64,3,1,1) self.norm1 = nn.GroupNorm(32,64) self.conv2 = ln.Conv1d(64,128,3,1,1) - self.norm2 = nn.GroupNorm(32,128) + self.norm2 = nn.GroupNorm(32,128) self.conv_fundementals = ln.Conv1d(128,128,3,1,1) self.norm_fundementals = nn.GroupNorm(32,128) @@ -199,46 +592,117 @@ def __init__(self, n_mels=64, n_formants=4,min_octave=-31,max_octave=96): self.conv_amplitudes = ln.Conv1d(128,2,1,1,0) self.conv_amplitudes_h = ln.Conv1d(128,2,1,1,0) - # self.conv_loudness = ln.Conv1d(128,1,1,1,0) + # self.conv_loudness = nn.Sequential(ln.Conv1d(n_fft if wavebased else n_mels,128,1,1,0), + # nn.LeakyReLU(0.2), + # ln.Conv1d(128,128,1,1,0), + # nn.LeakyReLU(0.2), + # ln.Conv1d(128,1,1,1,0,bias_initial=0.5),) + self.conv_loudness = nn.Sequential(ln.Conv1d(n_fft if wavebased else n_mels,128,1,1,0), + nn.LeakyReLU(0.2), + ln.Conv1d(128,128,1,1,0), + nn.LeakyReLU(0.2), + ln.Conv1d(128,1,1,1,0,bias_initial=-9. 
if power_synth else -4.6),) - self.conv_formants = ln.Conv1d(128,128,3,1,1) + if self.broud: + self.conv_formants = ln.Conv1d(128,128,3,1,1) + else: + self.conv_formants = ln.Conv1d(128,128,3,1,1) self.norm_formants = nn.GroupNorm(32,128) self.conv_formants_freqs = ln.Conv1d(128,n_formants,1,1,0) self.conv_formants_bandwidth = ln.Conv1d(128,n_formants,1,1,0) self.conv_formants_amplitude = ln.Conv1d(128,n_formants,1,1,0) - self.conv_formants_freqs_noise = ln.Conv1d(128,1,1,1,0) - self.conv_formants_bandwidth_noise = ln.Conv1d(128,1,1,1,0) - self.conv_formants_amplitude_noise = ln.Conv1d(128,1,1,1,0) + self.conv_formants_freqs_noise = ln.Conv1d(128,self.n_formants_noise,1,1,0) + self.conv_formants_bandwidth_noise = ln.Conv1d(128,self.n_formants_noise,1,1,0) + self.conv_formants_amplitude_noise = ln.Conv1d(128,self.n_formants_noise,1,1,0) self.amplifier = Parameter(torch.Tensor(1)) + self.bias = Parameter(torch.Tensor(1)) with torch.no_grad(): nn.init.constant_(self.amplifier,1.0) + nn.init.constant_(self.bias,-0.5) - def forward(self,x): + def forward(self,x,x_denoise=None,duomask=False,noise_level = None,x_amp=None): x = x.squeeze(dim=1).permute(0,2,1) #B * f * T - loudness = torch.mean(x*0.5+0.5,dim=1,keepdim=True) - loudness = F.softplus(self.amplifier)*loudness - x = F.leaky_relu(self.norm1(self.conv1(x)),0.2) - x_common = F.leaky_relu(self.norm2(self.conv2(x)),0.2) - - # loudness = F.relu(self.conv_loudness(x_common)) - amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1) - amplitudes_h = F.softmax(self.conv_amplitudes_h(x_common),dim=1) - - # x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2) - x_fundementals = self.f0_drop(F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2)) - # f0 in mel: - # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) - # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz - # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz - # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice - # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice - - # f0 in hz: - # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * 302 + 118 # 118hz < f0 < 420 hz - f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * 240 + 180 # 180hz < f0 < 420 hz - f0 = torch.clamp(mel_scale(self.n_mels,f0)/(self.n_mels*1.0),min=0.0001) + if x_denoise is not None: + x_denoise = x_denoise.squeeze(dim=1).permute(0,2,1) + # x_denoise_amp = amplitude(x_denoise,self.noise_db,self.max_db) + # import pdb; pdb.set_trace() + if x_amp is None: + x_amp = amplitude(x,self.noise_db,self.max_db,trim_noise=True) + else: + x_amp = x_amp.squeeze(dim=1).permute(0,2,1) + hann_win = torch.hann_window(5,periodic=False).reshape([1,1,5,1]) + x_smooth = F.conv2d(x.unsqueeze(1).transpose(-2,-1),hann_win,padding=[2,0]).transpose(-2,-1).squeeze(1) + x_amp_smooth = F.conv2d(x_amp.unsqueeze(1).transpose(-2,-1),hann_win,padding=[2,0]).transpose(-2,-1).squeeze(1) + # loudness = F.softplus(self.amplifier)*(torch.mean(x_denoise_amp,dim=1,keepdim=True)) + # loudness = F.relu(F.softplus(self.amplifier)*(torch.mean(x_amp,dim=1,keepdim=True)-noise_level*0.0003)) + # loudness = torch.mean((x*0.5+0.5) if x_denoise is None else (x_denoise*0.5+0.5),dim=1,keepdim=True) + # loudness = F.softplus(self.amplifier)*(loudness) + 
# loudness = F.softplus(self.amplifier)*torch.mean(x_amp,dim=1,keepdim=True) + # loudness = F.softplus(self.amplifier)*F.relu(loudness - F.softplus(self.bias)) + if self.power_synth: + loudness = F.softplus((1. if self.wavebased else 1.0)*self.conv_loudness(x_smooth)) + else: + loudness = F.softplus((1. if self.wavebased else 1.0)*self.conv_loudness(x_smooth)) + # loudness = F.softplus((1. if self.wavebased else 1.0)*self.conv_loudness(x)) + # loudness = F.relu(self.conv_loudness(x)) + + # if not self.power_synth: + # loudness = loudness.sqrt() + + if self.broud: + x_narrow = x + x_narrow = F.leaky_relu(self.norm1_narrow(self.conv1_narrow(x_narrow)),0.2) + x_common_narrow = F.leaky_relu(self.norm2_narrow(self.conv2_narrow(x_narrow)),0.2) + amplitudes = F.softmax(self.conv_amplitudes_narrow(x_common_narrow),dim=1) + amplitudes_h = F.softmax(self.conv_amplitudes_h_narrow(x_common_narrow),dim=1) + x_fundementals_narrow = self.f0_drop_narrow(F.leaky_relu(self.norm_fundementals_narrow(self.conv_fundementals_narrow(x_common_narrow)),0.2)) + + x_amp = amplitude(x.unsqueeze(1),self.noise_db,self.max_db).transpose(-2,-1) + x_mel = to_db(torchaudio.transforms.MelScale(f_max=8000,n_stft=self.n_fft)(x_amp.transpose(-2,-1)),self.noise_db,self.max_db).squeeze(1) + x = F.leaky_relu(self.norm1_mel(self.conv1_mel(x_mel)),0.2) + x_common_mel = F.leaky_relu(self.norm2_mel(self.conv2_mel(x)),0.2) + x_fundementals_mel = self.f0_drop_mel(F.leaky_relu(self.norm_fundementals_mel(self.conv_fundementals_mel(x_common_mel)),0.2)) + + f0_hz = torch.sigmoid(self.conv_f0_narrow(torch.cat([x_fundementals_narrow,x_fundementals_mel],dim=1))) * 120 + 180 # 180hz < f0 < 300 hz + f0 = torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001) + + hann_win = torch.hann_window(21,periodic=False).reshape([1,1,21,1]) + x = to_db(F.conv2d(x_amp,hann_win,padding=[10,0]).transpose(-2,-1),self.noise_db,self.max_db).squeeze(1) + x = F.leaky_relu(self.norm1(self.conv1(x)),0.2) + x_common = F.leaky_relu(self.norm2(self.conv2(x)),0.2) + + + + else: + x = F.leaky_relu(self.norm1(self.conv1(x)),0.2) + x_common = F.leaky_relu(self.norm2(self.conv2(x)),0.2) + + + # loudness = F.relu(self.conv_loudness(x_common)) + # loudness = F.relu(self.conv_loudness(x_common)) +(10**(self.noise_db/10.-1) if self.wavebased else 0) + amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1) + amplitudes_h = F.softmax(self.conv_amplitudes_h(x_common),dim=1) + + # x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2) + x_fundementals = self.f0_drop(F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2)) + # f0 in mel: + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) + # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice + + # f0 in hz: + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 120 + 180 # 180hz < f0 < 300 hz + f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 332 + 88 # 88hz < f0 < 420 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 120 + 180 # 180hz < f0 < 300 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 528 + 88 # 88hz 
< f0 < 616 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 302 + 118 # 118hz < f0 < 420 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 240 + 180 # 180hz < f0 < 420 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 260 + 160 # 160hz < f0 < 420 hz + f0 = torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001) x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2) formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants)) @@ -250,22 +714,29 @@ def forward(self,x): # abs freq: formants_freqs_hz = formants_freqs*(self.formant_freq_limits_abs[:,:self.n_formants]-self.formant_freq_limits_abs_low[:,:self.n_formants])+self.formant_freq_limits_abs_low[:,:self.n_formants] # formants_freqs_hz = formants_freqs*6839 - formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/(self.n_mels*1.0),min=0) + formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/(self.n_mels*1.0),min=0) # formants_freqs = formants_freqs + f0 # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) *6839 # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 150 # formants_bandwidth_hz = 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+100) - formants_bandwidth_hz = (torch.sigmoid(self.conv_formants_bandwidth(x_formants))) * (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100) + # formants_bandwidth_hz = (torch.sigmoid(self.conv_formants_bandwidth(x_formants))) * (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100) #good for spec based method + # formants_bandwidth_hz = ((3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.0125*torch.relu(formants_freqs_hz-1000)+100))#good for spec based method + formants_bandwidth_hz = 0.65*(0.00625*torch.relu(formants_freqs_hz)+375) + # formants_bandwidth_hz = (2**(torch.tanh(self.formant_bandwitdh_slop))*0.001*torch.relu(formants_freqs_hz-4000*torch.sigmoid(self.formant_bandwitdh_thres))+375*2**(torch.tanh(self.formant_bandwitdh_bias))) + # formants_bandwidth_hz = torch.exp(0.4*torch.tanh(self.conv_formants_bandwidth(x_formants))) * (0.00625*torch.relu(formants_freqs_hz-0)+375) + # formants_bandwidth_hz = (100*(torch.tanh(self.conv_formants_bandwidth(x_formants))) + (0.035*torch.relu(formants_freqs_hz-950)+250)) if self.wavebased else ((3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.0125*torch.relu(formants_freqs_hz-1000)+100))#good for spec based method + # formants_bandwidth_hz = (100*(torch.tanh(self.conv_formants_bandwidth(x_formants))) + (0.035*torch.relu(formants_freqs_hz-950)+250)) if self.wavebased else ((torch.sigmoid(self.conv_formants_bandwidth(x_formants))+0.2) * (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100))#good for spec based method # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+50) # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * (0.075*3*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+(2*torch.sigmoid(self.formant_bandwitdh_ratio)+1)*50) # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+50) 
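# --- Quick numeric check, not part of the original patch, of the bandwidth
# heuristic kept above, bw = 0.65*(0.00625*relu(F) + 375): it is nearly
# constant over the whole formant range, growing by only about 4 Hz per kHz
# of formant frequency, unlike the learned-ratio variants left in comments.
for F_hz in (300., 1000., 3000., 7000.):
    bw = 0.65 * (0.00625 * max(F_hz, 0.) + 375)
    print(F_hz, round(bw, 1))   # 300 -> 245.0, 1000 -> 247.8, 3000 -> 255.9, 7000 -> 272.2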
- formants_bandwidth = bandwidth_mel(formants_freqs_hz,formants_bandwidth_hz,self.n_mels) + formants_bandwidth = bandwidth_mel(formants_freqs_hz,formants_bandwidth_hz,self.n_mels) # formants_bandwidth_upper = formants_freqs_hz+formants_bandwidth_hz/2 # formants_bandwidth_lower = torch.clamp(formants_freqs_hz-formants_bandwidth_hz/2,min=1) # formants_bandwidth = (mel_scale(self.n_mels,formants_bandwidth_upper) - mel_scale(self.n_mels,formants_bandwidth_lower))/(self.n_mels*1.0) # formants_amplitude = F.softmax(torch.cumsum(-F.relu(self.conv_formants_amplitude(x_formants)),dim=1),dim=1) - formants_amplitude = F.softmax(self.conv_formants_amplitude(x_formants),dim=1) + formants_amplitude_logit = self.conv_formants_amplitude(x_formants) + formants_amplitude = F.softmax(formants_amplitude_logit,dim=1) formants_freqs_noise = torch.sigmoid(self.conv_formants_freqs_noise(x_formants)) # # relative freq: @@ -274,14 +745,25 @@ def forward(self,x): # formants_freqs_hz = torch.cumsum(formants_freqs_hz,dim=1) # abs freq: - formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:1]-self.formant_freq_limits_abs_noise_low[:,:1])+self.formant_freq_limits_abs_noise_low[:,:1] + formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:self.n_formants_noise]-self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise])+self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise] + if duomask: + formants_freqs_hz_noise = torch.cat([formants_freqs_hz,formants_freqs_hz_noise],dim=1) # formants_freqs_hz = formants_freqs*6839 formants_freqs_noise = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz_noise)/(self.n_mels*1.0),min=0) # formants_freqs = formants_freqs + f0 # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) *6839 # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 150 - formants_bandwidth_hz_noise = torch.sigmoid(self.conv_formants_bandwidth_noise(x_formants)) * 4000 + 1000 + # formants_bandwidth_hz_noise = torch.sigmoid(self.conv_formants_bandwidth_noise(x_formants)) * 8000 + 2000 + formants_bandwidth_hz_noise = self.conv_formants_bandwidth_noise(x_formants) + # formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 8000 + 2000 #2000-10000 + # formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 2000 #0-2000 + formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 2344 + 586 #2000-10000 + formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 586 #0-2000 + formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz_noise_1,formants_bandwidth_hz_noise_2],dim=1) + if duomask: + formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz,formants_bandwidth_hz_noise],dim=1) + # formants_bandwidth_hz_noise = torch.sigmoid(self.conv_formants_bandwidth_noise(x_formants)) * 4000 + 1000 # formants_bandwidth_hz_noise = torch.sigmoid(self.conv_formants_bandwidth_noise(x_formants)) * 4000 # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+50) # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * (0.075*3*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+(2*torch.sigmoid(self.formant_bandwitdh_ratio)+1)*50) @@ -290,9 +772,13 @@ def forward(self,x): # formants_bandwidth_upper = 
formants_freqs_hz+formants_bandwidth_hz/2 # formants_bandwidth_lower = torch.clamp(formants_freqs_hz-formants_bandwidth_hz/2,min=1) # formants_bandwidth = (mel_scale(self.n_mels,formants_bandwidth_upper) - mel_scale(self.n_mels,formants_bandwidth_lower))/(self.n_mels*1.0) - formants_amplitude_noise = F.softmax(self.conv_formants_amplitude_noise(x_formants),dim=1) + formants_amplitude_noise_logit = self.conv_formants_amplitude_noise(x_formants) + if duomask: + formants_amplitude_noise_logit = torch.cat([formants_amplitude_logit,formants_amplitude_noise_logit],dim=1) + formants_amplitude_noise = F.softmax(formants_amplitude_noise_logit,dim=1) components = { 'f0':f0, + 'f0_hz':f0_hz, 'loudness':loudness, 'amplitudes':amplitudes, 'amplitudes_h':amplitudes_h, @@ -387,44 +873,76 @@ def forward(self,x): @ECOG_ENCODER.register("ECoGMappingBottleneck") class ECoGMapping_Bottleneck(nn.Module): - def __init__(self,n_mels,n_formants): - super(ECoGMapping_Bottleneck, self).__init__() - self.n_formants = n_formants - self.n_mels = n_mels - self.from_ecog = FromECoG(16,residual=True) - self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,resample = [2,1,1],pool='MAX') - self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,resample = [2,1,1],pool='MAX') - self.norm_mask = nn.GroupNorm(32,64) - self.mask = ln.Conv3d(64,1,[3,1,1],1,[1,0,0]) - self.conv3 = ECoGMappingBlock(64,128,[3,3,3],residual=True,resample = [2,2,2],pool='MAX') - self.conv4 = ECoGMappingBlock(128,256,[3,3,3],residual=True,resample = [2,2,2],pool='MAX') - self.norm = nn.GroupNorm(32,256) - self.conv5 = ln.Conv1d(256,256,3,1,1) - self.norm2 = nn.GroupNorm(32,256) - self.conv6 = ln.ConvTranspose1d(256, 128, 3, 2, 1, transform_kernel=True) - self.norm3 = nn.GroupNorm(32,128) - self.conv7 = ln.ConvTranspose1d(128, 64, 3, 2, 1, transform_kernel=True) - self.norm4 = nn.GroupNorm(32,64) - self.conv8 = ln.ConvTranspose1d(64, 32, 3, 2, 1, transform_kernel=True) - self.norm5 = nn.GroupNorm(32,32) - self.conv9 = ln.ConvTranspose1d(32, 32, 3, 2, 1, transform_kernel=True) - self.norm6 = nn.GroupNorm(32,32) - - self.conv_fundementals = ln.Conv1d(32,32,3,1,1) - self.norm_fundementals = nn.GroupNorm(32,32) - self.conv_f0 = ln.Conv1d(32,1,1,1,0) - self.conv_amplitudes = ln.Conv1d(32,2,1,1,0) - self.conv_loudness = ln.Conv1d(32,1,1,1,0) - - self.conv_formants = ln.Conv1d(32,32,3,1,1) - self.norm_formants = nn.GroupNorm(32,32) - self.conv_formants_freqs = ln.Conv1d(32,n_formants,1,1,0) - self.conv_formants_bandwidth = ln.Conv1d(32,n_formants,1,1,0) - self.conv_formants_amplitude = ln.Conv1d(32,n_formants,1,1,0) + def __init__(self,n_mels,n_formants,n_formants_noise=1): + super(ECoGMapping_Bottleneck, self).__init__() + self.n_formants = n_formants + self.n_mels = n_mels + self.n_formants_noise = n_formants_noise + + self.formant_freq_limits_diff = torch.tensor([950.,2450.,2100.]).reshape([1,3,1]) #freq difference + self.formant_freq_limits_diff_low = torch.tensor([300.,300.,0.]).reshape([1,3,1]) #freq difference + + # self.formant_freq_limits_abs = torch.tensor([950.,2800.,3400.,4700.]).reshape([1,4,1]) #freq difference + # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2800.,3400]).reshape([1,4,1]) #freq difference + + # self.formant_freq_limits_abs = torch.tensor([950.,3300.,3600.,4700.]).reshape([1,4,1]) #freq difference + # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.,3400]).reshape([1,4,1]) #freq difference + + self.formant_freq_limits_abs = torch.tensor([950.,3400.,3800.,5000.,6000.,7000.]).reshape([1,6,1]) 
#freq difference + self.formant_freq_limits_abs_low = torch.tensor([300.,700.,1800.,3400,5000.,6000.]).reshape([1,6,1]) #freq difference + + # self.formant_freq_limits_abs_noise = torch.tensor([7000.]).reshape([1,1,1]) #freq difference + self.formant_freq_limits_abs_noise = torch.tensor([8000.,7000.,7000.]).reshape([1,3,1]) #freq difference + self.formant_freq_limits_abs_noise_low = torch.tensor([4000.,3000.,3000.]).reshape([1,3,1]) #freq difference + + self.formant_bandwitdh_ratio = Parameter(torch.Tensor(1)) + self.formant_bandwitdh_slop = Parameter(torch.Tensor(1)) + with torch.no_grad(): + nn.init.constant_(self.formant_bandwitdh_ratio,0) + nn.init.constant_(self.formant_bandwitdh_slop,0) + + + self.from_ecog = FromECoG(16,residual=True) + self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,resample = [2,1,1],pool='MAX') + self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,resample = [2,1,1],pool='MAX') + self.norm_mask = nn.GroupNorm(32,64) + self.mask = ln.Conv3d(64,1,[3,1,1],1,[1,0,0]) + self.conv3 = ECoGMappingBlock(64,128,[3,3,3],residual=True,resample = [2,2,2],pool='MAX') + self.conv4 = ECoGMappingBlock(128,256,[3,3,3],residual=True,resample = [2,2,2],pool='MAX') + self.norm = nn.GroupNorm(32,256) + self.conv5 = ln.Conv1d(256,256,3,1,1) + self.norm2 = nn.GroupNorm(32,256) + self.conv6 = ln.ConvTranspose1d(256, 128, 3, 2, 1, transform_kernel=True) + self.norm3 = nn.GroupNorm(32,128) + self.conv7 = ln.ConvTranspose1d(128, 64, 3, 2, 1, transform_kernel=True) + self.norm4 = nn.GroupNorm(32,64) + self.conv8 = ln.ConvTranspose1d(64, 32, 3, 2, 1, transform_kernel=True) + self.norm5 = nn.GroupNorm(32,32) + self.conv9 = ln.ConvTranspose1d(32, 32, 3, 2, 1, transform_kernel=True) + self.norm6 = nn.GroupNorm(32,32) + + self.conv_fundementals = ln.Conv1d(32,32,3,1,1) + self.norm_fundementals = nn.GroupNorm(32,32) + self.f0_drop = nn.Dropout() + self.conv_f0 = ln.Conv1d(32,1,1,1,0) + self.conv_amplitudes = ln.Conv1d(32,2,1,1,0) + self.conv_amplitudes_h = ln.Conv1d(32,2,1,1,0) + self.conv_loudness = ln.Conv1d(32,1,1,1,0,bias_initial=-9.) 
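# --- Illustrative sketch, not part of the original patch, of the 1x1-conv
# head pattern this decoder uses, written with plain nn.Conv1d instead of the
# repo's lreq.Conv1d; the output ranges and the -9 bias initialization mirror
# the code above.
import torch
from torch import nn
from torch.nn import functional as F

x_common_demo = torch.randn(2, 32, 100)     # B x 32 x T shared features
f0_head = nn.Conv1d(32, 1, 1)               # per-timestep 1x1 regressor
loud_head = nn.Conv1d(32, 1, 1)
nn.init.constant_(loud_head.bias, -9.)      # like bias_initial=-9.: softplus(-9) ~ 1e-4,
                                            # so predicted loudness starts near silence
f0_hz_demo = torch.sigmoid(f0_head(x_common_demo)) * 332 + 88   # bounded to 88-420 Hz
loudness_demo = F.softplus(loud_head(x_common_demo))            # positive loudness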
+ + self.conv_formants = ln.Conv1d(32,32,3,1,1) + self.norm_formants = nn.GroupNorm(32,32) + self.conv_formants_freqs = ln.Conv1d(32,n_formants,1,1,0) + self.conv_formants_bandwidth = ln.Conv1d(32,n_formants,1,1,0) + self.conv_formants_amplitude = ln.Conv1d(32,n_formants,1,1,0) + + self.conv_formants_freqs_noise = ln.Conv1d(32,n_formants_noise,1,1,0) + self.conv_formants_bandwidth_noise = ln.Conv1d(32,n_formants_noise,1,1,0) + self.conv_formants_amplitude_noise = ln.Conv1d(32,n_formants_noise,1,1,0) + def forward(self,ecog,mask_prior,mni): - x_common_all = [] - for d in range(len(ecog)): + x_common_all = [] + for d in range(len(ecog)): x = ecog[d] x = x.reshape([-1,1,x.shape[1],15,15]) mask_prior_d = mask_prior[d].reshape(-1,1,1,15,15) @@ -448,32 +966,77 @@ def forward(self,ecog,mask_prior,mni): x_common = F.leaky_relu(self.norm6(x),0.2) x_common_all += [x_common] - x_common = torch.cat(x_common_all,dim=0) - loudness = F.relu(self.conv_loudness(x_common)) - amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1) + x_common = torch.cat(x_common_all,dim=0) + loudness = F.softplus(self.conv_loudness(x_common)) + amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1) + amplitudes_h = F.softmax(self.conv_amplitudes_h(x_common),dim=1) - x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2) - # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) - # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz - f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz - # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice - # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice - x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2) - formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants)) - formants_freqs = torch.cumsum(formants_freqs,dim=1) - formants_freqs = formants_freqs - # formants_freqs = formants_freqs + f0 - formants_bandwidth = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) - formants_amplitude = F.softmax(self.conv_formants_amplitude(x_formants),dim=1) - - components = { 'f0':f0, - 'loudness':loudness, - 'amplitudes':amplitudes, - 'freq_formants':formants_freqs, - 'bandwidth_formants':formants_bandwidth, - 'amplitude_formants':formants_amplitude, - } - return components + # x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2) + x_fundementals = self.f0_drop(F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2)) + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) + # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice + + # f0 in hz: + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * 528 + 88 # 88hz < f0 < 616 hz + f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 332 + 88 # 88hz < f0 < 420 hz + f0 = torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001) + + 
x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2) + formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants)) + # formants_freqs = torch.cumsum(formants_freqs,dim=1) + # formants_freqs = formants_freqs + + # abs freq + formants_freqs_hz = formants_freqs*(self.formant_freq_limits_abs[:,:self.n_formants]-self.formant_freq_limits_abs_low[:,:self.n_formants])+self.formant_freq_limits_abs_low[:,:self.n_formants] + formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/(self.n_mels*1.0),min=0) + + # formants_freqs = formants_freqs + f0 + # formants_bandwidth = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) + # formants_bandwidth_hz = (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100) + formants_bandwidth_hz = 0.65*(0.00625*torch.relu(formants_freqs_hz)+375) + # formants_bandwidth_hz = (torch.sigmoid(self.conv_formants_bandwidth(x_formants))) * (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100) + formants_bandwidth = bandwidth_mel(formants_freqs_hz,formants_bandwidth_hz,self.n_mels) + formants_amplitude_logit = self.conv_formants_amplitude(x_formants) + formants_amplitude = F.softmax(formants_amplitude_logit,dim=1) + + formants_freqs_noise = torch.sigmoid(self.conv_formants_freqs_noise(x_formants)) + formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:self.n_formants_noise]-self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise])+self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise] + # formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:1]-self.formant_freq_limits_abs_noise_low[:,:1])+self.formant_freq_limits_abs_noise_low[:,:1] + formants_freqs_hz_noise = torch.cat([formants_freqs_hz,formants_freqs_hz_noise],dim=1) + formants_freqs_noise = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz_noise)/(self.n_mels*1.0),min=0) + # formants_bandwidth_hz_noise = F.relu(self.conv_formants_bandwidth_noise(x_formants)) * 8000 + 2000 + # formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels) + # formants_amplitude_noise = F.softmax(self.conv_formants_amplitude_noise(x_formants),dim=1) + formants_bandwidth_hz_noise = self.conv_formants_bandwidth_noise(x_formants) + formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 2344 + 586 #2000-10000 + formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 586 #0-2000 + formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz_noise_1,formants_bandwidth_hz_noise_2],dim=1) + formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz,formants_bandwidth_hz_noise],dim=1) + formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels) + formants_amplitude_noise_logit = self.conv_formants_amplitude_noise(x_formants) + formants_amplitude_noise_logit = torch.cat([formants_amplitude_logit,formants_amplitude_noise_logit],dim=1) + formants_amplitude_noise = F.softmax(formants_amplitude_noise_logit,dim=1) + + components = { 'f0':f0, + 'f0_hz':f0_hz, + 'loudness':loudness, + 'amplitudes':amplitudes, + 'amplitudes_h':amplitudes_h, + 'freq_formants_hamon':formants_freqs, + 'bandwidth_formants_hamon':formants_bandwidth, + 'freq_formants_hamon_hz':formants_freqs_hz, + 'bandwidth_formants_hamon_hz':formants_bandwidth_hz, + 'amplitude_formants_hamon':formants_amplitude, + 
'freq_formants_noise':formants_freqs_noise, + 'bandwidth_formants_noise':formants_bandwidth_noise, + 'freq_formants_noise_hz':formants_freqs_hz_noise, + 'bandwidth_formants_noise_hz':formants_bandwidth_hz_noise, + 'amplitude_formants_noise':formants_amplitude_noise, + } + return components class BackBone(nn.Module): diff --git a/net_formant_masknormed.py b/net_formant_masknormed.py new file mode 100644 index 00000000..b379f39a --- /dev/null +++ b/net_formant_masknormed.py @@ -0,0 +1,1154 @@ +import os +import pdb +from random import triangular +import torch +import torchaudio +from torch import nn +from torch.nn import functional as F +from torch.nn import Parameter as P +from torch.nn import init +from torch.nn.parameter import Parameter +import numpy as np +import lreq as ln +import math +from registry import * +from transformer_models.position_encoding import build_position_encoding +from transformer_models.transformer import Transformer as TransformerTS +from transformer_models.transformer_nonlocal import Transformer as TransformerNL + +def db(x,noise = -80, slope =35, powerdb=True): + if powerdb: + return ((2*torchaudio.transforms.AmplitudeToDB()(x)).clamp(min=noise)-slope-noise)/slope + else: + return ((torchaudio.transforms.AmplitudeToDB()(x)).clamp(min=noise)-slope-noise)/slope + +# def amplitude(x,noise=-80,slope=35): +# return 10**((x*slope+noise+slope)/20.) + +def amplitude(x,noise_db=-60,max_db=35,trim_noise=False): + if trim_noise: + x_db = (x+1)/2*(max_db-noise_db)+noise_db + if type(x) is np.ndarray: + return 10**(x_db/10)*(np.sign(x_db-noise_db)*0.5+0.5) + else: + return 10**(x_db/10)*((x_db-noise_db).sign()*0.5+0.5) + else: + return 10**(((x+1)/2*(max_db-noise_db)+noise_db)/10) + +def to_db(x,noise_db=-60,max_db=35): + return (torchaudio.transforms.AmplitudeToDB()(x)-noise_db)/(max_db-noise_db)*2-1 + +def wave2spec(wave,n_fft=256,wave_fr=16000,spec_fr=125,noise_db=-60,max_db=22.5,to_db=True,power=2): +# def wave2spec(wave,n_fft=256,wave_fr=16000,spec_fr=125,noise_db=-50,max_db=22.5,to_db=True): + if to_db: + return (torchaudio.transforms.AmplitudeToDB()(torchaudio.transforms.Spectrogram(n_fft*2-1,win_length=n_fft*2-1,hop_length=int(wave_fr/spec_fr),power=power)(wave)).clamp(min=noise_db,max=max_db).transpose(-2,-1)-noise_db)/(max_db-noise_db)*2-1 + else: + return torchaudio.transforms.Spectrogram(n_fft*2-1,win_length=n_fft*2-1,hop_length=int(wave_fr/spec_fr),power=power)(wave).transpose(-2,-1) + + +# def mel_scale(n_mels,hz,min_octave=-31.,max_octave=95.,pt=True): +# def mel_scale(n_mels,hz,min_octave=-58.,max_octave=100.,pt=True): +def mel_scale(n_mels,hz,min_octave=-31.,max_octave=102.,pt=True): + #take absolute hz, return abs mel + # return (torch.log2(hz/440)+31/24)*24*n_mels/126 + if pt: + return (torch.log2(hz/440.)-min_octave/24.)*24*n_mels/(max_octave-min_octave) + else: + return (np.log2(hz/440.)-min_octave/24.)*24*n_mels/(max_octave-min_octave) + +# def inverse_mel_scale(mel,min_octave=-31.,max_octave=95.): +# def inverse_mel_scale(mel,min_octave=-58.,max_octave=100.): +def inverse_mel_scale(mel,min_octave=-31.,max_octave=102.): + #take normalized mel, return absolute hz + # return 440*2**(mel*126/24-31/24) + return 440*2**(mel*(max_octave-min_octave)/24.+min_octave/24.) 
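# --- Sanity check, not part of the original patch, of the octave scale
# defined above: mel_scale returns an absolute bin in [0, n_mels] while
# inverse_mel_scale expects that bin normalized by n_mels, so the round trip
# is inverse_mel_scale(mel_scale(n, hz)/n) == hz. With min_octave=-31 and
# max_octave=102 the scale spans roughly 440*2**(-31/24) ~ 180 Hz at bin 0
# to 440*2**(102/24) ~ 8372 Hz at bin n_mels.
import torch
hz = torch.tensor([180., 440., 1000., 8000.])
n_mels_demo = 64
mel = mel_scale(n_mels_demo, hz)               # absolute bins in [0, 64]
hz_back = inverse_mel_scale(mel / n_mels_demo)
assert torch.allclose(hz, hz_back, rtol=1e-4)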
+ +# def mel_scale(n_mels,hz,f_min=160.,f_max=8000.,pt=True): +# #take absolute hz, return abs mel +# # return (torch.log2(hz/440)+31/24)*24*n_mels/126 +# m_min = 2595.0 * np.log10(1.0 + (f_min / 700.0)) +# m_max = 2595.0 * np.log10(1.0 + (f_max / 700.0)) +# m_min_ = m_min + (m_max-m_min)/(n_mels+1) +# m_max_ = m_max +# if pt: +# return (2595.0 * torch.log10(1.0 + (hz / 700.0))-m_min_)/(m_max_-m_min_)*n_mels +# else: +# return (2595.0 * np.log10(1.0 + (hz / 700.0))-m_min_)/(m_max_-m_min_)*n_mels + +# def inverse_mel_scale(mel,f_min=160.,f_max=8000.,n_mels=64): +# #take normalized mel, return absolute hz +# # return 440*2**(mel*126/24-31/24) +# m_min = 2595.0 * np.log10(1.0 + (f_min / 700.0)) +# m_max = 2595.0 * np.log10(1.0 + (f_max / 700.0)) +# m_min_ = m_min + (m_max-m_min)/(n_mels+1) +# m_max_ = m_max +# return 700.0 * (10**((mel*(m_max_-m_min_) + m_min_)/ 2595.0) - 1.0) + +def ind2hz(ind,n_fft,max_freq=8000.): + #input abs ind, output abs hz + return ind/(1.0*n_fft)*max_freq + +def hz2ind(hz,n_fft,max_freq=8000.): + # input abs hz, output abs ind + return hz/(1.0*max_freq)*n_fft + +def bandwidth_mel(freqs_hz,bandwidth_hz,n_mels): + # input hz bandwidth, output abs bandwidth on mel + bandwidth_upper = freqs_hz+bandwidth_hz/2. + bandwidth_lower = torch.clamp(freqs_hz-bandwidth_hz/2.,min=1) + bandwidth = mel_scale(n_mels,bandwidth_upper) - mel_scale(n_mels,bandwidth_lower) + return bandwidth + +def torch_P2R(radii, angles): + return radii * torch.cos(angles),radii * torch.sin(angles) +def inverse_spec_to_audio(spec,n_fft = 511,win_length = 511,hop_length = 128,power_synth=True): + ''' + generate random phase, then use istft to inverse spec to audio + ''' + window = torch.hann_window(win_length) + angles = torch.randn_like(spec).uniform_(0, np.pi*2)#torch.zeros_like(spec)#torch.randn_like(spec).uniform_(0, np.pi*2) + spec = spec**0.5 if power_synth else spec + spec_complex = torch.stack(torch_P2R(spec, angles),dim=-1) #real and image in same dim + return torchaudio.functional.istft(spec_complex, n_fft=n_fft, window=window, center=True, win_length=win_length, hop_length=hop_length) + +@GENERATORS.register("GeneratorFormant") +class FormantSysth(nn.Module): + def __init__(self, n_mels=64, k=100, wavebased=False,n_fft=256,noise_db=-50,max_db=22.5,dbbased=False,add_bgnoise=True,log10=False,noise_from_data=False,return_wave=False,power_synth=False): + super(FormantSysth, self).__init__() + self.wave_fr = 16e3 + self.spec_fr = 125 + self.n_fft = n_fft + self.noise_db=noise_db + self.max_db = max_db + self.n_mels = n_mels + self.k = k + self.dbbased=dbbased + self.log10 = log10 + self.add_bgnoise = add_bgnoise + self.wavebased=wavebased + self.noise_from_data = noise_from_data + self.linear_scale = wavebased + self.return_wave = return_wave + self.power_synth = power_synth + self.timbre = Parameter(torch.Tensor(1,1,n_mels)) + self.timbre_mapping = nn.Sequential( + ln.Conv1d(1,128,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,128,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,2,1), + # nn.Sigmoid(), + ) + self.bgnoise_mapping = nn.Sequential( + ln.Conv2d(2,2,[1,5],padding=[0,2],gain=1,bias=False), + # nn.Sigmoid(), + ) + self.noise_mapping = nn.Sequential( + ln.Conv2d(2,2,[1,5],padding=[0,2],gain=1,bias=False), + # nn.Sigmoid(), + ) + + self.bgnoise_mapping2 = nn.Sequential( + ln.Conv1d(1,128,1,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,128,1,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,1,1,gain=1,bias=False), + # nn.Sigmoid(), + ) + self.noise_mapping2 = nn.Sequential( + ln.Conv1d(1,128,1,1), + nn.LeakyReLU(0.2), 
+ ln.Conv1d(128,128,1,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,1,1,1,gain=1,bias=False), + # nn.Sigmoid(), + ) + self.prior_exp = np.array([0.4963,0.0745,1.9018]) + self.timbre_parameter = Parameter(torch.Tensor(2)) + self.wave_noise_amplifier = Parameter(torch.Tensor(1)) + self.wave_hamon_amplifier = Parameter(torch.Tensor(1)) + + if noise_from_data: + self.bgnoise_amp = Parameter(torch.Tensor(1)) + with torch.no_grad(): + nn.init.constant_(self.bgnoise_amp,1) + else: + self.bgnoise_dist = Parameter(torch.Tensor(1,1,1,self.n_fft if self.wavebased else self.n_mels)) + with torch.no_grad(): + nn.init.constant_(self.bgnoise_dist,1.0) + # self.silient = Parameter(torch.Tensor(1,1,n_mels)) + self.silient = -1 + with torch.no_grad(): + nn.init.constant_(self.timbre,1.0) + nn.init.constant_(self.timbre_parameter[0],7) + nn.init.constant_(self.timbre_parameter[1],0.004) + nn.init.constant_(self.wave_noise_amplifier,1) + nn.init.constant_(self.wave_hamon_amplifier,4.) + + # nn.init.constant_(self.silient,-1.0) + +# def formant_mask(self,freq,bandwith,amplitude): +# # freq, bandwith, amplitude: B*formants*time +# freq_cord = torch.arange(self.n_mels) +# time_cord = torch.arange(freq.shape[2]) +# grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) +# grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 +# grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 +# freq = freq.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants +# bandwith = bandwith.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants +# amplitude = amplitude.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants +# # masks = amplitude*torch.exp(-0.693*(grid_freq-freq)**2/(2*(bandwith+0.001)**2)) #B,time,freqchans, formants +# masks = amplitude*torch.exp(-(grid_freq-freq)**2/(2*(bandwith/np.sqrt(2*np.log(2))+0.001)**2)) #B,time,freqchans, formants +# masks = masks.unsqueeze(dim=1) #B,1,time,freqchans, formants +# return masks + + def formant_mask(self,freq_hz,bandwith_hz,amplitude,linear=False, triangle_mask = False,duomask=True, n_formant_noise=1,f0_hz=None,noise=False): + # freq, bandwith, amplitude: B*formants*time + freq_cord = torch.arange(self.n_fft if linear else self.n_mels) + time_cord = torch.arange(freq_hz.shape[2]) + grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) + grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq_hz = ind2hz(grid_freq,self.n_fft,self.wave_fr/2) if linear else inverse_mel_scale(grid_freq/(self.n_mels*1.0)) + freq_hz = freq_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + bandwith_hz = bandwith_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + amplitude = amplitude.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + if self.power_synth: + amplitude = amplitude + alpha = (2*np.sqrt(2*np.log(np.sqrt(2)))) + if not noise: + k = (torch.arange(self.k)+1).reshape([1,self.k,1]) + k_f0 = k*f0_hz #BxkxT + freq_range = (-torch.sign(k_f0-7800)*0.5+0.5) + k_f0 = k_f0.permute([0,2,1]).unsqueeze(-1) #BxTxkx1 + hamonic_dist = (amplitude*(torch.exp(-((k_f0-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2))+1E-6).sqrt()).sum(-1).permute([0,2,1]) #BxkxT + norm = (((hamonic_dist*freq_range)**2).sum(1,keepdim=True)+1E-10).sqrt()+1E-10 #Bx1xT + hamonic_dist = (hamonic_dist*freq_range)/norm # sum_k(hamonic_dist**2) = 1, + if self.return_wave: + t = torch.arange(int(f0_hz.shape[2]/self.spec_fr*self.wave_fr))/(1.0*self.wave_fr) #in second + 
hamonic_dist = F.interpolate(hamonic_dist,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr),mode = 'linear',align_corners=False) + # if self.wavebased: + if triangle_mask: + if duomask: + # masks_hamon = amplitude[...,:-n_formant_noise]*torch.exp(-(0.693*(grid_freq_hz-freq_hz[...,:-n_formant_noise]))**2/(2*(bandwith_hz[...,:-n_formant_noise]+0.01)**2)) + masks_hamon = amplitude[...,:-n_formant_noise]*torch.exp(-((grid_freq_hz-freq_hz[...,:-n_formant_noise]))**2/(2*(bandwith_hz[...,:-n_formant_noise]/alpha+0.01)**2)) + bw = bandwith_hz[...,-n_formant_noise:] + masks_noise = F.relu(amplitude[...,-n_formant_noise:] * (1 - (1-1/np.sqrt(2))*2/(bw+0.01)*(grid_freq_hz-freq_hz[...,-n_formant_noise:]).abs())) + # masks_noise = amplitude[...,-n_formant_noise:] * (1 - 2/(bw+0.01)*(grid_freq_hz-freq_hz[...,-n_formant_noise:]).abs())*(-torch.sign(torch.abs(grid_freq_hz-freq_hz[...,-n_formant_noise:])/(bw+0.01)-0.5)*0.5+0.5) + masks = torch.cat([masks_hamon,masks_noise],dim=-1) + else: + masks = F.relu(amplitude * (1 - (1-1/np.sqrt(2))*2/(bandwith_hz+0.01)*(grid_freq_hz-freq_hz).abs())) + else: + # masks = amplitude*torch.exp(-(0.693*(grid_freq_hz-freq_hz))**2/(2*(bandwith_hz+0.01)**2)) + if self.power_synth: + masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2)) + else: + # masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2)) + masks = amplitude*(torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2))+1E-6).sqrt() + # masks = amplitude*(torch.exp(-((grid_freq_hz-freq_hz))**2/((2*(bandwith_hz/alpha+0.01)**2)))+1E-10).sqrt() #B,t,freq,formants + if noise: + masks_sum = masks.sum(-1,keepdim=True) + masks = masks/((((masks_sum**2).sum(-2,keepdim=True)/self.n_fft)+1E-10).sqrt()+1E-10) + masks = masks.unsqueeze(dim=1) #B,1,time,freqchans, formants + return masks + else: + masks = masks/norm.squeeze(1).unsqueeze(-1).unsqueeze(-1) + masks = masks.unsqueeze(dim=1) #B,1,time,freqchans, formants + if self.return_wave: + return masks, hamonic_dist#B,1,time,freqchans + else: + return masks + # else: + # if triangle_mask: + # if duomask: + # # masks_hamon = amplitude[...,:-n_formant_noise]*torch.exp(-(0.693*(grid_freq_hz-freq_hz[...,:-n_formant_noise]))**2/(2*(bandwith_hz[...,:-n_formant_noise]+0.01)**2)) + # masks_hamon = amplitude[...,:-n_formant_noise]*torch.exp(-((grid_freq_hz-freq_hz[...,:-n_formant_noise]))**2/(2*(bandwith_hz[...,:-n_formant_noise]/alpha+0.01)**2)) + # bw = bandwith_hz[...,-n_formant_noise:] + # masks_noise = F.relu(amplitude[...,-n_formant_noise:] * (1 - (1-1/np.sqrt(2))*2/(bw+0.01)*(grid_freq_hz-freq_hz[...,-n_formant_noise:]).abs())) + # # masks_noise = amplitude[...,-n_formant_noise:] * (1 - 2/(bw+0.01)*(grid_freq_hz-freq_hz[...,-n_formant_noise:]).abs())*(-torch.sign(torch.abs(grid_freq_hz-freq_hz[...,-n_formant_noise:])/(bw+0.01)-0.5)*0.5+0.5) + # masks = torch.cat([masks_hamon,masks_noise],dim=-1) + # else: + # masks = F.relu(amplitude * (1 - (1-1/np.sqrt(2))*2/(bandwith_hz+0.01)*(grid_freq_hz-freq_hz).abs())) + # # masks = amplitude * (1 - 2/(bandwith_hz+0.01)*(grid_freq_hz-freq_hz).abs())*(-torch.sign(torch.abs(grid_freq_hz-freq_hz)/(bandwith_hz+0.01)-0.5)*0.5+0.5) + # else: + # # masks = amplitude*torch.exp(-(0.693*(grid_freq_hz-freq_hz))**2/(2*(bandwith_hz+0.01)**2)) + # masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2)) + # masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/(2*np.sqrt(2*np.log(2)))+0.01)**2)) #B,time,freqchans, formants 
+ # masks = amplitude*torch.exp(-(0.693*(grid_freq_hz-freq_hz))**2/(2*(bandwith_hz+0.01)**2)) #B,time,freqchans, formants + # masks = masks.unsqueeze(dim=1) #B,1,time,freqchans, formants + # if self.return_wave: + # return masks, hamonic_dist#B,1,time,freqchans + # else: + # return masks + + def voicing_wavebased(self,f0_hz): + #f0: B*1*time, hz + t = torch.arange(int(f0_hz.shape[2]/self.spec_fr*self.wave_fr))/(1.0*self.wave_fr) #in second + t = t.unsqueeze(dim=0).unsqueeze(dim=0) #1, 1, time + k = (torch.arange(self.k)+1).reshape([1,self.k,1]) + f0_hz_interp = F.interpolate(f0_hz,t.shape[-1],mode='linear',align_corners=False) + k_f0 = k*f0_hz_interp + k_f0_sum = 2*np.pi*torch.cumsum(k_f0,-1)/(1.0*self.wave_fr) + wave_k = np.sqrt(2)*torch.sin(k_f0_sum) * (-torch.sign(k_f0-7800)*0.5+0.5) + # wave = 0.12*torch.sin(2*np.pi*k_f0*t) * (-torch.sign(k_f0-6000)*0.5+0.5) + # wave = 0.09*torch.sin(2*np.pi*k_f0*t) * (-torch.sign(k_f0-self.wave_fr/2)*0.5+0.5) + # wave = 0.09*torch.sigmoid(self.wave_hamon_amplifier) * torch.sin(2*np.pi*k_f0*t) * (-torch.sign(k_f0-self.wave_fr/2)*0.5+0.5) + wave = wave_k.sum(dim=1,keepdim=True) + # wave = F.softplus(self.wave_hamon_amplifier) * wave.sum(dim=1,keepdim=True) + spec = wave2spec(wave,self.n_fft,self.wave_fr,self.spec_fr,self.noise_db,self.max_db,to_db=self.dbbased,power=2. if self.power_synth else 1.) + if self.return_wave: + return spec,wave_k + else: + return spec + + def unvoicing_wavebased(self,f0_hz,bg=False,mapping=True): + # return torch.ones([1,1,f0_hz.shape[2],512]) + # noise = 0.3*torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # noise = 0.03*torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + if bg: + noise = torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + if mapping: + noise = self.bgnoise_mapping2(noise) + else: + noise = np.sqrt(3.)*(2*torch.rand([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)])-1) + if mapping: + noise = self.noise_mapping2(noise) + # noise = 0.3 * torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # noise = 0.3 * F.softplus(self.wave_noise_amplifier) * torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + return wave2spec(noise,self.n_fft,self.wave_fr,self.spec_fr,self.noise_db,self.max_db,to_db=self.dbbased,power=2. if self.power_synth else 1.) 
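+    # Note on the two wave-based sources above: voicing_wavebased() integrates
+    # k*f0 with a cumulative sum to obtain each harmonic's phase (correct even
+    # for time-varying f0), scales each sinusoid by sqrt(2) so it has unit RMS,
+    # and gates out harmonics above 7800 Hz, just under the 8 kHz Nyquist of
+    # the 16 kHz wave rate. unvoicing_wavebased() draws Gaussian noise for the
+    # background and uniform noise sqrt(3)*(2u-1) (zero mean, unit variance)
+    # for frication, then converts either one to a spectrogram via wave2spec().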
+ + # def unvoicing_wavebased(self,f0_hz): + # # return torch.ones([1,1,f0_hz.shape[2],512]) + # # noise = 0.3*torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # noise = 0.1*torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # # noise = 0.3 * torch.sigmoid(self.wave_noise_amplifier) * torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # return wave2spec(noise,self.n_fft,self.wave_fr,self.spec_fr,self.noise_db,self.max_db,to_db=False) + + def voicing_linear(self,f0_hz,bandwith=2.5): + #f0: B*1*time, hz + freq_cord = torch.arange(self.n_fft) + time_cord = torch.arange(f0_hz.shape[2]) + grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) + grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + f0_hz = f0_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, 1 + f0_hz = f0_hz.repeat([1,1,1,self.k]) #B,time,1, self.k + f0_hz = f0_hz*(torch.arange(self.k)+1).reshape([1,1,1,self.k]) + # bandwith=4 + # bandwith_lower = torch.clamp(f0-bandwith/2,min=1) + # bandwith_upper = f0+bandwith/2 + # bandwith = mel_scale(self.n_mels,bandwith_upper) - mel_scale(self.n_mels,bandwith_lower) + f0 = hz2ind(f0_hz,self.n_fft) + + # hamonics = torch.exp(-(grid_freq-f0)**2/(2*sigma**2)) #gaussian + # hamonics = (1-((grid_freq_hz-f0_hz)/(2*bandwith_hz/2))**2)*(-torch.sign(torch.abs(grid_freq_hz-f0_hz)/(2*bandwith_hz)-0.5)*0.5+0.5) #welch + freq_cord_reshape = freq_cord.reshape([1,1,1,self.n_fft]) + hamonics = (1 - 2/bandwith*(grid_freq-f0).abs())*(-torch.sign(torch.abs(grid_freq-f0)/(bandwith)-0.5)*0.5+0.5) #triangular + # hamonics = (1-((grid_freq-f0)/(bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(bandwith)-0.5)*0.5+0.5) #welch + # hamonics = (1-((grid_freq-f0)/(2.5*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(2.5*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = (1-((grid_freq-f0)/(3*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(3*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = torch.cos(np.pi*torch.abs(grid_freq-f0)/(4*bandwith))**2*(-torch.sign(torch.abs(grid_freq-f0)/(4*bandwith)-0.5)*0.5+0.5) #hanning + # hamonics = (hamonics.sum(dim=-1)).unsqueeze(dim=1) # B,1,T,F + # condition = (torch.sign(freq_cord_reshape-switch)*0.5+0.5) + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-slop*(freq_cord_reshape-switch)*condition)-1)*condition) # B,1,T,F + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)-torch.abs(self.prior_exp_parameter[2])) * torch.exp(-torch.abs(self.prior_exp_parameter[1])*freq_cord.reshape([1,1,1,self.n_mels])) + torch.abs(self.prior_exp_parameter[2]) # B,1,T,F + + # timbre_parameter = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]).unsqueeze(1) + # condition = (torch.sign(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*0.5+0.5) + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*torch.sigmoid(timbre_parameter[...,1:2])*(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*condition)-1)*condition) # B,1,T,F + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*torch.sigmoid(timbre_parameter[...,1:2])*(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*condition)-1)*condition) * F.softplus(timbre_parameter[...,2:3]) + timbre_parameter[...,3:4] # B,1,T,F + # timbre = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]) + # hamonics = 
(hamonics.sum(dim=-1)*timbre).unsqueeze(dim=1) # B,1,T,F + # hamonics = (hamonics.sum(dim=-1)*self.timbre).unsqueeze(dim=1) # B,1,T,F + + hamonics = (hamonics.sum(dim=-1)).unsqueeze(dim=1) + # hamonics = 180*F.softplus(self.wave_hamon_amplifier)*(hamonics.sum(dim=-1)).unsqueeze(dim=1) + + return hamonics + + def voicing(self,f0_hz): + #f0: B*1*time, hz + freq_cord = torch.arange(self.n_mels) + time_cord = torch.arange(f0_hz.shape[2]) + grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) + grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq_hz = inverse_mel_scale(grid_freq/(self.n_mels*1.0)) + f0_hz = f0_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, 1 + f0_hz = f0_hz.repeat([1,1,1,self.k]) #B,time,1, self.k + f0_hz = f0_hz*(torch.arange(self.k)+1).reshape([1,1,1,self.k]) + if self.log10: + f0_mel = mel_scale(self.n_mels,f0_hz) + band_low_hz = inverse_mel_scale((f0_mel-1)/(self.n_mels*1.0),n_mels = self.n_mels) + band_up_hz = inverse_mel_scale((f0_mel+1)/(self.n_mels*1.0),n_mels = self.n_mels) + bandwith_hz = band_up_hz-band_low_hz + band_low_mel = mel_scale(self.n_mels,band_low_hz) + band_up_mel = mel_scale(self.n_mels,band_up_hz) + bandwith = band_up_mel-band_low_mel + else: + bandwith_hz = 24.7*(f0_hz*4.37/1000+1) + bandwith = bandwidth_mel(f0_hz,bandwith_hz,self.n_mels) + # bandwith_lower = torch.clamp(f0-bandwith/2,min=1) + # bandwith_upper = f0+bandwith/2 + # bandwith = mel_scale(self.n_mels,bandwith_upper) - mel_scale(self.n_mels,bandwith_lower) + f0 = mel_scale(self.n_mels,f0_hz) + switch = mel_scale(self.n_mels,torch.abs(self.timbre_parameter[0])*f0_hz[...,0]).unsqueeze(1) + slop = (torch.abs(self.timbre_parameter[1])*f0_hz[...,0]).unsqueeze(1) + freq_cord_reshape = freq_cord.reshape([1,1,1,self.n_mels]) + if not self.dbbased: + # sigma = bandwith/(np.sqrt(2*np.log(2))); + sigma = bandwith/(2*np.sqrt(2*np.log(2))); + hamonics = torch.exp(-(grid_freq-f0)**2/(2*sigma**2)) #gaussian + # hamonics = (1-((grid_freq_hz-f0_hz)/(2*bandwith_hz/2))**2)*(-torch.sign(torch.abs(grid_freq_hz-f0_hz)/(2*bandwith_hz)-0.5)*0.5+0.5) #welch + else: + # # hamonics = (1-((grid_freq-f0)/(1.75*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(1.75*bandwith)-0.5)*0.5+0.5) #welch + hamonics = (1-((grid_freq-f0)/(2.5*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(2.5*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = (1-((grid_freq-f0)/(3*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(3*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = torch.cos(np.pi*torch.abs(grid_freq-f0)/(4*bandwith))**2*(-torch.sign(torch.abs(grid_freq-f0)/(4*bandwith)-0.5)*0.5+0.5) #hanning + # hamonics = (hamonics.sum(dim=-1)).unsqueeze(dim=1) # B,1,T,F + # condition = (torch.sign(freq_cord_reshape-switch)*0.5+0.5) + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-slop*(freq_cord_reshape-switch)*condition)-1)*condition) # B,1,T,F + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)-torch.abs(self.prior_exp_parameter[2])) * torch.exp(-torch.abs(self.prior_exp_parameter[1])*freq_cord.reshape([1,1,1,self.n_mels])) + torch.abs(self.prior_exp_parameter[2]) # B,1,T,F + + timbre_parameter = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]).unsqueeze(1) + condition = (torch.sign(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*0.5+0.5) + amp = F.softplus(self.wave_hamon_amplifier) if self.dbbased else 180*F.softplus(self.wave_hamon_amplifier) + 
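# The next line applies a learned spectral tilt: `condition` is a 0/1 step
+        # that switches on above a learned cutoff bin,
+        # sigmoid(timbre_parameter[...,0:1])*n_mels, and the exp() factor decays
+        # the summed harmonics above that cutoff with a learned slope.
+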
hamonics = amp * ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*torch.sigmoid(timbre_parameter[...,1:2])*(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*condition)-1)*condition) # B,1,T,F + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*torch.sigmoid(timbre_parameter[...,1:2])*(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*condition)-1)*condition) * F.softplus(timbre_parameter[...,2:3]) + timbre_parameter[...,3:4] # B,1,T,F + # timbre = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]) + # hamonics = (hamonics.sum(dim=-1)*timbre).unsqueeze(dim=1) # B,1,T,F + # hamonics = (hamonics.sum(dim=-1)*self.timbre).unsqueeze(dim=1) # B,1,T,F + # return F.softplus(self.wave_hamon_amplifier)*hamonics + return hamonics + + def unvoicing(self,f0,bg=False,mapping=True): + # return (0.25*torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels]))+1 + rnd = torch.randn([f0.shape[0],2,f0.shape[2],self.n_fft if self.wavebased else self.n_mels]) + if mapping: + rnd = self.bgnoise_mapping(rnd) if bg else self.noise_mapping(rnd) + real = rnd[:,0:1] + img = rnd[:,1:2] + if self.dbbased: + return (2*torchaudio.transforms.AmplitudeToDB()(torch.sqrt(real**2 + img**2+1E-10))+80).clamp(min=0)/35 + # return (2*torchaudio.transforms.AmplitudeToDB()(F.softplus(self.wave_noise_amplifier)*torch.sqrt(torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels])**2 + torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels])**2))+80).clamp(min=0)/35 + else: + # return torch.ones([f0.shape[0],1,f0.shape[2],self.n_mels]) + return 180*F.softplus(self.wave_noise_amplifier) * torch.sqrt(real**2 + img**2+1E-10) + # return F.softplus(self.wave_noise_amplifier)*torch.sqrt(torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels])**2 + torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels])**2) + + # return (F.softplus(self.wave_noise_amplifier)) * (0.25*torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels]))+1 + # return torch.ones([f0.shape[0],1,f0.shape[2],self.n_mels]) + + def forward(self,components,enable_hamon_excitation=True,enable_noise_excitation=True,enable_bgnoise=True): + # f0: B*1*T, amplitudes: B*2(voicing,unvoicing)*T, freq_formants,bandwidth_formants,amplitude_formants: B*formants*T + amplitudes = components['amplitudes'].unsqueeze(dim=-1) + amplitudes_h = components['amplitudes_h'].unsqueeze(dim=-1) + loudness = components['loudness'].unsqueeze(dim=-1) + f0_hz = components['f0_hz'] + # import pdb;pdb.set_trace() + if self.wavebased: + # self.hamonics = 1800*F.softplus(self.wave_hamon_amplifier)*self.voicing_linear(f0_hz) + # self.noise = 180*self.unvoicing(f0_hz,bg=False,mapping=False) + # self.bgnoise = 18*self.unvoicing(f0_hz,bg=True,mapping=False) + # import pdb;pdb.set_trace() + self.hamonics = self.voicing_wavebased(f0_hz) + self.noise = self.unvoicing_wavebased(f0_hz,bg=False,mapping=False) + self.bgnoise = self.unvoicing_wavebased(f0_hz,bg=True) + else: + self.hamonics = self.voicing(f0_hz) + self.noise = self.unvoicing(f0_hz,bg=False) + self.bgnoise = self.unvoicing(f0_hz,bg=True) + # freq_formants = components['freq_formants']*self.n_mels + # bandwidth_formants = components['bandwidth_formants']*self.n_mels + # excitation = amplitudes[:,0:1]*hamonics + # excitation = loudness*(amplitudes[:,0:1]*hamonics) + + self.excitation_noise = loudness*(amplitudes[:,-1:])*self.noise if self.power_synth else (loudness*amplitudes[:,-1:]+1E-10).sqrt()*self.noise + duomask = 
components['freq_formants_noise_hz'].shape[1]>components['freq_formants_hamon_hz'].shape[1] + n_formant_noise = (components['freq_formants_noise_hz'].shape[1]-components['freq_formants_hamon_hz'].shape[1]) if duomask else components['freq_formants_noise_hz'].shape[1] + self.mask_hamon = self.formant_mask(components['freq_formants_hamon_hz'],components['bandwidth_formants_hamon_hz'],components['amplitude_formants_hamon'],linear = self.linear_scale,f0_hz = f0_hz) + self.mask_noise = self.formant_mask(components['freq_formants_noise_hz'],components['bandwidth_formants_noise_hz'],components['amplitude_formants_noise'],linear = self.linear_scale,triangle_mask=False if self.wavebased else True,duomask=duomask,n_formant_noise=n_formant_noise,f0_hz = f0_hz,noise=True) + # self.mask_hamon = self.formant_mask(components['freq_formants_hamon']*self.n_mels,components['bandwidth_formants_hamon'],components['amplitude_formants_hamon']) + # self.mask_noise = self.formant_mask(components['freq_formants_noise']*self.n_mels,components['bandwidth_formants_noise'],components['amplitude_formants_noise']) + if self.return_wave: + self.hamonics,self.hamonics_wave = self.hamonics + self.mask_hamon, self.hamonic_dist = self.mask_hamon + self.mask_noise = self.mask_noise + if self.power_synth: + self.excitation_hamon_wave = F.interpolate(loudness[...,-1].sqrt()*amplitudes[:,0:1][...,-1],self.hamonics_wave.shape[-1],mode='linear',align_corners=False)*self.hamonics_wave + else: + self.excitation_hamon_wave = F.interpolate((loudness[...,-1]*amplitudes[:,0:1][...,-1]+1E-10).sqrt(),self.hamonics_wave.shape[-1],mode='linear',align_corners=False)*self.hamonics_wave + self.hamonics_wave_ = (self.excitation_hamon_wave*self.hamonic_dist).sum(1,keepdim=True) + # self.hamonics = 40*self.hamonics + self.mask_hamon_sum = 1.414*self.mask_hamon.sum(dim=-1) + # self.mask_hamon_sum = self.mask_hamon.sum(dim=-1) + self.mask_noise_sum = self.mask_noise.sum(dim=-1) + bgdist = F.softplus(self.bgnoise_amp)*self.noise_dist if self.noise_from_data else F.softplus(self.bgnoise_dist) + if self.power_synth: + self.excitation_hamon = loudness*(amplitudes[:,0:1])*self.hamonics + else: + self.excitation_hamon = (loudness*amplitudes[:,0:1]+1E-10).sqrt()*self.hamonics + # import pdb;pdb.set_trace() + self.noise_excitation = self.excitation_noise*self.mask_noise_sum + # import pdb; pdb.set_trace() + if self.return_wave: + self.noise_excitation_wave = 2*inverse_spec_to_audio(self.noise_excitation.squeeze(1).permute(0,2,1),n_fft=self.n_fft*2-1,power_synth=self.power_synth) + self.noise_excitation_wave = F.pad(self.noise_excitation_wave,[0,self.hamonics_wave_.shape[2]-self.noise_excitation_wave.shape[1]]) + self.noise_excitation_wave = self.noise_excitation_wave.unsqueeze(1) + self.rec_wave = self.noise_excitation_wave+self.hamonics_wave_ + if self.wavebased: + # import pdb; pdb.set_trace() + bgn = bgdist*self.bgnoise*0.0003 if (self.add_bgnoise and enable_bgnoise) else 0 + speech = ((self.excitation_hamon*self.mask_hamon_sum) if enable_hamon_excitation else torch.zeros(self.excitation_hamon.shape)) + (self.noise_excitation if enable_noise_excitation else 0) + bgn + # speech = ((self.excitation_hamon*self.mask_hamon_sum) if enable_hamon_excitation else torch.zeros(self.excitation_hamon.shape)) + (self.excitation_noise*self.mask_noise_sum if enable_noise_excitation else 0) + (((bgdist*self.bgnoise*0.0003) if not self.dbbased else (2*torchaudio.transforms.AmplitudeToDB()(bgdist*0.0003)/35. 
+ self.bgnoise)) if (self.add_bgnoise and enable_bgnoise) else 0) + # speech = speech if self.power_synth else speech**2 + speech = (torchaudio.transforms.AmplitudeToDB()(speech).clamp(min=self.noise_db)-self.noise_db)/(self.max_db-self.noise_db)*2-1 + else: + # speech = ((self.excitation_hamon*self.mask_hamon_sum) if enable_hamon_excitation else torch.zeros(self.excitation_hamon.shape)) + (((bgdist*self.bgnoise*0.0003) if not self.dbbased else (2*torchaudio.transforms.AmplitudeToDB()(bgdist*0.0003)/35. + self.bgnoise)) if (self.add_bgnoise and enable_bgnoise) else 0) + (self.silient*torch.ones(self.mask_hamon_sum.shape) if self.dbbased else 0) + speech = ((self.excitation_hamon*self.mask_hamon_sum) if enable_hamon_excitation else torch.zeros(self.excitation_hamon.shape)) + (self.noise_excitation if enable_noise_excitation else 0) + (((bgdist*self.bgnoise*0.0003) if not self.dbbased else (2*torchaudio.transforms.AmplitudeToDB()(bgdist*0.0003)/35. + self.bgnoise)) if (self.add_bgnoise and enable_bgnoise) else 0) + (self.silient*torch.ones(self.mask_hamon_sum.shape) if self.dbbased else 0) + # speech = self.excitation_hamon*self.mask_hamon_sum + (self.excitation_noise*self.mask_noise_sum if enable_noise_excitation else 0) + self.silient*torch.ones(self.mask_hamon_sum.shape) + if not self.dbbased: + speech = db(speech) + # import pdb; pdb.set_trace() + if self.return_wave: + return speech,self.rec_wave + else: + return speech + +@ENCODERS.register("EncoderFormant") +class FormantEncoder(nn.Module): + def __init__(self, n_mels=64, n_formants=4,n_formants_noise=2,min_octave=-31,max_octave=96,wavebased=False,n_fft=256,noise_db=-50,max_db=22.5,broud=True,power_synth=False,hop_length=128): + super(FormantEncoder, self).__init__() + self.wavebased = wavebased + self.n_mels = n_mels + self.n_formants = n_formants + self.n_formants_noise = n_formants_noise + self.min_octave = min_octave + self.max_octave = max_octave + self.noise_db = noise_db + self.max_db = max_db + self.broud = broud + self.n_fft = n_fft + self.power_synth=power_synth + self.formant_freq_limits_diff = torch.tensor([950.,2450.,2100.]).reshape([1,3,1]) #freq difference + self.formant_freq_limits_diff_low = torch.tensor([300.,300.,0.]).reshape([1,3,1]) #freq difference + + # self.formant_freq_limits_abs = torch.tensor([950.,2800.,3400.,4700.]).reshape([1,4,1]) #freq difference + # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2800.,3400]).reshape([1,4,1]) #freq difference + + # self.formant_freq_limits_abs = torch.tensor([950.,3300.,3600.,4700.]).reshape([1,4,1]) #freq difference + # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.,3400]).reshape([1,4,1]) #freq difference + + self.formant_freq_limits_abs = torch.tensor([950.,3400.,3800.,5000.,6000.,7000.]).reshape([1,6,1]) #freq difference + self.formant_freq_limits_abs_low = torch.tensor([300.,700.,1800.,3400,5000.,6000.]).reshape([1,6,1]) #freq difference + # self.formant_freq_limits_abs_low = torch.tensor([300.,700.,2700.,3400]).reshape([1,4,1]) #freq difference + + # self.formant_freq_limits_abs_noise = torch.tensor([7000.]).reshape([1,1,1]) #freq difference + self.formant_freq_limits_abs_noise = torch.tensor([8000.,7000.,7000.]).reshape([1,3,1]) #freq difference + self.formant_freq_limits_abs_noise_low = torch.tensor([4000.,3000.,3000.]).reshape([1,3,1]) #freq difference + # self.formant_freq_limits_abs_noise_low = torch.tensor([4000.,500.,500.]).reshape([1,3,1]) #freq difference + + self.formant_bandwitdh_bias = Parameter(torch.Tensor(1)) + 
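# formant_bandwitdh_{bias,slop,thres} are zero-initialized learned scalars;
+        # in this revision they are referenced only by the commented-out
+        # bandwidth formulas in forward(), where the active rule is the fixed
+        # map 0.65*(0.00625*relu(freq_hz)+375).
+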
self.formant_bandwitdh_slop = Parameter(torch.Tensor(1)) + self.formant_bandwitdh_thres = Parameter(torch.Tensor(1)) + with torch.no_grad(): + nn.init.constant_(self.formant_bandwitdh_bias,0) + nn.init.constant_(self.formant_bandwitdh_slop,0) + nn.init.constant_(self.formant_bandwitdh_thres,0) + + # self.formant_freq_limits = torch.cumsum(self.formant_freq_limits_diff,dim=0) + # self.formant_freq_limits_mel = torch.cat([torch.tensor([0.]),mel_scale(n_mels,self.formant_freq_limits)/n_mels]) + # self.formant_freq_limits_mel_diff = torch.reshape(self.formant_freq_limits_mel[1:]-self.formant_freq_limits_mel[:-1],[1,3,1]) + if broud: + if wavebased: + self.conv1_narrow = ln.Conv1d(n_fft,64,3,1,1) + self.conv1_mel = ln.Conv1d(128,64,3,1,1) + self.norm1_mel = nn.GroupNorm(32,64) + self.conv2_mel = ln.Conv1d(64,128,3,1,1) + self.norm2_mel = nn.GroupNorm(32,128) + self.conv_fundementals_mel = ln.Conv1d(128,128,3,1,1) + self.norm_fundementals_mel = nn.GroupNorm(32,128) + self.f0_drop_mel = nn.Dropout() + else: + self.conv1_narrow = ln.Conv1d(n_mels,64,3,1,1) + self.norm1_narrow = nn.GroupNorm(32,64) + self.conv2_narrow = ln.Conv1d(64,128,3,1,1) + self.norm2_narrow = nn.GroupNorm(32,128) + + self.conv_fundementals_narrow = ln.Conv1d(128,128,3,1,1) + self.norm_fundementals_narrow = nn.GroupNorm(32,128) + self.f0_drop_narrow = nn.Dropout() + if wavebased: + self.conv_f0_narrow = ln.Conv1d(256,1,1,1,0) + else: + self.conv_f0_narrow = ln.Conv1d(128,1,1,1,0) + + self.conv_amplitudes_narrow = ln.Conv1d(128,2,1,1,0) + self.conv_amplitudes_h_narrow = ln.Conv1d(128,2,1,1,0) + + if wavebased: + self.conv1 = ln.Conv1d(n_fft,64,3,1,1) + else: + self.conv1 = ln.Conv1d(n_mels,64,3,1,1) + self.norm1 = nn.GroupNorm(32,64) + self.conv2 = ln.Conv1d(64,128,3,1,1) + self.norm2 = nn.GroupNorm(32,128) + + self.conv_fundementals = ln.Conv1d(128,128,3,1,1) + self.norm_fundementals = nn.GroupNorm(32,128) + self.f0_drop = nn.Dropout() + self.conv_f0 = ln.Conv1d(128,1,1,1,0) + + self.conv_amplitudes = ln.Conv1d(128,2,1,1,0) + self.conv_amplitudes_h = ln.Conv1d(128,2,1,1,0) + # self.conv_loudness = nn.Sequential(ln.Conv1d(n_fft if wavebased else n_mels,128,1,1,0), + # nn.LeakyReLU(0.2), + # ln.Conv1d(128,128,1,1,0), + # nn.LeakyReLU(0.2), + # ln.Conv1d(128,1,1,1,0,bias_initial=0.5),) + self.conv_loudness = nn.Sequential(ln.Conv1d(n_fft if wavebased else n_mels,128,1,1,0), + nn.LeakyReLU(0.2), + ln.Conv1d(128,128,1,1,0), + nn.LeakyReLU(0.2), + ln.Conv1d(128,1,1,1,0,bias_initial=-9.),) + + if self.broud: + self.conv_formants = ln.Conv1d(128,128,3,1,1) + else: + self.conv_formants = ln.Conv1d(128,128,3,1,1) + self.norm_formants = nn.GroupNorm(32,128) + self.conv_formants_freqs = ln.Conv1d(128,n_formants,1,1,0) + self.conv_formants_bandwidth = ln.Conv1d(128,n_formants,1,1,0) + self.conv_formants_amplitude = ln.Conv1d(128,n_formants,1,1,0) + + self.conv_formants_freqs_noise = ln.Conv1d(128,self.n_formants_noise,1,1,0) + self.conv_formants_bandwidth_noise = ln.Conv1d(128,self.n_formants_noise,1,1,0) + self.conv_formants_amplitude_noise = ln.Conv1d(128,self.n_formants_noise,1,1,0) + + self.amplifier = Parameter(torch.Tensor(1)) + self.bias = Parameter(torch.Tensor(1)) + with torch.no_grad(): + nn.init.constant_(self.amplifier,1.0) + nn.init.constant_(self.bias,-0.5) + + def forward(self,x,x_denoise=None,duomask=False,noise_level = None,x_amp=None): + x = x.squeeze(dim=1).permute(0,2,1) #B * f * T + if x_denoise is not None: + x_denoise = x_denoise.squeeze(dim=1).permute(0,2,1) + # x_denoise_amp = 
amplitude(x_denoise,self.noise_db,self.max_db) + # import pdb; pdb.set_trace() + if x_amp is None: + x_amp = amplitude(x,self.noise_db,self.max_db,trim_noise=True) + else: + x_amp = x_amp.squeeze(dim=1).permute(0,2,1) + # import pdb; pdb.set_trace() + hann_win = torch.hann_window(5,periodic=False).reshape([1,1,5,1]) + x_smooth = F.conv2d(x.unsqueeze(1).transpose(-2,-1),hann_win,padding=[2,0]).transpose(-2,-1).squeeze(1) + x_amp_smooth = F.conv2d(x_amp.unsqueeze(1).transpose(-2,-1),hann_win,padding=[2,0]).transpose(-2,-1).squeeze(1) + # loudness = F.softplus(self.amplifier)*(torch.mean(x_denoise_amp,dim=1,keepdim=True)) + # loudness = F.relu(F.softplus(self.amplifier)*(torch.mean(x_amp,dim=1,keepdim=True)-noise_level*0.0003)) + # loudness = torch.mean((x*0.5+0.5) if x_denoise is None else (x_denoise*0.5+0.5),dim=1,keepdim=True) + # loudness = F.softplus(self.amplifier)*(loudness) + # loudness = F.softplus(self.amplifier)*torch.mean(x_amp,dim=1,keepdim=True) + # loudness = F.softplus(self.amplifier)*F.relu(loudness - F.softplus(self.bias)) + if self.power_synth: + loudness = F.softplus((1. if self.wavebased else 1.0)*self.conv_loudness(x_smooth)) + else: + # loudness = F.softplus((1. if self.wavebased else 1.0)*self.conv_loudness(x_amp**2)) + loudness = F.softplus((1. if self.wavebased else 1.0)*self.conv_loudness(x_smooth)) + # loudness = F.relu(self.conv_loudness(x)) + + # if not self.power_synth: + # loudness = loudness.sqrt() + + if self.broud: + x_narrow = x + x_narrow = F.leaky_relu(self.norm1_narrow(self.conv1_narrow(x_narrow)),0.2) + x_common_narrow = F.leaky_relu(self.norm2_narrow(self.conv2_narrow(x_narrow)),0.2) + amplitudes = F.softmax(self.conv_amplitudes_narrow(x_common_narrow),dim=1) + amplitudes_h = F.softmax(self.conv_amplitudes_h_narrow(x_common_narrow),dim=1) + x_fundementals_narrow = self.f0_drop_narrow(F.leaky_relu(self.norm_fundementals_narrow(self.conv_fundementals_narrow(x_common_narrow)),0.2)) + + x_amp = amplitude(x.unsqueeze(1),self.noise_db,self.max_db).transpose(-2,-1) + x_mel = to_db(torchaudio.transforms.MelScale(f_max=8000,n_stft=self.n_fft)(x_amp.transpose(-2,-1)),self.noise_db,self.max_db).squeeze(1) + x = F.leaky_relu(self.norm1_mel(self.conv1_mel(x_mel)),0.2) + x_common_mel = F.leaky_relu(self.norm2_mel(self.conv2_mel(x)),0.2) + x_fundementals_mel = self.f0_drop_mel(F.leaky_relu(self.norm_fundementals_mel(self.conv_fundementals_mel(x_common_mel)),0.2)) + + f0_hz = torch.sigmoid(self.conv_f0_narrow(torch.cat([x_fundementals_narrow,x_fundementals_mel],dim=1))) * 120 + 180 # 180hz < f0 < 300 hz + f0 = torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001) + + hann_win = torch.hann_window(21,periodic=False).reshape([1,1,21,1]) + x = to_db(F.conv2d(x_amp,hann_win,padding=[10,0]).transpose(-2,-1),self.noise_db,self.max_db).squeeze(1) + x = F.leaky_relu(self.norm1(self.conv1(x)),0.2) + x_common = F.leaky_relu(self.norm2(self.conv2(x)),0.2) + + + + else: + x = F.leaky_relu(self.norm1(self.conv1(x)),0.2) + x_common = F.leaky_relu(self.norm2(self.conv2(x)),0.2) + + + # loudness = F.relu(self.conv_loudness(x_common)) + # loudness = F.relu(self.conv_loudness(x_common)) +(10**(self.noise_db/10.-1) if self.wavebased else 0) + amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1) + amplitudes_h = F.softmax(self.conv_amplitudes_h(x_common),dim=1) + + # x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2) + x_fundementals = self.f0_drop(F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2)) 
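+        # f0 head: the dropout-regularized fundamental features feed conv_f0,
+        # whose sigmoid output is mapped to an 88-420 Hz pitch range below and
+        # then converted to a clamped, normalized (0-1) mel coordinate.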
+ # f0 in mel: + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) + # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice + + # f0 in hz: + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 120 + 180 # 180hz < f0 < 300 hz + f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 332 + 88 # 88hz < f0 < 420 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 120 + 180 # 180hz < f0 < 300 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 528 + 88 # 88hz < f0 < 616 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 302 + 118 # 118hz < f0 < 420 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 240 + 180 # 180hz < f0 < 420 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 260 + 160 # 160hz < f0 < 420 hz + f0 = torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001) + + x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2) + formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants)) + # # relative freq: + # formants_freqs_hz = formants_freqs*(self.formant_freq_limits_diff[:,:self.n_formants]-self.formant_freq_limits_diff_low[:,:self.n_formants])+self.formant_freq_limits_diff_low[:,:self.n_formants] + # # formants_freqs_hz = formants_freqs*6839 + # formants_freqs_hz = torch.cumsum(formants_freqs_hz,dim=1) + + # abs freq: + formants_freqs_hz = formants_freqs*(self.formant_freq_limits_abs[:,:self.n_formants]-self.formant_freq_limits_abs_low[:,:self.n_formants])+self.formant_freq_limits_abs_low[:,:self.n_formants] + # formants_freqs_hz = formants_freqs*6839 + formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/(self.n_mels*1.0),min=0) + + # formants_freqs = formants_freqs + f0 + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) *6839 + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 150 + # formants_bandwidth_hz = 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+100) + # formants_bandwidth_hz = (torch.sigmoid(self.conv_formants_bandwidth(x_formants))) * (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100) #good for spec based method + # formants_bandwidth_hz = ((3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.0125*torch.relu(formants_freqs_hz-1000)+100))#good for spec based method + formants_bandwidth_hz = 0.65*(0.00625*torch.relu(formants_freqs_hz)+375) + # formants_bandwidth_hz = (2**(torch.tanh(self.formant_bandwitdh_slop))*0.001*torch.relu(formants_freqs_hz-4000*torch.sigmoid(self.formant_bandwitdh_thres))+375*2**(torch.tanh(self.formant_bandwitdh_bias))) + # formants_bandwidth_hz = torch.exp(0.4*torch.tanh(self.conv_formants_bandwidth(x_formants))) * (0.00625*torch.relu(formants_freqs_hz-0)+375) + # formants_bandwidth_hz = (100*(torch.tanh(self.conv_formants_bandwidth(x_formants))) + (0.035*torch.relu(formants_freqs_hz-950)+250)) if self.wavebased else ((3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.0125*torch.relu(formants_freqs_hz-1000)+100))#good for spec based method + # formants_bandwidth_hz = 
(100*(torch.tanh(self.conv_formants_bandwidth(x_formants))) + (0.035*torch.relu(formants_freqs_hz-950)+250)) if self.wavebased else ((torch.sigmoid(self.conv_formants_bandwidth(x_formants))+0.2) * (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100))#good for spec based method + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+50) + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * (0.075*3*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+(2*torch.sigmoid(self.formant_bandwitdh_ratio)+1)*50) + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+50) + formants_bandwidth = bandwidth_mel(formants_freqs_hz,formants_bandwidth_hz,self.n_mels) + # formants_bandwidth_upper = formants_freqs_hz+formants_bandwidth_hz/2 + # formants_bandwidth_lower = torch.clamp(formants_freqs_hz-formants_bandwidth_hz/2,min=1) + # formants_bandwidth = (mel_scale(self.n_mels,formants_bandwidth_upper) - mel_scale(self.n_mels,formants_bandwidth_lower))/(self.n_mels*1.0) + # formants_amplitude = F.softmax(torch.cumsum(-F.relu(self.conv_formants_amplitude(x_formants)),dim=1),dim=1) + formants_amplitude_logit = self.conv_formants_amplitude(x_formants) + formants_amplitude = F.softmax(formants_amplitude_logit,dim=1) + + formants_freqs_noise = torch.sigmoid(self.conv_formants_freqs_noise(x_formants)) + # # relative freq: + # formants_freqs_hz = formants_freqs*(self.formant_freq_limits_diff[:,:self.n_formants]-self.formant_freq_limits_diff_low[:,:self.n_formants])+self.formant_freq_limits_diff_low[:,:self.n_formants] + # # formants_freqs_hz = formants_freqs*6839 + # formants_freqs_hz = torch.cumsum(formants_freqs_hz,dim=1) + + # abs freq: + formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:self.n_formants_noise]-self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise])+self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise] + if duomask: + formants_freqs_hz_noise = torch.cat([formants_freqs_hz,formants_freqs_hz_noise],dim=1) + # formants_freqs_hz = formants_freqs*6839 + formants_freqs_noise = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz_noise)/(self.n_mels*1.0),min=0) + + # formants_freqs = formants_freqs + f0 + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) *6839 + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 150 + # formants_bandwidth_hz_noise = torch.sigmoid(self.conv_formants_bandwidth_noise(x_formants)) * 8000 + 2000 + formants_bandwidth_hz_noise = self.conv_formants_bandwidth_noise(x_formants) + # formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 8000 + 2000 #2000-10000 + # formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 2000 #0-2000 + formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 2344 + 586 #2000-10000 + formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 586 #0-2000 + formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz_noise_1,formants_bandwidth_hz_noise_2],dim=1) + if duomask: + formants_bandwidth_hz_noise = 
torch.cat([formants_bandwidth_hz,formants_bandwidth_hz_noise],dim=1) + # formants_bandwidth_hz_noise = torch.sigmoid(self.conv_formants_bandwidth_noise(x_formants)) * 4000 + 1000 + # formants_bandwidth_hz_noise = torch.sigmoid(self.conv_formants_bandwidth_noise(x_formants)) * 4000 + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+50) + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * (0.075*3*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+(2*torch.sigmoid(self.formant_bandwitdh_ratio)+1)*50) + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+50) + formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels) + # formants_bandwidth_upper = formants_freqs_hz+formants_bandwidth_hz/2 + # formants_bandwidth_lower = torch.clamp(formants_freqs_hz-formants_bandwidth_hz/2,min=1) + # formants_bandwidth = (mel_scale(self.n_mels,formants_bandwidth_upper) - mel_scale(self.n_mels,formants_bandwidth_lower))/(self.n_mels*1.0) + formants_amplitude_noise_logit = self.conv_formants_amplitude_noise(x_formants) + if duomask: + formants_amplitude_noise_logit = torch.cat([formants_amplitude_logit,formants_amplitude_noise_logit],dim=1) + formants_amplitude_noise = F.softmax(formants_amplitude_noise_logit,dim=1) + + components = { 'f0':f0, + 'f0_hz':f0_hz, + 'loudness':loudness, + 'amplitudes':amplitudes, + 'amplitudes_h':amplitudes_h, + 'freq_formants_hamon':formants_freqs, + 'bandwidth_formants_hamon':formants_bandwidth, + 'freq_formants_hamon_hz':formants_freqs_hz, + 'bandwidth_formants_hamon_hz':formants_bandwidth_hz, + 'amplitude_formants_hamon':formants_amplitude, + 'freq_formants_noise':formants_freqs_noise, + 'bandwidth_formants_noise':formants_bandwidth_noise, + 'freq_formants_noise_hz':formants_freqs_hz_noise, + 'bandwidth_formants_noise_hz':formants_bandwidth_hz_noise, + 'amplitude_formants_noise':formants_amplitude_noise, + } + return components + +class FromECoG(nn.Module): + def __init__(self, outputs,residual=False,shape='3D'): + super().__init__() + self.residual=residual + if shape =='3D': + self.from_ecog = ln.Conv3d(1, outputs, [9,1,1], 1, [4,0,0]) + else: + self.from_ecog = ln.Conv2d(1, outputs, [9,1], 1, [4,0]) + + def forward(self, x): + x = self.from_ecog(x) + if not self.residual: + x = F.leaky_relu(x, 0.2) + return x + +class ECoGMappingBlock(nn.Module): + def __init__(self, inputs, outputs, kernel_size,dilation=1,fused_scale=True,residual=False,resample=[],pool=None,shape='3D'): + super(ECoGMappingBlock, self).__init__() + self.residual = residual + self.pool = pool + self.inputs_resample = resample + self.dim_missmatch = (inputs!=outputs) + self.resample = resample + if not self.resample: + self.resample=1 + self.padding = list(np.array(dilation)*(np.array(kernel_size)-1)//2) + if shape=='2D': + conv=ln.Conv2d + maxpool = nn.MaxPool2d + avgpool = nn.AvgPool2d + if shape=='3D': + conv=ln.Conv3d + maxpool = nn.MaxPool3d + avgpool = nn.AvgPool3d + # self.padding = [dilation[i]*(kernel_size[i]-1)//2 for i in range(len(dilation))] + if residual: + self.norm1 = nn.GroupNorm(min(inputs,32),inputs) + else: + self.norm1 = nn.GroupNorm(min(outputs,32),outputs) + if pool is None: + self.conv1 = 
conv(inputs, outputs, kernel_size, self.resample, self.padding, dilation=dilation, bias=False)
+        else:
+            self.conv1 = conv(inputs, outputs, kernel_size, 1, self.padding, dilation=dilation, bias=False)
+            # NOTE: this string comparison is case-sensitive, so pool='MAX'
+            # (as passed by ECoGMapping_Bottleneck below) selects average pooling.
+            self.pool1 = maxpool(self.resample,self.resample) if self.pool=='Max' else avgpool(self.resample,self.resample)
+        if self.inputs_resample or self.dim_missmatch:
+            if pool is None:
+                self.convskip = conv(inputs, outputs, kernel_size, self.resample, self.padding, dilation=dilation, bias=False)
+            else:
+                self.convskip = conv(inputs, outputs, kernel_size, 1, self.padding, dilation=dilation, bias=False)
+                self.poolskip = maxpool(self.resample,self.resample) if self.pool=='Max' else avgpool(self.resample,self.resample)
+
+        self.conv2 = conv(outputs, outputs, kernel_size, 1, self.padding, dilation=dilation, bias=False)
+        self.norm2 = nn.GroupNorm(min(outputs,32),outputs)
+
+    def forward(self,x):
+        if self.residual:
+            x = F.leaky_relu(self.norm1(x),0.2)
+            if self.inputs_resample or self.dim_missmatch:
+                # x_skip = F.avg_pool3d(x,self.resample,self.resample)
+                x_skip = self.convskip(x)
+                if self.pool is not None:
+                    x_skip = self.poolskip(x_skip)
+            else:
+                x_skip = x
+            x = F.leaky_relu(self.norm2(self.conv1(x)),0.2)
+            if self.pool is not None:
+                x = self.pool1(x) # main-path pooling; poolskip only exists when the skip branch is built
+            x = self.conv2(x)
+            x = x_skip + x
+        else:
+            x = F.leaky_relu(self.norm1(self.conv1(x)),0.2)
+            x = F.leaky_relu(self.norm2(self.conv2(x)),0.2)
+        return x
+
+
+
+@ECOG_ENCODER.register("ECoGMappingBottleneck")
+class ECoGMapping_Bottleneck(nn.Module):
+    def __init__(self,n_mels,n_formants,n_formants_noise=1):
+        super(ECoGMapping_Bottleneck, self).__init__()
+        self.n_formants = n_formants
+        self.n_mels = n_mels
+        self.n_formants_noise = n_formants_noise
+
+        self.formant_freq_limits_diff = torch.tensor([950.,2450.,2100.]).reshape([1,3,1]) #freq difference
+        self.formant_freq_limits_diff_low = torch.tensor([300.,300.,0.]).reshape([1,3,1]) #freq difference
+
+        # self.formant_freq_limits_abs = torch.tensor([950.,2800.,3400.,4700.]).reshape([1,4,1]) #freq difference
+        # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2800.,3400]).reshape([1,4,1]) #freq difference
+
+        # self.formant_freq_limits_abs = torch.tensor([950.,3300.,3600.,4700.]).reshape([1,4,1]) #freq difference
+        # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.,3400]).reshape([1,4,1]) #freq difference
+
+        self.formant_freq_limits_abs = torch.tensor([950.,3400.,3800.,5000.,6000.,7000.]).reshape([1,6,1]) #absolute freq upper limits (Hz)
+        self.formant_freq_limits_abs_low = torch.tensor([300.,700.,1800.,3400,5000.,6000.]).reshape([1,6,1]) #absolute freq lower limits (Hz)
+
+        # self.formant_freq_limits_abs_noise = torch.tensor([7000.]).reshape([1,1,1]) #freq difference
+        self.formant_freq_limits_abs_noise = torch.tensor([8000.,7000.,7000.]).reshape([1,3,1]) #absolute freq upper limits (Hz)
+        self.formant_freq_limits_abs_noise_low = torch.tensor([4000.,3000.,3000.]).reshape([1,3,1]) #absolute freq lower limits (Hz)
+
+        self.formant_bandwitdh_ratio = Parameter(torch.Tensor(1))
+        self.formant_bandwitdh_slop = Parameter(torch.Tensor(1))
+        with torch.no_grad():
+            nn.init.constant_(self.formant_bandwitdh_ratio,0)
+            nn.init.constant_(self.formant_bandwitdh_slop,0)
+
+
+        self.from_ecog = FromECoG(16,residual=True)
+        self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,resample = [2,1,1],pool='MAX')
+        self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,resample = [2,1,1],pool='MAX')
+        self.norm_mask = nn.GroupNorm(32,64)
+        self.mask = ln.Conv3d(64,1,[3,1,1],1,[1,0,0])
+        self.conv3 =
ECoGMappingBlock(64,128,[3,3,3],residual=True,resample = [2,2,2],pool='MAX') + self.conv4 = ECoGMappingBlock(128,256,[3,3,3],residual=True,resample = [2,2,2],pool='MAX') + self.norm = nn.GroupNorm(32,256) + self.conv5 = ln.Conv1d(256,256,3,1,1) + self.norm2 = nn.GroupNorm(32,256) + self.conv6 = ln.ConvTranspose1d(256, 128, 3, 2, 1, transform_kernel=True) + self.norm3 = nn.GroupNorm(32,128) + self.conv7 = ln.ConvTranspose1d(128, 64, 3, 2, 1, transform_kernel=True) + self.norm4 = nn.GroupNorm(32,64) + self.conv8 = ln.ConvTranspose1d(64, 32, 3, 2, 1, transform_kernel=True) + self.norm5 = nn.GroupNorm(32,32) + self.conv9 = ln.ConvTranspose1d(32, 32, 3, 2, 1, transform_kernel=True) + self.norm6 = nn.GroupNorm(32,32) + + self.conv_fundementals = ln.Conv1d(32,32,3,1,1) + self.norm_fundementals = nn.GroupNorm(32,32) + self.f0_drop = nn.Dropout() + self.conv_f0 = ln.Conv1d(32,1,1,1,0) + self.conv_amplitudes = ln.Conv1d(32,2,1,1,0) + self.conv_amplitudes_h = ln.Conv1d(32,2,1,1,0) + self.conv_loudness = ln.Conv1d(32,1,1,1,0,bias_initial=-9.) + + self.conv_formants = ln.Conv1d(32,32,3,1,1) + self.norm_formants = nn.GroupNorm(32,32) + self.conv_formants_freqs = ln.Conv1d(32,n_formants,1,1,0) + self.conv_formants_bandwidth = ln.Conv1d(32,n_formants,1,1,0) + self.conv_formants_amplitude = ln.Conv1d(32,n_formants,1,1,0) + + self.conv_formants_freqs_noise = ln.Conv1d(32,n_formants_noise,1,1,0) + self.conv_formants_bandwidth_noise = ln.Conv1d(32,n_formants_noise,1,1,0) + self.conv_formants_amplitude_noise = ln.Conv1d(32,n_formants_noise,1,1,0) + + + def forward(self,ecog,mask_prior,mni): + x_common_all = [] + for d in range(len(ecog)): + x = ecog[d] + x = x.reshape([-1,1,x.shape[1],15,15]) + mask_prior_d = mask_prior[d].reshape(-1,1,1,15,15) + x = self.from_ecog(x) + x = self.conv1(x) + x = self.conv2(x) + mask = torch.sigmoid(self.mask(F.leaky_relu(self.norm_mask(x),0.2))) + mask = mask[:,:,4:] + if mask_prior is not None: + mask = mask*mask_prior_d + x = x[:,:,4:] + x = x*mask + x = self.conv3(x) + x = self.conv4(x) + x = x.max(-1)[0].max(-1)[0] + x = self.conv5(F.leaky_relu(self.norm(x),0.2)) + x = self.conv6(F.leaky_relu(self.norm2(x),0.2)) + x = self.conv7(F.leaky_relu(self.norm3(x),0.2)) + x = self.conv8(F.leaky_relu(self.norm4(x),0.2)) + x = self.conv9(F.leaky_relu(self.norm5(x),0.2)) + x_common = F.leaky_relu(self.norm6(x),0.2) + x_common_all += [x_common] + + x_common = torch.cat(x_common_all,dim=0) + loudness = F.softplus(self.conv_loudness(x_common)) + amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1) + amplitudes_h = F.softmax(self.conv_amplitudes_h(x_common),dim=1) + + # x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2) + x_fundementals = self.f0_drop(F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2)) + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) + # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice + + # f0 in hz: + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * 528 + 88 # 88hz < f0 < 616 hz + f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 332 + 88 # 88hz < f0 < 420 hz + f0 = 
torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001)
+
+        x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2)
+        formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants))
+        # formants_freqs = torch.cumsum(formants_freqs,dim=1)
+        # formants_freqs = formants_freqs
+
+        # abs freq
+        formants_freqs_hz = formants_freqs*(self.formant_freq_limits_abs[:,:self.n_formants]-self.formant_freq_limits_abs_low[:,:self.n_formants])+self.formant_freq_limits_abs_low[:,:self.n_formants]
+        formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/(self.n_mels*1.0),min=0)
+
+        # formants_freqs = formants_freqs + f0
+        # formants_bandwidth = torch.sigmoid(self.conv_formants_bandwidth(x_formants))
+        # formants_bandwidth_hz = (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100)
+        formants_bandwidth_hz = 0.65*(0.00625*torch.relu(formants_freqs_hz)+375)
+        # formants_bandwidth_hz = (torch.sigmoid(self.conv_formants_bandwidth(x_formants))) * (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100)
+        formants_bandwidth = bandwidth_mel(formants_freqs_hz,formants_bandwidth_hz,self.n_mels)
+        formants_amplitude_logit = self.conv_formants_amplitude(x_formants)
+        formants_amplitude = F.softmax(formants_amplitude_logit,dim=1)
+
+        formants_freqs_noise = torch.sigmoid(self.conv_formants_freqs_noise(x_formants))
+        formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:self.n_formants_noise]-self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise])+self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise]
+        # formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:1]-self.formant_freq_limits_abs_noise_low[:,:1])+self.formant_freq_limits_abs_noise_low[:,:1]
+        formants_freqs_hz_noise = torch.cat([formants_freqs_hz,formants_freqs_hz_noise],dim=1)
+        formants_freqs_noise = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz_noise)/(self.n_mels*1.0),min=0)
+        # formants_bandwidth_hz_noise = F.relu(self.conv_formants_bandwidth_noise(x_formants)) * 8000 + 2000
+        # formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels)
+        # formants_amplitude_noise = F.softmax(self.conv_formants_amplitude_noise(x_formants),dim=1)
+        formants_bandwidth_hz_noise = self.conv_formants_bandwidth_noise(x_formants)
+        formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 2344 + 586 #586 Hz floor, unbounded above (~2211 Hz at zero input)
+        formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 586 #0-586 Hz
+        formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz_noise_1,formants_bandwidth_hz_noise_2],dim=1)
+        formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz,formants_bandwidth_hz_noise],dim=1)
+        formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels)
+        formants_amplitude_noise_logit = self.conv_formants_amplitude_noise(x_formants)
+        formants_amplitude_noise_logit = torch.cat([formants_amplitude_logit,formants_amplitude_noise_logit],dim=1)
+        formants_amplitude_noise = F.softmax(formants_amplitude_noise_logit,dim=1)
+
+        components = { 'f0':f0,
+                        'f0_hz':f0_hz,
+                        'loudness':loudness,
+                        'amplitudes':amplitudes,
+                        'amplitudes_h':amplitudes_h,
+                        'freq_formants_hamon':formants_freqs,
+                        'bandwidth_formants_hamon':formants_bandwidth,
+                        'freq_formants_hamon_hz':formants_freqs_hz,
+
'bandwidth_formants_hamon_hz':formants_bandwidth_hz,
+                        'amplitude_formants_hamon':formants_amplitude,
+                        'freq_formants_noise':formants_freqs_noise,
+                        'bandwidth_formants_noise':formants_bandwidth_noise,
+                        'freq_formants_noise_hz':formants_freqs_hz_noise,
+                        'bandwidth_formants_noise_hz':formants_bandwidth_hz_noise,
+                        'amplitude_formants_noise':formants_amplitude_noise,
+                        }
+        return components
+
+
+class BackBone(nn.Module):
+    def __init__(self,attentional_mask=True):
+        super(BackBone, self).__init__()
+        self.attentional_mask = attentional_mask
+        self.from_ecog = FromECoG(16,residual=True,shape='2D')
+        self.conv1 = ECoGMappingBlock(16,32,[5,1],residual=True,resample = [1,1],shape='2D')
+        self.conv2 = ECoGMappingBlock(32,64,[3,1],residual=True,resample = [1,1],shape='2D')
+        self.norm_mask = nn.GroupNorm(32,64)
+        self.mask = ln.Conv2d(64,1,[3,1],1,[1,0])
+
+    def forward(self,ecog):
+        x_common_all = []
+        mask_all=[]
+        for d in range(len(ecog)):
+            x = ecog[d]
+            x = x.unsqueeze(1)
+            x = self.from_ecog(x)
+            x = self.conv1(x)
+            x = self.conv2(x)
+            if self.attentional_mask:
+                mask = F.relu(self.mask(F.leaky_relu(self.norm_mask(x),0.2)))
+                mask = mask[:,:,16:]
+                x = x[:,:,16:]
+                mask_all +=[mask]
+            else:
+                # mask = torch.sigmoid(self.mask(F.leaky_relu(self.norm_mask(x),0.2)))
+                # mask = mask[:,:,16:]
+                x = x[:,:,16:]
+                # x = x*mask
+
+            x_common_all +=[x]
+
+        x_common = torch.cat(x_common_all,dim=0)
+        if self.attentional_mask:
+            mask = torch.cat(mask_all,dim=0)
+        return x_common, (mask.squeeze(1) if self.attentional_mask else None)
+
+class ECoGEncoderFormantHeads(nn.Module):
+    def __init__(self,inputs,n_mels,n_formants):
+        super(ECoGEncoderFormantHeads,self).__init__()
+        self.n_mels = n_mels
+        self.f0 = ln.Conv1d(inputs,1,1)
+        self.loudness = ln.Conv1d(inputs,1,1)
+        self.amplitudes = ln.Conv1d(inputs,2,1)
+        self.freq_formants = ln.Conv1d(inputs,n_formants,1)
+        self.bandwidth_formants = ln.Conv1d(inputs,n_formants,1)
+        self.amplitude_formants = ln.Conv1d(inputs,n_formants,1)
+
+    def forward(self,x):
+        loudness = F.relu(self.loudness(x))
+        f0 = torch.sigmoid(self.f0(x)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz
+        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice
+        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice
+        amplitudes = F.softmax(self.amplitudes(x),dim=1)
+        freq_formants = torch.sigmoid(self.freq_formants(x))
+        freq_formants = torch.cumsum(freq_formants,dim=1)
+        bandwidth_formants = torch.sigmoid(self.bandwidth_formants(x))
+        amplitude_formants = F.softmax(self.amplitude_formants(x),dim=1)
+        return {'f0':f0,
+                'loudness':loudness,
+                'amplitudes':amplitudes,
+                'freq_formants':freq_formants,
+                'bandwidth_formants':bandwidth_formants,
+                'amplitude_formants':amplitude_formants,}
+
+@ECOG_ENCODER.register("ECoGMappingTransformer")
+class ECoGMapping_Transformer(nn.Module):
+    def __init__(self,n_mels,n_formants,SeqLen=128,hidden_dim=256,dim_feedforward=256,encoder_only=False,attentional_mask=False,n_heads=1,non_local=False):
+        super(ECoGMapping_Transformer, self).__init__()
+        self.n_mels = n_mels
+        self.n_formant = n_formants
+        self.encoder_only = encoder_only
+        self.attentional_mask = attentional_mask
+        self.backbone = BackBone(attentional_mask=attentional_mask)
+        self.position_encoding = build_position_encoding(SeqLen,hidden_dim,'MNI')
+        self.input_proj = ln.Conv2d(64, hidden_dim, kernel_size=1)
+        if non_local:
+
Transformer = TransformerNL + else: + Transformer = TransformerTS + self.transformer = Transformer(d_model=hidden_dim, nhead=n_heads, num_encoder_layers=6, + num_decoder_layers=6, dim_feedforward=dim_feedforward, dropout=0.1, + activation="relu", normalize_before=False, + return_intermediate_dec=False,encoder_only = encoder_only) + self.output_proj = ECoGEncoderFormantHeads(hidden_dim,n_mels,n_formants) + self.query_embed = nn.Embedding(SeqLen, hidden_dim) + + def forward(self,x,mask_prior,mni): + features,mask = self.backbone(x) + pos = self.position_encoding(mni) + hs = self.transformer(self.input_proj(features), mask if self.attentional_mask else None, self.query_embed.weight, pos) + if not self.encoder_only: + hs,encoded = hs + out = self.output_proj(hs) + else: + _,encoded = hs + encoded = encoded.max(-1)[0] + out = self.output_proj(encoded) + return out + + + diff --git a/net_formant_wave2specbased.py b/net_formant_wave2specbased.py new file mode 100644 index 00000000..9e82750e --- /dev/null +++ b/net_formant_wave2specbased.py @@ -0,0 +1,1161 @@ +import os +import pdb +from random import triangular +import torch +import torchaudio +from torch import nn +from torch.nn import functional as F +from torch.nn import Parameter as P +from torch.nn import init +from torch.nn.parameter import Parameter +import numpy as np +import lreq as ln +import math +from registry import * +from transformer_models.position_encoding import build_position_encoding +from transformer_models.transformer import Transformer as TransformerTS +from transformer_models.transformer_nonlocal import Transformer as TransformerNL + +def db(x,noise = -80, slope =35, powerdb=True): + if powerdb: + return ((2*torchaudio.transforms.AmplitudeToDB()(x)).clamp(min=noise)-slope-noise)/slope + else: + return ((torchaudio.transforms.AmplitudeToDB()(x)).clamp(min=noise)-slope-noise)/slope + +# def amplitude(x,noise=-80,slope=35): +# return 10**((x*slope+noise+slope)/20.) 
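+# amplitude() inverts to_db() below: a normalized spectrogram in [-1,1] is
+# mapped back to dB via x_db = (x+1)/2*(max_db-noise_db)+noise_db, then to
+# power via 10**(x_db/10). With trim_noise=True, the sign(...)*0.5+0.5 factor
+# is a 0/1 step that zeroes every bin at or below the noise floor. db() above
+# is the analogous normalizer, mapping [noise, noise+2*slope] dB onto [-1,1].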
+ +def amplitude(x,noise_db=-60,max_db=35,trim_noise=False): + if trim_noise: + x_db = (x+1)/2*(max_db-noise_db)+noise_db + if type(x) is np.ndarray: + return 10**(x_db/10)*(np.sign(x_db-noise_db)*0.5+0.5) + else: + return 10**(x_db/10)*((x_db-noise_db).sign()*0.5+0.5) + else: + return 10**(((x+1)/2*(max_db-noise_db)+noise_db)/10) + +def to_db(x,noise_db=-60,max_db=35): + return (torchaudio.transforms.AmplitudeToDB()(x)-noise_db)/(max_db-noise_db)*2-1 + +def wave2spec(wave,n_fft=256,wave_fr=16000,spec_fr=125,noise_db=-60,max_db=22.5,to_db=True,power=2): +# def wave2spec(wave,n_fft=256,wave_fr=16000,spec_fr=125,noise_db=-50,max_db=22.5,to_db=True): + if to_db: + return (torchaudio.transforms.AmplitudeToDB()(torchaudio.transforms.Spectrogram(n_fft*2-1,win_length=n_fft*2-1,hop_length=int(wave_fr/spec_fr),power=power)(wave)).clamp(min=noise_db,max=max_db).transpose(-2,-1)-noise_db)/(max_db-noise_db)*2-1 + else: + return torchaudio.transforms.Spectrogram(n_fft*2-1,win_length=n_fft*2-1,hop_length=int(wave_fr/spec_fr),power=power)(wave).transpose(-2,-1) + + +# def mel_scale(n_mels,hz,min_octave=-31.,max_octave=95.,pt=True): +# def mel_scale(n_mels,hz,min_octave=-58.,max_octave=100.,pt=True): +def mel_scale(n_mels,hz,min_octave=-31.,max_octave=102.,pt=True): + #take absolute hz, return abs mel + # return (torch.log2(hz/440)+31/24)*24*n_mels/126 + if pt: + return (torch.log2(hz/440.)-min_octave/24.)*24*n_mels/(max_octave-min_octave) + else: + return (np.log2(hz/440.)-min_octave/24.)*24*n_mels/(max_octave-min_octave) + +# def inverse_mel_scale(mel,min_octave=-31.,max_octave=95.): +# def inverse_mel_scale(mel,min_octave=-58.,max_octave=100.): +def inverse_mel_scale(mel,min_octave=-31.,max_octave=102.): + #take normalized mel, return absolute hz + # return 440*2**(mel*126/24-31/24) + return 440*2**(mel*(max_octave-min_octave)/24.+min_octave/24.) + +# def mel_scale(n_mels,hz,f_min=160.,f_max=8000.,pt=True): +# #take absolute hz, return abs mel +# # return (torch.log2(hz/440)+31/24)*24*n_mels/126 +# m_min = 2595.0 * np.log10(1.0 + (f_min / 700.0)) +# m_max = 2595.0 * np.log10(1.0 + (f_max / 700.0)) +# m_min_ = m_min + (m_max-m_min)/(n_mels+1) +# m_max_ = m_max +# if pt: +# return (2595.0 * torch.log10(1.0 + (hz / 700.0))-m_min_)/(m_max_-m_min_)*n_mels +# else: +# return (2595.0 * np.log10(1.0 + (hz / 700.0))-m_min_)/(m_max_-m_min_)*n_mels + +# def inverse_mel_scale(mel,f_min=160.,f_max=8000.,n_mels=64): +# #take normalized mel, return absolute hz +# # return 440*2**(mel*126/24-31/24) +# m_min = 2595.0 * np.log10(1.0 + (f_min / 700.0)) +# m_max = 2595.0 * np.log10(1.0 + (f_max / 700.0)) +# m_min_ = m_min + (m_max-m_min)/(n_mels+1) +# m_max_ = m_max +# return 700.0 * (10**((mel*(m_max_-m_min_) + m_min_)/ 2595.0) - 1.0) + +def ind2hz(ind,n_fft,max_freq=8000.): + #input abs ind, output abs hz + return ind/(1.0*n_fft)*max_freq + +def hz2ind(hz,n_fft,max_freq=8000.): + # input abs hz, output abs ind + return hz/(1.0*max_freq)*n_fft + +def bandwidth_mel(freqs_hz,bandwidth_hz,n_mels): + # input hz bandwidth, output abs bandwidth on mel + bandwidth_upper = freqs_hz+bandwidth_hz/2. 
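+    # Editorial worked example for the octave-based mel mapping above: with
+    # n_mels=64, min_octave=-31, max_octave=102, a 440 Hz tone lands at
+    # (log2(440/440) + 31/24) * 24 * 64 / 133 ~= 14.9 mel bins, and
+    # inverse_mel_scale undoes the mapping exactly. The two band edges here
+    # are converted to mel separately and then differenced, so the returned
+    # bandwidth is measured in mel bins.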
+ bandwidth_lower = torch.clamp(freqs_hz-bandwidth_hz/2.,min=1) + bandwidth = mel_scale(n_mels,bandwidth_upper) - mel_scale(n_mels,bandwidth_lower) + return bandwidth + +def torch_P2R(radii, angles): + return radii * torch.cos(angles),radii * torch.sin(angles) +def inverse_spec_to_audio(spec,n_fft = 511,win_length = 511,hop_length = 128,power_synth=True): + ''' + generate random phase, then use istft to inverse spec to audio + ''' + window = torch.hann_window(win_length) + angles = torch.randn_like(spec).uniform_(0, np.pi*2)#torch.zeros_like(spec)#torch.randn_like(spec).uniform_(0, np.pi*2) + spec = spec**0.5 if power_synth else spec + spec_complex = torch.stack(torch_P2R(spec, angles),dim=-1) #real and image in same dim + return torchaudio.functional.istft(spec_complex, n_fft=n_fft, window=window, center=True, win_length=win_length, hop_length=hop_length) + +@GENERATORS.register("GeneratorFormant") +class FormantSysth(nn.Module): + def __init__(self, n_mels=64, k=100, wavebased=False,n_fft=256,noise_db=-50,max_db=22.5,dbbased=False,add_bgnoise=True,log10=False,noise_from_data=False,return_wave=False,power_synth=False): + super(FormantSysth, self).__init__() + self.wave_fr = 16e3 + self.spec_fr = 125 + self.n_fft = n_fft + self.noise_db=noise_db + self.max_db = max_db + self.n_mels = n_mels + self.k = k + self.dbbased=dbbased + self.log10 = log10 + self.add_bgnoise = add_bgnoise + self.wavebased=wavebased + self.noise_from_data = noise_from_data + self.linear_scale = wavebased + self.return_wave = return_wave + self.power_synth = power_synth + self.timbre = Parameter(torch.Tensor(1,1,n_mels)) + self.timbre_mapping = nn.Sequential( + ln.Conv1d(1,128,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,128,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,2,1), + # nn.Sigmoid(), + ) + self.bgnoise_mapping = nn.Sequential( + ln.Conv2d(2,2,[1,5],padding=[0,2],gain=1,bias=False), + # nn.Sigmoid(), + ) + self.noise_mapping = nn.Sequential( + ln.Conv2d(2,2,[1,5],padding=[0,2],gain=1,bias=False), + # nn.Sigmoid(), + ) + + self.bgnoise_mapping2 = nn.Sequential( + ln.Conv1d(1,128,1,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,128,1,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,1,1,gain=1,bias=False), + # nn.Sigmoid(), + ) + self.noise_mapping2 = nn.Sequential( + ln.Conv1d(1,128,1,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,128,1,1), + nn.LeakyReLU(0.2), + ln.Conv1d(128,1,1,1,gain=1,bias=False), + # nn.Sigmoid(), + ) + self.prior_exp = np.array([0.4963,0.0745,1.9018]) + self.timbre_parameter = Parameter(torch.Tensor(2)) + self.wave_noise_amplifier = Parameter(torch.Tensor(1)) + self.wave_hamon_amplifier = Parameter(torch.Tensor(1)) + + if noise_from_data: + self.bgnoise_amp = Parameter(torch.Tensor(1)) + with torch.no_grad(): + nn.init.constant_(self.bgnoise_amp,1) + else: + self.bgnoise_dist = Parameter(torch.Tensor(1,1,1,self.n_fft if self.wavebased else self.n_mels)) + with torch.no_grad(): + nn.init.constant_(self.bgnoise_dist,1.0) + # self.silient = Parameter(torch.Tensor(1,1,n_mels)) + self.silient = -1 + with torch.no_grad(): + nn.init.constant_(self.timbre,1.0) + nn.init.constant_(self.timbre_parameter[0],7) + nn.init.constant_(self.timbre_parameter[1],0.004) + nn.init.constant_(self.wave_noise_amplifier,1) + nn.init.constant_(self.wave_hamon_amplifier,4.) 
+ + # nn.init.constant_(self.silient,-1.0) + +# def formant_mask(self,freq,bandwith,amplitude): +# # freq, bandwith, amplitude: B*formants*time +# freq_cord = torch.arange(self.n_mels) +# time_cord = torch.arange(freq.shape[2]) +# grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) +# grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 +# grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 +# freq = freq.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants +# bandwith = bandwith.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants +# amplitude = amplitude.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants +# # masks = amplitude*torch.exp(-0.693*(grid_freq-freq)**2/(2*(bandwith+0.001)**2)) #B,time,freqchans, formants +# masks = amplitude*torch.exp(-(grid_freq-freq)**2/(2*(bandwith/np.sqrt(2*np.log(2))+0.001)**2)) #B,time,freqchans, formants +# masks = masks.unsqueeze(dim=1) #B,1,time,freqchans, formants +# return masks + + def formant_mask(self,freq_hz,bandwith_hz,amplitude,linear=False, triangle_mask = False,duomask=True, n_formant_noise=1,f0_hz=None,noise=False): + # freq, bandwith, amplitude: B*formants*time + freq_cord = torch.arange(self.n_fft if linear else self.n_mels) + time_cord = torch.arange(freq_hz.shape[2]) + grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) + grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq_hz = ind2hz(grid_freq,self.n_fft,self.wave_fr/2) if linear else inverse_mel_scale(grid_freq/(self.n_mels*1.0)) + freq_hz = freq_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + bandwith_hz = bandwith_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + amplitude = amplitude.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, formants + if self.power_synth: + amplitude = amplitude + alpha = (2*np.sqrt(2*np.log(np.sqrt(2)))) + if not noise: + # t = torch.arange(int(f0_hz.shape[2]/self.spec_fr*self.wave_fr))/(1.0*self.wave_fr) #in second + # t = t.unsqueeze(dim=0).unsqueeze(dim=0) #1, 1, time + k = (torch.arange(self.k)+1).reshape([1,self.k,1]) + # f0_hz_interp = F.interpolate(f0_hz,t.shape[-1],mode='linear',align_corners=False) #Bx1xT + # bandwith_hz_interp = F.interpolate(bandwith_hz.permute(0,2,3,1),[bandwith_hz.shape[-1],t.shape[-1]],mode='bilinear',align_corners=False).permute(0,3,1,2) #Bx1xT + # freq_hz_interp = F.interpolate(freq_hz.permute(0,2,3,1),[freq_hz.shape[-1],t.shape[-1]],mode='bilinear',align_corners=False).permute(0,3,1,2) #Bx1xT + k_f0 = k*f0_hz #BxkxT + freq_range = (-torch.sign(k_f0-7800)*0.5+0.5) #BxkxT + k_f0 = k_f0.permute([0,2,1]).unsqueeze(-1) #BxTxkx1 + # masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2)) if self.wavebased else amplitude*torch.exp(-(0.693*(grid_freq_hz-freq_hz))**2/(2*(bandwith_hz+0.01)**2)) #B,time,freqchans, formants + # amplitude_interp = F.interpolate(amplitude.permute(0,2,3,1),[amplitude.shape[-1],t.shape[-1]],mode='bilinear',align_corners=False).permute(0,3,1,2) #Bx1xT + hamonic_dist = (amplitude*(torch.exp(-((k_f0-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2))+1E-6).sqrt()).sum(-1).permute([0,2,1]) #BxkxT + hamonic_dist = (hamonic_dist*freq_range)/((((hamonic_dist*freq_range)**2).sum(1,keepdim=True)+1E-10).sqrt()+1E-10) # sum_k(hamonic_dist**2) = 1 + hamonic_dist = F.interpolate(hamonic_dist,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr),mode = 'linear',align_corners=False) + return hamonic_dist # B,k,T + 
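+        # Editorial note on the two branches of formant_mask: the harmonic
+        # branch above samples the formant envelope at harmonics k*f0 and
+        # returns a per-harmonic amplitude distribution upsampled to the
+        # waveform rate (B, k, T_wave), normalized so the squared weights sum
+        # to ~1 over k; the noise branch below instead returns a spectral mask
+        # over frequency bins (B, 1, T, F), normalized per frame by its RMS
+        # across bins.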
else: + masks = amplitude*(torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2))+1E-6).sqrt() #B,time,freqchans, formants + masks = masks.sum(-1) #B,time,freqchans + masks = masks/((((masks**2).sum(-1,keepdim=True)/self.n_fft)+1E-10).sqrt()+1E-10) + masks = masks.unsqueeze(dim=1) #B,1,time,freqchans + return masks #B,1,time,freqchans + + # if self.wavebased: + # if triangle_mask: + # if duomask: + # # masks_hamon = amplitude[...,:-n_formant_noise]*torch.exp(-(0.693*(grid_freq_hz-freq_hz[...,:-n_formant_noise]))**2/(2*(bandwith_hz[...,:-n_formant_noise]+0.01)**2)) + # masks_hamon = amplitude[...,:-n_formant_noise]*torch.exp(-((grid_freq_hz-freq_hz[...,:-n_formant_noise]))**2/(2*(bandwith_hz[...,:-n_formant_noise]/alpha+0.01)**2)) + # bw = bandwith_hz[...,-n_formant_noise:] + # masks_noise = F.relu(amplitude[...,-n_formant_noise:] * (1 - (1-1/np.sqrt(2))*2/(bw+0.01)*(grid_freq_hz-freq_hz[...,-n_formant_noise:]).abs())) + # # masks_noise = amplitude[...,-n_formant_noise:] * (1 - 2/(bw+0.01)*(grid_freq_hz-freq_hz[...,-n_formant_noise:]).abs())*(-torch.sign(torch.abs(grid_freq_hz-freq_hz[...,-n_formant_noise:])/(bw+0.01)-0.5)*0.5+0.5) + # masks = torch.cat([masks_hamon,masks_noise],dim=-1) + # else: + # masks = F.relu(amplitude * (1 - (1-1/np.sqrt(2))*2/(bandwith_hz+0.01)*(grid_freq_hz-freq_hz).abs())) + # else: + # # masks = amplitude*torch.exp(-(0.693*(grid_freq_hz-freq_hz))**2/(2*(bandwith_hz+0.01)**2)) + # if self.power_synth: + # masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2)) + # else: + # # masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2)) + # masks = amplitude*(torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2))+1E-6).sqrt() + # else: + # if triangle_mask: + # if duomask: + # # masks_hamon = amplitude[...,:-n_formant_noise]*torch.exp(-(0.693*(grid_freq_hz-freq_hz[...,:-n_formant_noise]))**2/(2*(bandwith_hz[...,:-n_formant_noise]+0.01)**2)) + # masks_hamon = amplitude[...,:-n_formant_noise]*torch.exp(-((grid_freq_hz-freq_hz[...,:-n_formant_noise]))**2/(2*(bandwith_hz[...,:-n_formant_noise]/alpha+0.01)**2)) + # bw = bandwith_hz[...,-n_formant_noise:] + # masks_noise = F.relu(amplitude[...,-n_formant_noise:] * (1 - (1-1/np.sqrt(2))*2/(bw+0.01)*(grid_freq_hz-freq_hz[...,-n_formant_noise:]).abs())) + # # masks_noise = amplitude[...,-n_formant_noise:] * (1 - 2/(bw+0.01)*(grid_freq_hz-freq_hz[...,-n_formant_noise:]).abs())*(-torch.sign(torch.abs(grid_freq_hz-freq_hz[...,-n_formant_noise:])/(bw+0.01)-0.5)*0.5+0.5) + # masks = torch.cat([masks_hamon,masks_noise],dim=-1) + # else: + # masks = F.relu(amplitude * (1 - (1-1/np.sqrt(2))*2/(bandwith_hz+0.01)*(grid_freq_hz-freq_hz).abs())) + # # masks = amplitude * (1 - 2/(bandwith_hz+0.01)*(grid_freq_hz-freq_hz).abs())*(-torch.sign(torch.abs(grid_freq_hz-freq_hz)/(bandwith_hz+0.01)-0.5)*0.5+0.5) + # else: + # # masks = amplitude*torch.exp(-(0.693*(grid_freq_hz-freq_hz))**2/(2*(bandwith_hz+0.01)**2)) + # masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/alpha+0.01)**2)) + # masks = amplitude*torch.exp(-((grid_freq_hz-freq_hz))**2/(2*(bandwith_hz/(2*np.sqrt(2*np.log(2)))+0.01)**2)) #B,time,freqchans, formants + # masks = amplitude*torch.exp(-(0.693*(grid_freq_hz-freq_hz))**2/(2*(bandwith_hz+0.01)**2)) #B,time,freqchans, formants + + def voicing_wavebased(self,f0_hz): + #f0: B*1*time, hz + t = torch.arange(int(f0_hz.shape[2]/self.spec_fr*self.wave_fr))/(1.0*self.wave_fr) #in second + t = 
t.unsqueeze(dim=0).unsqueeze(dim=0) #1, 1, time + k = (torch.arange(self.k)+1).reshape([1,self.k,1]) + f0_hz_interp = F.interpolate(f0_hz,t.shape[-1],mode='linear',align_corners=False) + k_f0 = k*f0_hz_interp + k_f0_sum = 2*np.pi*torch.cumsum(k_f0,-1)/(1.0*self.wave_fr) + wave_k = np.sqrt(2)*torch.sin(k_f0_sum) * (-torch.sign(k_f0-7800)*0.5+0.5) + # wave = 0.12*torch.sin(2*np.pi*k_f0*t) * (-torch.sign(k_f0-6000)*0.5+0.5) + # wave = 0.09*torch.sin(2*np.pi*k_f0*t) * (-torch.sign(k_f0-self.wave_fr/2)*0.5+0.5) + # wave = 0.09*torch.sigmoid(self.wave_hamon_amplifier) * torch.sin(2*np.pi*k_f0*t) * (-torch.sign(k_f0-self.wave_fr/2)*0.5+0.5) + # wave = wave_k.sum(dim=1,keepdim=True) + # wave = F.softplus(self.wave_hamon_amplifier) * wave.sum(dim=1,keepdim=True) + # spec = wave2spec(wave,self.n_fft,self.wave_fr,self.spec_fr,self.noise_db,self.max_db,to_db=self.dbbased,power=2. if self.power_synth else 1.) + return wave_k #B,k,T + # if self.return_wave: + # return spec,wave_k + # else: + # return spec + + def unvoicing_wavebased(self,f0_hz,bg=False,mapping=True): + # return torch.ones([1,1,f0_hz.shape[2],512]) + # noise = 0.3*torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # noise = 0.03*torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + if bg: + noise = torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + if mapping: + noise = self.bgnoise_mapping2(noise) + else: + noise = np.sqrt(3.)*(2*torch.rand([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)])-1) + if mapping: + noise = self.noise_mapping2(noise) + # noise = 0.3 * torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # noise = 0.3 * F.softplus(self.wave_noise_amplifier) * torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + return wave2spec(noise,self.n_fft,self.wave_fr,self.spec_fr,self.noise_db,self.max_db,to_db=False,power=2. if self.power_synth else 1.) + # return torchaudio.transforms.Spectrogram(self.n_fft*2-1,win_length=self.n_fft*2-1,hop_length=int(self.wave_fr/self.spec_fr),power=2. 
if self.power_synth else 1.)(noise) + + # def unvoicing_wavebased(self,f0_hz): + # # return torch.ones([1,1,f0_hz.shape[2],512]) + # # noise = 0.3*torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # noise = 0.1*torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # # noise = 0.3 * torch.sigmoid(self.wave_noise_amplifier) * torch.randn([1,1,int(f0_hz.shape[2]/self.spec_fr*self.wave_fr)]) + # return wave2spec(noise,self.n_fft,self.wave_fr,self.spec_fr,self.noise_db,self.max_db,to_db=False) + + def voicing_linear(self,f0_hz,bandwith=2.5): + #f0: B*1*time, hz + freq_cord = torch.arange(self.n_fft) + time_cord = torch.arange(f0_hz.shape[2]) + grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) + grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + f0_hz = f0_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, 1 + f0_hz = f0_hz.repeat([1,1,1,self.k]) #B,time,1, self.k + f0_hz = f0_hz*(torch.arange(self.k)+1).reshape([1,1,1,self.k]) + # bandwith=4 + # bandwith_lower = torch.clamp(f0-bandwith/2,min=1) + # bandwith_upper = f0+bandwith/2 + # bandwith = mel_scale(self.n_mels,bandwith_upper) - mel_scale(self.n_mels,bandwith_lower) + f0 = hz2ind(f0_hz,self.n_fft) + + # hamonics = torch.exp(-(grid_freq-f0)**2/(2*sigma**2)) #gaussian + # hamonics = (1-((grid_freq_hz-f0_hz)/(2*bandwith_hz/2))**2)*(-torch.sign(torch.abs(grid_freq_hz-f0_hz)/(2*bandwith_hz)-0.5)*0.5+0.5) #welch + freq_cord_reshape = freq_cord.reshape([1,1,1,self.n_fft]) + hamonics = (1 - 2/bandwith*(grid_freq-f0).abs())*(-torch.sign(torch.abs(grid_freq-f0)/(bandwith)-0.5)*0.5+0.5) #triangular + # hamonics = (1-((grid_freq-f0)/(bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(bandwith)-0.5)*0.5+0.5) #welch + # hamonics = (1-((grid_freq-f0)/(2.5*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(2.5*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = (1-((grid_freq-f0)/(3*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(3*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = torch.cos(np.pi*torch.abs(grid_freq-f0)/(4*bandwith))**2*(-torch.sign(torch.abs(grid_freq-f0)/(4*bandwith)-0.5)*0.5+0.5) #hanning + # hamonics = (hamonics.sum(dim=-1)).unsqueeze(dim=1) # B,1,T,F + # condition = (torch.sign(freq_cord_reshape-switch)*0.5+0.5) + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-slop*(freq_cord_reshape-switch)*condition)-1)*condition) # B,1,T,F + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)-torch.abs(self.prior_exp_parameter[2])) * torch.exp(-torch.abs(self.prior_exp_parameter[1])*freq_cord.reshape([1,1,1,self.n_mels])) + torch.abs(self.prior_exp_parameter[2]) # B,1,T,F + + # timbre_parameter = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]).unsqueeze(1) + # condition = (torch.sign(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*0.5+0.5) + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*torch.sigmoid(timbre_parameter[...,1:2])*(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*condition)-1)*condition) # B,1,T,F + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*torch.sigmoid(timbre_parameter[...,1:2])*(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*condition)-1)*condition) * F.softplus(timbre_parameter[...,2:3]) + timbre_parameter[...,3:4] # B,1,T,F + # timbre = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]) + # 
hamonics = (hamonics.sum(dim=-1)*timbre).unsqueeze(dim=1) # B,1,T,F + # hamonics = (hamonics.sum(dim=-1)*self.timbre).unsqueeze(dim=1) # B,1,T,F + + hamonics = (hamonics.sum(dim=-1)).unsqueeze(dim=1) + # hamonics = 180*F.softplus(self.wave_hamon_amplifier)*(hamonics.sum(dim=-1)).unsqueeze(dim=1) + + return hamonics + + def voicing(self,f0_hz): + #f0: B*1*time, hz + freq_cord = torch.arange(self.n_mels) + time_cord = torch.arange(f0_hz.shape[2]) + grid_time,grid_freq = torch.meshgrid(time_cord,freq_cord) + grid_time = grid_time.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq = grid_freq.unsqueeze(dim=0).unsqueeze(dim=-1) #B,time,freq, 1 + grid_freq_hz = inverse_mel_scale(grid_freq/(self.n_mels*1.0)) + f0_hz = f0_hz.permute([0,2,1]).unsqueeze(dim=-2) #B,time,1, 1 + f0_hz = f0_hz.repeat([1,1,1,self.k]) #B,time,1, self.k + f0_hz = f0_hz*(torch.arange(self.k)+1).reshape([1,1,1,self.k]) + if self.log10: + f0_mel = mel_scale(self.n_mels,f0_hz) + band_low_hz = inverse_mel_scale((f0_mel-1)/(self.n_mels*1.0),n_mels = self.n_mels) + band_up_hz = inverse_mel_scale((f0_mel+1)/(self.n_mels*1.0),n_mels = self.n_mels) + bandwith_hz = band_up_hz-band_low_hz + band_low_mel = mel_scale(self.n_mels,band_low_hz) + band_up_mel = mel_scale(self.n_mels,band_up_hz) + bandwith = band_up_mel-band_low_mel + else: + bandwith_hz = 24.7*(f0_hz*4.37/1000+1) + bandwith = bandwidth_mel(f0_hz,bandwith_hz,self.n_mels) + # bandwith_lower = torch.clamp(f0-bandwith/2,min=1) + # bandwith_upper = f0+bandwith/2 + # bandwith = mel_scale(self.n_mels,bandwith_upper) - mel_scale(self.n_mels,bandwith_lower) + f0 = mel_scale(self.n_mels,f0_hz) + switch = mel_scale(self.n_mels,torch.abs(self.timbre_parameter[0])*f0_hz[...,0]).unsqueeze(1) + slop = (torch.abs(self.timbre_parameter[1])*f0_hz[...,0]).unsqueeze(1) + freq_cord_reshape = freq_cord.reshape([1,1,1,self.n_mels]) + if not self.dbbased: + # sigma = bandwith/(np.sqrt(2*np.log(2))); + sigma = bandwith/(2*np.sqrt(2*np.log(2))); + hamonics = torch.exp(-(grid_freq-f0)**2/(2*sigma**2)) #gaussian + # hamonics = (1-((grid_freq_hz-f0_hz)/(2*bandwith_hz/2))**2)*(-torch.sign(torch.abs(grid_freq_hz-f0_hz)/(2*bandwith_hz)-0.5)*0.5+0.5) #welch + else: + # # hamonics = (1-((grid_freq-f0)/(1.75*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(1.75*bandwith)-0.5)*0.5+0.5) #welch + hamonics = (1-((grid_freq-f0)/(2.5*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(2.5*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = (1-((grid_freq-f0)/(3*bandwith/2))**2)*(-torch.sign(torch.abs(grid_freq-f0)/(3*bandwith)-0.5)*0.5+0.5) #welch + # hamonics = torch.cos(np.pi*torch.abs(grid_freq-f0)/(4*bandwith))**2*(-torch.sign(torch.abs(grid_freq-f0)/(4*bandwith)-0.5)*0.5+0.5) #hanning + # hamonics = (hamonics.sum(dim=-1)).unsqueeze(dim=1) # B,1,T,F + # condition = (torch.sign(freq_cord_reshape-switch)*0.5+0.5) + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-slop*(freq_cord_reshape-switch)*condition)-1)*condition) # B,1,T,F + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)-torch.abs(self.prior_exp_parameter[2])) * torch.exp(-torch.abs(self.prior_exp_parameter[1])*freq_cord.reshape([1,1,1,self.n_mels])) + torch.abs(self.prior_exp_parameter[2]) # B,1,T,F + + timbre_parameter = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]).unsqueeze(1) + condition = (torch.sign(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*0.5+0.5) + amp = F.softplus(self.wave_hamon_amplifier) if self.dbbased else 
180*F.softplus(self.wave_hamon_amplifier) + hamonics = amp * ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*torch.sigmoid(timbre_parameter[...,1:2])*(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*condition)-1)*condition) # B,1,T,F + # hamonics = ((hamonics.sum(dim=-1)).unsqueeze(dim=1)) * (1+ (torch.exp(-0.01*torch.sigmoid(timbre_parameter[...,1:2])*(freq_cord_reshape-torch.sigmoid(timbre_parameter[...,0:1])*self.n_mels)*condition)-1)*condition) * F.softplus(timbre_parameter[...,2:3]) + timbre_parameter[...,3:4] # B,1,T,F + # timbre = self.timbre_mapping(f0_hz[...,0,0].unsqueeze(1)).permute([0,2,1]) + # hamonics = (hamonics.sum(dim=-1)*timbre).unsqueeze(dim=1) # B,1,T,F + # hamonics = (hamonics.sum(dim=-1)*self.timbre).unsqueeze(dim=1) # B,1,T,F + # return F.softplus(self.wave_hamon_amplifier)*hamonics + return hamonics + + def unvoicing(self,f0,bg=False,mapping=True): + # return (0.25*torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels]))+1 + rnd = torch.randn([f0.shape[0],2,f0.shape[2],self.n_fft if self.wavebased else self.n_mels]) + if mapping: + rnd = self.bgnoise_mapping(rnd) if bg else self.noise_mapping(rnd) + real = rnd[:,0:1] + img = rnd[:,1:2] + if self.dbbased: + return (2*torchaudio.transforms.AmplitudeToDB()(torch.sqrt(real**2 + img**2+1E-10))+80).clamp(min=0)/35 + # return (2*torchaudio.transforms.AmplitudeToDB()(F.softplus(self.wave_noise_amplifier)*torch.sqrt(torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels])**2 + torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels])**2))+80).clamp(min=0)/35 + else: + # return torch.ones([f0.shape[0],1,f0.shape[2],self.n_mels]) + return 180*F.softplus(self.wave_noise_amplifier) * torch.sqrt(real**2 + img**2+1E-10) + # return F.softplus(self.wave_noise_amplifier)*torch.sqrt(torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels])**2 + torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels])**2) + + # return (F.softplus(self.wave_noise_amplifier)) * (0.25*torch.randn([f0.shape[0],1,f0.shape[2],self.n_mels]))+1 + # return torch.ones([f0.shape[0],1,f0.shape[2],self.n_mels]) + + def forward(self,components,enable_hamon_excitation=True,enable_noise_excitation=True,enable_bgnoise=True): + # f0: B*1*T, amplitudes: B*2(voicing,unvoicing)*T, freq_formants,bandwidth_formants,amplitude_formants: B*formants*T + amplitudes = components['amplitudes'].unsqueeze(dim=-1) + amplitudes_h = components['amplitudes_h'].unsqueeze(dim=-1) + loudness = components['loudness'].unsqueeze(dim=-1) + f0_hz = components['f0_hz'] + # import pdb;pdb.set_trace() + if self.wavebased: + # self.hamonics = 1800*F.softplus(self.wave_hamon_amplifier)*self.voicing_linear(f0_hz) + # self.noise = 180*self.unvoicing(f0_hz,bg=False,mapping=False) + # self.bgnoise = 18*self.unvoicing(f0_hz,bg=True,mapping=False) + # import pdb;pdb.set_trace() + self.hamonics_wave = self.voicing_wavebased(f0_hz) + self.noise = self.unvoicing_wavebased(f0_hz,bg=False,mapping=False) + self.bgnoise = self.unvoicing_wavebased(f0_hz,bg=True) + else: + self.hamonics = self.voicing(f0_hz) + self.noise = self.unvoicing(f0_hz,bg=False) + self.bgnoise = self.unvoicing(f0_hz,bg=True) + # freq_formants = components['freq_formants']*self.n_mels + # bandwidth_formants = components['bandwidth_formants']*self.n_mels + # excitation = amplitudes[:,0:1]*hamonics + # excitation = loudness*(amplitudes[:,0:1]*hamonics) + + self.excitation_noise = loudness*(amplitudes[:,-1:])*self.noise if self.power_synth else (loudness*amplitudes[:,-1:]+1E-10).sqrt()*self.noise + duomask = 
components['freq_formants_noise_hz'].shape[1]>components['freq_formants_hamon_hz'].shape[1] + n_formant_noise = (components['freq_formants_noise_hz'].shape[1]-components['freq_formants_hamon_hz'].shape[1]) if duomask else components['freq_formants_noise_hz'].shape[1] + self.hamonic_dist = self.formant_mask(components['freq_formants_hamon_hz'],components['bandwidth_formants_hamon_hz'],components['amplitude_formants_hamon'],linear = self.linear_scale,f0_hz = f0_hz) + self.mask_noise = self.formant_mask(components['freq_formants_noise_hz'],components['bandwidth_formants_noise_hz'],components['amplitude_formants_noise'],linear = self.linear_scale,triangle_mask=False if self.wavebased else True,duomask=duomask,n_formant_noise=n_formant_noise,f0_hz = f0_hz,noise=True) + # self.mask_hamon = self.formant_mask(components['freq_formants_hamon']*self.n_mels,components['bandwidth_formants_hamon'],components['amplitude_formants_hamon']) + # self.mask_noise = self.formant_mask(components['freq_formants_noise']*self.n_mels,components['bandwidth_formants_noise'],components['amplitude_formants_noise']) + if self.power_synth: + self.excitation_hamon_wave = F.interpolate(loudness[...,-1]*amplitudes[:,0:1][...,-1],self.hamonics_wave.shape[-1],mode='linear',align_corners=False)*self.hamonics_wave + else: + self.excitation_hamon_wave = F.interpolate((loudness[...,-1]*amplitudes[:,0:1][...,-1]+1E-10).sqrt(),self.hamonics_wave.shape[-1],mode='linear',align_corners=False)*self.hamonics_wave + self.hamonics_wave_ = (self.excitation_hamon_wave*self.hamonic_dist).sum(1,keepdim=True) + + bgdist = F.softplus(self.bgnoise_amp)*self.noise_dist if self.noise_from_data else F.softplus(self.bgnoise_dist) + # if self.power_synth: + # self.excitation_hamon = loudness*(amplitudes[:,0:1])*self.hamonics + # else: + # self.excitation_hamon = loudness*amplitudes[:,0:1]*self.hamonics + # import pdb;pdb.set_trace() + self.noise_excitation = self.excitation_noise*self.mask_noise + + self.noise_excitation_wave = 2*inverse_spec_to_audio(self.noise_excitation.squeeze(1).permute(0,2,1),n_fft=self.n_fft*2-1,power_synth=self.power_synth) + self.noise_excitation_wave = F.pad(self.noise_excitation_wave,[0,self.hamonics_wave_.shape[2]-self.noise_excitation_wave.shape[1]]) + self.noise_excitation_wave = self.noise_excitation_wave.unsqueeze(1) + self.rec_wave_clean = self.noise_excitation_wave+self.hamonics_wave_ + + if self.add_bgnoise and enable_bgnoise: + self.bgn = bgdist*self.bgnoise*0.0003 + self.bgn_wave = 2*inverse_spec_to_audio(self.bgn.squeeze(1).permute(0,2,1),n_fft=self.n_fft*2-1,power_synth=self.power_synth) + self.bgn_wave = F.pad(self.bgn_wave,[0,self.hamonics_wave_.shape[2]-self.bgn_wave.shape[1]]) + self.bgn_wave = self.bgn_wave.unsqueeze(1) + self.rec_wave = self.rec_wave_clean + self.bgn_wave + else: + self.rec_wave = self.rec_wave_clean + + speech = wave2spec(self.rec_wave,self.n_fft,self.wave_fr,self.spec_fr,self.noise_db,self.max_db,to_db=True,power=2 if self.power_synth else 1) + # if self.wavebased: + # # import pdb; pdb.set_trace() + # bgn = bgdist*self.bgnoise*0.0003 if (self.add_bgnoise and enable_bgnoise) else 0 + # speech = ((self.excitation_hamon*self.mask_hamon_sum) if enable_hamon_excitation else torch.zeros(self.excitation_hamon.shape)) + (self.noise_excitation if enable_noise_excitation else 0) + bgn + # # speech = ((self.excitation_hamon*self.mask_hamon_sum) if enable_hamon_excitation else torch.zeros(self.excitation_hamon.shape)) + (self.excitation_noise*self.mask_noise_sum if enable_noise_excitation else 0) 
+ (((bgdist*self.bgnoise*0.0003) if not self.dbbased else (2*torchaudio.transforms.AmplitudeToDB()(bgdist*0.0003)/35. + self.bgnoise)) if (self.add_bgnoise and enable_bgnoise) else 0) + # # speech = speech if self.power_synth else speech**2 + # speech = (torchaudio.transforms.AmplitudeToDB()(speech).clamp(min=self.noise_db)-self.noise_db)/(self.max_db-self.noise_db)*2-1 + # else: + # # speech = ((self.excitation_hamon*self.mask_hamon_sum) if enable_hamon_excitation else torch.zeros(self.excitation_hamon.shape)) + (((bgdist*self.bgnoise*0.0003) if not self.dbbased else (2*torchaudio.transforms.AmplitudeToDB()(bgdist*0.0003)/35. + self.bgnoise)) if (self.add_bgnoise and enable_bgnoise) else 0) + (self.silient*torch.ones(self.mask_hamon_sum.shape) if self.dbbased else 0) + # speech = ((self.excitation_hamon*self.mask_hamon_sum) if enable_hamon_excitation else torch.zeros(self.excitation_hamon.shape)) + (self.noise_excitation if enable_noise_excitation else 0) + (((bgdist*self.bgnoise*0.0003) if not self.dbbased else (2*torchaudio.transforms.AmplitudeToDB()(bgdist*0.0003)/35. + self.bgnoise)) if (self.add_bgnoise and enable_bgnoise) else 0) + (self.silient*torch.ones(self.mask_hamon_sum.shape) if self.dbbased else 0) + # # speech = self.excitation_hamon*self.mask_hamon_sum + (self.excitation_noise*self.mask_noise_sum if enable_noise_excitation else 0) + self.silient*torch.ones(self.mask_hamon_sum.shape) + # if not self.dbbased: + # speech = db(speech) + + + # import pdb;pdb.set_trace() + if self.return_wave: + return speech,self.rec_wave_clean + else: + return speech + +@ENCODERS.register("EncoderFormant") +class FormantEncoder(nn.Module): + def __init__(self, n_mels=64, n_formants=4,n_formants_noise=2,min_octave=-31,max_octave=96,wavebased=False,hop_length=128,n_fft=256,noise_db=-50,max_db=22.5,broud=True,power_synth=False): + super(FormantEncoder, self).__init__() + self.wavebased = wavebased + self.n_mels = n_mels + self.n_formants = n_formants + self.n_formants_noise = n_formants_noise + self.min_octave = min_octave + self.max_octave = max_octave + self.noise_db = noise_db + self.max_db = max_db + self.broud = broud + self.hop_length = hop_length + self.n_fft = n_fft + self.power_synth=power_synth + self.formant_freq_limits_diff = torch.tensor([950.,2450.,2100.]).reshape([1,3,1]) #freq difference + self.formant_freq_limits_diff_low = torch.tensor([300.,300.,0.]).reshape([1,3,1]) #freq difference + + # self.formant_freq_limits_abs = torch.tensor([950.,2800.,3400.,4700.]).reshape([1,4,1]) #freq difference + # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2800.,3400]).reshape([1,4,1]) #freq difference + + # self.formant_freq_limits_abs = torch.tensor([950.,3300.,3600.,4700.]).reshape([1,4,1]) #freq difference + # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.,3400]).reshape([1,4,1]) #freq difference + + self.formant_freq_limits_abs = torch.tensor([950.,3400.,3800.,5000.,6000.,7000.]).reshape([1,6,1]) #freq difference + self.formant_freq_limits_abs_low = torch.tensor([300.,700.,1800.,3400,5000.,6000.]).reshape([1,6,1]) #freq difference + # self.formant_freq_limits_abs_low = torch.tensor([300.,700.,2700.,3400]).reshape([1,4,1]) #freq difference + + # self.formant_freq_limits_abs_noise = torch.tensor([7000.]).reshape([1,1,1]) #freq difference + self.formant_freq_limits_abs_noise = torch.tensor([8000.,7000.,7000.]).reshape([1,3,1]) #freq difference + self.formant_freq_limits_abs_noise_low = torch.tensor([4000.,3000.,3000.]).reshape([1,3,1]) #freq difference + # 
self.formant_freq_limits_abs_noise_low = torch.tensor([4000.,500.,500.]).reshape([1,3,1]) #freq difference + + self.formant_bandwitdh_bias = Parameter(torch.Tensor(1)) + self.formant_bandwitdh_slop = Parameter(torch.Tensor(1)) + self.formant_bandwitdh_thres = Parameter(torch.Tensor(1)) + with torch.no_grad(): + nn.init.constant_(self.formant_bandwitdh_bias,0) + nn.init.constant_(self.formant_bandwitdh_slop,0) + nn.init.constant_(self.formant_bandwitdh_thres,0) + + # self.formant_freq_limits = torch.cumsum(self.formant_freq_limits_diff,dim=0) + # self.formant_freq_limits_mel = torch.cat([torch.tensor([0.]),mel_scale(n_mels,self.formant_freq_limits)/n_mels]) + # self.formant_freq_limits_mel_diff = torch.reshape(self.formant_freq_limits_mel[1:]-self.formant_freq_limits_mel[:-1],[1,3,1]) + if broud: + if wavebased: + self.conv1_narrow = ln.Conv1d(n_fft,64,3,1,1) + self.conv1_mel = ln.Conv1d(128,64,3,1,1) + self.norm1_mel = nn.GroupNorm(32,64) + self.conv2_mel = ln.Conv1d(64,128,3,1,1) + self.norm2_mel = nn.GroupNorm(32,128) + self.conv_fundementals_mel = ln.Conv1d(128,128,3,1,1) + self.norm_fundementals_mel = nn.GroupNorm(32,128) + self.f0_drop_mel = nn.Dropout() + else: + self.conv1_narrow = ln.Conv1d(n_mels,64,3,1,1) + self.norm1_narrow = nn.GroupNorm(32,64) + self.conv2_narrow = ln.Conv1d(64,128,3,1,1) + self.norm2_narrow = nn.GroupNorm(32,128) + + self.conv_fundementals_narrow = ln.Conv1d(128,128,3,1,1) + self.norm_fundementals_narrow = nn.GroupNorm(32,128) + self.f0_drop_narrow = nn.Dropout() + if wavebased: + self.conv_f0_narrow = ln.Conv1d(256,1,1,1,0) + else: + self.conv_f0_narrow = ln.Conv1d(128,1,1,1,0) + + self.conv_amplitudes_narrow = ln.Conv1d(128,2,1,1,0) + self.conv_amplitudes_h_narrow = ln.Conv1d(128,2,1,1,0) + + if wavebased: + self.conv1 = ln.Conv1d(n_fft,64,3,1,1) + else: + self.conv1 = ln.Conv1d(n_mels,64,3,1,1) + self.norm1 = nn.GroupNorm(32,64) + self.conv2 = ln.Conv1d(64,128,3,1,1) + self.norm2 = nn.GroupNorm(32,128) + + self.conv_fundementals = ln.Conv1d(128,128,3,1,1) + self.norm_fundementals = nn.GroupNorm(32,128) + self.f0_drop = nn.Dropout() + self.conv_f0 = ln.Conv1d(128,1,1,1,0) + + self.conv_amplitudes = ln.Conv1d(128,2,1,1,0) + self.conv_amplitudes_h = ln.Conv1d(128,2,1,1,0) + # self.conv_loudness = nn.Sequential(ln.Conv1d(n_fft if wavebased else n_mels,128,1,1,0), + # nn.LeakyReLU(0.2), + # ln.Conv1d(128,128,1,1,0), + # nn.LeakyReLU(0.2), + # ln.Conv1d(128,1,1,1,0,bias_initial=0.5),) + self.conv_loudness = nn.Sequential(ln.Conv1d(n_fft if wavebased else n_mels,128,1,1,0), + nn.LeakyReLU(0.2), + ln.Conv1d(128,128,1,1,0), + nn.LeakyReLU(0.2), + ln.Conv1d(128,1,1,1,0,bias_initial=-9.),) + # self.conv_loudness_power = nn.Sequential(ln.Conv1d(1,128,1,1,0), + # nn.LeakyReLU(0.2), + # ln.Conv1d(128,128,1,1,0), + # nn.LeakyReLU(0.2), + # ln.Conv1d(128,1,1,1,0,bias_initial=-9.),) + + if self.broud: + self.conv_formants = ln.Conv1d(128,128,3,1,1) + else: + self.conv_formants = ln.Conv1d(128,128,3,1,1) + self.norm_formants = nn.GroupNorm(32,128) + self.conv_formants_freqs = ln.Conv1d(128,n_formants,1,1,0) + self.conv_formants_bandwidth = ln.Conv1d(128,n_formants,1,1,0) + self.conv_formants_amplitude = ln.Conv1d(128,n_formants,1,1,0) + + self.conv_formants_freqs_noise = ln.Conv1d(128,self.n_formants_noise,1,1,0) + self.conv_formants_bandwidth_noise = ln.Conv1d(128,self.n_formants_noise,1,1,0) + self.conv_formants_amplitude_noise = ln.Conv1d(128,self.n_formants_noise,1,1,0) + + self.amplifier = Parameter(torch.Tensor(1)) + self.bias = Parameter(torch.Tensor(1)) + with 
torch.no_grad(): + nn.init.constant_(self.amplifier,1.0) + nn.init.constant_(self.bias,0.) + + def forward(self,x,x_denoise=None,duomask=False,noise_level = None,x_amp=None): + x = x.squeeze(dim=1).permute(0,2,1) #B * f * T + if x_denoise is not None: + x_denoise = x_denoise.squeeze(dim=1).permute(0,2,1) + # x_denoise_amp = amplitude(x_denoise,self.noise_db,self.max_db) + # import pdb; pdb.set_trace() + if x_amp is None: + x_amp = amplitude(x,self.noise_db,self.max_db,trim_noise=True) + else: + x_amp = x_amp.squeeze(dim=1).permute(0,2,1) + hann_win = torch.hann_window(5,periodic=False).reshape([1,1,5,1]) + x_smooth = F.conv2d(x.unsqueeze(1).transpose(-2,-1),hann_win,padding=[2,0]).transpose(-2,-1).squeeze(1) + # loudness = F.softplus(self.amplifier)*(torch.mean(x_denoise_amp,dim=1,keepdim=True)) + # loudness = F.relu(F.softplus(self.amplifier)*(torch.mean(x_amp,dim=1,keepdim=True)-noise_level*0.0003)) + # loudness = torch.mean((x*0.5+0.5) if x_denoise is None else (x_denoise*0.5+0.5),dim=1,keepdim=True) + # loudness = F.softplus(self.amplifier)*(loudness) + # loudness = F.softplus(self.amplifier)*torch.mean(x_amp,dim=1,keepdim=True) + # loudness = F.softplus(self.amplifier)*F.relu(loudness - F.softplus(self.bias)) + if self.power_synth: + loudness = F.softplus((1. if self.wavebased else 1.0)*self.conv_loudness(x_smooth)) + else: + # loudness = F.softplus(self.amplifier)*F.relu((x_amp**2).sum(1,keepdim=True)/self.hop_length/383.-self.bias) + loudness = F.softplus((1. if self.wavebased else 1.0)*self.conv_loudness(x_smooth)) # compute power loudness + # loudness = F.softplus(self.amplifier)*F.relu((x_amp**2).sum(1,keepdim=True)-self.bias) # compute power loudness + # loudness = F.softplus((1. if self.wavebased else 1.0)*self.conv_loudness(x)) + # loudness = F.relu(self.conv_loudness(x)) + + # if not self.power_synth: + # loudness = loudness.sqrt() + + if self.broud: + x_narrow = x + x_narrow = F.leaky_relu(self.norm1_narrow(self.conv1_narrow(x_narrow)),0.2) + x_common_narrow = F.leaky_relu(self.norm2_narrow(self.conv2_narrow(x_narrow)),0.2) + amplitudes = F.softmax(self.conv_amplitudes_narrow(x_common_narrow),dim=1) + amplitudes_h = F.softmax(self.conv_amplitudes_h_narrow(x_common_narrow),dim=1) + x_fundementals_narrow = self.f0_drop_narrow(F.leaky_relu(self.norm_fundementals_narrow(self.conv_fundementals_narrow(x_common_narrow)),0.2)) + + x_amp = amplitude(x.unsqueeze(1),self.noise_db,self.max_db).transpose(-2,-1) + x_mel = to_db(torchaudio.transforms.MelScale(f_max=8000,n_stft=self.n_fft)(x_amp.transpose(-2,-1)),self.noise_db,self.max_db).squeeze(1) + x = F.leaky_relu(self.norm1_mel(self.conv1_mel(x_mel)),0.2) + x_common_mel = F.leaky_relu(self.norm2_mel(self.conv2_mel(x)),0.2) + x_fundementals_mel = self.f0_drop_mel(F.leaky_relu(self.norm_fundementals_mel(self.conv_fundementals_mel(x_common_mel)),0.2)) + + f0_hz = torch.sigmoid(self.conv_f0_narrow(torch.cat([x_fundementals_narrow,x_fundementals_mel],dim=1))) * 120 + 180 # 180hz < f0 < 300 hz + f0 = torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001) + + hann_win = torch.hann_window(21,periodic=False).reshape([1,1,21,1]) + x = to_db(F.conv2d(x_amp,hann_win,padding=[10,0]).transpose(-2,-1),self.noise_db,self.max_db).squeeze(1) + x = F.leaky_relu(self.norm1(self.conv1(x)),0.2) + x_common = F.leaky_relu(self.norm2(self.conv2(x)),0.2) + + + + else: + x = F.leaky_relu(self.norm1(self.conv1(x)),0.2) + x_common = F.leaky_relu(self.norm2(self.conv2(x)),0.2) + + + # loudness = F.relu(self.conv_loudness(x_common)) + # loudness = 
F.relu(self.conv_loudness(x_common)) +(10**(self.noise_db/10.-1) if self.wavebased else 0) + amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1) + amplitudes_h = F.softmax(self.conv_amplitudes_h(x_common),dim=1) + + # x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2) + x_fundementals = self.f0_drop(F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2)) + # f0 in mel: + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) + # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice + + # f0 in hz: + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 120 + 180 # 180hz < f0 < 300 hz + f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 332 + 88 # 88hz < f0 < 420 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 120 + 180 # 180hz < f0 < 300 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 528 + 88 # 88hz < f0 < 616 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 302 + 118 # 118hz < f0 < 420 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 240 + 180 # 180hz < f0 < 420 hz + # f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 260 + 160 # 160hz < f0 < 420 hz + f0 = torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001) + + x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2) + formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants)) + # # relative freq: + # formants_freqs_hz = formants_freqs*(self.formant_freq_limits_diff[:,:self.n_formants]-self.formant_freq_limits_diff_low[:,:self.n_formants])+self.formant_freq_limits_diff_low[:,:self.n_formants] + # # formants_freqs_hz = formants_freqs*6839 + # formants_freqs_hz = torch.cumsum(formants_freqs_hz,dim=1) + + # abs freq: + formants_freqs_hz = formants_freqs*(self.formant_freq_limits_abs[:,:self.n_formants]-self.formant_freq_limits_abs_low[:,:self.n_formants])+self.formant_freq_limits_abs_low[:,:self.n_formants] + # formants_freqs_hz = formants_freqs*6839 + formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/(self.n_mels*1.0),min=0) + + # formants_freqs = formants_freqs + f0 + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) *6839 + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 150 + # formants_bandwidth_hz = 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+100) + # formants_bandwidth_hz = (torch.sigmoid(self.conv_formants_bandwidth(x_formants))) * (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100) #good for spec based method + # formants_bandwidth_hz = ((3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.0125*torch.relu(formants_freqs_hz-1000)+100))#good for spec based method + formants_bandwidth_hz = 0.65*(0.00625*torch.relu(formants_freqs_hz)+375) + # formants_bandwidth_hz = (2**(torch.tanh(self.formant_bandwitdh_slop))*0.001*torch.relu(formants_freqs_hz-4000*torch.sigmoid(self.formant_bandwitdh_thres))+375*2**(torch.tanh(self.formant_bandwitdh_bias))) + # formants_bandwidth_hz = 
torch.exp(0.4*torch.tanh(self.conv_formants_bandwidth(x_formants))) * (0.00625*torch.relu(formants_freqs_hz-0)+375) + # formants_bandwidth_hz = (100*(torch.tanh(self.conv_formants_bandwidth(x_formants))) + (0.035*torch.relu(formants_freqs_hz-950)+250)) if self.wavebased else ((3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.0125*torch.relu(formants_freqs_hz-1000)+100))#good for spec based method + # formants_bandwidth_hz = (100*(torch.tanh(self.conv_formants_bandwidth(x_formants))) + (0.035*torch.relu(formants_freqs_hz-950)+250)) if self.wavebased else ((torch.sigmoid(self.conv_formants_bandwidth(x_formants))+0.2) * (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100))#good for spec based method + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+50) + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * (0.075*3*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+(2*torch.sigmoid(self.formant_bandwitdh_ratio)+1)*50) + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+50) + formants_bandwidth = bandwidth_mel(formants_freqs_hz,formants_bandwidth_hz,self.n_mels) + # formants_bandwidth_upper = formants_freqs_hz+formants_bandwidth_hz/2 + # formants_bandwidth_lower = torch.clamp(formants_freqs_hz-formants_bandwidth_hz/2,min=1) + # formants_bandwidth = (mel_scale(self.n_mels,formants_bandwidth_upper) - mel_scale(self.n_mels,formants_bandwidth_lower))/(self.n_mels*1.0) + # formants_amplitude = F.softmax(torch.cumsum(-F.relu(self.conv_formants_amplitude(x_formants)),dim=1),dim=1) + formants_amplitude_logit = self.conv_formants_amplitude(x_formants) + formants_amplitude = F.softmax(formants_amplitude_logit,dim=1) + + formants_freqs_noise = torch.sigmoid(self.conv_formants_freqs_noise(x_formants)) + # # relative freq: + # formants_freqs_hz = formants_freqs*(self.formant_freq_limits_diff[:,:self.n_formants]-self.formant_freq_limits_diff_low[:,:self.n_formants])+self.formant_freq_limits_diff_low[:,:self.n_formants] + # # formants_freqs_hz = formants_freqs*6839 + # formants_freqs_hz = torch.cumsum(formants_freqs_hz,dim=1) + + # abs freq: + formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:self.n_formants_noise]-self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise])+self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise] + if duomask: + formants_freqs_hz_noise = torch.cat([formants_freqs_hz,formants_freqs_hz_noise],dim=1) + # formants_freqs_hz = formants_freqs*6839 + formants_freqs_noise = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz_noise)/(self.n_mels*1.0),min=0) + + # formants_freqs = formants_freqs + f0 + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) *6839 + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 150 + # formants_bandwidth_hz_noise = torch.sigmoid(self.conv_formants_bandwidth_noise(x_formants)) * 8000 + 2000 + formants_bandwidth_hz_noise = self.conv_formants_bandwidth_noise(x_formants) + # formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 8000 + 2000 #2000-10000 + # formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 
2000 #0-2000 + formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 2344 + 586 #2000-10000 + formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 586 #0-2000 + formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz_noise_1,formants_bandwidth_hz_noise_2],dim=1) + if duomask: + formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz,formants_bandwidth_hz_noise],dim=1) + # formants_bandwidth_hz_noise = torch.sigmoid(self.conv_formants_bandwidth_noise(x_formants)) * 4000 + 1000 + # formants_bandwidth_hz_noise = torch.sigmoid(self.conv_formants_bandwidth_noise(x_formants)) * 4000 + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.relu(formants_freqs_hz-1000)+50) + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * (0.075*3*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+(2*torch.sigmoid(self.formant_bandwitdh_ratio)+1)*50) + # formants_bandwidth_hz = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) * 3*torch.sigmoid(self.formant_bandwitdh_ratio)*(0.075*torch.sigmoid(self.formant_bandwitdh_slop)*torch.relu(formants_freqs_hz-1000)+50) + formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels) + # formants_bandwidth_upper = formants_freqs_hz+formants_bandwidth_hz/2 + # formants_bandwidth_lower = torch.clamp(formants_freqs_hz-formants_bandwidth_hz/2,min=1) + # formants_bandwidth = (mel_scale(self.n_mels,formants_bandwidth_upper) - mel_scale(self.n_mels,formants_bandwidth_lower))/(self.n_mels*1.0) + formants_amplitude_noise_logit = self.conv_formants_amplitude_noise(x_formants) + if duomask: + formants_amplitude_noise_logit = torch.cat([formants_amplitude_logit,formants_amplitude_noise_logit],dim=1) + formants_amplitude_noise = F.softmax(formants_amplitude_noise_logit,dim=1) + + components = { 'f0':f0, + 'f0_hz':f0_hz, + 'loudness':loudness, + 'amplitudes':amplitudes, + 'amplitudes_h':amplitudes_h, + 'freq_formants_hamon':formants_freqs, + 'bandwidth_formants_hamon':formants_bandwidth, + 'freq_formants_hamon_hz':formants_freqs_hz, + 'bandwidth_formants_hamon_hz':formants_bandwidth_hz, + 'amplitude_formants_hamon':formants_amplitude, + 'freq_formants_noise':formants_freqs_noise, + 'bandwidth_formants_noise':formants_bandwidth_noise, + 'freq_formants_noise_hz':formants_freqs_hz_noise, + 'bandwidth_formants_noise_hz':formants_bandwidth_hz_noise, + 'amplitude_formants_noise':formants_amplitude_noise, + } + return components + +class FromECoG(nn.Module): + def __init__(self, outputs,residual=False,shape='3D'): + super().__init__() + self.residual=residual + if shape =='3D': + self.from_ecog = ln.Conv3d(1, outputs, [9,1,1], 1, [4,0,0]) + else: + self.from_ecog = ln.Conv2d(1, outputs, [9,1], 1, [4,0]) + + def forward(self, x): + x = self.from_ecog(x) + if not self.residual: + x = F.leaky_relu(x, 0.2) + return x + +class ECoGMappingBlock(nn.Module): + def __init__(self, inputs, outputs, kernel_size,dilation=1,fused_scale=True,residual=False,resample=[],pool=None,shape='3D'): + super(ECoGMappingBlock, self).__init__() + self.residual = residual + self.pool = pool + self.inputs_resample = resample + self.dim_missmatch = (inputs!=outputs) + self.resample = resample + if not self.resample: + self.resample=1 + self.padding = list(np.array(dilation)*(np.array(kernel_size)-1)//2) + if shape=='2D': + conv=ln.Conv2d + maxpool = 
nn.MaxPool2d
+            avgpool = nn.AvgPool2d
+        if shape=='3D':
+            conv=ln.Conv3d
+            maxpool = nn.MaxPool3d
+            avgpool = nn.AvgPool3d
+        # self.padding = [dilation[i]*(kernel_size[i]-1)//2 for i in range(len(dilation))]
+        if residual:
+            self.norm1 = nn.GroupNorm(min(inputs,32),inputs)
+        else:
+            self.norm1 = nn.GroupNorm(min(outputs,32),outputs)
+        if pool is None:
+            self.conv1 = conv(inputs, outputs, kernel_size, self.resample, self.padding, dilation=dilation, bias=False)
+        else:
+            self.conv1 = conv(inputs, outputs, kernel_size, 1, self.padding, dilation=dilation, bias=False)
+            # case-insensitive check, so call sites may pass 'Max' or 'MAX'
+            self.pool1 = maxpool(self.resample,self.resample) if self.pool.lower()=='max' else avgpool(self.resample,self.resample)
+        if self.inputs_resample or self.dim_missmatch:
+            if pool is None:
+                self.convskip = conv(inputs, outputs, kernel_size, self.resample, self.padding, dilation=dilation, bias=False)
+            else:
+                self.convskip = conv(inputs, outputs, kernel_size, 1, self.padding, dilation=dilation, bias=False)
+                self.poolskip = maxpool(self.resample,self.resample) if self.pool.lower()=='max' else avgpool(self.resample,self.resample)
+
+        self.conv2 = conv(outputs, outputs, kernel_size, 1, self.padding, dilation=dilation, bias=False)
+        self.norm2 = nn.GroupNorm(min(outputs,32),outputs)
+
+    def forward(self,x):
+        if self.residual:
+            x = F.leaky_relu(self.norm1(x),0.2)
+            if self.inputs_resample or self.dim_missmatch:
+                # x_skip = F.avg_pool3d(x,self.resample,self.resample)
+                x_skip = self.convskip(x)
+                if self.pool is not None:
+                    x_skip = self.poolskip(x_skip)
+            else:
+                x_skip = x
+            x = F.leaky_relu(self.norm2(self.conv1(x)),0.2)
+            if self.pool is not None:
+                # main path pools with pool1; poolskip belongs to the skip branch
+                x = self.pool1(x)
+            x = self.conv2(x)
+            x = x_skip + x
+        else:
+            x = F.leaky_relu(self.norm1(self.conv1(x)),0.2)
+            x = F.leaky_relu(self.norm2(self.conv2(x)),0.2)
+        return x
+
+
+
+@ECOG_ENCODER.register("ECoGMappingBottleneck")
+class ECoGMapping_Bottleneck(nn.Module):
+    def __init__(self,n_mels,n_formants,n_formants_noise=1):
+        super(ECoGMapping_Bottleneck, self).__init__()
+        self.n_formants = n_formants
+        self.n_mels = n_mels
+        self.n_formants_noise = n_formants_noise
+
+        self.formant_freq_limits_diff = torch.tensor([950.,2450.,2100.]).reshape([1,3,1]) #freq difference
+        self.formant_freq_limits_diff_low = torch.tensor([300.,300.,0.]).reshape([1,3,1]) #freq difference
+
+        # self.formant_freq_limits_abs = torch.tensor([950.,2800.,3400.,4700.]).reshape([1,4,1])
+        # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2800.,3400]).reshape([1,4,1])
+
+        # self.formant_freq_limits_abs = torch.tensor([950.,3300.,3600.,4700.]).reshape([1,4,1])
+        # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.,3400]).reshape([1,4,1])
+
+        self.formant_freq_limits_abs = torch.tensor([950.,3400.,3800.,5000.,6000.,7000.]).reshape([1,6,1]) #absolute freq limits
+        self.formant_freq_limits_abs_low = torch.tensor([300.,700.,1800.,3400,5000.,6000.]).reshape([1,6,1]) #absolute freq limits
+
+        # self.formant_freq_limits_abs_noise = torch.tensor([7000.]).reshape([1,1,1])
+        self.formant_freq_limits_abs_noise = torch.tensor([8000.,7000.,7000.]).reshape([1,3,1]) #absolute freq limits
+        self.formant_freq_limits_abs_noise_low = torch.tensor([4000.,3000.,3000.]).reshape([1,3,1]) #absolute freq limits
+
+        self.formant_bandwitdh_ratio = Parameter(torch.Tensor(1))
+        self.formant_bandwitdh_slop = Parameter(torch.Tensor(1))
+        with torch.no_grad():
+            nn.init.constant_(self.formant_bandwitdh_ratio,0)
+            nn.init.constant_(self.formant_bandwitdh_slop,0)
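+        # Editorial note: the encoder below reduces the temporal rate by 16x
+        # (four stride-2 stages) while the 15x15 electrode grid is spatially
+        # downsampled and finally collapsed by a max over space; the four
+        # ConvTranspose1d stages then restore the original temporal rate so
+        # the per-frame formant parameters line up with spectrogram frames.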
+ + self.from_ecog = FromECoG(16,residual=True) + self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,resample = [2,1,1],pool='MAX') + self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,resample = [2,1,1],pool='MAX') + self.norm_mask = nn.GroupNorm(32,64) + self.mask = ln.Conv3d(64,1,[3,1,1],1,[1,0,0]) + self.conv3 = ECoGMappingBlock(64,128,[3,3,3],residual=True,resample = [2,2,2],pool='MAX') + self.conv4 = ECoGMappingBlock(128,256,[3,3,3],residual=True,resample = [2,2,2],pool='MAX') + self.norm = nn.GroupNorm(32,256) + self.conv5 = ln.Conv1d(256,256,3,1,1) + self.norm2 = nn.GroupNorm(32,256) + self.conv6 = ln.ConvTranspose1d(256, 128, 3, 2, 1, transform_kernel=True) + self.norm3 = nn.GroupNorm(32,128) + self.conv7 = ln.ConvTranspose1d(128, 64, 3, 2, 1, transform_kernel=True) + self.norm4 = nn.GroupNorm(32,64) + self.conv8 = ln.ConvTranspose1d(64, 32, 3, 2, 1, transform_kernel=True) + self.norm5 = nn.GroupNorm(32,32) + self.conv9 = ln.ConvTranspose1d(32, 32, 3, 2, 1, transform_kernel=True) + self.norm6 = nn.GroupNorm(32,32) + + self.conv_fundementals = ln.Conv1d(32,32,3,1,1) + self.norm_fundementals = nn.GroupNorm(32,32) + self.f0_drop = nn.Dropout() + self.conv_f0 = ln.Conv1d(32,1,1,1,0) + self.conv_amplitudes = ln.Conv1d(32,2,1,1,0) + self.conv_amplitudes_h = ln.Conv1d(32,2,1,1,0) + self.conv_loudness = ln.Conv1d(32,1,1,1,0,bias_initial=-9.) + + self.conv_formants = ln.Conv1d(32,32,3,1,1) + self.norm_formants = nn.GroupNorm(32,32) + self.conv_formants_freqs = ln.Conv1d(32,n_formants,1,1,0) + self.conv_formants_bandwidth = ln.Conv1d(32,n_formants,1,1,0) + self.conv_formants_amplitude = ln.Conv1d(32,n_formants,1,1,0) + + self.conv_formants_freqs_noise = ln.Conv1d(32,n_formants_noise,1,1,0) + self.conv_formants_bandwidth_noise = ln.Conv1d(32,n_formants_noise,1,1,0) + self.conv_formants_amplitude_noise = ln.Conv1d(32,n_formants_noise,1,1,0) + + + def forward(self,ecog,mask_prior,mni): + x_common_all = [] + for d in range(len(ecog)): + x = ecog[d] + x = x.reshape([-1,1,x.shape[1],15,15]) + mask_prior_d = mask_prior[d].reshape(-1,1,1,15,15) + x = self.from_ecog(x) + x = self.conv1(x) + x = self.conv2(x) + mask = torch.sigmoid(self.mask(F.leaky_relu(self.norm_mask(x),0.2))) + mask = mask[:,:,4:] + if mask_prior is not None: + mask = mask*mask_prior_d + x = x[:,:,4:] + x = x*mask + x = self.conv3(x) + x = self.conv4(x) + x = x.max(-1)[0].max(-1)[0] + x = self.conv5(F.leaky_relu(self.norm(x),0.2)) + x = self.conv6(F.leaky_relu(self.norm2(x),0.2)) + x = self.conv7(F.leaky_relu(self.norm3(x),0.2)) + x = self.conv8(F.leaky_relu(self.norm4(x),0.2)) + x = self.conv9(F.leaky_relu(self.norm5(x),0.2)) + x_common = F.leaky_relu(self.norm6(x),0.2) + x_common_all += [x_common] + + x_common = torch.cat(x_common_all,dim=0) + loudness = F.softplus(self.conv_loudness(x_common)) + amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1) + amplitudes_h = F.softmax(self.conv_amplitudes_h(x_common),dim=1) + + # x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2) + x_fundementals = self.f0_drop(F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2)) + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) + # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice + # f0 = 
torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice + + # f0 in hz: + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * 528 + 88 # 88hz < f0 < 616 hz + f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 332 + 88 # 88hz < f0 < 420 hz + f0 = torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001) + + x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2) + formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants)) + # formants_freqs = torch.cumsum(formants_freqs,dim=1) + # formants_freqs = formants_freqs + + # abs freq + formants_freqs_hz = formants_freqs*(self.formant_freq_limits_abs[:,:self.n_formants]-self.formant_freq_limits_abs_low[:,:self.n_formants])+self.formant_freq_limits_abs_low[:,:self.n_formants] + formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/(self.n_mels*1.0),min=0) + + # formants_freqs = formants_freqs + f0 + # formants_bandwidth = torch.sigmoid(self.conv_formants_bandwidth(x_formants)) + # formants_bandwidth_hz = (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100) + formants_bandwidth_hz = 0.65*(0.00625*torch.relu(formants_freqs_hz)+375) + # formants_bandwidth_hz = (torch.sigmoid(self.conv_formants_bandwidth(x_formants))) * (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100) + formants_bandwidth = bandwidth_mel(formants_freqs_hz,formants_bandwidth_hz,self.n_mels) + formants_amplitude_logit = self.conv_formants_amplitude(x_formants) + formants_amplitude = F.softmax(formants_amplitude_logit,dim=1) + + formants_freqs_noise = torch.sigmoid(self.conv_formants_freqs_noise(x_formants)) + formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:self.n_formants_noise]-self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise])+self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise] + # formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:1]-self.formant_freq_limits_abs_noise_low[:,:1])+self.formant_freq_limits_abs_noise_low[:,:1] + formants_freqs_hz_noise = torch.cat([formants_freqs_hz,formants_freqs_hz_noise],dim=1) + formants_freqs_noise = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz_noise)/(self.n_mels*1.0),min=0) + # formants_bandwidth_hz_noise = F.relu(self.conv_formants_bandwidth_noise(x_formants)) * 8000 + 2000 + # formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels) + # formants_amplitude_noise = F.softmax(self.conv_formants_amplitude_noise(x_formants),dim=1) + formants_bandwidth_hz_noise = self.conv_formants_bandwidth_noise(x_formants) + formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 2344 + 586 #2000-10000 + formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 586 #0-2000 + formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz_noise_1,formants_bandwidth_hz_noise_2],dim=1) + formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz,formants_bandwidth_hz_noise],dim=1) + formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels) + formants_amplitude_noise_logit = self.conv_formants_amplitude_noise(x_formants) + formants_amplitude_noise_logit = torch.cat([formants_amplitude_logit,formants_amplitude_noise_logit],dim=1) + formants_amplitude_noise = 
F.softmax(formants_amplitude_noise_logit,dim=1) + + components = { 'f0':f0, + 'f0_hz':f0_hz, + 'loudness':loudness, + 'amplitudes':amplitudes, + 'amplitudes_h':amplitudes_h, + 'freq_formants_hamon':formants_freqs, + 'bandwidth_formants_hamon':formants_bandwidth, + 'freq_formants_hamon_hz':formants_freqs_hz, + 'bandwidth_formants_hamon_hz':formants_bandwidth_hz, + 'amplitude_formants_hamon':formants_amplitude, + 'freq_formants_noise':formants_freqs_noise, + 'bandwidth_formants_noise':formants_bandwidth_noise, + 'freq_formants_noise_hz':formants_freqs_hz_noise, + 'bandwidth_formants_noise_hz':formants_bandwidth_hz_noise, + 'amplitude_formants_noise':formants_amplitude_noise, + } + return components + + +class BackBone(nn.Module): + def __init__(self,attentional_mask=True): + super(BackBone, self).__init__() + self.attentional_mask = attentional_mask + self.from_ecog = FromECoG(16,residual=True,shape='2D') + self.conv1 = ECoGMappingBlock(16,32,[5,1],residual=True,resample = [1,1],shape='2D') + self.conv2 = ECoGMappingBlock(32,64,[3,1],residual=True,resample = [1,1],shape='2D') + self.norm_mask = nn.GroupNorm(32,64) + self.mask = ln.Conv2d(64,1,[3,1],1,[1,0]) + + def forward(self,ecog): + x_common_all = [] + mask_all=[] + for d in range(len(ecog)): + x = ecog[d] + x = x.unsqueeze(1) + x = self.from_ecog(x) + x = self.conv1(x) + x = self.conv2(x) + if self.attentional_mask: + mask = F.relu(self.mask(F.leaky_relu(self.norm_mask(x),0.2))) + mask = mask[:,:,16:] + x = x[:,:,16:] + mask_all +=[mask] + else: + # mask = torch.sigmoid(self.mask(F.leaky_relu(self.norm_mask(x),0.2))) + # mask = mask[:,:,16:] + x = x[:,:,16:] + # x = x*mask + + x_common_all +=[x] + + x_common = torch.cat(x_common_all,dim=0) + if self.attentional_mask: + mask = torch.cat(mask_all,dim=0) + return x_common,mask.squeeze(1) if self.attentional_mask else None + +class ECoGEncoderFormantHeads(nn.Module): + def __init__(self,inputs,n_mels,n_formants): + super(ECoGEncoderFormantHeads,self).__init__() + self.n_mels = n_mels + self.f0 = ln.Conv1d(inputs,1,1) + self.loudness = ln.Conv1d(inputs,1,1) + self.amplitudes = ln.Conv1d(inputs,2,1) + self.freq_formants = ln.Conv1d(inputs,n_formants,1) + self.bandwidth_formants = ln.Conv1d(inputs,n_formants,1) + self.amplitude_formants = ln.Conv1d(inputs,n_formants,1) + + def forward(self,x): + loudness = F.relu(self.loudness(x)) + f0 = torch.sigmoid(self.f0(x)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice + # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice + amplitudes = F.softmax(self.amplitudes(x),dim=1) + freq_formants = torch.sigmoid(self.freq_formants(x)) + freq_formants = torch.cumsum(freq_formants,dim=1) + bandwidth_formants = torch.sigmoid(self.bandwidth_formants(x)) + amplitude_formants = F.softmax(self.amplitude_formants(x),dim=1) + return {'f0':f0, + 'loudness':loudness, + 'amplitudes':amplitudes, + 'freq_formants':freq_formants, + 'bandwidth_formants':bandwidth_formants, + 'amplitude_formants':amplitude_formants,} + +@ECOG_ENCODER.register("ECoGMappingTransformer") +class ECoGMapping_Transformer(nn.Module): + def __init__(self,n_mels,n_formants,SeqLen=128,hidden_dim=256,dim_feedforward=256,encoder_only=False,attentional_mask=False,n_heads=1,non_local=False): + super(ECoGMapping_Transformer, self).__init__() + self.n_mels = n_mels, + self.n_formant = 
n_formants
+        self.encoder_only = encoder_only
+        self.attentional_mask = attentional_mask
+        self.backbone = BackBone(attentional_mask=attentional_mask)
+        self.position_encoding = build_position_encoding(SeqLen,hidden_dim,'MNI')
+        self.input_proj = ln.Conv2d(64, hidden_dim, kernel_size=1)
+        if non_local:
+            Transformer = TransformerNL
+        else:
+            Transformer = TransformerTS
+        self.transformer = Transformer(d_model=hidden_dim, nhead=n_heads, num_encoder_layers=6,
+                                        num_decoder_layers=6, dim_feedforward=dim_feedforward, dropout=0.1,
+                                        activation="relu", normalize_before=False,
+                                        return_intermediate_dec=False,encoder_only = encoder_only)
+        self.output_proj = ECoGEncoderFormantHeads(hidden_dim,n_mels,n_formants)
+        self.query_embed = nn.Embedding(SeqLen, hidden_dim)
+
+    def forward(self,x,mask_prior,mni):
+        features,mask = self.backbone(x)
+        pos = self.position_encoding(mni)
+        hs = self.transformer(self.input_proj(features), mask if self.attentional_mask else None, self.query_embed.weight, pos)
+        if not self.encoder_only:
+            hs,encoded = hs
+            out = self.output_proj(hs)
+        else:
+            _,encoded = hs
+            encoded = encoded.max(-1)[0]
+            out = self.output_proj(encoded)
+        return out
+
+
+
diff --git a/tracker.py b/tracker.py
index 55969dab..99633cb7 100644
--- a/tracker.py
+++ b/tracker.py
@@ -59,11 +59,12 @@ def mean(self,dim=[]):
 
 class LossTracker:
-    def __init__(self, output_folder='.'):
+    def __init__(self, output_folder='.',test=False):
         self.tracks = OrderedDict()
         self.epochs = []
         self.means_over_epochs = OrderedDict()
         self.output_folder = output_folder
+        self.filename = 'log_test.csv' if test else 'log_train.csv'
 
     def update(self, d):
         for k, v in d.items():
@@ -91,8 +92,7 @@ def register_means(self, epoch):
                 value.reset()
             else:
                 self.means_over_epochs[key].append(None)
-
-        with open(os.path.join(self.output_folder, 'log.csv'), mode='w') as csv_file:
+        with open(os.path.join(self.output_folder, self.filename), mode='w') as csv_file:
             fieldnames = ['epoch'] + [key+str(i) for key in list(self.tracks.keys()) for i in range(self.means_over_epochs[key][0].size)]
             # fieldnames = ['epoch'] + [list(self.tracks.keys())]
             writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
diff --git a/train_formant.py b/train_formant.py
index 132a0960..72ed286b 100644
--- a/train_formant.py
+++ b/train_formant.py
@@ -13,6 +13,8 @@
 #  limitations under the License.
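+# A note on the LossTracker change above: epoch means now go to per-mode CSVs.
+# Minimal usage sketch (update()/register_means() are the methods visible in
+# the tracker.py diff; cfg, epoch and loss_value are placeholders):
+#   tracker = LossTracker(cfg.OUTPUT_DIR)                  # -> log_train.csv
+#   tracker_test = LossTracker(cfg.OUTPUT_DIR, test=True)  # -> log_test.csv
+#   tracker.update(dict(Lrec=loss_value))   # accumulate one batch's scalars
+#   tracker.register_means(epoch)           # flush per-epoch means to the CSV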
# ============================================================================== import json +from os import terminal_size +import pdb import torch.utils.data from torchvision.utils import save_image from net_formant import * @@ -45,6 +47,12 @@ def train(cfg, logger, local_rank, world_size, distributed): ecog_encoder_name=cfg.MODEL.MAPPING_FROM_ECOG, spec_chans = cfg.DATASET.SPEC_CHANS, n_formants = cfg.MODEL.N_FORMANTS, + n_formants_noise = cfg.MODEL.N_FORMANTS_NOISE, + n_formants_ecog = cfg.MODEL.N_FORMANTS_ECOG, + wavebased = cfg.MODEL.WAVE_BASED, + n_fft=cfg.MODEL.N_FFT, + noise_db=cfg.MODEL.NOISE_DB, + max_db=cfg.MODEL.MAX_DB, with_ecog = cfg.MODEL.ECOG, hidden_dim=cfg.MODEL.TRANSFORMER.HIDDEN_DIM, dim_feedforward=cfg.MODEL.TRANSFORMER.DIM_FEEDFORWARD, @@ -52,6 +60,10 @@ def train(cfg, logger, local_rank, world_size, distributed): attentional_mask=cfg.MODEL.TRANSFORMER.ATTENTIONAL_MASK, n_heads = cfg.MODEL.TRANSFORMER.N_HEADS, non_local = cfg.MODEL.TRANSFORMER.NON_LOCAL, + do_mel_guide = cfg.MODEL.DO_MEL_GUIDE, + noise_from_data = cfg.MODEL.BGNOISE_FROMDATA, + specsup=cfg.FINETUNE.SPECSUP, + power_synth = cfg.MODEL.POWER_SYNTH, ) model.cuda(local_rank) model.train() @@ -62,6 +74,12 @@ def train(cfg, logger, local_rank, world_size, distributed): ecog_encoder_name=cfg.MODEL.MAPPING_FROM_ECOG, spec_chans = cfg.DATASET.SPEC_CHANS, n_formants = cfg.MODEL.N_FORMANTS, + n_formants_noise = cfg.MODEL.N_FORMANTS_NOISE, + n_formants_ecog = cfg.MODEL.N_FORMANTS_ECOG, + wavebased = cfg.MODEL.WAVE_BASED, + n_fft=cfg.MODEL.N_FFT, + noise_db=cfg.MODEL.NOISE_DB, + max_db=cfg.MODEL.MAX_DB, with_ecog = cfg.MODEL.ECOG, hidden_dim=cfg.MODEL.TRANSFORMER.HIDDEN_DIM, dim_feedforward=cfg.MODEL.TRANSFORMER.DIM_FEEDFORWARD, @@ -69,6 +87,10 @@ def train(cfg, logger, local_rank, world_size, distributed): attentional_mask=cfg.MODEL.TRANSFORMER.ATTENTIONAL_MASK, n_heads = cfg.MODEL.TRANSFORMER.N_HEADS, non_local = cfg.MODEL.TRANSFORMER.NON_LOCAL, + do_mel_guide = cfg.MODEL.DO_MEL_GUIDE, + noise_from_data = cfg.MODEL.BGNOISE_FROMDATA, + specsup=cfg.FINETUNE.SPECSUP, + power_synth = cfg.MODEL.POWER_SYNTH, ) model_s.cuda(local_rank) model_s.eval() @@ -86,11 +108,15 @@ def train(cfg, logger, local_rank, world_size, distributed): encoder = model.module.encoder if hasattr(model.module,'ecog_encoder'): ecog_encoder = model.module.ecog_encoder + if hasattr(model.module,'decoder_mel'): + decoder_mel = model.module.decoder_mel else: decoder = model.decoder encoder = model.encoder if hasattr(model,'ecog_encoder'): ecog_encoder = model.ecog_encoder + if hasattr(model,'decoder_mel'): + decoder_mel = model.decoder_mel count_param_override.print = lambda a: logger.info(a) @@ -115,10 +141,17 @@ def train(cfg, logger, local_rank, world_size, distributed): ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) else: - optimizer = LREQAdam([ - {'params': encoder.parameters()}, - {'params': decoder.parameters()} - ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) + if cfg.MODEL.DO_MEL_GUIDE: + optimizer = LREQAdam([ + {'params': encoder.parameters()}, + {'params': decoder.parameters()}, + {'params': decoder_mel.parameters()}, + ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) + else: + optimizer = LREQAdam([ + {'params': encoder.parameters()}, + {'params': decoder.parameters()} + ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), 
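+            # The two optimizer branches differ only in whether decoder_mel joins
+            # the parameter groups; an equivalent construction (a sketch using the
+            # same LREQAdam API as above) would be:
+            #   param_groups = [{'params': encoder.parameters()},
+            #                   {'params': decoder.parameters()}]
+            #   if cfg.MODEL.DO_MEL_GUIDE:
+            #       param_groups.append({'params': decoder_mel.parameters()})
+            #   optimizer = LREQAdam(param_groups, lr=cfg.TRAIN.BASE_LEARNING_RATE,
+            #                        betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0)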
weight_decay=0) scheduler = ComboMultiStepLR(optimizers= {'optimizer': optimizer}, @@ -131,28 +164,38 @@ def train(cfg, logger, local_rank, world_size, distributed): } if hasattr(model,'ecog_encoder'): model_dict['ecog_encoder'] = ecog_encoder + if hasattr(model,'decoder_mel'): + model_dict['decoder_mel'] = decoder_mel if local_rank == 0: model_dict['encoder_s'] = model_s.encoder model_dict['generator_s'] = model_s.decoder if hasattr(model_s,'ecog_encoder'): model_dict['ecog_encoder_s'] = model_s.ecog_encoder + if hasattr(model_s,'decoder_mel'): + model_dict['decoder_mel_s'] = model_s.decoder_mel tracker = LossTracker(cfg.OUTPUT_DIR) + tracker_test = LossTracker(cfg.OUTPUT_DIR,test=True) auxiliary = { 'optimizer': optimizer, 'scheduler': scheduler, - 'tracker': tracker + 'tracker': tracker, + 'tracker_test':tracker_test, } - checkpointer = Checkpointer(cfg, + checkpointer = Checkpointer(cfg, model_dict, auxiliary, logger=logger, save=local_rank == 0) - # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=True,file_name='./training_artifacts/ecog_residual_cycle/model_tmp_lod4.pth') - extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/formantsythv2wide_NY742_constraintonFB_Bconstrainrefined_absfreq_3formants/model_epoch1.pth') + extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/ecog_finetune_3ecogformants_han5_specsup_guidance_hamonicformantsemph/model_epoch23.pth') + # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/debug_f1f2linearmel/model_epoch27.pth') + # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/debug_fitf1f2freqonly/model_epoch28.pth') + # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/debug_fitf1f2freqonly/model_epoch6.pth') + # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/loudnesscomp_han5/model_epoch50.pth') + # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/test_9/model_epoch30.pth') logger.info("Starting from epoch: %d" % (scheduler.start_epoch())) arguments.update(extra_checkpoint_data) @@ -163,7 +206,11 @@ def train(cfg, logger, local_rank, world_size, distributed): # data_param, train_param, test_param = param['Data'], param['Train'], param['Test'] dataset = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS,param=param) dataset_test = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS,train=False,param=param) - + # noise_dist = dataset.noise_dist + noise_dist = torch.from_numpy(dataset.noise_dist).to('cuda').float() + if cfg.MODEL.BGNOISE_FROMDATA: + model_s.noise_dist_init(noise_dist) + model.noise_dist_init(noise_dist) rnd = np.random.RandomState(3456) # latents = rnd.randn(len(dataset_test.dataset), cfg.MODEL.LATENT_SPACE_SIZE) # samplez = torch.tensor(latents).float().cuda() @@ -187,7 +234,15 @@ def train(cfg, logger, local_rank, world_size, 
distributed): dataset_test.reset(cfg.DATASET.MAX_RESOLUTION_LEVEL, len(dataset_test.dataset)) sample_dict_test = next(iter(dataset_test.iterator)) # sample_dict_test = concate_batch(sample_dict_test) - sample_spec_test = sample_dict_test['spkr_re_batch_all'].to('cuda').float() + sample_wave_test = sample_dict_test['wave_re_batch_all'].to('cuda').float() + if cfg.MODEL.WAVE_BASED: + sample_spec_test = sample_dict_test['wave_spec_re_batch_all'].to('cuda').float() + sample_spec_amp_test = sample_dict_test['wave_spec_re_amp_batch_all'].to('cuda').float() + sample_spec_denoise_test = sample_dict_test['wave_spec_re_denoise_batch_all'].to('cuda').float() + # sample_spec_test = wave2spec(sample_wave_test,n_fft=cfg.MODEL.N_FFT,noise_db=cfg.MODEL.NOISE_DB,max_db=cfg.MODEL.MAX_DB) + else: + sample_spec_test = sample_dict_test['spkr_re_batch_all'].to('cuda').float() + sample_spec_denoise_test = None#sample_dict_test['wave_spec_re_denoise_batch_all'].to('cuda').float() sample_label_test = sample_dict_test['label_batch_all'] if cfg.MODEL.ECOG: ecog_test = [sample_dict_test['ecog_re_batch_all'][i].to('cuda').float() for i in range(len(sample_dict_test['ecog_re_batch_all']))] @@ -197,9 +252,16 @@ def train(cfg, logger, local_rank, world_size, distributed): ecog_test = None mask_prior_test = None mni_coordinate_test = None + sample_spec_mel_test = sample_dict_test['spkr_re_batch_all'].to('cuda').float() if cfg.MODEL.DO_MEL_GUIDE else None + on_stage_test = sample_dict_test['on_stage_re_batch_all'].to('cuda').float() + on_stage_wider_test = sample_dict_test['on_stage_wider_re_batch_all'].to('cuda').float() # sample = next(make_dataloader(cfg, logger, dataset, 32, local_rank)) # sample = (sample / 127.5 - 1.) - + # import pdb; pdb.set_trace() + duomask=True + # model.eval() + # Lrec = model(sample_spec_test, x_denoise = sample_spec_denoise_test,x_mel = sample_spec_mel_test,ecog=ecog_test if cfg.MODEL.ECOG else None, mask_prior=mask_prior_test if cfg.MODEL.ECOG else None, on_stage = on_stage_test,on_stage_wider = on_stage_wider_test, ae = not cfg.MODEL.ECOG, tracker = tracker_test, encoder_guide=cfg.MODEL.W_SUP,pitch_aug=False,duomask=duomask,mni=mni_coordinate_test,debug = False,x_amp=sample_spec_amp_test,hamonic_bias = False) + # save_sample(sample_spec_test,ecog_test,mask_prior_test,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=sample_spec_denoise_test,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=0,label=sample_label_test,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker_test,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=True) for epoch in range(cfg.TRAIN.TRAIN_EPOCHS): model.train() @@ -207,14 +269,23 @@ def train(cfg, logger, local_rank, world_size, distributed): model.train() need_permute = False epoch_start_time = time.time() - i = 0 for sample_dict_train in tqdm(iter(dataset.iterator)): + # import pdb; pdb.set_trace() # sample_dict_train = concate_batch(sample_dict_train) i += 1 - x_orig = sample_dict_train['spkr_re_batch_all'].to('cuda').float() + wave_orig = sample_dict_train['wave_re_batch_all'].to('cuda').float() + if cfg.MODEL.WAVE_BASED: + # x_orig = wave2spec(wave_orig,n_fft=cfg.MODEL.N_FFT,noise_db=cfg.MODEL.NOISE_DB,max_db=cfg.MODEL.MAX_DB) + x_orig = sample_dict_train['wave_spec_re_batch_all'].to('cuda').float() + x_orig_amp = sample_dict_train['wave_spec_re_amp_batch_all'].to('cuda').float() + x_orig_denoise = sample_dict_train['wave_spec_re_denoise_batch_all'].to('cuda').float() + else: + 
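+                # When WAVE_BASED is off, the mel spectrogram ('spkr_re_batch_all')
+                # is read directly below; when it is on, the linear STFT inputs above
+                # could equivalently be recomputed on the fly from the raw wave
+                # (a sketch, assuming the wave2spec helper keeps the signature used
+                # in the commented-out line above):
+                #   x_orig = wave2spec(wave_orig, n_fft=cfg.MODEL.N_FFT,
+                #                      noise_db=cfg.MODEL.NOISE_DB, max_db=cfg.MODEL.MAX_DB)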
x_orig = sample_dict_train['spkr_re_batch_all'].to('cuda').float() + x_orig_denoise = None#sample_dict_train['wave_spec_re_denoise_batch_all'].to('cuda').float() + on_stage = sample_dict_train['on_stage_re_batch_all'].to('cuda').float() - # import pdb;pdb.set_trace() + on_stage_wider = sample_dict_train['on_stage_wider_re_batch_all'].to('cuda').float() words = sample_dict_train['word_batch_all'].to('cuda').long() words = words.view(words.shape[0]*words.shape[1]) labels = sample_dict_train['label_batch_all'] @@ -227,6 +298,7 @@ def train(cfg, logger, local_rank, world_size, distributed): mask_prior = None mni_coordinate = None x = x_orig + x_mel = sample_dict_train['spkr_re_batch_all'].to('cuda').float() if cfg.MODEL.DO_MEL_GUIDE else None # x.requires_grad = True # apply_cycle = cfg.MODEL.CYCLE and True # apply_w_classifier = cfg.MODEL.W_CLASSIFIER and True @@ -236,15 +308,14 @@ def train(cfg, logger, local_rank, world_size, distributed): # apply_encoder_guide = (cfg.FINETUNE.ENCODER_GUIDE or cfg.MODEL.W_SUP) and True # apply_sup = cfg.FINETUNE.SPECSUP - if (cfg.MODEL.ECOG): optimizer.zero_grad() - Lrec = model(x, ecog=ecog, mask_prior=mask_prior, on_stage = on_stage, ae = False, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP,mni=mni_coordinate) + Lrec = model(x, x_denoise = x_orig_denoise,x_mel = x_mel,ecog=ecog, mask_prior=mask_prior, on_stage = on_stage,on_stage_wider = on_stage_wider, ae = False, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP,duomask=duomask,mni=mni_coordinate,x_amp=x_orig_amp) (Lrec).backward() optimizer.step() else: optimizer.zero_grad() - Lrec = model(x, ecog=None, mask_prior=None, on_stage = None, ae = True, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP,mni=mni_coordinate) + Lrec = model(x, x_denoise = x_orig_denoise,x_mel = x_mel,ecog=None, mask_prior=None, on_stage = on_stage,on_stage_wider = on_stage_wider, ae = True, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP,pitch_aug=False,duomask=duomask,mni=mni_coordinate,debug = False,x_amp=x_orig_amp,hamonic_bias = False)#epoch<2) (Lrec).backward() optimizer.step() @@ -256,10 +327,12 @@ def train(cfg, logger, local_rank, world_size, distributed): if local_rank == 0: - print(3*torch.sigmoid(model.encoder.formant_bandwitdh_ratio)) + print(2**(torch.tanh(model.encoder.formant_bandwitdh_slop))) checkpointer.save("model_epoch%d" % epoch) - save_sample(sample_spec_test,ecog_test,mask_prior_test,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,epoch=epoch,label=sample_label_test,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker) - save_sample(x,ecog,mask_prior,mni_coordinate,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,epoch=epoch,label=labels,mode='train',path=cfg.OUTPUT_DIR,tracker = tracker) + model.eval() + Lrec = model(sample_spec_test, x_denoise = sample_spec_denoise_test,x_mel = sample_spec_mel_test,ecog=ecog_test if cfg.MODEL.ECOG else None, mask_prior=mask_prior_test if cfg.MODEL.ECOG else None, on_stage = on_stage_test,on_stage_wider = on_stage_wider_test, ae = not cfg.MODEL.ECOG, tracker = tracker_test, encoder_guide=cfg.MODEL.W_SUP,pitch_aug=False,duomask=duomask,mni=mni_coordinate_test,debug = False,x_amp=sample_spec_amp_test,hamonic_bias = False) + save_sample(sample_spec_test,ecog_test,mask_prior_test,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=sample_spec_denoise_test,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else 
None,epoch=epoch,label=sample_label_test,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker_test,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=duomask,x_amp=sample_spec_amp_test)
+            save_sample(x,ecog,mask_prior,mni_coordinate,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=x_orig_denoise,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=epoch,label=labels,mode='train',path=cfg.OUTPUT_DIR,tracker = tracker,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=duomask,x_amp=x_orig_amp)


 if __name__ == "__main__":

From c95c4e28779ca884b8abcc91337b13c56efbc826 Mon Sep 17 00:00:00 2001
From: james20141606 <240682348@qq.com>
Date: Mon, 2 Nov 2020 19:17:28 -0500
Subject: [PATCH 14/14] update

---
 .DS_Store                              | Bin 0 -> 10244 bytes
 ECoGDataSet.py                         |   4 +-
 configs/ecog_style2.yaml               |  21 +-
 configs/ecog_style2_a.yaml             | 121 ++++++
 defaults.py                            |   5 +
 formant_systh.py                       |  22 +-
 launcher.py                            |   8 +-
 model_formant.py                       | 320 +++++++++++++---
 net_formant.py                         | 510 +++++++++++++++++++++++--
 train_formant.py => train_formant_a.py |  81 +++-
 train_formant_e.py                     | 427 +++++++++++++++++++++
 train_param.json                       |   2 +-
 transformer_models/util/__init__.py    |   1 +
 transformer_models/util/box_ops.py     |  88 +++++
 transformer_models/util/misc.py        | 416 ++++++++++++++++++++
 transformer_models/util/plot_utils.py  |  65 ++++
 16 files changed, 1986 insertions(+), 105 deletions(-)
 create mode 100644 .DS_Store
 create mode 100644 configs/ecog_style2_a.yaml
 rename train_formant.py => train_formant_a.py (75%)
 create mode 100644 train_formant_e.py
 create mode 100644 transformer_models/util/__init__.py
 create mode 100644 transformer_models/util/box_ops.py
 create mode 100644 transformer_models/util/misc.py
 create mode 100644 transformer_models/util/plot_utils.py

diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..accf93c51df28ee0229b40745daa9a8b40b8d741
GIT binary patch
literal 10244
zcmeHMTWl3Y82*ltC5(@ZtE>^?Q&Z%cab)=Y~--`8k%%DxE#2Oi?sNo2MP^9t=jiBP+^>
EmrO@8||~A!A#um2?B4jZ)Fa+&S+X+*c$ke<@!#J2fMaIE=PJ@-wsAwg1m2s{5svj
peU-MruD^RV-3mrTC;vb{dlc8)>OZ_)@<0gd3;<|s#euEcN`l!b$aZ~_&v{y%rO2s
Kt)leFHBF<3rAWr9!s<2*+QCM6w*|+xkktCPA#p^AEBr38jjq+4-YuGkYrk~2r1)^
zY~b(9In3Uh4ZQr2AEx!ntmS3%o)tMeJg?h5?Sy@ib24JRLt&7MTDfDNOR-Fs*7bbo
z+~rV!JL0raSg50Vu6$8q#p-p*hArFMF3-EELf5nf3wuZ0(9L+xK`YEUzU}%0`$t_n
8azaR`H_3vb#kg&iI4R7){t}g=BmZIvBX$fh!Vb6Vimnw>C0!_6L~i}CGKIE%T+~x
V-Y40DyD9A^5~oI|ZpYevml3b1-YN+f>Fy4zG$4}0#WSrwt$#!ZT<
+xtB$<9M;Ev58cjy(0q6SlN7sLQh}Ymkox7D4$TMjh0qL(J%J1d+)KrsB75q>GnK3
(`Z+egUqtSwjEokPOTBM&Jn0iCB;ak%3?qr)sbCr1RNNKQ}7r(4o|>~@EV+lH{l)l
04~BM_y|6Mui!iQ0e*s?;TQN7en$mW)UX^4T!GcN5|h}3TW~8jV+(fT9^8vv*n>y0
AJb^z01o2`%wdFQ@Bw@fAHqlRX?zJ^##iuFd>!AyxA6kLi|^sb_$huZ9@Qra%imiGSuI&APEfTbkHU>~c
j81kizX>6E2sO-w>G^g%4cNXS-;N<&yb;6gsn~BqNFGjcJe;K1&Hu-L2FUflT>oEd
KEBSx2{{I7?y%bLX

literal 0
HcmV?d00001

diff --git a/ECoGDataSet.py b/ECoGDataSet.py
index 5ae44d41..508b9c10 100644
--- a/ECoGDataSet.py
+++ b/ECoGDataSet.py
@@ -141,7 +141,7 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor
         self.BlockRegion = []
         [self.BlockRegion.extend(self.cortex[area]) for area in train_param["BlockRegion"]]
         self.wavebased = cfg.MODEL.WAVE_BASED
-        self.ReshapeAsGrid = False if 'Transformer' in cfg.MODEL.MAPPING_FROM_ECOG else True
+        self.ReshapeAsGrid = False if ('lstm' in cfg.MODEL.MAPPING_FROM_ECOG) or ('Transformer' in cfg.MODEL.MAPPING_FROM_ECOG) else True
         self.Prod,self.UseGridOnly,self.SeqLen = train_param['Prod'],\
                                                     train_param['UseGridOnly'],\
train_param['SeqLen'], @@ -339,7 +339,7 @@ def __init__(self, ReqSubjDict, mode = 'train', train_param = None,BCTS=None,wor statics_ecog = baseline.mean(axis=0,keepdims=True)+1E-10, np.sqrt(baseline.var(axis=0, keepdims=True))+1E-10 ecog = (ecog - statics_ecog[0])/statics_ecog[1] - ecog = np.minimum(ecog,5) + ecog = np.minimum(ecog,10)#5) ecog_len_+= [ecog.shape[0]] ecog_+=[ecog] diff --git a/configs/ecog_style2.yaml b/configs/ecog_style2.yaml index 4080f348..8c085b96 100644 --- a/configs/ecog_style2.yaml +++ b/configs/ecog_style2.yaml @@ -23,7 +23,7 @@ MODEL: #####TAKE OFF CHECKLIST!!!######## N_FORMANTS: 6 N_FORMANTS_NOISE: 1 - N_FORMANTS_ECOG: 2 + N_FORMANTS_ECOG: 6 WAVE_BASED : True DO_MEL_GUIDE : False BGNOISE_FROMDATA: True @@ -44,7 +44,15 @@ MODEL: TRUNCATIOM_CUTOFF: 5 CHANNELS: 1 UNIQ_WORDS: 50 - MAPPING_FROM_ECOG: "ECoGMappingBottleneck" + #MAPPING_FROM_ECOG: "ECoGMappingBottleneck" #ECoGMappingBottlenecklstm1, ECoGMappingBottlenecklstm2 + #MAPPING_FROM_ECOG: "ECoGMappingBottlenecklstm1" + #MAPPING_FROM_ECOG: "ECoGMappingBottlenecklstm" + MAPPING_FROM_ECOG: "ECoGMappingBottlenecklstm_pure" + ONEDCONFIRST: True + RNN_TYPE: 'LSTM' + RNN_LAYERS: 4 + RNN_COMPUTE_DB_LOUDNESS: True + BIDIRECTION: True # MAPPING_FROM_ECOG: "ECoGMappingTransformer" ECOG: False #will be overloaded if FINETUNE SUPLOSS_ON_ECOGF: False # will be overloaded to FIX_GEN if FINETUNE,spec supervise loss only apply to ecog encoder @@ -78,7 +86,11 @@ MODEL: N_HEADS : 4 NON_LOCAL: True # ATTENTION: [] -OUTPUT_DIR: training_artifacts/debug_ +#OUTPUT_DIR: output/ecog_10241800_lstm1 #training_artifacts/debug +#OUTPUT_DIR: output/ecog_10241800_lstm2 +#OUTPUT_DIR: output/ecog_11011800_conv #after change loudness encoder +#OUTPUT_DIR: output/ecog_11011800_lstm1 #after change loudness encoder +OUTPUT_DIR: output/ecog_11021800_lstm1 #after change loudness encoder # OUTPUT_DIR: training_artifacts/loudnesscomp_han5_ampamploss # OUTPUT_DIR: training_artifacts/loudnesscomp_han5_ampsynth_masknormed # OUTPUT_DIR: training_artifacts/debug_f1f2linearmel @@ -88,9 +100,8 @@ OUTPUT_DIR: training_artifacts/debug_ # OUTPUT_DIR: training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_noprogressive_HBw_ppl_ppld_localreg_ecogf_w_spec_sup # OUTPUT_DIR: training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_ppl_ppld # OUTPUT_DIR: training_artifacts/ecog_residual_cycle_attention3264wStyleIN_specchan64_more_attentfeatures_heads4 - FINETUNE: - FINETUNE: False + FINETUNE: True FIX_GEN: True ENCODER_GUIDE: True SPECSUP: True diff --git a/configs/ecog_style2_a.yaml b/configs/ecog_style2_a.yaml new file mode 100644 index 00000000..d16a3a5f --- /dev/null +++ b/configs/ecog_style2_a.yaml @@ -0,0 +1,121 @@ + # Config for training ALAE on FFHQ at resolution 1024x1024 + +NAME: ecog +DATASET: + PART_COUNT: 16 + SIZE: 60000 + FFHQ_SOURCE: /data/datasets/ffhq-dataset/tfrecords/ffhq/ffhq-r%02d.tfrecords + PATH: /data/datasets/ffhq-dataset_new/tfrecords/ffhq/splitted/ffhq-r%02d.tfrecords.%03d + + FLIP_IMAGES: False + + PART_COUNT_TEST: 4 + PATH_TEST: /data/datasets/ffhq-dataset_new/tfrecords/ffhq-test/splitted/ffhq-r%02d.tfrecords.%03d + + SAMPLES_PATH: '' + STYLE_MIX_PATH: style_mixing/test_images/set_ecog + SPEC_CHANS: 64 + TEMPORAL_SAMPLES: 128 + BCTS: True + MAX_RESOLUTION_LEVEL: 7 + SUBJECT: ['NY742'] +MODEL: + #####TAKE OFF CHECKLIST!!!######## + N_FORMANTS: 6 + N_FORMANTS_NOISE: 1 + N_FORMANTS_ECOG: 2 + WAVE_BASED : True + DO_MEL_GUIDE : False + BGNOISE_FROMDATA: True + N_FFT : 256 + NOISE_DB : -50 #-50 + MAX_DB : 22.5 
#probablity 28 is better + NOISE_DB_AMP : -25 + MAX_DB_AMP : 14 + POWER_SYNTH: True + + LESS_TEMPORAL_FEATURE: True + LATENT_SPACE_SIZE: 128 + LAYER_COUNT: 6 + MAX_CHANNEL_COUNT: 512 + START_CHANNEL_COUNT: 16 + DLATENT_AVG_BETA: 0.995 + MAPPING_LAYERS: 8 + TRUNCATIOM_CUTOFF: 5 + CHANNELS: 1 + UNIQ_WORDS: 50 + MAPPING_FROM_ECOG: "ECoGMappingBottleneck" + # MAPPING_FROM_ECOG: "ECoGMappingTransformer" + ECOG: False #will be overloaded if FINETUNE + SUPLOSS_ON_ECOGF: False # will be overloaded to FIX_GEN if FINETUNE,spec supervise loss only apply to ecog encoder + W_SUP: False + GAN: True + GENERATOR: "GeneratorFormant" + ENCODER: "EncoderFormant" + AVERAGE_W: True + TEMPORAL_W: True + GLOBAL_W: True + TEMPORAL_GLOBAL_CAT: True + RESIDUAL: True + W_CLASSIFIER: False + CYCLE: False + ATTENTIONAL_STYLE: True + #T 4 8 16 32 64 128 + ATTENTION: [False, False, False, False, False, False] + HEADS: 1 + APPLY_PPL: False + APPLY_PPL_D: False + PPL_WEIGHT: 100 + PPL_GLOBAL_WEIGHT: 0 + PPLD_WEIGHT: 1 + PPLD_GLOBAL_WEIGHT: 0 + COMMON_Z: True + TRANSFORMER: + HIDDEN_DIM : 256 + DIM_FEEDFORWARD : 256 + ENCODER_ONLY : False + ATTENTIONAL_MASK : False + N_HEADS : 4 + NON_LOCAL: True + # ATTENTION: [] +OUTPUT_DIR: output/audio_11021700 #training_artifacts/debug_ +# OUTPUT_DIR: training_artifacts/loudnesscomp_han5_ampamploss +# OUTPUT_DIR: training_artifacts/loudnesscomp_han5_ampsynth_masknormed +# OUTPUT_DIR: training_artifacts/debug_f1f2linearmel +# OUTPUT_DIR: training_artifacts/ecog_finetune_3ecogformants_han5_specsup_guidance_hamonicformantsemph +# OUTPUT_DIR: training_artifacts/ecog_finetune_3ecogformants_han5_specsup_guidance_hamonicnoiseformantsemphmore +# OUTPUT_DIR: training_artifacts/formantsythv2_wavebased_NY742_constraintonFB_Bconstrainrefined_absfreq_4formants_1noiseformants_bgnoise_noisemapping_freqconv_duomask +# OUTPUT_DIR: training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_noprogressive_HBw_ppl_ppld_localreg_ecogf_w_spec_sup +# OUTPUT_DIR: training_artifacts/ecog_residual_latent128_temporal_lesstemporalfeature_ppl_ppld +# OUTPUT_DIR: training_artifacts/ecog_residual_cycle_attention3264wStyleIN_specchan64_more_attentfeatures_heads4 + +FINETUNE: + FINETUNE: False + FIX_GEN: True + ENCODER_GUIDE: True + SPECSUP: True +##################################### + +TRAIN: + PROGRESSIVE: False + W_WEIGHT: 1 + CYCLE_WEIGHT: 1 + BASE_LEARNING_RATE: 0.002 + EPOCHS_PER_LOD: 16 + LEARNING_DECAY_RATE: 0.1 + LEARNING_DECAY_STEPS: [96] + TRAIN_EPOCHS: 60 + # 4 8 16 32 64 128 256 + LOD_2_BATCH_8GPU: [512, 256, 128, 64, 32, 32] # If GPU memory ~16GB reduce last number from 32 to 24 + LOD_2_BATCH_4GPU: [64, 64, 64, 64, 32, 16] + LOD_2_BATCH_2GPU: [64, 64, 64, 64, 32, 16] + # LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 16] + # LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 32] + # LOD_2_BATCH_1GPU: [512, 256, 128, 64, 32, 16] + # LOD_2_BATCH_1GPU: [128, 128, 128, 128, 64, 32] + # LOD_2_BATCH_1GPU: [512, 256, 256, 128, 64, 16] + LOD_2_BATCH_1GPU: [64, 64, 64, 64, 32, 16] + BATCH_SIZE : 32 + # BATCH_SIZE : 2 + LEARNING_RATES: [0.0015, 0.0015, 0.0015, 0.002, 0.003, 0.003] + # LEARNING_RATES: [0.0015, 0.0015, 0.0005, 0.0003, 0.0003, 0.0002] diff --git a/defaults.py b/defaults.py index d0127df8..612f850f 100644 --- a/defaults.py +++ b/defaults.py @@ -72,6 +72,11 @@ _C.MODEL.MAPPING_TO_LATENT = "MappingToLatent" _C.MODEL.MAPPING_FROM_LATENT = "MappingFromLatent" _C.MODEL.MAPPING_FROM_ECOG = "ECoGMappingDefault" +_C.MODEL.ONEDCONFIRST = True +_C.MODEL.RNN_TYPE = 'LSTM' +_C.MODEL.RNN_LAYERS = 4 
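+# These RNN flags parameterize the ECoGMappingBottlenecklstm* mappings selected
+# in configs/ecog_style2.yaml. A sketch of how such flags are typically
+# consumed (the real modules live in net_formant.py and are not shown in this
+# hunk; the 64-channel input width and 128 hidden units are illustrative only):
+#   rnn_cls = nn.LSTM if cfg.MODEL.RNN_TYPE == 'LSTM' else nn.GRU
+#   rnn = rnn_cls(input_size=64, hidden_size=128,
+#                 num_layers=cfg.MODEL.RNN_LAYERS,
+#                 bidirectional=cfg.MODEL.BIDIRECTION, batch_first=True)
+#   # bidirectional doubles the feature width seen by downstream heads,
+#   # e.g. a 1x1-conv dB-loudness head when RNN_COMPUTE_DB_LOUDNESS is set:
+#   out_dim = 128 * (2 if cfg.MODEL.BIDIRECTION else 1)
+#   loudness_head = ln.Conv1d(out_dim, 1, 1)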
+_C.MODEL.RNN_COMPUTE_DB_LOUDNESS = True +_C.MODEL.BIDIRECTION = True _C.MODEL.Z_REGRESSION = False _C.MODEL.AVERAGE_W = False _C.MODEL.TEMPORAL_W = False diff --git a/formant_systh.py b/formant_systh.py index 80c0d3cd..9fc21e21 100755 --- a/formant_systh.py +++ b/formant_systh.py @@ -198,7 +198,9 @@ def subfigure_plot(ax,spec,components,n_mels,which_formant='hamon',formant_line= if ecog is not None: comp_ecog = ecog['amplitude_formants_hamon'] if title=='amplitude_hamon' else ecog['amplitude_formants_noise'] plt.sca(ax) - plt.yticks(range(0,200,20),(np.arange(0,1,20)).astype(str)) + ax.set_yticks(np.arange(0,200,20)) + ax.set_yticklabels(np.arange(0,200,20)/200) + #plt.yticks(range(0,200,20),(np.arange(0,1,20)).astype(str)) ax.imshow(np.clip(1-spec.detach().cpu().numpy().squeeze().T,0,1),vmin=0.0,vmax=1.0) for i in range(comp.shape[1]): ax.plot(200*comp[:,i].squeeze().detach().cpu().numpy().T,linewidth=2,color=clrs[i]) @@ -372,10 +374,7 @@ def save_sample(sample,ecog,mask_prior,mni,encoder,decoder,ecog_encoder,epoch,la fig.savefig(f, bbox_inches='tight',dpi=80) plt.close(fig) - scipy.io.wavfile.write(f2+'denoisewave.wav',16000,torch.cat(rec_denoise_wave_all.unbind(),1)[0].detach().cpu().numpy()) - if ecog_encoder is not None: - scipy.io.wavfile.write(f2+'denoiseecogwave.wav',16000,torch.cat(rec_denoise_ecog_wave_all.unbind(),1)[0].detach().cpu().numpy()) - + if linear: rec_all = amplitude(torch.cat((2*rec_all[:,0]-1).unbind(),0).transpose(-2,-1).detach().cpu().numpy(),-50,22.5,trim_noise=False) rec_wave = spsi(rec_all,(n_fft-1)*2,128) @@ -393,9 +392,18 @@ def save_sample(sample,ecog,mask_prior,mni,encoder,decoder,ecog_encoder,epoch,la save_image(resultsample, f2, nrow=resultsample.shape[0]//(2 if ecog_encoder is None else 3)) # import pdb;pdb.set_trace() - + if ecog_encoder is not None: + scipy.io.wavfile.write(f2+'denoisewave.wav',16000,torch.cat(rec_denoise_wave_all.unbind(),1)[0].detach().cpu().numpy()) + scipy.io.wavfile.write(f2+'denoiseecogwave.wav',16000,torch.cat(rec_denoise_ecog_wave_all.unbind(),1)[0].detach().cpu().numpy()) + if mode =='test': + return torch.cat(rec_denoise_ecog_wave_all.unbind(),1)[0].detach().cpu().numpy() + else: + scipy.io.wavfile.write(f2+'denoisewave.wav',16000,torch.cat(rec_denoise_wave_all.unbind(),1)[0].detach().cpu().numpy()) + if mode =='test': + return torch.cat(rec_denoise_wave_all.unbind(),1)[0].detach().cpu().numpy() + + - return def main(): OUTPUT_DIR = 'training_artifacts/formantsysth_voicingandunvoicing_loudness_NY742' diff --git a/launcher.py b/launcher.py index e50b15b4..9563d996 100644 --- a/launcher.py +++ b/launcher.py @@ -111,7 +111,8 @@ def _run(rank, world_size, fn, defaults, write_log, no_cuda, args): cleanup() -def run(fn, defaults, description='', default_config='configs/experiment.yaml', world_size=1, write_log=False, no_cuda=False): +def run(fn, defaults, description='', default_config='configs/experiment.yaml', world_size=1, write_log=False, no_cuda=False,args=None): + ''' parser = argparse.ArgumentParser(description=description) parser.add_argument( "-c", "--config-file", @@ -126,13 +127,14 @@ def run(fn, defaults, description='', default_config='configs/experiment.yaml', default=None, nargs=argparse.REMAINDER, ) - + args = parser.parse_args() + ''' import multiprocessing cpu_count = multiprocessing.cpu_count() os.environ["OMP_NUM_THREADS"] = str(max(1, int(cpu_count / world_size))) del multiprocessing - args = parser.parse_args() + if world_size > 1: mp.spawn(_run, diff --git a/model_formant.py b/model_formant.py index 
40916677..188a1a20 100644 --- a/model_formant.py +++ b/model_formant.py @@ -177,8 +177,9 @@ def forward(self, rec, spec, tracker=None,reweight=1): class Model(nn.Module): def __init__(self, generator="", encoder="", ecog_encoder_name="", spec_chans = 128, n_formants=2, n_formants_noise=2, n_formants_ecog=2, n_fft=256, noise_db=-50, max_db=22.5, wavebased = False, - with_ecog = False, ghm_loss=True,power_synth=True, - hidden_dim=256,dim_feedforward=256,encoder_only=True,attentional_mask=False,n_heads=1,non_local=False,do_mel_guide = True,noise_from_data=False,specsup=True): + with_ecog = False, ghm_loss=True,power_synth=True,apply_flooding=True,ecog_compute_db_loudness=False, + hidden_dim=256,dim_feedforward=256,encoder_only=True,attentional_mask=False,n_heads=1,non_local=False,do_mel_guide = True,noise_from_data=False,specsup=True,\ + onedconfirst=True,rnn_type = 'LSTM',rnn_layers = 4,compute_db_loudness=True,bidirection = True): super(Model, self).__init__() self.spec_chans = spec_chans self.with_ecog = with_ecog @@ -191,6 +192,7 @@ def __init__(self, generator="", encoder="", ecog_encoder_name="", self.noise_db = noise_db self.spec_sup = specsup self.max_db = max_db + self.apply_flooding = apply_flooding self.n_formants_noise = n_formants_noise self.power_synth =power_synth self.decoder = GENERATORS[generator]( @@ -231,10 +233,12 @@ def __init__(self, generator="", encoder="", ecog_encoder_name="", n_mels = spec_chans,n_formants = n_formants_ecog, hidden_dim=hidden_dim,dim_feedforward=dim_feedforward,n_heads=n_heads, encoder_only=encoder_only,attentional_mask=attentional_mask,non_local=non_local, + compute_db_loudness = ecog_compute_db_loudness, ) else: self.ecog_encoder = ECOG_ENCODER[ecog_encoder_name]( n_mels = spec_chans,n_formants = n_formants_ecog, + compute_db_loudness = ecog_compute_db_loudness, ) self.ghm_loss = ghm_loss self.lae1 = LAE(noise_db=self.noise_db,max_db=self.max_db) @@ -258,30 +262,30 @@ def generate_fromecog(self, ecog = None, mask_prior = None, mni=None,return_comp else: return rec - def generate_fromspec(self, spec, return_components=False,x_denoise=None,duomask=False): - components = self.encoder(spec,x_denoise=x_denoise,duomask=duomask) + def generate_fromspec(self, spec, return_components=False,x_denoise=None,duomask=False):#,gender='Female'): + components = self.encoder(spec,x_denoise=x_denoise,duomask=duomask)#,gender=gender) rec = self.decoder.forward(components) if return_components: return rec, components else: return rec - def encode(self, spec,x_denoise=None,duomask=False,noise_level = None,x_amp=None): - components = self.encoder(spec,x_denoise=x_denoise,duomask=duomask,noise_level=noise_level,x_amp=x_amp) + def encode(self, spec,x_denoise=None,duomask=False,noise_level = None,x_amp=None):#,gender='Female'): + components = self.encoder(spec,x_denoise=x_denoise,duomask=duomask,noise_level=noise_level,x_amp=x_amp)#,gender=gender) return components def lae(self,spec,rec,db=True,amp=True,tracker=None,GHM=False): if amp: spec_amp = amplitude(spec,noise_db=self.noise_db,max_db=self.max_db) rec_amp = amplitude(rec,noise_db=self.noise_db,max_db=self.max_db) - if self.power_synth: - spec_amp_ = spec_amp**0.5 - rec_amp_ = rec_amp**0.5 - else: - spec_amp_ = spec_amp - rec_amp_ = rec_amp - # spec_amp_ = spec_amp - # rec_amp_ = rec_amp + # if self.power_synth: + # spec_amp_ = spec_amp**0.5 + # rec_amp_ = rec_amp**0.5 + # else: + # spec_amp_ = spec_amp + # rec_amp_ = rec_amp + spec_amp_ = spec_amp + rec_amp_ = rec_amp if GHM: Lae_a = 
self.ghm_loss(rec_amp_,spec_amp_,torch.ones(spec_amp_))#*150 Lae_a_l2 = torch.tensor([0.]) @@ -307,12 +311,18 @@ def lae(self,spec,rec,db=True,amp=True,tracker=None,GHM=False): tracker.update(dict(Lae_db=Lae_db,Lae_db_l2=Lae_db_l2)) # return (Lae_a + Lae_a_l2)/2. + (Lae_db+Lae_db_l2)/2. return Lae_a + Lae_db/2. + + def flooding(self, loss,beta): + if self.apply_flooding: + return (loss-beta).abs()+beta + else: + return loss - def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker, encoder_guide, x_mel=None,x_denoise=None, pitch_aug=False, duomask=False, mni=None,debug=False,x_amp=None,hamonic_bias=False): + def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker, encoder_guide, x_mel=None,x_denoise=None, pitch_aug=False, duomask=False, mni=None,debug=False,x_amp=None,hamonic_bias=False,x_amp_from_denoise=False,gender='Female'): if ae: self.encoder.requires_grad_(True) # rec = self.generate_fromspec(spec) - components = self.encoder(spec,x_denoise = x_denoise,duomask=duomask,noise_level = F.softplus(self.decoder.bgnoise_amp)*self.decoder.noise_dist.mean(),x_amp=x_amp) + components = self.encoder(spec,x_denoise = x_denoise,duomask=duomask,noise_level = F.softplus(self.decoder.bgnoise_amp)*self.decoder.noise_dist.mean(),x_amp=x_amp)#,gender=gender) rec = self.decoder.forward(components) freq_cord = torch.arange(self.spec_chans).reshape([1,1,1,self.spec_chans])/(1.0*self.spec_chans) @@ -347,15 +357,16 @@ def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker, # tracker.update(dict(Lae_noise=Lae_noise)) # Lae += Lae_noise - if self.wavebased: - if self.power_synth: - Lloudness = 10**6*(components['loudness']*(1-on_stage_wider)).mean() - else: - # Lloudness = 10**3*(components['loudness']*(1-on_stage_wider)).mean() - Lloudness = 10**6*(components['loudness']*(1-on_stage_wider)).mean() - # Lloudness = 10.**6*((components['loudness'])**2*(1-on_stage_wider)).mean() - tracker.update(dict(Lloudness=Lloudness)) - Lae += Lloudness + if x_amp_from_denoise: + if self.wavebased: + if self.power_synth: + Lloudness = 10**6*(components['loudness']*(1-on_stage_wider)).mean() + else: + # Lloudness = 10**3*(components['loudness']*(1-on_stage_wider)).mean() + Lloudness = 10**6*(components['loudness']*(1-on_stage_wider)).mean() + # Lloudness = 10.**6*((components['loudness'])**2*(1-on_stage_wider)).mean() + tracker.update(dict(Lloudness=Lloudness)) + Lae += Lloudness if self.wavebased and x_denoise is not None: thres = int(hz2ind(4000,self.n_fft)) if self.wavebased else mel_scale(self.spec_chans,4000,pt=False).astype(np.int32) @@ -431,7 +442,7 @@ def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker, components['f0_hz'] = (components['f0_hz']*pitch_shift).clamp(min=88,max=300) # components['f0'] = mel_scale(self.spec_chans,components['f0'])/self.spec_chans rec_shift = self.decoder.forward(components) - components_enc = self.encoder(rec_shift,duomask=duomask,x_denoise=x_denoise,noise_level = F.softplus(self.decoder.bgnoise_amp)*self.decoder.noise_dist.mean(),x_amp=x_amp) + components_enc = self.encoder(rec_shift,duomask=duomask,x_denoise=x_denoise,noise_level = F.softplus(self.decoder.bgnoise_amp)*self.decoder.noise_dist.mean(),x_amp=x_amp)#,gender=gender) Lf0 = torch.mean((components_enc['f0_hz']/200-components['f0_hz']/200)**2) rec_cycle = self.decoder.forward(components_enc) Lae += 
self.lae(rec_shift*freq_linear_reweighting,rec_cycle*freq_linear_reweighting,tracker=tracker)#torch.mean((rec_shift-rec_cycle).abs()*freq_linear_reweighting) @@ -467,12 +478,14 @@ def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker, # Ldiff = 0 Lfreqorder = torch.mean(F.relu(components['freq_formants_hamon_hz'][:,:-1]-components['freq_formants_hamon_hz'][:,1:])) #+ (torch.mean(F.relu(components['freq_formants_noise_hz'][:,:-1]-components['freq_formants_noise_hz'][:,1:])) if components['freq_formants_noise_hz'].shape[1]>1 else 0) - return Lae + Lf0 + Lfreqorder - else: + return Lae + Lf0 + Lfreqorder,tracker + else: #ecog to audio self.encoder.requires_grad_(False) rec,components_ecog = self.generate_fromecog(ecog,mask_prior,mni=mni,return_components=True) - ###### + ###### mel db flooding + betas = {'loudness':0.01,'freq_formants_hamon':0.0025,'f0_hz':0.,'amplitudes':0.,'amplitude_formants_hamon':0.,'amplitude_formants_noise':0.,'freq_formants_noise':0.05,'bandwidth_formants_noise_hz':0.01} + alpha = {'loudness':1.,'freq_formants_hamon':4.,'f0_hz':1.,'amplitudes':1.,'amplitude_formants_hamon':1.,'amplitude_formants_noise':1.,'freq_formants_noise':1.,'bandwidth_formants_noise_hz':1.} if self.spec_sup: if False:#self.ghm_loss: Lrec = 0.3*self.lae1(rec,spec,tracker=tracker) @@ -487,7 +500,7 @@ def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker, tracker.update(dict(Lrec=Lrec)) Lcomp = 0 if encoder_guide: - components_guide = self.encode(spec,x_denoise=x_denoise,duomask=duomask,noise_level = F.softplus(self.decoder.bgnoise_amp)*self.decoder.noise_dist.mean(),x_amp=x_amp) + components_guide = self.encode(spec,x_denoise=x_denoise,duomask=duomask,noise_level = F.softplus(self.decoder.bgnoise_amp)*self.decoder.noise_dist.mean(),x_amp=x_amp)#,gender=gender) consonant_weight = 1#100*(torch.sign(components_guide['amplitudes'][:,1:]-0.5)*0.5+0.5) if self.power_synth: loudness_db = torchaudio.transforms.AmplitudeToDB()(components_guide['loudness']) @@ -498,7 +511,7 @@ def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker, loudness_db_norm = (loudness_db.clamp(min=-70)+70)/50 # loudness_db = torchaudio.transforms.AmplitudeToDB()(components_guide['loudness']**2) #loudness_db_norm = (loudness_db.clamp(min=-70)+70)/50 - for key in ['loudness','f0_hz','amplitudes','amplitude_formants_hamon','freq_formants_hamon_hz','amplitude_formants_noise','freq_formants_noise_hz','bandwidth_formants_noise']: + for key in ['loudness','f0_hz','amplitudes','amplitude_formants_hamon','freq_formants_hamon','amplitude_formants_noise','freq_formants_noise','bandwidth_formants_noise_hz']: # if 'hz' in key: # continue if key == 'loudness': @@ -511,18 +524,27 @@ def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker, if False:#self.ghm_loss: diff = self.lae2(loudness_db_norm, loudness_db_norm_ecog) else: - diff = 3*torch.mean((loudness_db_norm - loudness_db_norm_ecog)**2)#+ torch.mean((components_guide[key] - components_ecog[key])**2 * on_stage * consonant_weight) + diff = alpha['loudness']*15*torch.mean((loudness_db_norm - loudness_db_norm_ecog)**2)#+ torch.mean((components_guide[key] - components_ecog[key])**2 * on_stage * consonant_weight) + diff = self.flooding(diff,alpha['loudness']*betas['loudness']) + tracker.update({'loudness_metric' : torch.mean((loudness_db_norm - loudness_db_norm_ecog)**2*on_stage_wider)}) + if key == 'f0_hz': # diff = torch.mean((components_guide[key]*6 - components_ecog[key]*6)**2 * 
on_stage_wider * components_guide['loudness']/4) - diff = 0.3*torch.mean((components_guide[key]/200*5 - components_ecog[key]/200*5)**2 * on_stage_wider * loudness_db_norm) + diff = alpha['f0_hz']*0.3*torch.mean((components_guide[key]/200*5 - components_ecog[key]/200*5)**2 * on_stage_wider * loudness_db_norm) + diff = self.flooding(diff,alpha['f0_hz']*betas['f0_hz']) + tracker.update({'f0_metric' : torch.mean((components_guide['f0_hz']/200*5 - components_ecog['f0_hz']/200*5)**2 * on_stage_wider * loudness_db_norm)}) + if key in ['amplitudes']: # if key in ['amplitudes','amplitudes_h']: weight = on_stage_wider * loudness_db_norm if self.ghm_loss: # diff = 100*self.lae3(components_guide[key], components_ecog[key],reweight=weight) - diff = 30*self.lae3(components_guide[key], components_ecog[key],reweight=weight) + diff = alpha['amplitudes']*30*self.lae3(components_guide[key], components_ecog[key],reweight=weight) else: - diff = 10*torch.mean((components_guide[key] - components_ecog[key])**2 *weight) + diff = alpha['amplitudes']*10*torch.mean((components_guide[key] - components_ecog[key])**2 *weight) + diff = self.flooding(diff,alpha['amplitudes']*betas['amplitudes']) + tracker.update({'amplitudes_metric' : torch.mean((components_guide['amplitudes'] - components_ecog['amplitudes'])**2 *weight)}) + if key in ['amplitude_formants_hamon']: weight = components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm if False:#self.ghm_loss: @@ -532,23 +554,40 @@ def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker, # diff = 100*torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * weight) # diff = 40*torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * weight)/2 \ # + 40*torch.mean((torchaudio.transforms.AmplitudeToDB()(components_guide[key][:,:self.n_formants_ecog])/100 - torchaudio.transforms.AmplitudeToDB()(components_ecog[key])/100)**2 * weight)/2 - diff = 40*torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * weight) + diff = alpha['amplitude_formants_hamon']*40*torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * weight) # diff = 10*torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * weight) # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * components_guide['amplitudes'][:,0:1] * on_stage_wider * components_guide['loudness']/4 * consonant_weight) # if key in ['freq_formants_hamon']: # diff = torch.mean((components_guide[key][:,:1]*10 - components_ecog[key][:,:1]*10)**2 * components_guide['amplitude_formants_hamon'][:,:1] * components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm ) # # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog]*10 - components_ecog[key]*10)**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * components_guide['loudness']/4 * consonant_weight) + # # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_ecog['amplitudes'][:,0:1] * on_stage_wider * consonant_weight) + diff = self.flooding(diff,alpha['amplitude_formants_hamon']*betas['amplitude_formants_hamon']) + tracker.update({'amplitude_formants_hamon_metric' : 
torch.mean((components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] - components_ecog['amplitude_formants_hamon'])**2 * weight)}) + + # if key in ['freq_formants_hamon_hz']: + # # weight = components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm + # weight = components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm + # if False:#self.ghm_loss: + # diff = 50*self.lae5(components_guide[key][:,:self.n_formants_ecog]/400 , components_ecog[key]/400, reweight=weight) + # # diff = 15*self.lae5(components_guide[key][:,:self.n_formants_ecog]/400 , components_ecog[key]/400, reweight=weight) + # else: + # # diff = 300*torch.mean((components_guide['freq_formants_hamon'][:,:2] - components_ecog['freq_formants_hamon'][:,:2])**2 * weight) + # # diff = 300*torch.mean((components_guide[key][:,:self.n_formants_ecog]/2000*5 - components_ecog[key][:,:self.n_formants_ecog]/2000*5)**2 * weight) + # diff = 100*torch.mean((components_guide[key][:,:self.n_formants_ecog]/2000*5 - components_ecog[key][:,:self.n_formants_ecog]/2000*5)**2 * weight) + # # diff = 30*torch.mean((components_guide[key][:,:self.n_formants_ecog]/2000*5 - components_ecog[key][:,:self.n_formants_ecog]/2000*5)**2 * weight) + # # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog]*10 - components_ecog[key]*10)**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * components_guide['loudness']/4 * consonant_weight) # # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_ecog['amplitudes'][:,0:1] * on_stage_wider * consonant_weight) - if key in ['freq_formants_hamon_hz']: - # weight = components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm - weight = components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm + if key in ['freq_formants_hamon']: + weight = components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm + # weight = components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm if False:#self.ghm_loss: diff = 50*self.lae5(components_guide[key][:,:self.n_formants_ecog]/400 , components_ecog[key]/400, reweight=weight) # diff = 15*self.lae5(components_guide[key][:,:self.n_formants_ecog]/400 , components_ecog[key]/400, reweight=weight) else: + diff = alpha['freq_formants_hamon']*300*torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key][:,:self.n_formants_ecog])**2 * weight) # diff = 300*torch.mean((components_guide[key][:,:self.n_formants_ecog]/2000*5 - components_ecog[key][:,:self.n_formants_ecog]/2000*5)**2 * weight) - diff = 100*torch.mean((components_guide[key][:,:self.n_formants_ecog]/2000*5 - components_ecog[key][:,:self.n_formants_ecog]/2000*5)**2 * weight) + # diff = 100*torch.mean((components_guide[key][:,:self.n_formants_ecog]/2000*5 - components_ecog[key][:,:self.n_formants_ecog]/2000*5)**2 * weight) # diff = 30*torch.mean((components_guide[key][:,:self.n_formants_ecog]/2000*5 - components_ecog[key][:,:self.n_formants_ecog]/2000*5)**2 * weight) # diff = 
torch.mean((components_guide[key][:,:self.n_formants_ecog]*10 - components_ecog[key]*10)**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * components_guide['loudness']/4 * consonant_weight)
+                    # #     diff = torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_ecog['amplitudes'][:,0:1] * on_stage_wider * consonant_weight)
@@ -556,7 +595,10 @@ def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker,
                         #     diff = torch.mean((components_guide[key][:,:self.n_formants_ecog]/4 - components_ecog[key]/4)**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight)
                         # #     diff = torch.mean((components_guide[key][:,:self.n_formants_ecog]/4 - components_ecog[key]/4)**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * components_guide['loudness']/4 * consonant_weight)
                         # #     diff = torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_ecog['amplitudes'][:,0:1] * on_stage_wider * consonant_weight)
-
+                        diff = self.flooding(diff,alpha['freq_formants_hamon']*betas['freq_formants_hamon'])
+                        tracker.update({'freq_formants_hamon_hz_metric_2' : torch.mean((components_guide['freq_formants_hamon_hz'][:,:2]/400 - components_ecog['freq_formants_hamon_hz'][:,:2]/400)**2 * weight)})
+                        tracker.update({'freq_formants_hamon_hz_metric_'+str(self.n_formants_ecog) : torch.mean((components_guide['freq_formants_hamon_hz'][:,:self.n_formants_ecog]/400 - components_ecog['freq_formants_hamon_hz'][:,:self.n_formants_ecog]/400)**2 * weight)})
+
                     if key in ['amplitude_formants_noise']:
                         weight = components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight * loudness_db_norm
                         if False:#self.ghm_loss:
@@ -564,23 +606,53 @@ def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker,
                         else:
                             # diff = 40*torch.mean((torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1) - components_ecog[key])**2 *weight)/2 \
                             #     + 40*torch.mean((torchaudio.transforms.AmplitudeToDB()(torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1))/100 - torchaudio.transforms.AmplitudeToDB()(components_ecog[key])/100)**2 * weight)/2
-                            diff = 40*torch.mean((torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1) - components_ecog[key])**2 *weight)
+                            diff = alpha['amplitude_formants_noise']*40*torch.mean((torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1) - components_ecog[key])**2 *weight)
+                        diff = self.flooding(diff,alpha['amplitude_formants_noise']*betas['amplitude_formants_noise'])
+                        tracker.update({'amplitude_formants_noise_metric': torch.mean((torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1) - components_ecog[key])**2 *weight)})
 
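+                    # The self.flooding(...) calls above implement the "flooding"
+                    # regularizer of Ishida et al. (2020): flooding(loss, b) = |loss - b| + b.
+                    # A component loss that falls below its floor b is reflected back
+                    # upward instead of being optimized toward zero. For example, with
+                    # b = 0.01:
+                    #   loss = 0.030  ->  |0.030 - 0.01| + 0.01 = 0.030  (unchanged)
+                    #   loss = 0.004  ->  |0.004 - 0.01| + 0.01 = 0.016  (pushed back up)
+                    # Each component's floor is alpha[key]*betas[key] from the dicts
+                    # defined at the top of this branch.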
@@ -564,23 +606,53 @@ def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker,
                         else:
                             # diff = 40*torch.mean((torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1) - components_ecog[key])**2 *weight)/2 \
                             #     + 40*torch.mean((torchaudio.transforms.AmplitudeToDB()(torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1))/100 - torchaudio.transforms.AmplitudeToDB()(components_ecog[key])/100)**2 * weight)/2
-                            diff = 40*torch.mean((torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1) - components_ecog[key])**2 *weight)
+                            diff = alpha['amplitude_formants_noise']*40*torch.mean((torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1) - components_ecog[key])**2 *weight)
+                        diff = self.flooding(diff,alpha['amplitude_formants_noise']*betas['amplitude_formants_noise'])
+                        tracker.update({'amplitude_formants_noise_metric': torch.mean((torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1) - components_ecog[key])**2 *weight)})
-                    if key in ['freq_formants_noise_hz']:
+                    if key in ['freq_formants_noise']:
                         weight = components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight* loudness_db_norm
                         if False:#self.ghm_loss:
                             diff = 10*self.lae7(components_guide[key][:,-self.n_formants_noise:]/400,components_ecog[key][:,-self.n_formants_noise:]/400,reweight=weight)
                         else:
+                            # diff = 30*torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
-                            diff = 3*torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
+                            diff = alpha['freq_formants_noise']*12000*torch.mean((components_guide[key][:,-self.n_formants_noise:] - components_ecog[key][:,-self.n_formants_noise:])**2 * weight)
                             # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * components_guide['amplitude_formants_noise'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight)
+                        diff = self.flooding(diff,alpha['freq_formants_noise']*betas['freq_formants_noise'])
+                        tracker.update({'freq_formants_noise_metric': torch.mean((components_guide['freq_formants_noise_hz'][:,-self.n_formants_noise:]/2000*5 - components_ecog['freq_formants_noise_hz'][:,-self.n_formants_noise:]/2000*5)**2 * weight)})
+
+                    # if key in ['freq_formants_noise_hz']:
+                    #     weight = components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight* loudness_db_norm
+                    #     if False:#self.ghm_loss:
+                    #         diff = 10*self.lae7(components_guide[key][:,-self.n_formants_noise:]/400,components_ecog[key][:,-self.n_formants_noise:]/400,reweight=weight)
+                    #     else:
+
+                    #         # diff = 30*torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
+                    #         diff = 3*torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
+                    #         # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * components_guide['amplitude_formants_noise'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight)
+                    #     diff = self.flooding(diff,betas['freq_formants_noise_hz'])
+                    #     tracker.update({'freq_formants_noise_hz_metric': torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)})
+
                     if key in ['bandwidth_formants_noise_hz']:
                         weight = components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight* loudness_db_norm
                         if False:#self.ghm_loss:
                             diff = 3*self.lae8(components_guide[key][:,-self.n_formants_noise:]/2000*5, components_ecog[key][:,-self.n_formants_noise:]/2000*5,reweight=weight)
                         else:
                             # diff = 30*torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
-                            diff = torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
+                            diff = alpha['bandwidth_formants_noise_hz']*3*torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
+                        diff = self.flooding(diff,alpha['bandwidth_formants_noise_hz']*betas['bandwidth_formants_noise_hz'])
+                        tracker.update({'bandwidth_formants_noise_hz_metric': torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)})
+
+                    # if key in ['bandwidth_formants_noise']:
+                    #     weight = components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight* loudness_db_norm
+                    #     if False:#self.ghm_loss:
+                    #         diff = 3*self.lae8(components_guide[key][:,-self.n_formants_noise:], components_ecog[key][:,-self.n_formants_noise:],reweight=weight)
+                    #     else:
+                    #         # diff = 30*torch.mean((components_guide[key][:,-self.n_formants_noise:] - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
+                    #         diff = 300*torch.mean((components_guide[key][:,-self.n_formants_noise:] - components_ecog[key][:,-self.n_formants_noise:])**2 * weight)
+                    #     diff = self.flooding(diff,betas['bandwidth_formants_noise'])
+                    #     tracker.update({'bandwidth_formants_noise_metric': torch.mean((components_guide['bandwidth_formants_noise_hz'][:,-self.n_formants_noise:]/2000*5 - components_ecog['bandwidth_formants_noise_hz'][:,-self.n_formants_noise:]/2000*5)**2 * weight)})
                     tracker.update({key : diff})
                     Lcomp += diff
                     # import pdb; pdb.set_trace()
@@ -602,8 +674,164 @@ def forward(self, spec, ecog, mask_prior, on_stage, on_stage_wider, ae, tracker,
 
         Lfreqorder = torch.mean(F.relu(components_ecog['freq_formants_hamon_hz'][:,:-1]-components_ecog['freq_formants_hamon_hz'][:,1:]))
         Loss += Lfreqorder
-        return Loss
+        return Loss,tracker
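Lfreqorder above is a hinge penalty on formant ordering: it is zero whenever the predicted center frequencies are sorted (F1 <= F2 <= ...) and grows linearly with each inversion. A standalone illustration, with an assumed (batch, n_formants, time) shape:

    import torch
    import torch.nn.functional as F

    freqs_hz = torch.tensor([[[500.], [1500.], [1200.]]])  # F3 < F2: one inversion
    penalty = torch.mean(F.relu(freqs_hz[:, :-1] - freqs_hz[:, 1:]))
    # relu of adjacent differences is [0, 300]; the mean over both pairs is 150
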
+        # '''
+        # loss_weights_dict = {'Lrec':1,'loudness':15,'f0_hz':0.3,'amplitudes':30,\
+        #     'amplitude_formants_hamon':40, 'freq_formants_hamon_hz':200,'amplitude_formants_noise':40,\
+        #     'freq_formants_noise_hz':3,'bandwidth_formants_noise_hz':1,'Ldiff':1/2000.,\
+        #     'Lexp':100, 'Lfreqorder':1 }
+        # self.encoder.requires_grad_(False)
+        # rec,components_ecog = self.generate_fromecog(ecog,mask_prior,mni=mni,return_components=True)
+
+        # ######
+        # if self.spec_sup:
+        #     if False:#self.ghm_loss:
+        #         Lrec = 0.3*self.lae1(rec,spec,tracker=tracker)
+        #     else:
+        #         Lrec = loss_weights_dict['Lrec']*self.lae(rec,spec,tracker=tracker)#torch.mean((rec - spec)**2)
+        #     # Lamp = 10*torch.mean(F.relu(-components_ecog['amplitude_formants_hamon'][:,0:min(3,self.n_formants_ecog-1)]+components_ecog['amplitude_formants_hamon'][:,1:min(4,self.n_formants_ecog)])*(components_ecog['amplitudes'][:,0:1]>components_ecog['amplitudes'][:,1:2]).float())
+        #     # tracker.update(dict(Lamp=Lamp))
+        #     # Lrec+=Lamp
+        # else:
+        #     Lrec = torch.tensor([0.0])#
+        #     # Lrec = torch.mean((rec - spec).abs())
+        # tracker.update(dict(Lrec=Lrec))
+        # Lcomp = 0
+        # if encoder_guide:
+        #     components_guide = self.encode(spec,x_denoise=x_denoise,duomask=duomask,noise_level = F.softplus(self.decoder.bgnoise_amp)*self.decoder.noise_dist.mean(),x_amp=x_amp)
+        #     consonant_weight = 1#100*(torch.sign(components_guide['amplitudes'][:,1:]-0.5)*0.5+0.5)
+        #     if self.power_synth:
+        #         loudness_db = torchaudio.transforms.AmplitudeToDB()(components_guide['loudness'])
+        #         loudness_db_norm = (loudness_db.clamp(min=-70)+70)/50
+        #     else:
+        #         loudness_db = torchaudio.transforms.AmplitudeToDB()(components_guide['loudness'])
+        #         # loudness_db_norm = (loudness_db.clamp(min=-35)+35)/25
+        #         loudness_db_norm = (loudness_db.clamp(min=-70)+70)/50
+        #     # loudness_db = torchaudio.transforms.AmplitudeToDB()(components_guide['loudness']**2)
+        #     #loudness_db_norm = (loudness_db.clamp(min=-70)+70)/50
+        #     for key in ['loudness','f0_hz','amplitudes','amplitude_formants_hamon','freq_formants_hamon_hz','amplitude_formants_noise','freq_formants_noise_hz','bandwidth_formants_noise']:
+        #         # if 'hz' in key:
+        #         #     continue
+        #         #'''
+        #         if key == 'loudness':
+        #             if self.power_synth:
+        #                 loudness_db_norm_ecog = (torchaudio.transforms.AmplitudeToDB()(components_ecog[key])+70)/50
+        #             else:
+        #                 loudness_db_norm_ecog = (torchaudio.transforms.AmplitudeToDB()(components_ecog[key])+70)/50
+        #             # loudness_db_norm_ecog = (torchaudio.transforms.AmplitudeToDB()(components_ecog[key])+35)/25
+        #             # loudness_db_norm_ecog = (torchaudio.transforms.AmplitudeToDB()(components_ecog[key]**2)+70)/50
+        #             if False:#self.ghm_loss:
+        #                 diff = self.lae2(loudness_db_norm, loudness_db_norm_ecog)
+        #             else:
+        #                 diff = loss_weights_dict[key]*torch.mean((loudness_db_norm - loudness_db_norm_ecog)**2)#+ torch.mean((components_guide[key] - components_ecog[key])**2 * on_stage * consonant_weight)
+        #         #'''
+        #         if key == 'loudness':
+        #             if self.power_synth:
+        #                 loudness_db_norm_ecog = (torchaudio.transforms.AmplitudeToDB()(components_ecog[key])+70)/50
+        #             else:
+        #                 loudness_db_norm_ecog = (torchaudio.transforms.AmplitudeToDB()(components_ecog[key])+70)/50
+        #             # loudness_db_norm_ecog = (torchaudio.transforms.AmplitudeToDB()(components_ecog[key])+35)/25
+        #             # loudness_db_norm_ecog = (torchaudio.transforms.AmplitudeToDB()(components_ecog[key]**2)+70)/50
+        #             if False:#self.ghm_loss:
+        #                 diff = self.lae2(loudness_db_norm, loudness_db_norm_ecog)
+        #             else:
+        #                 diff = alpha['loudness']*loss_weights_dict[key]*torch.mean((loudness_db_norm - loudness_db_norm_ecog)**2)#+ torch.mean((components_guide[key] - components_ecog[key])**2 * on_stage * consonant_weight)
+        #             diff = self.flooding(diff,alpha['loudness']*betas['loudness'])
+        #             tracker.update({'loudness_metric' : torch.mean((loudness_db_norm - loudness_db_norm_ecog)**2*on_stage_wider)})
+
+        #         if key == 'f0_hz':
+        #             # diff = torch.mean((components_guide[key]*6 - components_ecog[key]*6)**2 * on_stage_wider * components_guide['loudness']/4)
+        #             diff = loss_weights_dict[key]*torch.mean((components_guide[key]/200*5 - components_ecog[key]/200*5)**2 * on_stage_wider * loudness_db_norm)
+        #         if key in ['amplitudes']:
+        #             # if key in ['amplitudes','amplitudes_h']:
+        #             weight = on_stage_wider * loudness_db_norm
+        #             if self.ghm_loss:
+        #                 # diff = 100*self.lae3(components_guide[key], components_ecog[key],reweight=weight)
+        #                 diff = loss_weights_dict[key]*self.lae3(components_guide[key], components_ecog[key],reweight=weight)
+        #             else:
+        #                 diff = 10*torch.mean((components_guide[key] - components_ecog[key])**2 *weight)
+        #         if key in ['amplitude_formants_hamon']:
+        #             weight = components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm
+        #             if False:#self.ghm_loss:
+        #                 diff = 40*self.lae4(components_guide[key][:,:self.n_formants_ecog], components_ecog[key],reweight=weight)
+        #                 # diff = 10*self.lae4(components_guide[key][:,:self.n_formants_ecog], components_ecog[key],reweight=weight)
+        #             else:
+        #                 # diff = 100*torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * weight)
+        #                 # diff = 40*torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * weight)/2 \
+        #                 #     + 40*torch.mean((torchaudio.transforms.AmplitudeToDB()(components_guide[key][:,:self.n_formants_ecog])/100 - torchaudio.transforms.AmplitudeToDB()(components_ecog[key])/100)**2 * weight)/2
+        #                 diff = loss_weights_dict[key]*torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * weight)
+        #                 # diff = 10*torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * weight)
+        #                 # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * components_guide['amplitudes'][:,0:1] * on_stage_wider * components_guide['loudness']/4 * consonant_weight)
+        #         # if key in ['freq_formants_hamon']:
+        #         #     diff = torch.mean((components_guide[key][:,:1]*10 - components_ecog[key][:,:1]*10)**2 * components_guide['amplitude_formants_hamon'][:,:1] * components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm )
+        #         #     # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog]*10 - components_ecog[key]*10)**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * components_guide['loudness']/4 * consonant_weight)
+        #         #     # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_ecog['amplitudes'][:,0:1] * on_stage_wider * consonant_weight)
+
+        #         if key in ['freq_formants_hamon_hz']:
+        #             # weight = components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm
+        #             weight = components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight * loudness_db_norm
+        #             if False:#self.ghm_loss:
+        #                 diff = 50*self.lae5(components_guide[key][:,:self.n_formants_ecog]/400 , components_ecog[key]/400, reweight=weight)
+        #                 # diff = 15*self.lae5(components_guide[key][:,:self.n_formants_ecog]/400 , components_ecog[key]/400, reweight=weight)
+        #             else:
+        #                 # diff = 300*torch.mean((components_guide[key][:,:self.n_formants_ecog]/2000*5 - components_ecog[key][:,:self.n_formants_ecog]/2000*5)**2 * weight)
+        #                 diff = loss_weights_dict[key]*torch.mean((components_guide[key][:,:self.n_formants_ecog]/2000*5 - components_ecog[key][:,:self.n_formants_ecog]/2000*5)**2 * weight)
+        #                 # diff = 30*torch.mean((components_guide[key][:,:self.n_formants_ecog]/2000*5 - components_ecog[key][:,:self.n_formants_ecog]/2000*5)**2 * weight)
+        #                 # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog]*10 - components_ecog[key]*10)**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * components_guide['loudness']/4 * consonant_weight)
+        #                 # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_ecog['amplitudes'][:,0:1] * on_stage_wider * consonant_weight)
+        #         # if key in ['bandwidth_formants_hamon']:
+        #         #     diff = torch.mean((components_guide[key][:,:self.n_formants_ecog]/4 - components_ecog[key]/4)**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * consonant_weight)
+        #         #     # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog]/4 - components_ecog[key]/4)**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,0:1] * on_stage_wider * components_guide['loudness']/4 * consonant_weight)
+        #         #     # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * components_guide['amplitude_formants_hamon'][:,:self.n_formants_ecog] * components_ecog['amplitudes'][:,0:1] * on_stage_wider * consonant_weight)
+
+        #         if key in ['amplitude_formants_noise']:
+        #             weight = components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight * loudness_db_norm
+        #             if False:#self.ghm_loss:
+        #                 diff = self.lae6(components_guide[key],components_ecog[key],reweight=weight)
+        #             else:
+        #                 # diff = 40*torch.mean((torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1) - components_ecog[key])**2 *weight)/2 \
+        #                 #     + 40*torch.mean((torchaudio.transforms.AmplitudeToDB()(torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1))/100 - torchaudio.transforms.AmplitudeToDB()(components_ecog[key])/100)**2 * weight)/2
+        #                 diff = loss_weights_dict[key]*torch.mean((torch.cat([components_guide[key][:,:self.n_formants_ecog],components_guide[key][:,-self.n_formants_noise:]],dim=1) - components_ecog[key])**2 *weight)
+
+        #         if key in ['freq_formants_noise_hz']:
+        #             weight = components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight* loudness_db_norm
+        #             if False:#self.ghm_loss:
+        #                 diff = 10*self.lae7(components_guide[key][:,-self.n_formants_noise:]/400,components_ecog[key][:,-self.n_formants_noise:]/400,reweight=weight)
+        #             else:
+        #                 # diff = 30*torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
+        #                 diff = loss_weights_dict[key]*torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
+        #                 # diff = torch.mean((components_guide[key][:,:self.n_formants_ecog] - components_ecog[key])**2 * components_guide['amplitude_formants_noise'][:,:self.n_formants_ecog] * components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight)
+        #         if key in ['bandwidth_formants_noise_hz']:
+        #             weight = components_guide['amplitudes'][:,1:2] * on_stage_wider * consonant_weight* loudness_db_norm
+        #             if False:#self.ghm_loss:
+        #                 diff = 3*self.lae8(components_guide[key][:,-self.n_formants_noise:]/2000*5, components_ecog[key][:,-self.n_formants_noise:]/2000*5,reweight=weight)
+        #             else:
+        #                 # diff = 30*torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
+        #                 diff = loss_weights_dict[key]*torch.mean((components_guide[key][:,-self.n_formants_noise:]/2000*5 - components_ecog[key][:,-self.n_formants_noise:]/2000*5)**2 * weight)
+        #         tracker.update({key : diff})
+        #         Lcomp += diff
+        #         # import pdb; pdb.set_trace()
+        # Loss = Lrec+Lcomp
+
+        # hamonic_components_diff = compdiffd2(components_ecog['freq_formants_hamon_hz']*1.5) + compdiffd2(components_ecog['f0_hz']*2) + compdiff(components_ecog['bandwidth_formants_noise_hz'][:,components_ecog['freq_formants_hamon_hz'].shape[1]:]/5) + compdiff(components_ecog['freq_formants_noise_hz'][:,components_ecog['freq_formants_hamon_hz'].shape[1]:]/5)+ compdiff(components_ecog['amplitudes'])*750.
+        # Ldiff = loss_weights_dict['Ldiff']*torch.mean(hamonic_components_diff)
+        # tracker.update(dict(Ldiff=Ldiff))
+        # Loss += Ldiff
+
+        # freq_linear_reweighting = 1
+        # thres = int(hz2ind(4000,self.n_fft)) if self.wavebased else mel_scale(self.spec_chans,4000,pt=False).astype(np.int32)
+        # explosive=torch.sign(torch.mean((spec*freq_linear_reweighting)[...,thres:],dim=-1)-torch.mean((spec*freq_linear_reweighting)[...,:thres],dim=-1))*0.5+0.5
+        # Lexp = loss_weights_dict['Lexp']*torch.mean((components_ecog['amplitudes'][:,0:1]-components_ecog['amplitudes'][:,1:2])*explosive)
+        # tracker.update(dict(Lexp=Lexp))
+        # Loss += Lexp
+
+        # Lfreqorder = loss_weights_dict['Lfreqorder']*torch.mean(F.relu(components_ecog['freq_formants_hamon_hz'][:,:-1]-components_ecog['freq_formants_hamon_hz'][:,1:]))
+        # Loss += Lfreqorder
+        # #print ('tracker content:',tracker,tracker.keys())
+        # return Loss,tracker
+        # '''
+
         ######### new balanced loss
         # if self.spec_sup:
         #     if False:#self.ghm_loss:
diff --git a/net_formant.py b/net_formant.py
index 0fa4e58c..b8fed7c6 100644
--- a/net_formant.py
+++ b/net_formant.py
@@ -12,9 +12,10 @@
 import lreq as ln
 import math
 from registry import *
-from transformer_models.position_encoding import build_position_encoding
-from transformer_models.transformer import Transformer as TransformerTS
-from transformer_models.transformer_nonlocal import Transformer as TransformerNL
+#from transformer_models.position_encoding import build_position_encoding
+#from transformer_models.transformer import Transformer as TransformerTS
+#
+# from transformer_models.transformer_nonlocal import Transformer as TransformerNL
 
 def db(x,noise = -80, slope =35, powerdb=True):
     if powerdb:
@@ -873,7 +874,7 @@ def forward(self,x):
 
 @ECOG_ENCODER.register("ECoGMappingBottleneck")
 class ECoGMapping_Bottleneck(nn.Module):
-    def __init__(self,n_mels,n_formants,n_formants_noise=1):
+    def __init__(self,n_mels,n_formants,n_formants_noise=1,compute_db_loudness=True):
         super(ECoGMapping_Bottleneck, self).__init__()
         self.n_formants = n_formants
         self.n_mels = n_mels
@@ -901,7 +902,11 @@ def __init__(self,n_mels,n_formants,n_formants_noise=1):
             nn.init.constant_(self.formant_bandwitdh_ratio,0)
             nn.init.constant_(self.formant_bandwitdh_slop,0)
-
+        self.compute_db_loudness = compute_db_loudness
+        if compute_db_loudness:
+            self.conv_loudness = ln.Conv1d(32,1,1,1,0)
+        else:
+            self.conv_loudness = ln.Conv1d(32,1,1,1,0,bias_initial=-9.)
         self.from_ecog = FromECoG(16,residual=True)
         self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,resample = [2,1,1],pool='MAX')
         self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,resample = [2,1,1],pool='MAX')
@@ -967,49 +972,357 @@ def forward(self,ecog,mask_prior,mni):
             x_common_all += [x_common]
 
         x_common = torch.cat(x_common_all,dim=0)
-        loudness = F.softplus(self.conv_loudness(x_common))
-        amplitudes = F.softmax(self.conv_amplitudes(x_common),dim=1)
+        if self.compute_db_loudness:
+            loudness = F.sigmoid(self.conv_loudness(x_common)) #0-1
+            loudness = loudness*200-100 #-100 ~ 100 db
+            loudness = 10**(loudness/10.) #amplitude
+        else:
+            loudness = F.softplus(self.conv_loudness(x_common))
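When compute_db_loudness is enabled, loudness is decoded in decibels: a sigmoid is stretched to a [-100, 100] dB range and converted back to a linear power ratio with the 10**(dB/10) rule. The same mapping in isolation (the tensor shape is an illustrative assumption):

    import torch

    raw = torch.randn(4, 1, 128)                  # stand-in conv_loudness output
    loudness_db = torch.sigmoid(raw) * 200 - 100  # -100 dB .. 100 dB
    loudness = 10 ** (loudness_db / 10.)          # dB -> linear power amplitude
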
+        logits = self.conv_amplitudes(x_common)
+        amplitudes = F.softmax(logits,dim=1)
+        amplitudes_logsoftmax = F.log_softmax(logits,dim=1)
         amplitudes_h = F.softmax(self.conv_amplitudes_h(x_common),dim=1)
-
-        # x_fundementals = F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2)
         x_fundementals = self.f0_drop(F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2))
-        # f0 = torch.sigmoid(self.conv_f0(x_fundementals))
-        # f0 = F.tanh(self.conv_f0(x_fundementals)) * (16/64)*(self.n_mels/64) # 72hz < f0 < 446 hz
-        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (15/64)*(self.n_mels/64) # 179hz < f0 < 420 hz
-        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (22/64)*(self.n_mels/64) - (16/64)*(self.n_mels/64)# 72hz < f0 < 253 hz, human voice
-        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * (11/64)*(self.n_mels/64) - (-2/64)*(self.n_mels/64)# 160hz < f0 < 300 hz, female voice
-
-        # f0 in hz:
-        # f0 = torch.sigmoid(self.conv_f0(x_fundementals)) * 528 + 88 # 88hz < f0 < 616 hz
         f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 332 + 88 # 88hz < f0 < 420 hz
         f0 = torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001)
 
         x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2)
         formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants))
-        # formants_freqs = torch.cumsum(formants_freqs,dim=1)
-        # formants_freqs = formants_freqs
+        formants_freqs_hz = formants_freqs*(self.formant_freq_limits_abs[:,:self.n_formants]-self.formant_freq_limits_abs_low[:,:self.n_formants])+self.formant_freq_limits_abs_low[:,:self.n_formants]
+        formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/(self.n_mels*1.0),min=0)
+        formants_bandwidth_hz = 0.65*(0.00625*torch.relu(formants_freqs_hz)+375)
+        formants_bandwidth = bandwidth_mel(formants_freqs_hz,formants_bandwidth_hz,self.n_mels)
+        formants_amplitude_logit = self.conv_formants_amplitude(x_formants)
+        formants_amplitude = F.softmax(formants_amplitude_logit,dim=1)
+
+        formants_freqs_noise = torch.sigmoid(self.conv_formants_freqs_noise(x_formants))
+        formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:self.n_formants_noise]-self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise])+self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise]
+        formants_freqs_hz_noise = torch.cat([formants_freqs_hz,formants_freqs_hz_noise],dim=1)
+        formants_freqs_noise = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz_noise)/(self.n_mels*1.0),min=0)
+        formants_bandwidth_hz_noise = self.conv_formants_bandwidth_noise(x_formants)
+        formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 2344 + 586 #2000-10000
+        formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 586 #0-2000
+        formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz_noise_1,formants_bandwidth_hz_noise_2],dim=1)
+        formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz,formants_bandwidth_hz_noise],dim=1)
+        formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels)
+        formants_amplitude_noise_logit = self.conv_formants_amplitude_noise(x_formants)
+        formants_amplitude_noise_logit = torch.cat([formants_amplitude_logit,formants_amplitude_noise_logit],dim=1)
+        formants_amplitude_noise = F.softmax(formants_amplitude_noise_logit,dim=1)
+
+        components = { 'f0':f0,
+                       'f0_hz':f0_hz,
+                       'loudness':loudness,
+                       'amplitudes':amplitudes,
+                       'amplitudes_h':amplitudes_h,
+                       'freq_formants_hamon':formants_freqs,
+                       'bandwidth_formants_hamon':formants_bandwidth,
+                       'freq_formants_hamon_hz':formants_freqs_hz,
+                       'bandwidth_formants_hamon_hz':formants_bandwidth_hz,
+                       'amplitude_formants_hamon':formants_amplitude,
+                       'freq_formants_noise':formants_freqs_noise,
+                       'bandwidth_formants_noise':formants_bandwidth_noise,
+                       'freq_formants_noise_hz':formants_freqs_hz_noise,
+                       'bandwidth_formants_noise_hz':formants_bandwidth_hz_noise,
+                       'amplitude_formants_noise':formants_amplitude_noise,
+        }
+        return components
+
-        # abs freq
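The formant heads never predict raw Hz: each sigmoid output in [0, 1] is rescaled into its own absolute band taken from formant_freq_limits_abs/_low, so F1 stays in 300-950 Hz, F2 in 700-3400 Hz, and so on. The rescaling on its own, with shapes assumed to be (batch, n_formants, time):

    import torch

    high = torch.tensor([950., 3400., 3800.]).reshape(1, 3, 1)  # upper limits, Hz
    low = torch.tensor([300., 700., 1800.]).reshape(1, 3, 1)    # lower limits, Hz
    unit = torch.sigmoid(torch.randn(2, 3, 16))                 # head output in [0, 1]
    freqs_hz = unit * (high - low) + low                        # per-formant Hz bands
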
+@ECOG_ENCODER.register("ECoGMappingBottlenecklstm1")
+class ECoGMapping_Bottlenecklstm1(nn.Module):
+    def __init__(self,n_mels,n_formants,n_formants_noise=1,compute_db_loudness=True):
+        super(ECoGMapping_Bottlenecklstm1, self).__init__()
+        self.n_formants = n_formants
+        self.n_mels = n_mels
+        self.n_formants_noise = n_formants_noise
+        self.formant_freq_limits_diff = torch.tensor([950.,2450.,2100.]).reshape([1,3,1]) #freq difference
+        self.formant_freq_limits_diff_low = torch.tensor([300.,300.,0.]).reshape([1,3,1]) #freq difference
+        # self.formant_freq_limits_abs = torch.tensor([950.,2800.,3400.,4700.]).reshape([1,4,1]) #freq difference
+        # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2800.,3400]).reshape([1,4,1]) #freq difference
+        # self.formant_freq_limits_abs = torch.tensor([950.,3300.,3600.,4700.]).reshape([1,4,1]) #freq difference
+        # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.,3400]).reshape([1,4,1]) #freq difference
+        print ('******************************************************')
+        print ('*******************LSTM ECOG ENCODER******************')
+        print ('******************************************************')
+        self.formant_freq_limits_abs = torch.tensor([950.,3400.,3800.,5000.,6000.,7000.]).reshape([1,6,1]) #freq difference
+        self.formant_freq_limits_abs_low = torch.tensor([300.,700.,1800.,3400,5000.,6000.]).reshape([1,6,1]) #freq difference
+
+        # self.formant_freq_limits_abs_noise = torch.tensor([7000.]).reshape([1,1,1]) #freq difference
+        self.formant_freq_limits_abs_noise = torch.tensor([8000.,7000.,7000.]).reshape([1,3,1]) #freq difference
+        self.formant_freq_limits_abs_noise_low = torch.tensor([4000.,3000.,3000.]).reshape([1,3,1]) #freq difference
+
+        self.formant_bandwitdh_ratio = Parameter(torch.Tensor(1))
+        self.formant_bandwitdh_slop = Parameter(torch.Tensor(1))
+        with torch.no_grad():
+            nn.init.constant_(self.formant_bandwitdh_ratio,0)
+            nn.init.constant_(self.formant_bandwitdh_slop,0)
+        self.compute_db_loudness = compute_db_loudness
+        if compute_db_loudness:
+            self.conv_loudness = ln.Conv1d(32,1,1,1,0)
+        else:
+            self.conv_loudness = ln.Conv1d(32,1,1,1,0,bias_initial=-9.)
+        self.from_ecog = FromECoG(16,residual=True)
+        self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,resample = [2,1,1],pool='MAX')
+        self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,resample = [2,1,1],pool='MAX')
+        self.norm_mask = nn.GroupNorm(32,64)
+        self.mask = ln.Conv3d(64,1,[3,1,1],1,[1,0,0])
+        self.conv3 = ECoGMappingBlock(64,64,[3,3,3],residual=True,resample = [1,2,2],pool='MAX')
+        self.conv4 = ECoGMappingBlock(64,64,[3,3,3],residual=True,resample = [1,2,2],pool='MAX')
+        #self.norm = nn.GroupNorm(32,256)
+        #self.conv5 = ln.Conv1d(256,256,3,1,1)
+        self.norm2 = nn.GroupNorm(32,64)
+        self.conv6 = ln.ConvTranspose1d(64, 64, 3, 2, 1, transform_kernel=True)
+        self.norm3 = nn.GroupNorm(32,64)
+        self.conv7 = ln.ConvTranspose1d(64, 64, 3, 2, 1, transform_kernel=True)
+        #self.norm4 = nn.GroupNorm(32,64)
+        #self.conv8 = ln.ConvTranspose1d(64, 32, 3, 2, 1, transform_kernel=True)
+        #self.norm5 = nn.GroupNorm(32,32)
+        #self.conv9 = ln.ConvTranspose1d(32, 32, 3, 2, 1, transform_kernel=True)
+        self.norm6 = nn.GroupNorm(32,32)
+        self.lstm = nn.LSTM(64, 16, num_layers=3, bidirectional=True, dropout=0.1, batch_first=True)
+
+        self.conv_fundementals = ln.Conv1d(32,32,3,1,1)
+        self.norm_fundementals = nn.GroupNorm(32,32)
+        self.f0_drop = nn.Dropout()
+        self.conv_f0 = ln.Conv1d(32,1,1,1,0)
+        self.conv_amplitudes = ln.Conv1d(32,2,1,1,0)
+        self.conv_amplitudes_h = ln.Conv1d(32,2,1,1,0)
+        self.conv_loudness = ln.Conv1d(32,1,1,1,0,bias_initial=-9.) # note: this re-assignment overrides the compute_db_loudness choice above
+
+        self.conv_formants = ln.Conv1d(32,32,3,1,1)
+        self.norm_formants = nn.GroupNorm(32,32)
+        self.conv_formants_freqs = ln.Conv1d(32,n_formants,1,1,0)
+        self.conv_formants_bandwidth = ln.Conv1d(32,n_formants,1,1,0)
+        self.conv_formants_amplitude = ln.Conv1d(32,n_formants,1,1,0)
+
+        self.conv_formants_freqs_noise = ln.Conv1d(32,n_formants_noise,1,1,0)
+        self.conv_formants_bandwidth_noise = ln.Conv1d(32,n_formants_noise,1,1,0)
+        self.conv_formants_amplitude_noise = ln.Conv1d(32,n_formants_noise,1,1,0)
+
+    def forward(self,ecog,mask_prior,mni):
+        x_common_all = []
+        for d in range(len(ecog)):
+            x = ecog[d]
+            x = x.reshape([-1,1,x.shape[1],15,15])
+            mask_prior_d = mask_prior[d].reshape(-1,1,1,15,15)
+            #print ('x,mask_prior_d,mni :', x.shape,mask_prior_d.shape, mni.shape)
+            x = self.from_ecog(x)
+            #print ('from ecog: ',x.shape)
+            x = self.conv1(x)
+            #print ('conv1 x: ',x.shape)
+            x = self.conv2(x)
+            #print ('conv2 x: ',x.shape)
+            mask = torch.sigmoid(self.mask(F.leaky_relu(self.norm_mask(x),0.2)))
+            mask = mask[:,:,4:]
+            if mask_prior is not None:
+                mask = mask*mask_prior_d
+            #print ('mask: ',mask.shape)
+            x = x[:,:,4:]
+            x = x*mask
+            #print ('attention x: ', x.shape)
+            x = self.conv3(x)
+            #print ('conv3 x: ',x.shape)
+            x = self.conv4(x)
+            #print ('conv4 x: ',x.shape)
+            x = x.max(-1)[0].max(-1)[0]
+            #print ('max x: ',x.shape)
+            #x = self.conv5(F.leaky_relu(self.norm(x),0.2))
+            #print ('conv5 x: ',x.shape)
+            x = self.conv6(F.leaky_relu(self.norm2(x),0.2))
+            #print ('conv6 x: ',x.shape)
+            x = self.conv7(F.leaky_relu(self.norm3(x),0.2))
+            #print ('conv7 x: ',x.shape)
+            x = x .permute(0,2,1)
+            #print ('reshape x:',x.shape)
+            #x = self.conv8(F.leaky_relu(self.norm4(x),0.2))
+            #print ('conv8 x: ',x.shape)
+            #x = self.conv9(F.leaky_relu(self.norm5(x),0.2))
+            #print ('conv9 x: ',x.shape)
+            x = self.lstm(x)[0]
+            #print ('lstm x:',x.shape)
+            x = x .permute(0,2,1)
+            x_common = F.leaky_relu(self.norm6(x),0.2)
+            #print ('common x: ',x_common.shape)
+            x_common_all += [x_common]
+
+        x_common = torch.cat(x_common_all,dim=0)
+        if self.compute_db_loudness:
+            loudness = F.sigmoid(self.conv_loudness(x_common)) #0-1
+            loudness = loudness*200-100 #-100 ~ 100 db
+            loudness = 10**(loudness/10.) #amplitude
+        else:
+            loudness = F.softplus(self.conv_loudness(x_common))
+        logits = self.conv_amplitudes(x_common)
+        amplitudes = F.softmax(logits,dim=1)
+        amplitudes_logsoftmax = F.log_softmax(logits,dim=1)
+        amplitudes_h = F.softmax(self.conv_amplitudes_h(x_common),dim=1)
+        x_fundementals = self.f0_drop(F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2))
+        f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 332 + 88 # 88hz < f0 < 420 hz
+        f0 = torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001)
+
+        x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2)
+        formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants))
+        formants_freqs_hz = formants_freqs*(self.formant_freq_limits_abs[:,:self.n_formants]-self.formant_freq_limits_abs_low[:,:self.n_formants])+self.formant_freq_limits_abs_low[:,:self.n_formants]
+        formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/(self.n_mels*1.0),min=0)
+        formants_bandwidth_hz = 0.65*(0.00625*torch.relu(formants_freqs_hz)+375)
+        formants_bandwidth = bandwidth_mel(formants_freqs_hz,formants_bandwidth_hz,self.n_mels)
+        formants_amplitude_logit = self.conv_formants_amplitude(x_formants)
+        formants_amplitude = F.softmax(formants_amplitude_logit,dim=1)
+
+        formants_freqs_noise = torch.sigmoid(self.conv_formants_freqs_noise(x_formants))
+        formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:self.n_formants_noise]-self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise])+self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise]
+        formants_freqs_hz_noise = torch.cat([formants_freqs_hz,formants_freqs_hz_noise],dim=1)
+        formants_freqs_noise = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz_noise)/(self.n_mels*1.0),min=0)
+        formants_bandwidth_hz_noise = self.conv_formants_bandwidth_noise(x_formants)
+        formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 2344 + 586 #2000-10000
+        formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 586 #0-2000
+        formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz_noise_1,formants_bandwidth_hz_noise_2],dim=1)
+        formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz,formants_bandwidth_hz_noise],dim=1)
+        formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels)
+        formants_amplitude_noise_logit = self.conv_formants_amplitude_noise(x_formants)
+        formants_amplitude_noise_logit = torch.cat([formants_amplitude_logit,formants_amplitude_noise_logit],dim=1)
+        formants_amplitude_noise = F.softmax(formants_amplitude_noise_logit,dim=1)
+
-        # formants_freqs = formants_freqs + f0
-        # formants_bandwidth = torch.sigmoid(self.conv_formants_bandwidth(x_formants))
-        # formants_bandwidth_hz = (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100)
+        components = { 'f0':f0,
+                       'f0_hz':f0_hz,
+                       'loudness':loudness,
+                       'amplitudes':amplitudes,
+                       'amplitudes_h':amplitudes_h,
+                       'freq_formants_hamon':formants_freqs,
+                       'bandwidth_formants_hamon':formants_bandwidth,
+                       'freq_formants_hamon_hz':formants_freqs_hz,
+                       'bandwidth_formants_hamon_hz':formants_bandwidth_hz,
+                       'amplitude_formants_hamon':formants_amplitude,
+                       'freq_formants_noise':formants_freqs_noise,
+                       'bandwidth_formants_noise':formants_bandwidth_noise,
+                       'freq_formants_noise_hz':formants_freqs_hz_noise,
+                       'bandwidth_formants_noise_hz':formants_bandwidth_hz_noise,
+                       'amplitude_formants_noise':formants_amplitude_noise,
+        }
+        return components
+
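f0 and the formants are carried in two parallel units: raw Hz, and a mel-bin coordinate normalized to [0, 1] via mel_scale(n_mels, hz)/n_mels. mel_scale itself is defined elsewhere in net_formant.py; the sketch below only assumes it follows the standard 2595*log10(1 + hz/700) mel curve rescaled to n_mels bins over an 8 kHz band:

    import torch

    def mel_scale_sketch(n_mels, hz, f_max=8000.):
        # Assumed form of net_formant.mel_scale: Hz -> fractional mel-bin index.
        mel_max = 2595. * torch.log10(torch.tensor(1. + f_max / 700.))
        return 2595. * torch.log10(1. + hz / 700.) / mel_max * n_mels

    f0_hz = torch.full((2, 1, 16), 220.)
    f0 = torch.clamp(mel_scale_sketch(64, f0_hz) / 64., min=0.0001)  # ~0.11
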
+@ECOG_ENCODER.register("ECoGMappingBottlenecklstm2")
+class ECoGMapping_Bottlenecklstm2(nn.Module):
+    def __init__(self,n_mels,n_formants,n_formants_noise=1,compute_db_loudness=True):
+        super(ECoGMapping_Bottlenecklstm2, self).__init__()
+        self.n_formants = n_formants
+        self.n_mels = n_mels
+        self.n_formants_noise = n_formants_noise
+        self.formant_freq_limits_diff = torch.tensor([950.,2450.,2100.]).reshape([1,3,1]) #freq difference
+        self.formant_freq_limits_diff_low = torch.tensor([300.,300.,0.]).reshape([1,3,1]) #freq difference
+        # self.formant_freq_limits_abs = torch.tensor([950.,2800.,3400.,4700.]).reshape([1,4,1]) #freq difference
+        # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2800.,3400]).reshape([1,4,1]) #freq difference
+        # self.formant_freq_limits_abs = torch.tensor([950.,3300.,3600.,4700.]).reshape([1,4,1]) #freq difference
+        # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.,3400]).reshape([1,4,1]) #freq difference
+        self.formant_freq_limits_abs = torch.tensor([950.,3400.,3800.,5000.,6000.,7000.]).reshape([1,6,1]) #freq difference
+        self.formant_freq_limits_abs_low = torch.tensor([300.,700.,1800.,3400,5000.,6000.]).reshape([1,6,1]) #freq difference
+        print ('******************************************************')
+        print ('*******************LSTM ECOG ENCODER******************')
+        print ('******************************************************')
+        # self.formant_freq_limits_abs_noise = torch.tensor([7000.]).reshape([1,1,1]) #freq difference
+        self.formant_freq_limits_abs_noise = torch.tensor([8000.,7000.,7000.]).reshape([1,3,1]) #freq difference
+        self.formant_freq_limits_abs_noise_low = torch.tensor([4000.,3000.,3000.]).reshape([1,3,1]) #freq difference
+
+        self.formant_bandwitdh_ratio = Parameter(torch.Tensor(1))
+        self.formant_bandwitdh_slop = Parameter(torch.Tensor(1))
+        with torch.no_grad():
+            nn.init.constant_(self.formant_bandwitdh_ratio,0)
+            nn.init.constant_(self.formant_bandwitdh_slop,0)
+        self.compute_db_loudness = compute_db_loudness
+        if compute_db_loudness:
+            self.conv_loudness = ln.Conv1d(32,1,1,1,0)
+        else:
+            self.conv_loudness = ln.Conv1d(32,1,1,1,0,bias_initial=-9.)
+        self.from_ecog = FromECoG(16,residual=True)
+        self.conv1 = ECoGMappingBlock(16,32,[5,1,1],residual=True,resample = [2,1,1],pool='MAX')
+        #self.conv2 = ECoGMappingBlock(32,64,[3,1,1],residual=True,resample = [2,1,1],pool='MAX')
+        #self.norm_mask = nn.GroupNorm(32,64)
+        #self.mask = ln.Conv3d(64,1,[3,1,1],1,[1,0,0])
+        self.conv3 = ECoGMappingBlock(32,32,[3,3,3],residual=True,resample = [1,2,2],pool='MAX')
+        self.conv4 = ECoGMappingBlock(32,32,[3,3,3],residual=True,resample = [1,2,2],pool='MAX')
+        #self.norm = nn.GroupNorm(32,256)
+        #self.conv5 = ln.Conv1d(256,256,3,1,1)
+        self.norm3 = nn.GroupNorm(32,32)
+        self.conv7 = ln.ConvTranspose1d(32, 32, 3, 2, 1, transform_kernel=True)
+        self.norm6 = nn.GroupNorm(32,32)
+        self.lstm = nn.LSTM(32, 16, num_layers=4, bidirectional=True, dropout=0.1, batch_first=True)
+
+        self.conv_fundementals = ln.Conv1d(32,32,3,1,1)
+        self.norm_fundementals = nn.GroupNorm(32,32)
+        self.f0_drop = nn.Dropout()
+        self.conv_f0 = ln.Conv1d(32,1,1,1,0)
+        self.conv_amplitudes = ln.Conv1d(32,2,1,1,0)
+        self.conv_amplitudes_h = ln.Conv1d(32,2,1,1,0)
+        self.conv_loudness = ln.Conv1d(32,1,1,1,0,bias_initial=-9.) # note: this re-assignment overrides the compute_db_loudness choice above
+
+        self.conv_formants = ln.Conv1d(32,32,3,1,1)
+        self.norm_formants = nn.GroupNorm(32,32)
+        self.conv_formants_freqs = ln.Conv1d(32,n_formants,1,1,0)
+        self.conv_formants_bandwidth = ln.Conv1d(32,n_formants,1,1,0)
+        self.conv_formants_amplitude = ln.Conv1d(32,n_formants,1,1,0)
+
+        self.conv_formants_freqs_noise = ln.Conv1d(32,n_formants_noise,1,1,0)
+        self.conv_formants_bandwidth_noise = ln.Conv1d(32,n_formants_noise,1,1,0)
+        self.conv_formants_amplitude_noise = ln.Conv1d(32,n_formants_noise,1,1,0)
+
+    def forward(self,ecog,mask_prior,mni):
+        x_common_all = []
+        for d in range(len(ecog)):
+            x = ecog[d]
+            x = x.reshape([-1,1,x.shape[1],15,15])
+            mask_prior_d = mask_prior[d].reshape(-1,1,1,15,15)
+            #print ('x,mask_prior_d,mni :', x.shape,mask_prior_d.shape, mni.shape)
+            x = self.from_ecog(x)
+            #print ('from ecog: ',x.shape)
+            x = self.conv1(x)
+            #print ('conv1 x: ',x.shape)
+            x = x[:,:,4:-4]
+            #print ('x truncate: ',x.shape)
+            x = self.conv3(x)
+            #print ('conv3 x: ',x.shape)
+            x = self.conv4(x)
+            #print ('conv4 x: ',x.shape)
+            x = x.max(-1)[0].max(-1)[0]
+            #print ('max x: ',x.shape)
+            x = self.conv7(F.leaky_relu(self.norm3(x),0.2))
+            #print ('conv7 x: ',x.shape)
+            x = x .permute(0,2,1)
+            #print ('reshape x:',x.shape)
+            x = self.lstm(x)[0]
+            #print ('lstm x:',x.shape)
+            x = x .permute(0,2,1)
+            x_common = F.leaky_relu(self.norm6(x),0.2)
+            #print ('common x: ',x_common.shape)
+            x_common_all += [x_common]
+
+        x_common = torch.cat(x_common_all,dim=0)
+        if self.compute_db_loudness:
+            loudness = F.sigmoid(self.conv_loudness(x_common)) #0-1
+            loudness = loudness*200-100 #-100 ~ 100 db
+            loudness = 10**(loudness/10.) #amplitude
+        else:
+            loudness = F.softplus(self.conv_loudness(x_common))
+        logits = self.conv_amplitudes(x_common)
+        amplitudes = F.softmax(logits,dim=1)
+        amplitudes_logsoftmax = F.log_softmax(logits,dim=1)
+        amplitudes_h = F.softmax(self.conv_amplitudes_h(x_common),dim=1)
+        x_fundementals = self.f0_drop(F.leaky_relu(self.norm_fundementals(self.conv_fundementals(x_common)),0.2))
+        f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)) * 332 + 88 # 88hz < f0 < 420 hz
+        f0 = torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001)
+
+        x_formants = F.leaky_relu(self.norm_formants(self.conv_formants(x_common)),0.2)
+        formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants))
         formants_freqs_hz = formants_freqs*(self.formant_freq_limits_abs[:,:self.n_formants]-self.formant_freq_limits_abs_low[:,:self.n_formants])+self.formant_freq_limits_abs_low[:,:self.n_formants]
         formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/(self.n_mels*1.0),min=0)
         formants_bandwidth_hz = 0.65*(0.00625*torch.relu(formants_freqs_hz)+375)
-        # formants_bandwidth_hz = (torch.sigmoid(self.conv_formants_bandwidth(x_formants))) * (3*torch.sigmoid(self.formant_bandwitdh_ratio))*(0.075*torch.relu(formants_freqs_hz-1000)+100)
         formants_bandwidth = bandwidth_mel(formants_freqs_hz,formants_bandwidth_hz,self.n_mels)
         formants_amplitude_logit = self.conv_formants_amplitude(x_formants)
         formants_amplitude = F.softmax(formants_amplitude_logit,dim=1)
 
         formants_freqs_noise = torch.sigmoid(self.conv_formants_freqs_noise(x_formants))
         formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:self.n_formants_noise]-self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise])+self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise]
-        # formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:1]-self.formant_freq_limits_abs_noise_low[:,:1])+self.formant_freq_limits_abs_noise_low[:,:1]
         formants_freqs_hz_noise = torch.cat([formants_freqs_hz,formants_freqs_hz_noise],dim=1)
         formants_freqs_noise = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz_noise)/(self.n_mels*1.0),min=0)
-        # formants_bandwidth_hz_noise = F.relu(self.conv_formants_bandwidth_noise(x_formants)) * 8000 + 2000
-        # formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels)
-        # formants_amplitude_noise = F.softmax(self.conv_formants_amplitude_noise(x_formants),dim=1)
         formants_bandwidth_hz_noise = self.conv_formants_bandwidth_noise(x_formants)
         formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 2344 + 586 #2000-10000
         formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 586 #0-2000
         formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz_noise_1,formants_bandwidth_hz_noise_2],dim=1)
@@ -1039,6 +1352,147 @@ def forward(self,ecog,mask_prior,mni):
         return components
 
+def RNN_layer(in_ch,out_ch,rnn_type = 'LSTM',rnn_layers = 4,bidirection = True):
+    dropoutratio = 0 if rnn_layers ==1 else 0.1
+    if rnn_type =='LSTM':
+        return nn.LSTM(in_ch,out_ch, num_layers=rnn_layers, bidirectional=bidirection, batch_first=True,dropout=dropoutratio)
+    elif rnn_type =='GRU':
+        return nn.GRU(in_ch,out_ch, num_layers=rnn_layers, bidirectional=bidirection, batch_first=True,dropout=dropoutratio)
+
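RNN_layer is why callers pass out_ch//(bidirection+1): a bidirectional RNN concatenates forward and backward hidden states, so halving the hidden size keeps the effective channel count at out_ch. A quick shape check under that assumption:

    import torch
    import torch.nn as nn

    rnn = nn.LSTM(80, 32 // 2, num_layers=4, bidirectional=True,
                  batch_first=True, dropout=0.1)
    y, _ = rnn(torch.randn(2, 64, 80))
    assert y.shape == (2, 64, 32)  # 2 directions x 16 hidden units
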
+@ECOG_ENCODER.register("ECoGMappingBottlenecklstm_pure")
+class ECoGMapping_Bottlenecklstm_new(nn.Module):
+    def __init__(self,n_mels,n_formants,n_formants_noise=1,onedconfirst=True,rnn_type = 'LSTM',\
+        rnn_layers = 4,compute_db_loudness=True,bidirection = True):
+        super(ECoGMapping_Bottlenecklstm_new, self).__init__()
+        self.n_formants = n_formants
+        self.n_mels = n_mels
+        self.n_formants_noise = n_formants_noise
+        self.formant_freq_limits_diff = torch.tensor([950.,2450.,2100.]).reshape([1,3,1]) #freq difference
+        self.formant_freq_limits_diff_low = torch.tensor([300.,300.,0.]).reshape([1,3,1]) #freq difference
+        # self.formant_freq_limits_abs = torch.tensor([950.,2800.,3400.,4700.]).reshape([1,4,1]) #freq difference
+        # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2800.,3400]).reshape([1,4,1]) #freq difference
+        # self.formant_freq_limits_abs = torch.tensor([950.,3300.,3600.,4700.]).reshape([1,4,1]) #freq difference
+        # self.formant_freq_limits_abs_low = torch.tensor([300.,600.,2700.,3400]).reshape([1,4,1]) #freq difference
+        self.formant_freq_limits_abs = torch.tensor([950.,3400.,3800.,5000.,6000.,7000.]).reshape([1,6,1]) #freq difference
+        self.formant_freq_limits_abs_low = torch.tensor([300.,700.,1800.,3400,5000.,6000.]).reshape([1,6,1]) #freq difference
+        print ('******************************************************')
+        print ('*******************PURE!! LSTM ECOG ENCODER******************')
+        print ('******************************************************')
+        # self.formant_freq_limits_abs_noise = torch.tensor([7000.]).reshape([1,1,1]) #freq difference
+        self.formant_freq_limits_abs_noise = torch.tensor([8000.,7000.,7000.]).reshape([1,3,1]) #freq difference
+        self.formant_freq_limits_abs_noise_low = torch.tensor([4000.,3000.,3000.]).reshape([1,3,1]) #freq difference
+        self.onedconfirst = onedconfirst
+        if onedconfirst:
+            self.prelayer = ln.Conv2d(1,1,(9,1),1,(4,0))#ln.Conv1d(144,144,3,2,1)
+        else:
+            self.prelayer = RNN_layer(80, 40,rnn_type = rnn_type, rnn_layers = 1,bidirection = True)
+
+        self.formant_bandwitdh_ratio = Parameter(torch.Tensor(1))
+        self.formant_bandwitdh_slop = Parameter(torch.Tensor(1))
+        with torch.no_grad():
+            nn.init.constant_(self.formant_bandwitdh_ratio,0)
+            nn.init.constant_(self.formant_bandwitdh_slop,0)
+        self.compute_db_loudness = compute_db_loudness
+
+
+        self.lstm = RNN_layer(80, 32//(bidirection+1), rnn_layers=rnn_layers, bidirection=bidirection)
+
+        if compute_db_loudness:
+            self.conv_loudness = RNN_layer(32, 1, rnn_layers=1, bidirection=False)
+        else:
+            self.conv_loudness = RNN_layer(32, 1, rnn_layers=1, bidirection=False)
+        self.conv_fundementals = RNN_layer(32, 32//(bidirection+1), rnn_layers=1, bidirection=bidirection)
+        #self.norm_fundementals = nn.GroupNorm(32,32)
+        self.f0_drop = nn.Dropout()
+        self.conv_f0 = RNN_layer(32, 1, rnn_layers=1, bidirection=False)
+        self.conv_amplitudes = RNN_layer(32, 2//(bidirection+1), rnn_layers=1, bidirection=bidirection)
+        self.conv_amplitudes_h = RNN_layer(32, 2//(bidirection+1), rnn_layers=1, bidirection=bidirection)
+        self.conv_formants = RNN_layer(32, 32//(bidirection+1), rnn_layers=1, bidirection=bidirection)
+        #self.norm_formants = nn.GroupNorm(32,32)
+        self.conv_formants_freqs = RNN_layer(32, n_formants//(bidirection+1), rnn_layers=1, bidirection=bidirection)
+        self.conv_formants_bandwidth = RNN_layer(32, n_formants//(bidirection+1), rnn_layers=1, bidirection=bidirection)
+        self.conv_formants_amplitude = RNN_layer(32, n_formants//(bidirection+1), rnn_layers=1, bidirection=bidirection)
+        #self.conv_formants_freqs_noise = RNN_layer(32, n_formants_noise//(bidirection+1), rnn_layers=1, bidirection=bidirection)
+        #self.conv_formants_bandwidth_noise = RNN_layer(32, n_formants_noise//(bidirection+1), rnn_layers=1, bidirection=bidirection)
+        #self.conv_formants_amplitude_noise = RNN_layer(32, n_formants_noise//(bidirection+1), rnn_layers=1, bidirection=bidirection)
+        self.conv_formants_freqs_noise = RNN_layer(32, n_formants_noise, rnn_layers=1, bidirection=False)
+        self.conv_formants_bandwidth_noise = RNN_layer(32, n_formants_noise , rnn_layers=1, bidirection=False)
+        self.conv_formants_amplitude_noise = RNN_layer(32, n_formants_noise , rnn_layers=1, bidirection=False)
+
+
+    def forward(self,ecog,mask_prior,mni):
+        x_common_all = []
+        for d in range(len(ecog)):
+            x = ecog[d]
+            print ('x: ',x.shape)
+            x = torch.squeeze(self.prelayer(torch.unsqueeze(x,dim=1)),dim=1) if self.onedconfirst else self.prelayer(x)[0]
+            print ('x prelayer, convfirst ',self.onedconfirst,x.shape)
+            x = x[:,8:-8]
+            x = self.lstm(x)[0]
+            print ('lstm x:',x.shape)
+            x_common = torch.tanh(x).permute(0,2,1)
+            print ('common x: ',x_common.shape)
+            x_common_all += [x_common]
+
+        x_common = torch.cat(x_common_all,dim=0).permute(0,2,1)
+        print ('common x: ',x_common.shape)
+        if self.compute_db_loudness:
+            loudness = torch.sigmoid(self.conv_loudness(x_common)[0].permute(0,2,1)) #0-1
+            loudness = loudness*200-100 #-100 ~ 100 db
+            loudness = 10**(loudness/10.) #amplitude
+        else:
+            loudness = F.softplus(self.conv_loudness(x_common)[0].permute(0,2,1))
+        logits = self.conv_amplitudes(x_common)[0].permute(0,2,1)
+        amplitudes = F.softmax(logits,dim=1)
+        amplitudes_logsoftmax = F.log_softmax(logits,dim=1)
+        amplitudes_h = F.softmax(self.conv_amplitudes_h(x_common)[0].permute(0,2,1),dim=1)
+        x_fundementals = self.f0_drop(F.leaky_relu( self.conv_fundementals(x_common)[0],0.2))
+        print ('x_fundementals: ',x_fundementals.shape)
+        f0_hz = torch.sigmoid(self.conv_f0(x_fundementals)[0].permute(0,2,1)) * 332 + 88 # 88hz < f0 < 420 hz
+        f0 = torch.clamp(mel_scale(self.n_mels,f0_hz)/(self.n_mels*1.0),min=0.0001)
+
+        x_formants = F.leaky_relu( self.conv_formants(x_common)[0],0.2)
+        formants_freqs = torch.sigmoid(self.conv_formants_freqs(x_formants)[0].permute(0,2,1))
+        formants_freqs_hz = formants_freqs*(self.formant_freq_limits_abs[:,:self.n_formants]-self.formant_freq_limits_abs_low[:,:self.n_formants])+self.formant_freq_limits_abs_low[:,:self.n_formants]
+        formants_freqs = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz)/(self.n_mels*1.0),min=0)
+        formants_bandwidth_hz = 0.65*(0.00625*torch.relu(formants_freqs_hz)+375)
+        formants_bandwidth = bandwidth_mel(formants_freqs_hz,formants_bandwidth_hz,self.n_mels)
+        formants_amplitude_logit = self.conv_formants_amplitude(x_formants)[0].permute(0,2,1)
+        formants_amplitude = F.softmax(formants_amplitude_logit,dim=1)
+
+        formants_freqs_noise = torch.sigmoid(self.conv_formants_freqs_noise(x_formants)[0].permute(0,2,1))
+        formants_freqs_hz_noise = formants_freqs_noise*(self.formant_freq_limits_abs_noise[:,:self.n_formants_noise]-self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise])+self.formant_freq_limits_abs_noise_low[:,:self.n_formants_noise]
+        formants_freqs_hz_noise = torch.cat([formants_freqs_hz,formants_freqs_hz_noise],dim=1)
+        formants_freqs_noise = torch.clamp(mel_scale(self.n_mels,formants_freqs_hz_noise)/(self.n_mels*1.0),min=0)
+        formants_bandwidth_hz_noise = self.conv_formants_bandwidth_noise(x_formants)[0].permute(0,2,1)
+        formants_bandwidth_hz_noise_1 = F.softplus(formants_bandwidth_hz_noise[:,:1]) * 2344 + 586 #2000-10000
+        formants_bandwidth_hz_noise_2 = torch.sigmoid(formants_bandwidth_hz_noise[:,1:]) * 586 #0-2000
+        formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz_noise_1,formants_bandwidth_hz_noise_2],dim=1)
+        formants_bandwidth_hz_noise = torch.cat([formants_bandwidth_hz,formants_bandwidth_hz_noise],dim=1)
+        formants_bandwidth_noise = bandwidth_mel(formants_freqs_hz_noise,formants_bandwidth_hz_noise,self.n_mels)
+        formants_amplitude_noise_logit = self.conv_formants_amplitude_noise(x_formants)[0].permute(0,2,1)
+        formants_amplitude_noise_logit = torch.cat([formants_amplitude_logit,formants_amplitude_noise_logit],dim=1)
+        formants_amplitude_noise = F.softmax(formants_amplitude_noise_logit,dim=1)
+
+        components = { 'f0':f0,
+                       'f0_hz':f0_hz,
+                       'loudness':loudness,
+                       'amplitudes':amplitudes,
+                       'amplitudes_h':amplitudes_h,
+                       'freq_formants_hamon':formants_freqs,
+                       'bandwidth_formants_hamon':formants_bandwidth,
+                       'freq_formants_hamon_hz':formants_freqs_hz,
+                       'bandwidth_formants_hamon_hz':formants_bandwidth_hz,
+                       'amplitude_formants_hamon':formants_amplitude,
+                       'freq_formants_noise':formants_freqs_noise,
+                       'bandwidth_formants_noise':formants_bandwidth_noise,
+                       'freq_formants_noise_hz':formants_freqs_hz_noise,
+                       'bandwidth_formants_noise_hz':formants_bandwidth_hz_noise,
+                       'amplitude_formants_noise':formants_amplitude_noise,
+        }
+        return components
+
+
 class BackBone(nn.Module):
     def __init__(self,attentional_mask=True):
         super(BackBone, self).__init__()
diff --git a/train_formant.py b/train_formant_a.py
similarity index 75%
rename from train_formant.py
rename to train_formant_a.py
index 72ed286b..63736fc2 100644
--- a/train_formant.py
+++ b/train_formant_a.py
@@ -38,8 +38,20 @@
 from ECoGDataSet import concate_batch
 from formant_systh import save_sample
+from tensorboardX import SummaryWriter
+
+
+import argparse
+
+parser = argparse.ArgumentParser(description='train_formant_a arguments')
+
+parser.add_argument('-m', '--modeldir', type=str,default=' ',
+                    help='override for cfg.OUTPUT_DIR (see __main__)')
+argus = parser.parse_args()
+
 def train(cfg, logger, local_rank, world_size, distributed):
+    writer = SummaryWriter(cfg.OUTPUT_DIR)
     torch.cuda.set_device(local_rank)
     model = Model(
         generator=cfg.MODEL.GENERATOR,
@@ -191,11 +203,6 @@ def train(cfg, logger, local_rank, world_size, distributed):
         save=local_rank == 0)
     extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/ecog_finetune_3ecogformants_han5_specsup_guidance_hamonicformantsemph/model_epoch23.pth')
-    # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/debug_f1f2linearmel/model_epoch27.pth')
-    # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/debug_fitf1f2freqonly/model_epoch28.pth')
-    # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/debug_fitf1f2freqonly/model_epoch6.pth')
-    # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=True,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/loudnesscomp_han5/model_epoch50.pth')
-    # extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='./training_artifacts/test_9/model_epoch30.pth')
     logger.info("Starting from epoch: %d" % (scheduler.start_epoch()))
     arguments.update(extra_checkpoint_data)
@@ -262,6 +269,7 @@ def train(cfg, logger, local_rank, world_size, distributed):
     # model.eval()
     # Lrec = model(sample_spec_test, x_denoise = sample_spec_denoise_test,x_mel = sample_spec_mel_test,ecog=ecog_test if cfg.MODEL.ECOG else None, mask_prior=mask_prior_test if cfg.MODEL.ECOG else None, on_stage = on_stage_test,on_stage_wider = on_stage_wider_test, ae = not cfg.MODEL.ECOG, tracker = tracker_test, encoder_guide=cfg.MODEL.W_SUP,pitch_aug=False,duomask=duomask,mni=mni_coordinate_test,debug = False,x_amp=sample_spec_amp_test,hamonic_bias = False)
     # save_sample(sample_spec_test,ecog_test,mask_prior_test,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=sample_spec_denoise_test,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=0,label=sample_label_test,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker_test,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=True)
+    n_iter = 0
     for epoch in range(cfg.TRAIN.TRAIN_EPOCHS):
         model.train()
@@ -271,6 +279,7 @@ def train(cfg, logger, local_rank, world_size, distributed):
         epoch_start_time = time.time()
         i = 0
         for sample_dict_train in tqdm(iter(dataset.iterator)):
+            n_iter +=1
             # import pdb; pdb.set_trace()
             # sample_dict_train = concate_batch(sample_dict_train)
             i += 1
@@ -310,12 +319,32 @@ def train(cfg, logger, local_rank, world_size, distributed):
 
             if (cfg.MODEL.ECOG):
                 optimizer.zero_grad()
-                Lrec = model(x, x_denoise = x_orig_denoise,x_mel = x_mel,ecog=ecog, mask_prior=mask_prior, on_stage = on_stage,on_stage_wider = on_stage_wider, ae = False, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP,duomask=duomask,mni=mni_coordinate,x_amp=x_orig_amp)
+                Lrec,tracker = model(x, x_denoise = x_orig_denoise,x_mel = x_mel,ecog=ecog, mask_prior=mask_prior, on_stage = on_stage,on_stage_wider = on_stage_wider, ae = False, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP,duomask=duomask,mni=mni_coordinate,x_amp=x_orig_amp)
+                #print ('tracker',tracker,tracker.tracks)
+                #for key in ['Lae_a','Lae_a_l2','Lae_db','Lae_db_l2','Lloudness','Lae_denoise','Lamp','Lae','Lexp','Lf0','Ldiff']:
+                #    print ('tracker, ',key, tracker.tracks[key].mean(dim=0))
+                ecog_key_list = ['Lae_a','Lae_a_l2','Lae_db','Lae_db_l2','Lloudness','Lae_denoise','Lamp','Lae','Lexp','Lf0','Ldiff']
+                if n_iter %10==0:
+                    #print ('write tensorboard')
+                    writer.add_scalars('data/loss_group', {key: tracker.tracks[key].mean(dim=0) for key in ecog_key_list}, n_iter)
+                    for key in ecog_key_list:
+                        writer.add_scalar('data/'+key, tracker.tracks[key].mean(dim=0), n_iter)
+                #pass #log in tensorboard later!!
+                #tracker Lae_a: 0.0536877, Lae_a_l2: 0.0538647, Lae_db: 0.1655398, Lae_db_l2: 0.1655714, Lloudness: 1.0384552, Lae_denoise: 0.0485827, Lamp: 0.0000148, Lae: 2.1787138, Lexp: -1.9956266, Lf0: 0.0000000, Ldiff: 0.0568467
                 (Lrec).backward()
                 optimizer.step()
             else:
                 optimizer.zero_grad()
-                Lrec = model(x, x_denoise = x_orig_denoise,x_mel = x_mel,ecog=None, mask_prior=None, on_stage = on_stage,on_stage_wider = on_stage_wider, ae = True, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP,pitch_aug=False,duomask=duomask,mni=mni_coordinate,debug = False,x_amp=x_orig_amp,hamonic_bias = False)#epoch<2)
+                Lrec,tracker = model(x, x_denoise = x_orig_denoise,x_mel = x_mel,ecog=None, mask_prior=None, on_stage = on_stage,on_stage_wider = on_stage_wider, ae = True, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP,pitch_aug=False,duomask=duomask,mni=mni_coordinate,debug = False,x_amp=x_orig_amp,hamonic_bias = False)#epoch<2)
+                #print ('tracker',tracker,tracker.tracks)
+                #for key in ['Lae_a','Lae_a_l2','Lae_db','Lae_db_l2','Lloudness','Lae_denoise','Lamp','Lae','Lexp','Lf0','Ldiff']:
+                #    print ('tracker, ',key, tracker.tracks[key].mean(dim=0))
+                ecog_key_list = ['Lae_a','Lae_a_l2','Lae_db','Lae_db_l2','Lloudness','Lae_denoise','Lamp','Lae','Lexp','Lf0','Ldiff']
+                if n_iter %10==0:
+                    #print ('write tensorboard')
+                    writer.add_scalars('data/loss_group', {key: tracker.tracks[key].mean(dim=0) for key in ecog_key_list}, n_iter)
+                    for key in ecog_key_list:
+                        writer.add_scalar('data/'+key, tracker.tracks[key].mean(dim=0), n_iter)
                 (Lrec).backward()
                 optimizer.step()
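Every 10th iteration the running means kept by LossTracker are pushed to tensorboardX, once as a grouped plot and once per key. A reduced sketch of that logging pattern; the tracks dict of per-loss tensors is a stand-in for the tracker state implied by tracker.tracks[key].mean(dim=0) above, and the log directory is hypothetical:

    from tensorboardX import SummaryWriter
    import torch

    writer = SummaryWriter('./training_artifacts/example_run')  # hypothetical dir
    tracks = {'Lae': torch.rand(8), 'Lf0': torch.rand(8)}       # stand-in tracker state
    for n_iter in range(1, 101):
        if n_iter % 10 == 0:
            means = {k: v.mean(dim=0).item() for k, v in tracks.items()}
            writer.add_scalars('data/loss_group', means, n_iter)  # grouped plot
            for k, m in means.items():
                writer.add_scalar('data/' + k, m, n_iter)         # individual plots
    writer.close()
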
#save_sample(x,ecog,mask_prior,mni_coordinate,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=x_orig_denoise,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=epoch,label=labels,mode='train',path=cfg.OUTPUT_DIR,tracker = tracker,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=duomask,x_amp=x_orig_amp) + #print ('finish') if local_rank == 0: print(2**(torch.tanh(model.encoder.formant_bandwitdh_slop))) checkpointer.save("model_epoch%d" % epoch) model.eval() Lrec = model(sample_spec_test, x_denoise = sample_spec_denoise_test,x_mel = sample_spec_mel_test,ecog=ecog_test if cfg.MODEL.ECOG else None, mask_prior=mask_prior_test if cfg.MODEL.ECOG else None, on_stage = on_stage_test,on_stage_wider = on_stage_wider_test, ae = not cfg.MODEL.ECOG, tracker = tracker_test, encoder_guide=cfg.MODEL.W_SUP,pitch_aug=False,duomask=duomask,mni=mni_coordinate_test,debug = False,x_amp=sample_spec_amp_test,hamonic_bias = False) - save_sample(sample_spec_test,ecog_test,mask_prior_test,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=sample_spec_denoise_test,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=epoch,label=sample_label_test,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker_test,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=duomask,x_amp=sample_spec_amp_test) - save_sample(x,ecog,mask_prior,mni_coordinate,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=x_orig_denoise,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=epoch,label=labels,mode='train',path=cfg.OUTPUT_DIR,tracker = tracker,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=duomask,x_amp=x_orig_amp) - + if epoch%1==0: + #first mode is test + reconaudio = save_sample(sample_spec_test,ecog_test,mask_prior_test,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=sample_spec_denoise_test,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=epoch,label=sample_label_test,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker_test,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=duomask,x_amp=sample_spec_amp_test) + save_sample(x,ecog,mask_prior,mni_coordinate,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=x_orig_denoise,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=epoch,label=labels,mode='train',path=cfg.OUTPUT_DIR,tracker = tracker,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=duomask,x_amp=x_orig_amp) + writer.add_audio('reconaudio', reconaudio, n_iter, sample_rate=16000) + writer.export_scalars_to_json(cfg.OUTPUT_DIR+"/all_scalars.json") + writer.close() if __name__ == "__main__": gpu_count = torch.cuda.device_count() - run(train, get_cfg_defaults(), description='StyleGAN', default_config='configs/ecog_style2.yaml', - world_size=gpu_count) + cfg = get_cfg_defaults() + parser = argparse.ArgumentParser(description='formant') + parser.add_argument( + "-c", "--config-file", + default='configs/ecog_style2_a.yaml', + metavar="FILE", + help="path to config file", + type=str, + ) + parser.add_argument( + "opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + args = parser.parse_args() + + #if args.modeldir !='': + # cfg.OUTPUT_DIR = args.modeldir + run(train, cfg, description='StyleGAN', 
default_config='configs/ecog_style2_a.yaml', + world_size=gpu_count,args=args) diff --git a/train_formant_e.py b/train_formant_e.py new file mode 100644 index 00000000..b80b1f59 --- /dev/null +++ b/train_formant_e.py @@ -0,0 +1,427 @@ +# Copyright 2019-2020 Stanislav Pidhorskyi +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +import json +from os import terminal_size +import pdb +import torch.utils.data +from torchvision.utils import save_image +from net_formant import * +import os +import utils +from checkpointer import Checkpointer +from scheduler import ComboMultiStepLR +from custom_adam import LREQAdam +from dataloader_ecog import * +from tqdm import tqdm +from dlutils.pytorch import count_parameters +import dlutils.pytorch.count_parameters as count_param_override +from tracker import LossTracker +from model_formant import Model +from launcher import run +from defaults import get_cfg_defaults +import lod_driver +from PIL import Image +import numpy as np +from torch import autograd +from ECoGDataSet import concate_batch +from formant_systh import save_sample +import argparse +from tensorboardX import SummaryWriter + + + + +def train(cfg, logger, local_rank, world_size, distributed): + writer = SummaryWriter(cfg.OUTPUT_DIR) + torch.cuda.set_device(local_rank) + model = Model( + generator=cfg.MODEL.GENERATOR, + encoder=cfg.MODEL.ENCODER, + ecog_encoder_name=cfg.MODEL.MAPPING_FROM_ECOG, + spec_chans = cfg.DATASET.SPEC_CHANS, + n_formants = cfg.MODEL.N_FORMANTS, + n_formants_noise = cfg.MODEL.N_FORMANTS_NOISE, + n_formants_ecog = cfg.MODEL.N_FORMANTS_ECOG, + wavebased = cfg.MODEL.WAVE_BASED, + n_fft=cfg.MODEL.N_FFT, + noise_db=cfg.MODEL.NOISE_DB, + max_db=cfg.MODEL.MAX_DB, + with_ecog = cfg.MODEL.ECOG, + hidden_dim=cfg.MODEL.TRANSFORMER.HIDDEN_DIM, + dim_feedforward=cfg.MODEL.TRANSFORMER.DIM_FEEDFORWARD, + encoder_only=cfg.MODEL.TRANSFORMER.ENCODER_ONLY, + attentional_mask=cfg.MODEL.TRANSFORMER.ATTENTIONAL_MASK, + n_heads = cfg.MODEL.TRANSFORMER.N_HEADS, + non_local = cfg.MODEL.TRANSFORMER.NON_LOCAL, + do_mel_guide = cfg.MODEL.DO_MEL_GUIDE, + noise_from_data = cfg.MODEL.BGNOISE_FROMDATA, + specsup=cfg.FINETUNE.SPECSUP, + power_synth = cfg.MODEL.POWER_SYNTH, + onedconfirst=cfg.MODEL.ONEDCONFIRST, + rnn_type = cfg.MODEL.RNN_TYPE, + rnn_layers = cfg.MODEL.RNN_LAYERS, + compute_db_loudness=cfg.MODEL.RNN_COMPUTE_DB_LOUDNESS, + bidirection = cfg.MODEL.BIDIRECTION + ) + model.cuda(local_rank) + model.train() + + model_s = Model( + generator=cfg.MODEL.GENERATOR, + encoder=cfg.MODEL.ENCODER, + ecog_encoder_name=cfg.MODEL.MAPPING_FROM_ECOG, + spec_chans = cfg.DATASET.SPEC_CHANS, + n_formants = cfg.MODEL.N_FORMANTS, + n_formants_noise = cfg.MODEL.N_FORMANTS_NOISE, + n_formants_ecog = cfg.MODEL.N_FORMANTS_ECOG, + wavebased = cfg.MODEL.WAVE_BASED, + n_fft=cfg.MODEL.N_FFT, + noise_db=cfg.MODEL.NOISE_DB, + max_db=cfg.MODEL.MAX_DB, + with_ecog = cfg.MODEL.ECOG, + hidden_dim=cfg.MODEL.TRANSFORMER.HIDDEN_DIM, + 
dim_feedforward=cfg.MODEL.TRANSFORMER.DIM_FEEDFORWARD, + encoder_only=cfg.MODEL.TRANSFORMER.ENCODER_ONLY, + attentional_mask=cfg.MODEL.TRANSFORMER.ATTENTIONAL_MASK, + n_heads = cfg.MODEL.TRANSFORMER.N_HEADS, + non_local = cfg.MODEL.TRANSFORMER.NON_LOCAL, + do_mel_guide = cfg.MODEL.DO_MEL_GUIDE, + noise_from_data = cfg.MODEL.BGNOISE_FROMDATA, + specsup=cfg.FINETUNE.SPECSUP, + power_synth = cfg.MODEL.POWER_SYNTH, + onedconfirst=cfg.MODEL.ONEDCONFIRST, + rnn_type = cfg.MODEL.RNN_TYPE, + rnn_layers = cfg.MODEL.RNN_LAYERS, + compute_db_loudness=cfg.MODEL.RNN_COMPUTE_DB_LOUDNESS, + bidirection = cfg.MODEL.BIDIRECTION + ) + model_s.cuda(local_rank) + model_s.eval() + model_s.requires_grad_(False) + # print(model) + if distributed: + model = nn.parallel.DistributedDataParallel( + model, + device_ids=[local_rank], + broadcast_buffers=False, + bucket_cap_mb=25, + find_unused_parameters=True) + model.device_ids = None + decoder = model.module.decoder + encoder = model.module.encoder + if hasattr(model.module,'ecog_encoder'): + ecog_encoder = model.module.ecog_encoder + if hasattr(model.module,'decoder_mel'): + decoder_mel = model.module.decoder_mel + else: + decoder = model.decoder + encoder = model.encoder + if hasattr(model,'ecog_encoder'): + ecog_encoder = model.ecog_encoder + if hasattr(model,'decoder_mel'): + decoder_mel = model.decoder_mel + + count_param_override.print = lambda a: logger.info(a) + + logger.info("Trainable parameters generator:") + count_parameters(decoder) + + logger.info("Trainable parameters discriminator:") + count_parameters(encoder) + + arguments = dict() + arguments["iteration"] = 0 + + if cfg.MODEL.ECOG: + if cfg.MODEL.SUPLOSS_ON_ECOGF: + optimizer = LREQAdam([ + {'params': ecog_encoder.parameters()} + ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) + else: + optimizer = LREQAdam([ + {'params': ecog_encoder.parameters()}, + {'params': decoder.parameters()}, + ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) + + else: + if cfg.MODEL.DO_MEL_GUIDE: + optimizer = LREQAdam([ + {'params': encoder.parameters()}, + {'params': decoder.parameters()}, + {'params': decoder_mel.parameters()}, + ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) + else: + optimizer = LREQAdam([ + {'params': encoder.parameters()}, + {'params': decoder.parameters()} + ], lr=cfg.TRAIN.BASE_LEARNING_RATE, betas=(cfg.TRAIN.ADAM_BETA_0, cfg.TRAIN.ADAM_BETA_1), weight_decay=0) + + scheduler = ComboMultiStepLR(optimizers= + {'optimizer': optimizer}, + milestones=cfg.TRAIN.LEARNING_DECAY_STEPS, + gamma=cfg.TRAIN.LEARNING_DECAY_RATE, + reference_batch_size=32, base_lr=cfg.TRAIN.LEARNING_RATES) + model_dict = { + 'encoder': encoder, + 'generator': decoder, + } + if hasattr(model,'ecog_encoder'): + model_dict['ecog_encoder'] = ecog_encoder + if hasattr(model,'decoder_mel'): + model_dict['decoder_mel'] = decoder_mel + if local_rank == 0: + model_dict['encoder_s'] = model_s.encoder + model_dict['generator_s'] = model_s.decoder + if hasattr(model_s,'ecog_encoder'): + model_dict['ecog_encoder_s'] = model_s.ecog_encoder + if hasattr(model_s,'decoder_mel'): + model_dict['decoder_mel_s'] = model_s.decoder_mel + + tracker = LossTracker(cfg.OUTPUT_DIR) + tracker_test = LossTracker(cfg.OUTPUT_DIR,test=True) + + auxiliary = { + 'optimizer': optimizer, + 'scheduler': scheduler, + 'tracker': tracker, + 'tracker_test':tracker_test, + } + + checkpointer = 
Checkpointer(cfg, + model_dict, + auxiliary, + logger=logger, + save=local_rank == 0) + + extra_checkpoint_data = checkpointer.load(ignore_last_checkpoint=False,ignore_auxiliary=cfg.FINETUNE.FINETUNE,file_name='output/10231600/model_epoch56.pth') + logger.info("Starting from epoch: %d" % (scheduler.start_epoch())) + + arguments.update(extra_checkpoint_data) + + + with open('train_param.json','r') as rfile: + param = json.load(rfile) + # data_param, train_param, test_param = param['Data'], param['Train'], param['Test'] + dataset = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS,param=param) + dataset_test = TFRecordsDataset(cfg, logger, rank=local_rank, world_size=world_size, buffer_size_mb=1024, channels=cfg.MODEL.CHANNELS,train=False,param=param) + # noise_dist = dataset.noise_dist + noise_dist = torch.from_numpy(dataset.noise_dist).to('cuda').float() + if cfg.MODEL.BGNOISE_FROMDATA: + model_s.noise_dist_init(noise_dist) + model.noise_dist_init(noise_dist) + rnd = np.random.RandomState(3456) + # latents = rnd.randn(len(dataset_test.dataset), cfg.MODEL.LATENT_SPACE_SIZE) + # samplez = torch.tensor(latents).float().cuda() + + + if cfg.DATASET.SAMPLES_PATH: + path = cfg.DATASET.SAMPLES_PATH + src = [] + with torch.no_grad(): + for filename in list(os.listdir(path))[:32]: + img = np.asarray(Image.open(os.path.join(path, filename))) + if img.shape[2] == 4: + img = img[:, :, :3] + im = img.transpose((2, 0, 1)) + x = torch.tensor(np.asarray(im, dtype=np.float32), requires_grad=True).cuda() / 127.5 - 1. + if x.shape[0] == 4: + x = x[:3] + src.append(x) + sample = torch.stack(src) + else: + dataset_test.reset(cfg.DATASET.MAX_RESOLUTION_LEVEL, len(dataset_test.dataset)) + sample_dict_test = next(iter(dataset_test.iterator)) + # sample_dict_test = concate_batch(sample_dict_test) + sample_wave_test = sample_dict_test['wave_re_batch_all'].to('cuda').float() + if cfg.MODEL.WAVE_BASED: + sample_spec_test = sample_dict_test['wave_spec_re_batch_all'].to('cuda').float() + sample_spec_amp_test = sample_dict_test['wave_spec_re_amp_batch_all'].to('cuda').float() + sample_spec_denoise_test = sample_dict_test['wave_spec_re_denoise_batch_all'].to('cuda').float() + # sample_spec_test = wave2spec(sample_wave_test,n_fft=cfg.MODEL.N_FFT,noise_db=cfg.MODEL.NOISE_DB,max_db=cfg.MODEL.MAX_DB) + else: + sample_spec_test = sample_dict_test['spkr_re_batch_all'].to('cuda').float() + sample_spec_denoise_test = None#sample_dict_test['wave_spec_re_denoise_batch_all'].to('cuda').float() + sample_label_test = sample_dict_test['label_batch_all'] + if cfg.MODEL.ECOG: + ecog_test = [sample_dict_test['ecog_re_batch_all'][i].to('cuda').float() for i in range(len(sample_dict_test['ecog_re_batch_all']))] + mask_prior_test = [sample_dict_test['mask_all'][i].to('cuda').float() for i in range(len(sample_dict_test['mask_all']))] + mni_coordinate_test = sample_dict_test['mni_coordinate_all'].to('cuda').float() + else: + ecog_test = None + mask_prior_test = None + mni_coordinate_test = None + sample_spec_mel_test = sample_dict_test['spkr_re_batch_all'].to('cuda').float() if cfg.MODEL.DO_MEL_GUIDE else None + on_stage_test = sample_dict_test['on_stage_re_batch_all'].to('cuda').float() + on_stage_wider_test = sample_dict_test['on_stage_wider_re_batch_all'].to('cuda').float() + # sample = next(make_dataloader(cfg, logger, dataset, 32, local_rank)) + # sample = (sample / 127.5 - 1.) 
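A note on the frozen running-average copy model_s prepared above: after every optimizer step, the training loop below calls model_s.lerp(model, betta) with betta = 0.5 ** (cfg.TRAIN.BATCH_SIZE / (10 * 1000.0)). A minimal sketch of what such an ALAE-style lerp amounts to (the per-parameter loop is an assumption following that convention, not code from this repository):

import torch

@torch.no_grad()
def ema_lerp(model_s, model, betta):
    # Assumed semantics: move each averaged parameter a step of (1 - betta)
    # toward the corresponding online parameter,
    # i.e. p_s <- betta * p_s + (1 - betta) * p.
    for p_s, p in zip(model_s.parameters(), model.parameters()):
        p_s.data.lerp_(p.data, 1.0 - betta)

# With betta = 0.5 ** (batch_size / 10000.0), the running average has a
# half-life of roughly 10k training samples, independent of batch size.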
+ # import pdb; pdb.set_trace() + duomask=True + # model.eval() + # Lrec = model(sample_spec_test, x_denoise = sample_spec_denoise_test,x_mel = sample_spec_mel_test,ecog=ecog_test if cfg.MODEL.ECOG else None, mask_prior=mask_prior_test if cfg.MODEL.ECOG else None, on_stage = on_stage_test,on_stage_wider = on_stage_wider_test, ae = not cfg.MODEL.ECOG, tracker = tracker_test, encoder_guide=cfg.MODEL.W_SUP,pitch_aug=False,duomask=duomask,mni=mni_coordinate_test,debug = False,x_amp=sample_spec_amp_test,hamonic_bias = False) + # save_sample(sample_spec_test,ecog_test,mask_prior_test,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=sample_spec_denoise_test,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=0,label=sample_label_test,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker_test,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=True) + n_iter = 0 + for epoch in range(cfg.TRAIN.TRAIN_EPOCHS): + model.train() + + # batches = make_dataloader(cfg, logger, dataset, lod2batch.get_per_GPU_batch_size(), local_rank) + model.train() + need_permute = False + epoch_start_time = time.time() + i = 0 + for sample_dict_train in tqdm(iter(dataset.iterator)): + n_iter +=1 + # import pdb; pdb.set_trace() + # sample_dict_train = concate_batch(sample_dict_train) + i += 1 + wave_orig = sample_dict_train['wave_re_batch_all'].to('cuda').float() + if cfg.MODEL.WAVE_BASED: + # x_orig = wave2spec(wave_orig,n_fft=cfg.MODEL.N_FFT,noise_db=cfg.MODEL.NOISE_DB,max_db=cfg.MODEL.MAX_DB) + x_orig = sample_dict_train['wave_spec_re_batch_all'].to('cuda').float() + x_orig_amp = sample_dict_train['wave_spec_re_amp_batch_all'].to('cuda').float() + x_orig_denoise = sample_dict_train['wave_spec_re_denoise_batch_all'].to('cuda').float() + else: + x_orig = sample_dict_train['spkr_re_batch_all'].to('cuda').float() + x_orig_denoise = None#sample_dict_train['wave_spec_re_denoise_batch_all'].to('cuda').float() + + on_stage = sample_dict_train['on_stage_re_batch_all'].to('cuda').float() + on_stage_wider = sample_dict_train['on_stage_wider_re_batch_all'].to('cuda').float() + words = sample_dict_train['word_batch_all'].to('cuda').long() + words = words.view(words.shape[0]*words.shape[1]) + labels = sample_dict_train['label_batch_all'] + if cfg.MODEL.ECOG: + ecog = [sample_dict_train['ecog_re_batch_all'][j].to('cuda').float() for j in range(len(sample_dict_train['ecog_re_batch_all']))] + mask_prior = [sample_dict_train['mask_all'][j].to('cuda').float() for j in range(len(sample_dict_train['mask_all']))] + mni_coordinate = sample_dict_train['mni_coordinate_all'].to('cuda').float() + else: + ecog = None + mask_prior = None + mni_coordinate = None + x = x_orig + x_mel = sample_dict_train['spkr_re_batch_all'].to('cuda').float() if cfg.MODEL.DO_MEL_GUIDE else None + # x.requires_grad = True + # apply_cycle = cfg.MODEL.CYCLE and True + # apply_w_classifier = cfg.MODEL.W_CLASSIFIER and True + # apply_gp = True + # apply_ppl = cfg.MODEL.APPLY_PPL and True + # apply_ppl_d = cfg.MODEL.APPLY_PPL_D and True + # apply_encoder_guide = (cfg.FINETUNE.ENCODER_GUIDE or cfg.MODEL.W_SUP) and True + # apply_sup = cfg.FINETUNE.SPECSUP + + if (cfg.MODEL.ECOG): + optimizer.zero_grad() + Lrec,tracker = model(x, x_denoise = x_orig_denoise,x_mel = x_mel,ecog=ecog, mask_prior=mask_prior, on_stage = on_stage,on_stage_wider = on_stage_wider, ae = False, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP,duomask=duomask,mni=mni_coordinate,x_amp=x_orig_amp) + #print 
('tracker',tracker,tracker.tracks) + #for key in ['Lae_a','Lae_a_l2','Lae_db','Lae_db_l2','Lloudness','Lae_denoise','Lamp','Lae','Lexp','Lf0','Ldiff']: + # print ('tracker, ',key, tracker.tracks[key].mean(dim=0)) + #Lae_a: 0.0938077, Lae_a_l2: 0.0938695, Lae_db: 0.2939660, Lae_db_l2: 0.2939744, Lrec: 0.2407907, loudness: 0.5835954, f0_hz: 0.3419901, amplitudes: 0.2974630, amplitude_formants_hamon: 2.7913144, freq_formants_hamon_hz: 4.6389437, amplitude_formants_noise: 0.2281826, freq_formants_noise_hz: 1.5711578, bandwidth_formants_noise: 1.5711578, Ldiff: 0.3452123, Lexp: -0.0683261 + ecog_key_list = ['Lae_a','Lae_a_l2','Lae_db','Lae_db_l2','Lrec','loudness','f0_hz','amplitudes','amplitude_formants_hamon','freq_formants_hamon_hz','amplitude_formants_noise','freq_formants_noise_hz','bandwidth_formants_noise','Ldiff','Lexp'] + metric_key_list = ['loudness_metric','f0_metric','amplitudes_metric','amplitude_formants_hamon_metric','freq_formants_hamon_hz_metric_2','freq_formants_hamon_hz_metric_6','amplitude_formants_noise_metric'] + if n_iter %10==0: + #print ('write tensorboard') + writer.add_scalars('data/loss_group', {key: tracker.tracks[key].mean(dim=0) for key in ecog_key_list}, n_iter) + writer.add_scalars('data/metric_group', {key: tracker.tracks[key].mean(dim=0) for key in metric_key_list}, n_iter) + for key in ecog_key_list: + writer.add_scalar('data/'+key, tracker.tracks[key].mean(dim=0), n_iter) + for key in metric_key_list: + writer.add_scalar('data/'+key, tracker.tracks[key].mean(dim=0), n_iter) + #pass #log in tensorboard later!! + #tracker Lae_a: 0.0536877, Lae_a_l2: 0.0538647, Lae_db: 0.1655398, Lae_db_l2: 0.1655714, Lloudness: 1.0384552, Lae_denoise: 0.0485827, Lamp: 0.0000148, Lae: 2.1787138, Lexp: -1.9956266, Lf0: 0.0000000, Ldiff: 0.0568467 + (Lrec).backward() + optimizer.step() + else: + optimizer.zero_grad() + Lrec,tracker = model(x, x_denoise = x_orig_denoise,x_mel = x_mel,ecog=None, mask_prior=None, on_stage = on_stage,on_stage_wider = on_stage_wider, ae = True, tracker = tracker, encoder_guide=cfg.MODEL.W_SUP,pitch_aug=False,duomask=duomask,mni=mni_coordinate,debug = False,x_amp=x_orig_amp,hamonic_bias = False)#epoch<2) + #print ('tracker',tracker,tracker.tracks) + #for key in ['Lae_a','Lae_a_l2','Lae_db','Lae_db_l2','Lloudness','Lae_denoise','Lamp','Lae','Lexp','Lf0','Ldiff']: + #print ('tracker, ',key, tracker.tracks[key].mean(dim=0)) + ecog_key_list = ['Lae_a','Lae_a_l2','Lae_db','Lae_db_l2','Lloudness','Lae_denoise','Lamp','Lae','Lexp','Lf0','Ldiff'] + if n_iter %10==0: + #print ('write tensorboard') + writer.add_scalars('data/loss_group', {key: tracker.tracks[key].mean(dim=0) for key in ecog_key_list}, n_iter) + for key in ecog_key_list: + writer.add_scalar('data/'+key, tracker.tracks[key].mean(dim=0), n_iter) + (Lrec).backward() + optimizer.step() + + betta = 0.5 ** (cfg.TRAIN.BATCH_SIZE / (10 * 1000.0)) + model_s.lerp(model, betta,w_classifier = cfg.MODEL.W_CLASSIFIER) + + epoch_end_time = time.time() + per_epoch_ptime = epoch_end_time - epoch_start_time + #print ('test save sample') + #save_sample(sample_spec_test,ecog_test,mask_prior_test,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=sample_spec_denoise_test,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=epoch,label=sample_label_test,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker_test,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=duomask,x_amp=sample_spec_amp_test) + 
#save_sample(x,ecog,mask_prior,mni_coordinate,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=x_orig_denoise,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=epoch,label=labels,mode='train',path=cfg.OUTPUT_DIR,tracker = tracker,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=duomask,x_amp=x_orig_amp) + #print ('finish') + + if local_rank == 0: + print(2**(torch.tanh(model.encoder.formant_bandwitdh_slop))) + checkpointer.save("model_epoch%d" % epoch) + model.eval() + Lrec = model(sample_spec_test, x_denoise = sample_spec_denoise_test,x_mel = sample_spec_mel_test,ecog=ecog_test if cfg.MODEL.ECOG else None, mask_prior=mask_prior_test if cfg.MODEL.ECOG else None, on_stage = on_stage_test,on_stage_wider = on_stage_wider_test, ae = not cfg.MODEL.ECOG, tracker = tracker_test, encoder_guide=cfg.MODEL.W_SUP,pitch_aug=False,duomask=duomask,mni=mni_coordinate_test,debug = False,x_amp=sample_spec_amp_test,hamonic_bias = False) + if epoch%1==0: + #first mode is test + reconaudio = save_sample(sample_spec_test,ecog_test,mask_prior_test,mni_coordinate_test,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=sample_spec_denoise_test,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=epoch,label=sample_label_test,mode='test',path=cfg.OUTPUT_DIR,tracker = tracker_test,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=duomask,x_amp=sample_spec_amp_test) + save_sample(x,ecog,mask_prior,mni_coordinate,encoder,decoder,ecog_encoder if cfg.MODEL.ECOG else None,x_denoise=x_orig_denoise,x_mel = sample_spec_mel_test,decoder_mel=decoder_mel if cfg.MODEL.DO_MEL_GUIDE else None,epoch=epoch,label=labels,mode='train',path=cfg.OUTPUT_DIR,tracker = tracker,linear=cfg.MODEL.WAVE_BASED,n_fft=cfg.MODEL.N_FFT,duomask=duomask,x_amp=x_orig_amp) + writer.add_audio('reconaudio', reconaudio, n_iter, sample_rate=16000) + writer.export_scalars_to_json(cfg.OUTPUT_DIR+"/all_scalars.json") + writer.close() + +if __name__ == "__main__": + gpu_count = torch.cuda.device_count() + cfg = get_cfg_defaults() + + #if args.modeldir !='': + # cfg.OUTPUT_DIR = args.modeldir + parser = argparse.ArgumentParser(description='ecog formant model') + parser.add_argument( + "-c", "--config-file", + default='configs/ecog_style2.yaml', + metavar="FILE", + help="path to config file", + type=str, + ) + parser.add_argument( + "opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + parser.add_argument('--ONEDCONFIRST', type=int,default=1, + help='use one d conv before lstm') + parser.add_argument('--RNN_TYPE', type=str,default='LSTM', + help='LSTM or GRU') + parser.add_argument('--RNN_LAYERS', type=int,default=4, + help='lstm layers') + parser.add_argument('--RNN_COMPUTE_DB_LOUDNESS', type=int,default=1, + help='RNN_COMPUTE_DB_LOUDNESS') + parser.add_argument('--BIDIRECTION', type=int,default=1, + help='BIDIRECTION') + parser.add_argument('--MAPPING_FROM_ECOG', type=str,default='ECoGMappingBottlenecklstm_pure', + help='MAPPING_FROM_ECOG') + parser.add_argument('--OUTPUT_DIR', type=str,default='output/ecog_11021800_lstmpure', + help='OUTPUT_DIR') + args = parser.parse_args() + + cfg.MODEL.ONEDCONFIRST = True if args.ONEDCONFIRST==1 else False + cfg.MODEL.RNN_TYPE = args.RNN_TYPE + cfg.MODEL.RNN_LAYERS = args.RNN_LAYERS + cfg.MODEL.RNN_COMPUTE_DB_LOUDNESS = True if args.RNN_COMPUTE_DB_LOUDNESS==1 else False + cfg.MODEL.BIDIRECTION = True if args.BIDIRECTION==1 else 
False + cfg.OUTPUT_DIR = args.OUTPUT_DIR +'_{}_{}_bi_{}_1dconv_{}'.format(args.RNN_TYPE,args.RNN_LAYERS,str(cfg.MODEL.BIDIRECTION),str(cfg.MODEL.ONEDCONFIRST)) + cfg.MODEL.MAPPING_FROM_ECOG = args.MAPPING_FROM_ECOG + + run(train, cfg, description='StyleGAN', default_config='configs/ecog_style2.yaml', + world_size=gpu_count,args=args) diff --git a/train_param.json b/train_param.json index 43ccc0b7..6f7ac467 100644 --- a/train_param.json +++ b/train_param.json @@ -4,7 +4,7 @@ "SelectRegion":["AUDITORY","BROCA","MOTO","SENSORY"], "BlockRegion":[], "UseGridOnly":true, - "ReshapeAsGrid":true, + "ReshapeAsGrid":false, "SeqLen":128, "DOWN_TF_FS": 125, "DOWN_ECOG_FS": 125, diff --git a/transformer_models/util/__init__.py b/transformer_models/util/__init__.py new file mode 100644 index 00000000..168f9979 --- /dev/null +++ b/transformer_models/util/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/transformer_models/util/box_ops.py b/transformer_models/util/box_ops.py new file mode 100644 index 00000000..9c088e5b --- /dev/null +++ b/transformer_models/util/box_ops.py @@ -0,0 +1,88 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Utilities for bounding box manipulation and GIoU. +""" +import torch +from torchvision.ops.boxes import box_area + + +def box_cxcywh_to_xyxy(x): + x_c, y_c, w, h = x.unbind(-1) + b = [(x_c - 0.5 * w), (y_c - 0.5 * h), + (x_c + 0.5 * w), (y_c + 0.5 * h)] + return torch.stack(b, dim=-1) + + +def box_xyxy_to_cxcywh(x): + x0, y0, x1, y1 = x.unbind(-1) + b = [(x0 + x1) / 2, (y0 + y1) / 2, + (x1 - x0), (y1 - y0)] + return torch.stack(b, dim=-1) + + +# modified from torchvision to also return the union +def box_iou(boxes1, boxes2): + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + wh = (rb - lt).clamp(min=0) # [N,M,2] + inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] + + union = area1[:, None] + area2 - inter + + iou = inter / union + return iou, union + + +def generalized_box_iou(boxes1, boxes2): + """ + Generalized IoU from https://giou.stanford.edu/ + + The boxes should be in [x0, y0, x1, y1] format + + Returns a [N, M] pairwise matrix, where N = len(boxes1) + and M = len(boxes2) + """ + # degenerate boxes gives inf / nan results + # so do an early check + assert (boxes1[:, 2:] >= boxes1[:, :2]).all() + assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + iou, union = box_iou(boxes1, boxes2) + + lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + + wh = (rb - lt).clamp(min=0) # [N,M,2] + area = wh[:, :, 0] * wh[:, :, 1] + + return iou - (area - union) / area + + +def masks_to_boxes(masks): + """Compute the bounding boxes around the provided masks + + The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. 
+ + Returns a [N, 4] tensor, with the boxes in xyxy format + """ + if masks.numel() == 0: + return torch.zeros((0, 4), device=masks.device) + + h, w = masks.shape[-2:] + + y = torch.arange(0, h, dtype=torch.float) + x = torch.arange(0, w, dtype=torch.float) + y, x = torch.meshgrid(y, x) + + x_mask = (masks * x.unsqueeze(0)) + x_max = x_mask.flatten(1).max(-1)[0] + x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] + + y_mask = (masks * y.unsqueeze(0)) + y_max = y_mask.flatten(1).max(-1)[0] + y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] + + return torch.stack([x_min, y_min, x_max, y_max], 1) diff --git a/transformer_models/util/misc.py b/transformer_models/util/misc.py new file mode 100644 index 00000000..45d055d9 --- /dev/null +++ b/transformer_models/util/misc.py @@ -0,0 +1,416 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Misc functions, including distributed helpers. + +Mostly copy-paste from torchvision references. +""" +import os +import subprocess +import time +from collections import defaultdict, deque +import datetime +import pickle +from typing import Optional, List + +import torch +import torch.distributed as dist +from torch import Tensor + +# needed due to empty tensor bug in pytorch and torchvision 0.5 +import torchvision +if float(torchvision.__version__[:3]) < 0.7: + from torchvision.ops import _new_empty_tensor + from torchvision.ops.misc import _output_size + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! 
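+        Only ``count`` and ``total`` (and therefore ``global_avg``) are
+        all-reduced across ranks; the windowed statistics ``median``,
+        ``avg``, ``max`` and ``value`` stay local to each process.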
+ """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +def all_gather(data): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + world_size = get_world_size() + if world_size == 1: + return [data] + + # serialized to a Tensor + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to("cuda") + + # obtain Tensor size of each rank + local_size = torch.tensor([tensor.numel()], device="cuda") + size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] + dist.all_gather(size_list, local_size) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + + # receiving Tensor from all ranks + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + tensor_list = [] + for _ in size_list: + tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) + if local_size != max_size: + padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") + tensor = torch.cat((tensor, padding), dim=0) + dist.all_gather(tensor_list, tensor) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average (bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that all processes + have the averaged results. Returns a dict with the same fields as + input_dict, after reduction. 
+ """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.all_reduce(values) + if average: + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + if torch.cuda.is_available(): + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}', + 'max mem: {memory:.0f}' + ]) + else: + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ]) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +def get_sha(): + cwd = os.path.dirname(os.path.abspath(__file__)) + + def _run(command): + return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() + sha = 'N/A' + diff = "clean" + branch = 'N/A' + try: + sha = _run(['git', 'rev-parse', 'HEAD']) + subprocess.check_output(['git', 'diff'], cwd=cwd) + diff = _run(['git', 'diff-index', 'HEAD']) + diff = "has uncommited changes" if diff else "clean" + branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) + except Exception: + pass + message = f"sha: {sha}, status: {diff}, branch: {branch}" + return message + + +def collate_fn(batch): + batch = 
list(zip(*batch)) + batch[0] = NestedTensor.from_tensor_list(batch[0]) + return tuple(batch) + + +class NestedTensor(object): + def __init__(self, tensors, mask): + self.tensors = tensors + self.mask = mask + + def to(self, *args, **kwargs): + cast_tensor = self.tensors.to(*args, **kwargs) + cast_mask = self.mask.to(*args, **kwargs) if self.mask is not None else None + return type(self)(cast_tensor, cast_mask) + + def decompose(self): + return self.tensors, self.mask + + @classmethod + def from_tensor_list(cls, tensor_list): + # TODO make this more general + if tensor_list[0].ndim == 3: + # TODO make it support different-sized images + max_size = tuple(max(s) for s in zip(*[img.shape for img in tensor_list])) + # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) + batch_shape = (len(tensor_list),) + max_size + b, c, h, w = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + mask = torch.ones((b, h, w), dtype=torch.bool, device=device) + for img, pad_img, m in zip(tensor_list, tensor, mask): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + m[: img.shape[1], :img.shape[2]] = False + else: + raise ValueError('not supported') + return cls(tensor, mask) + + def __repr__(self): + return repr(self.tensors) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) + + +@torch.no_grad() +def accuracy(output, target, topk=(1,)): + """Computes the precision@k for the specified values of k""" + if target.numel() == 0: + return [torch.zeros([], device=output.device)] + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0) + res.append(correct_k.mul_(100.0 / batch_size)) + return 
res + + +def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): + # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor + """ + Equivalent to nn.functional.interpolate, but with support for empty batch sizes. + This will eventually be supported natively by PyTorch, and this + class can go away. + """ + if float(torchvision.__version__[:3]) < 0.7: + if input.numel() > 0: + return torch.nn.functional.interpolate( + input, size, scale_factor, mode, align_corners + ) + + output_shape = _output_size(2, input, size, scale_factor) + output_shape = list(input.shape[:-2]) + list(output_shape) + return _new_empty_tensor(input, output_shape) + else: + return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) diff --git a/transformer_models/util/plot_utils.py b/transformer_models/util/plot_utils.py new file mode 100644 index 00000000..4a03f43f --- /dev/null +++ b/transformer_models/util/plot_utils.py @@ -0,0 +1,65 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Plotting utilities to visualize training logs. +""" +import torch +import pandas as pd +from pathlib import Path +import seaborn as sns +import matplotlib.pyplot as plt + + +def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0): + dfs = [pd.read_json(Path(p) / 'log.txt', lines=True) for p in logs] + + fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5)) + + for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))): + for j, field in enumerate(fields): + if field == 'mAP': + coco_eval = pd.DataFrame(pd.np.stack(df.test_coco_eval.dropna().values)[:, 1]).ewm(com=ewm_col).mean() + axs[j].plot(coco_eval, c=color) + else: + df.interpolate().ewm(com=ewm_col).mean().plot( + y=[f'train_{field}', f'test_{field}'], + ax=axs[j], + color=[color] * 2, + style=['-', '--'] + ) + for ax, field in zip(axs, fields): + ax.legend([Path(p).name for p in logs]) + ax.set_title(field) + + +def plot_precision_recall(files, naming_scheme='iter'): + if naming_scheme == 'exp_id': + # name becomes exp_id + names = [f.parts[-3] for f in files] + elif naming_scheme == 'iter': + names = [f.stem for f in files] + else: + raise ValueError(f'not supported {naming_scheme}') + fig, axs = plt.subplots(ncols=2, figsize=(16, 5)) + for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names): + data = torch.load(f) + # precision is n_iou, n_points, n_cat, n_area, max_det + precision = data['precision'] + recall = data['params'].recThrs + scores = data['scores'] + # take precision for all classes, all areas and 100 detections + precision = precision[0, :, :, 0, -1].mean(1) + scores = scores[0, :, :, 0, -1].mean(1) + prec = precision.mean() + rec = data['recall'][0, :, 0, -1].mean() + print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' + + f'score={scores.mean():0.3f}, ' + + f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}' + ) + axs[0].plot(recall, precision, c=color) + axs[1].plot(recall, scores, c=color) + + axs[0].set_title('Precision / Recall') + axs[0].legend(names) + axs[1].set_title('Scores / Recall') + axs[1].legend(names) + return fig, axs
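A hypothetical way to drive the two plotting helpers above; the run directories and file layout are illustrative assumptions, not paths from this repository:

import matplotlib.pyplot as plt
from pathlib import Path
from transformer_models.util.plot_utils import plot_logs, plot_precision_recall

# Overlay smoothed training curves from two runs; plot_logs expects each
# directory to contain a log.txt with one JSON record per line.
plot_logs([Path('output/run_a'), Path('output/run_b')],
          fields=('class_error', 'loss_bbox_unscaled', 'mAP'))
plt.savefig('training_curves.png')

# Precision/recall curves from saved COCO evaluation dumps (one .pth file per
# checkpointed iteration); returns the figure and axes for further styling.
eval_files = sorted(Path('output/run_a/eval').glob('*.pth'))
fig, axs = plot_precision_recall(eval_files, naming_scheme='iter')
fig.savefig('precision_recall.png')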