From 991e3d9a14d6c40c72f2bf2039c1512e020f16fc Mon Sep 17 00:00:00 2001 From: Shencong-Ni Date: Fri, 20 Oct 2023 12:06:05 +0800 Subject: [PATCH] update --- analysis/gain_fluctuation.py | 497 ------------------------- analysis/sigCoherence_fft.py | 65 ---- analysis/spk_cohe.py | 488 ------------------------ analysis/spk_continuousSig_coupling.py | 153 -------- analysis/spk_phase_sync.py | 132 ------- 5 files changed, 1335 deletions(-) delete mode 100644 analysis/gain_fluctuation.py delete mode 100644 analysis/sigCoherence_fft.py delete mode 100644 analysis/spk_cohe.py delete mode 100644 analysis/spk_continuousSig_coupling.py delete mode 100644 analysis/spk_phase_sync.py diff --git a/analysis/gain_fluctuation.py b/analysis/gain_fluctuation.py deleted file mode 100644 index 34d8777..0000000 --- a/analysis/gain_fluctuation.py +++ /dev/null @@ -1,497 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Thu May 13 15:03:10 2021 - -@author: shni2598 -""" - - -import mydata -import numpy as np -from scipy import optimize - - -#%% - -find_fluc_addi = lambda fluc, ac, train_activity: np.sum((train_activity - fluc*ac)**2) - -find_fluc_addi_exact = lambda ac, train_activity: (ac*train_activity).sum()/(ac*ac).sum() - -#%% -# cp = np.array([2,1,3,4,5]) -# train = np.array([1,2,5,6,4]) -# fluc_addi_test = optimize.fmin(find_fluc_addi, 0, (cp, train),disp=False) - -# find_fluc_addi_exact(cp, train) - -#%% -find_fluc_multip = lambda fluc,mc,train_activity: np.sum((train_activity - (fluc*mc))**2) # fluc[0]=m fluc[1]=a - -find_fluc_multip_exact = lambda mc, train_activity: (mc*train_activity).sum()/(mc*mc).sum() - -#%% -solve_ambiguity = lambda k, mean_actvity, multip_coup, addi_coup: np.sum((multip_coup - k*addi_coup - mean_actvity)**2) - -solve_ambiguity_exact = lambda mean_actvity, mc, ac: (ac*(mc-mean_actvity)).sum()/(ac*ac).sum() -#%% -# mc = np.array([2,3,2,4,5]) -# ac = np.array([2,1,3,4,5]) -# train = np.array([1,2,5,6,4])*2 - -# k = optimize.fmin(solve_ambiguity, 0, (train, mc, ac),disp=False) -# solve_ambiguity_exact(train, mc, ac) -#%% -find_fluc_affine = lambda fluc,mc,ac,train_activity: np.sum((train_activity - (fluc[0]*mc+fluc[1]*ac))**2) # fluc[0]=m fluc[1]=a - -def find_fluc_affine_exact(mc, ac, train_activity): - coup = np.hstack((mc.reshape(-1,1),ac.reshape(-1,1))) - return np.dot(np.dot(np.linalg.inv(np.dot(coup.T,coup)), coup.T), train_activity.reshape(-1,1)).reshape(-1) -#%% -# mc = np.array([2,3,2,4,5]) -# ac = np.array([2,1,3,4,5]) -# train = np.array([1,2,5,6,4])*2 - -# (fluc_m, fluc_a) = find_fluc_affine_exact(mc, ac, train) -# #%% -# (fluc_m, fluc_a) = optimize.fmin(find_fluc_affine, [1,0], (mc, ac, train),disp=False) - -#%% -def quality_index(MUA_multi_predict_test, MUA_multi_test, MUA_multi_predict_indep, MUA_multi_indep): - e_test = np.sum(MUA_multi_predict_test + (MUA_multi_predict_test - MUA_multi_test)**2, 1) - - e_indep = np.sum(MUA_multi_predict_indep + (MUA_multi_predict_indep - MUA_multi_indep)**2, 1) - - return 1 - e_test/e_indep - -#%% -def _multiplicative_fluc(spk_data, stim_indptr): - - approx_spk_data = np.zeros(spk_data.shape) - - for st in range(len(stim_indptr)-1): - #spk_data[:,st] = spk_data[:, stim_indptr[st]:stim_indptr[st+1]].mean(1) - - u, s, vh = np.linalg.svd(spk_data[:, stim_indptr[st]:stim_indptr[st+1]], full_matrices=False) - - approx_spk_data[:, stim_indptr[st]:stim_indptr[st+1]] = s[0]*np.dot(u[:,0].reshape(-1,1), vh[0,:].reshape(1,-1)) - - return approx_spk_data - - -def _additive_fluc(residual_data): - - u, s, vh = 
np.linalg.svd(residual_data, full_matrices=False) - - return s[0]*np.dot(u[:,0].reshape(-1,1), vh[0,:].reshape(1,-1)) - - -def multiMUA_crosstrial(spk_mat, dura, record_neugroup, dt=0.1, returnHz=True): - ''' - dura: - analysis duration; ms - returnHz : - get the firing rate instead of spike counts. The default is True. - ''' - dt_ = int(round(1/dt)) - spk_mat = spk_mat.tocsr() - - MUA = np.zeros([len(record_neugroup), len(dura)]) - for neu_i, neu in enumerate(record_neugroup): - for stim_i, stim_dura in enumerate(dura): - if returnHz: - MUA[neu_i, stim_i] = spk_mat[neu, stim_dura[0]*dt_:stim_dura[1]*dt_].sum()/((stim_dura[1]-stim_dura[0])/1000)/(neu.shape[0]) - else: - MUA[neu_i, stim_i] = spk_mat[neu, stim_dura[0]*dt_:stim_dura[1]*dt_].sum() - return MUA - -#%% -def get_addititive_fluc(spk_mat, dura, record_neugroup, stim_indptr): - - MUA_multi = multiMUA_crosstrial(spk_mat, dura, record_neugroup) - mean_MUA = np.zeros([MUA_multi.shape[0], stim_indptr.shape[0]-1]) - for st in range(len(stim_indptr)-1): - mean_MUA[:,st] = MUA_multi[:, stim_indptr[st]:stim_indptr[st+1]].mean(1) - - mean_MUA_mat = np.zeros(MUA_multi.shape) - for st in range(len(stim_indptr)-1): - mean_MUA_mat[:, stim_indptr[st]:stim_indptr[st+1]] = np.tile(mean_MUA[:,st].reshape(-1,1), stim_indptr[st+1]-stim_indptr[st]) - - u, s, vh = np.linalg.svd((MUA_multi - mean_MUA_mat), full_matrices=False) - approx = s[0]*np.dot(u[:,0].reshape(-1,1), vh[0,:].reshape(1,-1)) - - #fluc_addi = approx[0,:]/approx[0,:].mean() - fluc_addi = approx[0,:]/approx[0,:].std() - #coup_addi = approx.mean(1) - coup_addi = approx.std(1) - - R = mydata.mydata() - R.fluc_addi = fluc_addi - R.coup_addi = coup_addi - R.mean_MUA = mean_MUA - R.mean_MUA_mat = mean_MUA_mat - R.MUA_multi = MUA_multi - R.stim_indptr = stim_indptr - #R.u = u - #R.s = s - #R.vh = vh - - return R -#%% -def get_multiplicative_fluc(spk_mat, dura, record_neugroup, stim_indptr): - - MUA_multi = multiMUA_crosstrial(spk_mat, dura, record_neugroup) - - approx_MUA_multip = _multiplicative_fluc(MUA_multi, stim_indptr) - - fluc_multip = np.zeros(approx_MUA_multip.shape[1]) - coup_multip = np.zeros([approx_MUA_multip.shape[0], len(stim_indptr)-1]) - for st in range(len(stim_indptr)-1): - fluc_multip[stim_indptr[st]:stim_indptr[st+1]] = approx_MUA_multip[0,stim_indptr[st]:stim_indptr[st+1]]/approx_MUA_multip[0,stim_indptr[st]:stim_indptr[st+1]].mean() - coup_multip[:,st] = approx_MUA_multip[:,stim_indptr[st]:stim_indptr[st+1]].mean(1) - - R = mydata.mydata() - R.fluc_multip = fluc_multip - R.coup_multip = coup_multip - R.approx_MUA_multip = approx_MUA_multip - R.MUA_multi = MUA_multi - R.stim_indptr = stim_indptr - - return R - -#%% -def get_affine_fluc(spk_mat, dura, record_neugroup, stim_indptr, trialforSolveAmbiguity=0): - - MUA_multi = multiMUA_crosstrial(spk_mat, dura, record_neugroup) - - - approx_MUA_multip = _multiplicative_fluc(MUA_multi, stim_indptr) - approx_MUA_addi = _additive_fluc(MUA_multi - approx_MUA_multip) - approx_MUA_multip = _multiplicative_fluc(MUA_multi - approx_MUA_addi, stim_indptr) - - error_pre = ((MUA_multi - (approx_MUA_multip + approx_MUA_addi))**2).sum()/(MUA_multi.shape[0]*MUA_multi.shape[1]) - do_iteration = True - #iter_n = 0 - while do_iteration: - #print('iter_n:',iter_n); iter_n += 1 - approx_MUA_addi = _additive_fluc(MUA_multi - approx_MUA_multip) - approx_MUA_multip = _multiplicative_fluc(MUA_multi - approx_MUA_addi, stim_indptr) - error = ((MUA_multi - (approx_MUA_multip + approx_MUA_addi))**2).sum()/(MUA_multi.shape[0]*MUA_multi.shape[1]) - 
#print(error_pre - error) - do_iteration = error_pre - error >= 1e-10 - error_pre = error - - #addi_coup_train = approx_MUA_addi.mean(1) - coup_addi = approx_MUA_addi.std(1) - fluc_addi = approx_MUA_addi[0,:]/coup_addi[0] - #print((approx_MUA_addi[0,:]/coup_addi[0])[0:2], (approx_MUA_addi[1,:]/coup_addi[1])[0:2]) - - - fluc_multip = np.zeros(approx_MUA_multip.shape[1]) - coup_multip = np.zeros([approx_MUA_multip.shape[0], len(stim_indptr)-1]) - for st in range(len(stim_indptr)-1): - fluc_multip[stim_indptr[st]:stim_indptr[st+1]] = approx_MUA_multip[0,stim_indptr[st]:stim_indptr[st+1]]/approx_MUA_multip[0,stim_indptr[st]:stim_indptr[st+1]].mean() - coup_multip[:,st] = approx_MUA_multip[:,stim_indptr[st]:stim_indptr[st+1]].mean(1) - - - - multi_coup_1 = coup_multip[:, trialforSolveAmbiguity] # approx_MUA_multip[:,stim_indptr[0]:stim_indptr[1]].mean(1) - MUA_mean_1 = MUA_multi[:,stim_indptr[trialforSolveAmbiguity]:stim_indptr[trialforSolveAmbiguity+1]].mean(1) - - #k = optimize.fmin(solve_ambiguity, 0, (MUA_mean_1, multi_coup_1, coup_addi),disp=False) - k = solve_ambiguity_exact(MUA_mean_1, multi_coup_1, coup_addi) - print('ambiguity:',k) - - #coup_multip -= k[0]*coup_addi.reshape(-1,1) - #fluc_addi += k[0]*fluc_multip - coup_multip -= k*coup_addi.reshape(-1,1) - fluc_addi += k*fluc_multip - - - # multi_coup_train = approx_MUA_multip[:,stim_indptr[stim_i]:stim_indptr[stim_i+1]].mean(1) - - # multi_coup_train = multi_coup_train - k[0]*addi_coup_train - R = mydata.mydata() - R.fluc_multip = fluc_multip - R.coup_multip = coup_multip - R.fluc_addi = fluc_addi - R.coup_addi = coup_addi - - R.approx_MUA_multip = approx_MUA_multip - R.approx_MUA_addi = approx_MUA_addi - R.MUA_multi = MUA_multi - R.stim_indptr = stim_indptr - - return R - -#%% - -def xvalid_indep(spk_mat, dura, record_neugroup, stim_indptr): - - MUA_multi = multiMUA_crosstrial(spk_mat, dura, record_neugroup) - - #select_trial = np.ones(MUA_multi.shape[1],dtype=bool) - - MUA_multi_train = MUA_multi[:,1:].copy() - - MUA_multi_predict = np.zeros(MUA_multi.shape) - - tst_t_sum = 0 - for tst_t in range(MUA_multi.shape[1]): - #print('test_trial: ',tst_t) - if tst_t > 0: - MUA_multi_train[:,tst_t-1] = MUA_multi[:,tst_t-1] - #if tst_t < 10: - #print(MUA_multi_train[:,:10]) - # if tst_t == 0: - # select_trial[tst_t] = False - # else: - # select_trial[tst_t] = False - # select_trial[tst_t-1] = True - - for ii in range(1,len(stim_indptr)): - if tst_t < stim_indptr[ii]: - stim_indptr_train = stim_indptr.copy() - stim_indptr_train[ii:] -= 1 - - stim_i = ii - 1 - tst_t_sum += 1 - #print('stim_i:',stim_i, 'tst_t_sum:',tst_t_sum) - #print(stim_indptr_train) - if tst_t_sum >= 50: - tst_t_sum -= 50 #print() - - break - - - MUA_multi_predict[:,tst_t] = MUA_multi_train[:, stim_indptr_train[stim_i]:stim_indptr_train[stim_i+1]].mean(1) - - - return MUA_multi, MUA_multi_predict - - - -#%% - -def xvalid_addititive_fluc(spk_mat, dura, record_neugroup, stim_indptr): - - MUA_multi = multiMUA_crosstrial(spk_mat, dura, record_neugroup) - - #select_trial = np.ones(MUA_multi.shape[1],dtype=bool) - - MUA_multi_train = MUA_multi[:,1:].copy() - - MUA_multi_predict = np.zeros(MUA_multi.shape) - - tst_t_sum = 0 - for tst_t in range(MUA_multi.shape[1]): - #print('test_trial: ',tst_t) - if tst_t > 0: - MUA_multi_train[:,tst_t-1] = MUA_multi[:,tst_t-1] - #if tst_t < 10: - #print(MUA_multi_train[:,:10]) - # if tst_t == 0: - # select_trial[tst_t] = False - # else: - # select_trial[tst_t] = False - # select_trial[tst_t-1] = True - - for ii in range(1,len(stim_indptr)): - if 
tst_t < stim_indptr[ii]: - stim_indptr_train = stim_indptr.copy() - stim_indptr_train[ii:] -= 1 - - stim_i = ii - 1 - tst_t_sum += 1 - #print('stim_i:',stim_i, 'tst_t_sum:',tst_t_sum) - #print(stim_indptr_train) - if tst_t_sum >= 50: - tst_t_sum -= 50 #print() - - break - - - #MUA_multi_train = MUA_multi[:, select_trial] - - mean_MUA_train = np.zeros([MUA_multi_train.shape[0], stim_indptr_train.shape[0]-1]) - for st in range(len(stim_indptr_train)-1): - mean_MUA_train[:,st] = MUA_multi_train[:, stim_indptr_train[st]:stim_indptr_train[st+1]].mean(1) - - mean_MUA_mat_train = np.zeros(MUA_multi_train.shape) - for st in range(len(stim_indptr_train)-1): - mean_MUA_mat_train[:, stim_indptr_train[st]:stim_indptr_train[st+1]] = np.tile(mean_MUA_train[:,st].reshape(-1,1), stim_indptr_train[st+1]-stim_indptr_train[st]) - - u, s, vh = np.linalg.svd((MUA_multi_train - mean_MUA_mat_train), full_matrices=False) - approx = s[0]*np.dot(u[:,0].reshape(-1,1), vh[0,:].reshape(1,-1)) - - #fluc_addi = approx[0,:]/approx[0,:].mean() - #fluc_addi = approx[0,:]/approx[0,:].std() - #print(fluc_addi) - #print(approx[0,:]) - #print(approx[0,:].mean()) - #coup_addi = approx.mean(1) - coup_addi = approx.std(1) - - for unit in range(len(record_neugroup)): - #print('test_unit: ',unit) - coup_addi_train = np.delete(coup_addi, unit) - resid_MUA_unit_train = np.delete(MUA_multi[:,tst_t] - mean_MUA_train[:,stim_i], unit) - - #fluc_addi_test = optimize.fmin(find_fluc_addi, 0, (coup_addi_train, resid_MUA_unit_train),disp=False) - fluc_addi_test = find_fluc_addi_exact(coup_addi_train, resid_MUA_unit_train) - - #print(resid_MUA_unit_train, coup_addi_train) - #print(fluc_addi_test) - MUA_multi_predict[unit, tst_t] = mean_MUA_train[:,stim_i][unit] + fluc_addi_test*coup_addi[unit] - - - return MUA_multi, MUA_multi_predict - - - -#%% -def xvalid_multiplicative_fluc(spk_mat, dura, record_neugroup, stim_indptr): - - MUA_multi = multiMUA_crosstrial(spk_mat, dura, record_neugroup) - - MUA_multi_train = MUA_multi[:,1:].copy() - - MUA_multi_predict = np.zeros(MUA_multi.shape) - - tst_t_sum = 0 - for tst_t in range(MUA_multi.shape[1]): - #print('test_trial: ',tst_t) - if tst_t > 0: - MUA_multi_train[:,tst_t-1] = MUA_multi[:,tst_t-1] - #if tst_t < 10: - #print(MUA_multi_train[:,:10]) - # if tst_t == 0: - # select_trial[tst_t] = False - # else: - # select_trial[tst_t] = False - # select_trial[tst_t-1] = True - - for ii in range(1,len(stim_indptr)): - if tst_t < stim_indptr[ii]: - stim_indptr_train = stim_indptr.copy() - stim_indptr_train[ii:] -= 1 - - stim_i = ii - 1 - tst_t_sum += 1 - #print('stim_i:',stim_i, 'tst_t_sum:',tst_t_sum) - #print(stim_indptr_train) - if tst_t_sum >= 50: - tst_t_sum -= 50 #print() - - break - - approx_MUA_multip = _multiplicative_fluc(MUA_multi_train, stim_indptr_train) - - - multi_coup_train = approx_MUA_multip[:,stim_indptr_train[stim_i]:stim_indptr_train[stim_i+1]].mean(1) - - for unit in range(len(record_neugroup)): - multi_coup_train_unit = np.delete(multi_coup_train, unit) - MUA_unit_train = np.delete(MUA_multi[:,tst_t], unit) - - #fluc_m = optimize.fmin(find_fluc_multip, 1, (multi_coup_train_unit, MUA_unit_train),disp=False) - fluc_m = find_fluc_multip_exact(multi_coup_train_unit, MUA_unit_train) - #print(fluc_m.shape) - #MUA_multi_predict[unit, tst_t] = fluc_m[0]*multi_coup_train[unit] #+ fluc_a*addi_coup_train[unit] - MUA_multi_predict[unit, tst_t] = fluc_m*multi_coup_train[unit] #+ fluc_a*addi_coup_train[unit] - - return MUA_multi, MUA_multi_predict - - - -#%% -def xvalid_affine_fluc(spk_mat, dura, 
record_neugroup, stim_indptr, trialforSolveAmbiguity=0): - - MUA_multi = multiMUA_crosstrial(spk_mat, dura, record_neugroup) - - MUA_multi_train = MUA_multi[:,1:].copy() - - MUA_multi_predict = np.zeros(MUA_multi.shape) - - tst_t_sum = 0 - for tst_t in range(MUA_multi.shape[1]): - #print('test_trial: ',tst_t) - if tst_t > 0: - MUA_multi_train[:,tst_t-1] = MUA_multi[:,tst_t-1] - #if tst_t < 10: - #print(MUA_multi_train[:,:10]) - # if tst_t == 0: - # select_trial[tst_t] = False - # else: - # select_trial[tst_t] = False - # select_trial[tst_t-1] = True - - for ii in range(1,len(stim_indptr)): - if tst_t < stim_indptr[ii]: - stim_indptr_train = stim_indptr.copy() - stim_indptr_train[ii:] -= 1 - - stim_i = ii - 1 - tst_t_sum += 1 - #print('stim_i:',stim_i, 'tst_t_sum:',tst_t_sum) - #print(stim_indptr_train) - if tst_t_sum >= 50: - tst_t_sum -= 50 #print() - - break - - approx_MUA_multip = _multiplicative_fluc(MUA_multi_train, stim_indptr_train) - approx_MUA_addi = _additive_fluc(MUA_multi_train - approx_MUA_multip) - approx_MUA_multip = _multiplicative_fluc(MUA_multi_train - approx_MUA_addi, stim_indptr_train) - - error_pre = ((MUA_multi_train - (approx_MUA_multip + approx_MUA_addi))**2).sum()/(MUA_multi_train.shape[0]*MUA_multi_train.shape[1]) - do_iteration = True - #iter_n = 0 - while do_iteration: - #print('iter_n:',iter_n); iter_n += 1 - approx_MUA_addi = _additive_fluc(MUA_multi_train - approx_MUA_multip) - approx_MUA_multip = _multiplicative_fluc(MUA_multi_train - approx_MUA_addi, stim_indptr_train) - error = ((MUA_multi_train - (approx_MUA_multip + approx_MUA_addi))**2).sum()/(MUA_multi_train.shape[0]*MUA_multi_train.shape[1]) - #print(error_pre - error) - do_iteration = error_pre - error >= 1e-10 - error_pre = error - - #addi_coup_train = approx_MUA_addi.mean(1) - addi_coup_train = approx_MUA_addi.std(1) - - tsa = trialforSolveAmbiguity - multi_coup_1 = approx_MUA_multip[:,stim_indptr_train[tsa]:stim_indptr_train[tsa+1]].mean(1) - MUA_mean_1 = MUA_multi_train[:,stim_indptr_train[tsa]:stim_indptr_train[tsa+1]].mean(1) - - #k = optimize.fmin(solve_ambiguity, 0, (MUA_mean_1, multi_coup_1, addi_coup_train),disp=False) - k = solve_ambiguity_exact(MUA_mean_1, multi_coup_1, addi_coup_train) - - print('ambiguity:',k) - - multi_coup_train = approx_MUA_multip[:,stim_indptr_train[stim_i]:stim_indptr_train[stim_i+1]].mean(1) - - #multi_coup_train = multi_coup_train - k[0]*addi_coup_train - multi_coup_train = multi_coup_train - k*addi_coup_train - - - for unit in range(len(record_neugroup)): - #print('test_unit: ',unit) - addi_coup_train_unit = np.delete(addi_coup_train, unit) - multi_coup_train_unit = np.delete(multi_coup_train, unit) - MUA_unit_train = np.delete(MUA_multi[:,tst_t], unit) - - #find_fluc_affine = lambda m,a,mc,ac,mean_activity: np.sum((mean_activity - (m*mc+a*ac))**2) - if np.all(multi_coup_train_unit/addi_coup_train_unit == (multi_coup_train_unit/addi_coup_train_unit)[0]): - (fluc_m, fluc_a) = optimize.fmin(find_fluc_affine, [1,0], (multi_coup_train_unit, addi_coup_train_unit, MUA_unit_train),disp=False) - else: - (fluc_m, fluc_a) = find_fluc_affine_exact(multi_coup_train_unit, addi_coup_train_unit, MUA_unit_train) - - #print(fluc_m, fluc_a) - MUA_multi_predict[unit, tst_t] = fluc_m*multi_coup_train[unit] + fluc_a*addi_coup_train[unit] - #resid_MUA_unit_train = np.delete(MUA_multi[:,tst_t] - mean_MUA_train[:,stim_i], unit) - #fluc_addi_test = optimize.fmin(find_fluc, 0, (resid_MUA_unit_train, coup_addi_train),disp=True) - #print(resid_MUA_unit_train, coup_addi_train) - 
#print(fluc_addi_test) - #MUA_multi_predict[unit, tst_t] = mean_MUA_train[:,stim_i][unit] + fluc_addi_test*addi_coup_train[unit] - - return MUA_multi, MUA_multi_predict diff --git a/analysis/sigCoherence_fft.py b/analysis/sigCoherence_fft.py deleted file mode 100644 index 0a1031c..0000000 --- a/analysis/sigCoherence_fft.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Dec 10 21:48:48 2021 - -@author: shni2598 -""" - -import numpy as np -#from scipy.signal.windows import dpss -#%% - -def get_sigCoherence_fft(sig_1, sig_2, analy_dura, tap_win, disgard_t=0, win=200): - - # used multitaper methods to achieve optimal spectral concentration; - - - sig_len = analy_dura[0,1] - analy_dura[0,0] # ms - sample_t = np.arange(disgard_t, sig_len+1, win) - - # nw = 2.5 # time half bandwidth: nw/sig_len(in second); - # Kmax = round(2*nw - 1) - # dpss_w = dpss(win, NW=nw, Kmax=Kmax, sym=False, norm=None, return_ratios=False) # use Slepian functions - - if len(tap_win.shape) == 1: - tap_win = tap_win.reshape(1,-1) - - xspect = [] - spect1 = [] - spect2 = [] - for dura in analy_dura: - - for t in sample_t[:-1]: - - xspect_, spect1_, spect2_ = get_spect_xspect(sig_1[dura[0]+t:dura[0]+t+win], sig_2[dura[0]+t:dura[0]+t+win], tap_win) - xspect.append(xspect_) - spect1.append(spect1_) - spect2.append(spect2_) - - xspect_m = np.array(xspect).mean(0) - spect1_m = np.array(spect1).mean(0) - spect2_m = np.array(spect2).mean(0) - - - cohe = np.abs(xspect_m/np.sqrt(spect1_m*spect2_m)) - - return cohe - - -#%% - -def get_spect_xspect(sig1, sig2, tap_win): - - tap_sig1 = tap_win*sig1 - sig1_fft = np.fft.rfft(tap_sig1) - - - tap_sig2 = tap_win*sig2 - sig2_fft = np.fft.rfft(tap_sig2) - - xspect = (sig1_fft*np.conj(sig2_fft)).mean(0) - spect1 = (np.abs(sig1_fft)**2).mean(0) - spect2 = (np.abs(sig2_fft)**2).mean(0) - - return xspect, spect1, spect2 \ No newline at end of file diff --git a/analysis/spk_cohe.py b/analysis/spk_cohe.py deleted file mode 100644 index 3e9fbc9..0000000 --- a/analysis/spk_cohe.py +++ /dev/null @@ -1,488 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Mon Jun 28 15:55:34 2021 - -@author: shni2598 -""" - -#%% -import firing_rate_analysis as fra -import frequency_analysis as fqa -import mydata -import numpy as np - -#%% -def spk_mua_coherence(spk_mat, mua_mat, dura, discard_init = 200, hfwin_mua_seg=150, dt = 0.1): - """ - Methods from "Modulation of Oscillatory Neuronal Synchronization by Selective Visual Attention" - Use MUA instead of LFP - Parameters - ---------- - spk_mat : TYPE - spike sparse matrix. - mua_mat : TYPE - mua sparse matrix. - dura : TYPE - 2D array; start and end of analysis period - discard_init : TYPE, optional - ms; length of initial period of MUA to be discraded ; The default is 200 ms. - hfwin_mua_seg : TYPE, optional - ms; length of half mua segment The default is 150 ms. - dt : TYPE, optional - ms; sample interval. The default is 0.1 ms. - - Returns - ------- - None. 
- - """ - #discard_init = 200 - #hfwin_mua_seg = 150 - #dt = 0.1 - dt_1 = int(round(1/dt)) - - - - discard_init = int(round(discard_init/dt)) - hfwin_mua_seg = int(round(hfwin_mua_seg/dt)) - - total_spk = 0 - #i = 0 - - for dura_t in dura: - #print(total_spk) - #print(i) - #i+=1 - mua = fra.get_spkcount_sum_sparmat(mua_mat, start_time=dura_t[0], end_time=dura_t[1],\ - sample_interval = dt, window = 3, dt = dt) - - spk_t = spk_mat[:, dura_t[0]*dt_1:dura_t[1]*dt_1].nonzero()[1] - - spk_t = spk_t[(spk_t > (discard_init + hfwin_mua_seg)) & (spk_t < (mua.shape[0] - hfwin_mua_seg))] - #print(spk_t.shape) - for spk in spk_t: - total_spk += 1 - mua_i = mua[spk-hfwin_mua_seg:spk+hfwin_mua_seg] - coef = np.fft.rfft(mua_i - mua_i.mean())/(2*hfwin_mua_seg) - if total_spk == 1: - - stMua_pw = np.zeros(coef.shape) - staMua_coef = np.zeros(coef.shape, dtype=complex) - staMua = np.zeros(mua_i.shape) - #print(coef.shape) - stMua_pw += np.abs(coef)**2 - staMua_coef += coef - staMua += mua_i - - stMua_pw /= total_spk - staMua_coef /= total_spk - staMua /= total_spk - - cohe = np.abs(staMua_coef)**2/stMua_pw - freq = np.fft.rfftfreq(hfwin_mua_seg*2, d=dt/1000) - power, _ = fqa.myfft(staMua-staMua.mean(), Fs=dt_1*1000, power=True) - - R = mydata.mydata() - R.cohe = cohe - R.freq = freq - R.staRef_pw = power # Ref: MUA - R.staRef = staMua - - return R #cohe, power, total_mua_seg, freq - -#%% -# mua_loca = [0, 0] -# mua_range = 5 -# mua_neuron = cn.findnearbyneuron.findnearbyneuron(data.a1.param.e_lattice, mua_loca, mua_range, data.a1.param.width) - -# spk_loca = [0, 0] -# spk_range = 2 -# spk_neuron = cn.findnearbyneuron.findnearbyneuron(data.a1.param.e_lattice, spk_loca, spk_range, data.a1.param.width) -# #%% -# simu_time_tot = data.param.simutime -# data.a1.ge.get_sparse_spk_matrix([data.a1.param.Ne, simu_time_tot*10], 'csc') - -# mua_mat = data.a1.ge.spk_matrix[mua_neuron] -# spk_mat = data.a1.ge.spk_matrix[spk_neuron] - -# #%% -# for st in range(n_StimAmp): - -# dura_noatt = data.a1.param.stim1.stim_on[st*n_perStimAmp:(st+1)*n_perStimAmp].copy() -# dura_att = data.a1.param.stim1.stim_on[(st+n_StimAmp)*n_perStimAmp:(st+n_StimAmp+1)*n_perStimAmp].copy() - -# # #%% -# # cohe_noatt, mua_pow_noatt, mua_sta_noatt, freq = spk_mua_coherence(spk_mat, mua_mat, dura_noatt, discard_init = 200, hfwin_mua_seg=150, dt = 0.1) -# # cohe_att, mua_pow_att, mua_sta_att, freq = spk_mua_coherence(spk_mat, mua_mat, dura_att, discard_init = 200, hfwin_mua_seg=150, dt = 0.1) -# #%% -# R_noatt = spk_mua_coherence(spk_mat, mua_mat, dura_noatt, discard_init = 200, hfwin_mua_seg=150, dt = 0.1) -# R_att = spk_mua_coherence(spk_mat, mua_mat, dura_att, discard_init = 200, hfwin_mua_seg=150, dt = 0.1) -#%% -def spk_lfp_coherence(spk_mat, lfp, dura, discard_init = 200, hfwin_lfp_seg=150, dt = 0.1): - """ - Methods from "Modulation of Oscillatory Neuronal Synchronization by Selective Visual Attention" - Parameters - ---------- - spk_mat : TYPE - spike sparse matrix. - lfp : TYPE - LFP signal, 1D array. - dura : TYPE - 2D array; start and end of analysis period - discard_init : TYPE, optional - ms; length of initial period of LFP to be discraded ; The default is 200 ms. - hfwin_lfp_seg : TYPE, optional - ms; length of half LFP segment The default is 150 ms. - dt : TYPE, optional - ms; sample interval. The default is 0.1 ms. - - Returns - ------- - None. 
- - """ - #discard_init = 200 - #hfwin_lfp_seg = 150 - #dt = 0.1 - dt_1 = int(round(1/dt)) - - - - discard_init = int(round(discard_init/dt)) - hfwin_lfp_seg = int(round(hfwin_lfp_seg/dt)) - - total_spk = 0 - #i = 0 - - for dura_t in dura: - #print(total_spk) - #print(i) - #i+=1 - # mua = fra.get_spkcount_sum_sparmat(mua_mat, start_time=dura_t[0], end_time=dura_t[1],\ - # sample_interval = dt, window = 3, dt = dt) - lfp_tri = lfp[dura_t[0]*dt_1:dura_t[1]*dt_1] - - spk_t = spk_mat[:, dura_t[0]*dt_1:dura_t[1]*dt_1].nonzero()[1] - - spk_t = spk_t[(spk_t > (discard_init + hfwin_lfp_seg)) & (spk_t < (lfp_tri.shape[0] - hfwin_lfp_seg))] - #print(spk_t.shape) - for spk in spk_t: - total_spk += 1 - lfp_i = lfp_tri[spk-hfwin_lfp_seg:spk+hfwin_lfp_seg] - coef = np.fft.rfft(lfp_i - lfp_i.mean())/(2*hfwin_lfp_seg) - if total_spk == 1: - - stLfp_pw = np.zeros(coef.shape) - staLfp_coef = np.zeros(coef.shape, dtype=complex) - staLfp = np.zeros(lfp_i.shape) - #print(coef.shape) - stLfp_pw += np.abs(coef)**2 - staLfp_coef += coef - staLfp += lfp_i - - stLfp_pw /= total_spk - staLfp_coef /= total_spk - staLfp /= total_spk - - cohe = np.abs(staLfp_coef)**2/stLfp_pw - freq = np.fft.rfftfreq(hfwin_lfp_seg*2, d=dt/1000) - power, _ = fqa.myfft(staLfp-staLfp.mean(), Fs=dt_1*1000, power=True) - - R = mydata.mydata() - R.cohe = cohe - R.freq = freq - R.staRef_pw = power # Ref: LFP - R.staRef = staLfp - - return R #cohe, power, total_mua_seg, freq - -#%% -def spk_lfp_coupling(spk_mat, sig, dura, discard_init_end = 200, dt = 0.1,usemua=True, get_ppc=False): - """ - Methods from "Gamma rhythm communication between entorhinal cortex and dentate gyrus neuronal assemblies" - Phase lock value, instead of PPC, is used, for saving computing time - Parameters - ---------- - spk_mat : TYPE - spike sparse matrix. - sig : TYPE - signal, 1D array. - dura : TYPE - 2D array; start and end of analysis period - discard_init_end : TYPE, optional - ms; length of initial and end period of sig to be discraded ; The default is 200 ms. - dt : TYPE, optional - ms; spikes sample interval. The default is 0.1 ms. - - Returns - ------- - None. 
- - """ - #discard_init = 200 - #hfwin_lfp_seg = 150 - #dt = 0.1 - dt_1 = int(round(1/dt)) - - - - discard_init_end = int(round(discard_init_end/dt)) - # hfwin_lfp_seg = int(round(hfwin_lfp_seg/dt)) - - # total_spk = 0 - - freq_range = [0.5,200] - sampling_period = 0.001 - maxscale = int(np.ceil(np.log2((1/sampling_period)/freq_range[0])*10)) - minscale = int(np.floor(np.log2((1/sampling_period)/freq_range[1])*10)) - scale = 2**(np.arange(minscale, maxscale + 1, 3)/10) - wavelet_name = 'cmor1.5-1' - #i = 0 - ang = [[] for _ in range(len(scale))] - for dura_t in dura: - #print(total_spk) - #print(i) - #i+=1 - # mua = fra.get_spkcount_sum_sparmat(mua_mat, start_time=dura_t[0], end_time=dura_t[1],\ - # sample_interval = dt, window = 3, dt = dt) - lfp_tri = sig[dura_t[0]:dura_t[1]] - coef, freq = fqa.mycwt(lfp_tri, wavelet_name, sampling_period, scale = scale, method = 'fft', L1_norm = True) - - spk_t = spk_mat[:, dura_t[0]*dt_1:dura_t[1]*dt_1].nonzero()[1] - - spk_t = spk_t[(spk_t > (discard_init_end)) & (spk_t < ((dura_t[1]-dura_t[0])*dt_1 - discard_init_end))] - - if usemua: - spk_t = np.round(spk_t * dt - 0.5).astype(int) - else: - spk_t = np.round(spk_t * dt).astype(int) - - for coef_i, coef_f in enumerate(coef): - ang[coef_i].append(np.angle(coef_f[spk_t])) - #% - if get_ppc: - ppc = np.zeros(len(ang)) - plv = np.zeros(len(ang)) - - for coef_i in range(len(coef)): - - ang[coef_i] = np.hstack(ang[coef_i]) - if get_ppc: - ppc[coef_i] = get_ppc(ang[coef_i]) - plv[coef_i] = get_plv(ang[coef_i]) - - if get_ppc: - return ppc, plv, freq - else: - return plv, freq - - -def spk_triggeredlfp(spk_mat, lfp, dura, discard_init_end = 101, hfwin_lfp_seg=100, dt = 0.1): - - #discard_init = 200 - #hfwin_lfp_seg = 150 - #dt = 0.1 - dt_1 = int(round(1/dt)) - - - - discard_init_end = int(round(discard_init_end/dt)) - # hfwin_lfp_seg = int(round(hfwin_lfp_seg/dt)) - - total_spk = 0 - - lfp_spktri = np.zeros(hfwin_lfp_seg*2+1) - for dura_t in dura: - #print(total_spk) - #print(i) - #i+=1 - # mua = fra.get_spkcount_sum_sparmat(mua_mat, start_time=dura_t[0], end_time=dura_t[1],\ - # sample_interval = dt, window = 3, dt = dt) - lfp_tri = lfp[dura_t[0]:dura_t[1]] - - spk_t = spk_mat[:, dura_t[0]*dt_1:dura_t[1]*dt_1].nonzero()[1] - - spk_t = spk_t[(spk_t > (discard_init_end)) & (spk_t < ((dura_t[1]-dura_t[0])*dt_1 - discard_init_end))] - - spk_t = np.round(spk_t * dt).astype(int) - - for spk in spk_t: - total_spk += 1 - lfp_spktri += lfp_tri[spk-hfwin_lfp_seg:spk+hfwin_lfp_seg+1] - - lfp_spktri /= total_spk - - return lfp_spktri - -#%% -def get_ppc(d): - - d_len = d.shape[0] - sum_cos = 0 - for ii in range(0, d_len-1): - sum_cos += np.cos(d[ii] - d[ii+1:]).sum() - - return sum_cos/((d_len-1)*d_len/2) - -def get_plv(d): - return np.abs((np.exp(d*1j)).mean()) - - - - -#%% -def spk_continousSig_coupling(spk_mat, sig, dura, discard_init_end = 200, dt = 0.1,usemua=True, return_ppc=False): - """ - Methods from "Gamma rhythm communication between entorhinal cortex and dentate gyrus neuronal assemblies" - Phase lock value, instead of PPC, is used, for saving computing time - Parameters - ---------- - spk_mat : TYPE - spike sparse matrix. - sig : TYPE - signal, 1D array. - dura : TYPE - 2D array; start and end of analysis period - discard_init_end : TYPE, optional - ms; length of initial and end period of sig to be discraded ; The default is 200 ms. - dt : TYPE, optional - ms; spikes sample interval. The default is 0.1 ms. - - Returns - ------- - None. 
- - """ - #discard_init = 200 - #hfwin_lfp_seg = 150 - #dt = 0.1 - dt_1 = int(round(1/dt)) - - - - discard_init_end = int(round(discard_init_end/dt)) - # hfwin_lfp_seg = int(round(hfwin_lfp_seg/dt)) - - # total_spk = 0 - - freq_range = [25,200] - sampling_period = 0.001 - maxscale = int(np.ceil(np.log2((1/sampling_period)/freq_range[0])*10)) - minscale = int(np.floor(np.log2((1/sampling_period)/freq_range[1])*10)) - # scale = 2**(np.arange(minscale, maxscale + 1, 3)/10) - scale = 2**(np.linspace(minscale, maxscale, 20).astype(int)/10) - wavelet_name = 'cmor15-1' - #i = 0 - ang = [[] for _ in range(len(scale))] - for dura_t in dura: - #print(total_spk) - #print(i) - #i+=1 - # mua = fra.get_spkcount_sum_sparmat(mua_mat, start_time=dura_t[0], end_time=dura_t[1],\ - # sample_interval = dt, window = 3, dt = dt) - lfp_tri = sig[dura_t[0]:dura_t[1]] - coef, freq = fqa.mycwt(lfp_tri, wavelet_name, sampling_period, scale = scale, method = 'fft', L1_norm = True) - - spk_t = spk_mat[:, dura_t[0]*dt_1:dura_t[1]*dt_1].nonzero()[1] - - spk_t = spk_t[(spk_t > (discard_init_end)) & (spk_t < ((dura_t[1]-dura_t[0])*dt_1 - discard_init_end))] - - if usemua: - spk_t = np.round(spk_t * dt - 0.5).astype(int) - else: - spk_t = np.round(spk_t * dt).astype(int) - - for coef_i, coef_f in enumerate(coef): - ang[coef_i].append(np.angle(coef_f[spk_t])) - #% - if return_ppc: - ppc = np.zeros(len(ang)) - plv = np.zeros(len(ang)) - - for coef_i in range(len(coef)): - - ang[coef_i] = np.hstack(ang[coef_i]) - if return_ppc: - ppc[coef_i] = get_ppc(ang[coef_i]) - plv[coef_i] = get_plv(ang[coef_i]) - - if return_ppc: - return ppc, plv, freq - else: - return plv, freq - - - - -#%% -""" -a = (1+1j ) * np.arange(5) - -np.abs(a.mean())/np.sqrt((a*np.conj(a)).mean()) - - - -#%% -def get_power(d): - - n = d.shape[0] - coef = np.fft.rfft(d) - power = 2*np.abs(coef)**2/(n**2) - power[0] /= 2 - - return power - -#%% - -fs = 1000 -T = 5 - -t = np.arange(fs*T)/fs - -#%% -f1 = 5 -f2 = 40 -s1 = np.sin(2*np.pi*f1*t) -s2 = np.sin(2*np.pi*f2*t) - -s = s1 + s2 -#%% -ts2 = fs/f2 -tspk = np.random.choice(np.arange(150,5000-150,ts2,dtype=int),20,replace=False) -tspk = np.sort(tspk) -#%% -lfp_sta = np.zeros(301) -lfp_st_pw = np.zeros(151) -for ts in tspk: - - lfp_st = s[ts-150:ts+150+1]*np.random.rand()*2 - lfp_st_pw += get_power(lfp_st) - lfp_sta += lfp_st - -lfp_st_pw /= tspk.shape[0] -lfp_sta /= tspk.shape[0] -#%% -lfp_sta_pw = get_power(lfp_sta) -cohe = lfp_sta_pw/lfp_st_pw -#%% -plt.figure() -plt.plot(np.fft.rfftfreq(301,0.001),cohe) - -#%% -a = np.arange(5) -b = np.arange(5) - -p1 = get_power(a) -p2 = get_power(b) -p12 = get_power((a+b)/2) - -p12/((p1 + p2)/2) - - -""" - - - - - diff --git a/analysis/spk_continuousSig_coupling.py b/analysis/spk_continuousSig_coupling.py deleted file mode 100644 index f470371..0000000 --- a/analysis/spk_continuousSig_coupling.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Tue Jan 24 15:04:18 2023 - -@author: shni2598 -""" - - - -import frequency_analysis as fqa -import numpy as np -import mydata -#%% -def wvtphase(spk_mat, sig, analy_dura, onoff_stats_sens, onoff_stats_asso, return_ppc=False): - - dt_1 = 10 - dt = 0.1 - init_disgard = 200 - - n_mua_neuron = spk_mat.shape[0] - phase_f_on = [[] for ii in range(n_mua_neuron)] - phase_f_off = [[] for ii in range(n_mua_neuron)] - - freq_range = [15,200] - sampling_period = 0.001 - maxscale = int(np.ceil(np.log2((1/sampling_period)/freq_range[0])*10)) - minscale = 
int(np.floor(np.log2((1/sampling_period)/freq_range[1])*10)) - # scale = 2**(np.arange(minscale, maxscale + 1, 3)/10) - scale = 2**(np.linspace(minscale, maxscale, 20).astype(int)/10) - wavelet_name = 'cmor15-1' - - - for tri_i, dura in enumerate(analy_dura): - - start_time = dura[0] + init_disgard + 5 - end_time = dura[1] - 4 - t_ext = 500 - - sig_i = sig[start_time-t_ext:end_time+t_ext] - - coef, freq = fqa.mycwt(sig_i, wavelet_name, sampling_period, scale = scale, method = 'fft', L1_norm = True) - - coef = coef[:,t_ext:-t_ext] - - onoff_bool_bothOn = np.logical_and(onoff_stats_sens.onoff_bool[tri_i], onoff_stats_asso.onoff_bool[tri_i]) - onoff_bool_off1 = np.logical_not(onoff_stats_sens.onoff_bool[tri_i]) - onoff_bool_off2 = np.logical_not(onoff_stats_asso.onoff_bool[tri_i]) - onoff_bool_bothOff = np.logical_and(onoff_bool_off1, onoff_bool_off2) - - spk_mat_tri = spk_mat[:, (dura[0] + init_disgard + 5)*dt_1 : (dura[1] - 5)*dt_1] - - #% - for n_id in range(0, n_mua_neuron): - spk_t = spk_mat_tri.indices[spk_mat_tri.indptr[n_id]:spk_mat_tri.indptr[n_id+1]] - - usemua = 0 - if usemua: - spk_t = np.round(spk_t * dt - 0.5).astype(int) - else: - spk_t = np.round(spk_t * dt).astype(int) - - #% - - spk_t_bothOn = spk_t[onoff_bool_bothOn[spk_t]] - - - spk_t_bothOff = spk_t[onoff_bool_bothOff[spk_t]] - - #% - - phase_f_on_i = np.zeros([len(coef), spk_t_bothOn.shape[0]]) - phase_f_off_i = np.zeros([len(coef), spk_t_bothOff.shape[0]]) - - for ii, coef_i in enumerate(coef): - phase_f_on_i[ii] = np.angle(coef_i)[spk_t_bothOn] - phase_f_off_i[ii] = np.angle(coef_i)[spk_t_bothOff] - - phase_f_on[n_id].append(phase_f_on_i) - phase_f_off[n_id].append(phase_f_off_i) - - #% - indptr_on = np.zeros(n_mua_neuron + 1) - indptr_off = np.zeros(n_mua_neuron + 1) - len_on = 0; - len_off = 0; - for n_id in range(0, n_mua_neuron): - phase_f_on[n_id] = np.hstack(phase_f_on[n_id]).astype('float32') - phase_f_off[n_id] = np.hstack(phase_f_off[n_id]).astype('float32') - len_on += phase_f_on[n_id].shape[1] - len_off += phase_f_off[n_id].shape[1] - indptr_on[n_id + 1] = len_on - indptr_off[n_id + 1] = len_off - - plv_f_bothOn = np.zeros([n_mua_neuron, phase_f_on[0].shape[0]]) - plv_f_bothOff = np.zeros([n_mua_neuron, phase_f_on[0].shape[0]]) - - for n_id in range(0, n_mua_neuron): - - plv_f_bothOn[n_id] = get_plv(phase_f_on[n_id]) - plv_f_bothOff[n_id] = get_plv(phase_f_off[n_id]) - - if return_ppc: - - ppc_f_bothOn = np.zeros([n_mua_neuron,phase_f_on[0].shape[0]]) - ppc_f_bothOff = np.zeros([n_mua_neuron,phase_f_on[0].shape[0]]) - - for n_id in range(0, n_mua_neuron): - - ppc_f_bothOn[n_id] = get_ppc(phase_f_on[n_id]) - ppc_f_bothOff[n_id] = get_ppc(phase_f_off[n_id]) - - phase_f_on = np.hstack(phase_f_on) - phase_f_off = np.hstack(phase_f_off) - - if return_ppc: - data = mydata.mydata() - data.phase_f_on = phase_f_on - data.phase_f_off = phase_f_off - data.indptr_on = indptr_on - data.indptr_off = indptr_off - data.plv_f_bothOn = plv_f_bothOn - data.plv_f_bothOff = plv_f_bothOff - data.ppc_f_bothOn = ppc_f_bothOn - data.ppc_f_bothOff = ppc_f_bothOff - data.freq = freq - return data - else: - data = mydata.mydata() - data.phase_f_on = phase_f_on - data.phase_f_off = phase_f_off - data.indptr_on = indptr_on - data.indptr_off = indptr_off - data.plv_f_bothOn = plv_f_bothOn - data.plv_f_bothOff = plv_f_bothOff - data.freq = freq - return data - - -def get_ppc(d): - - d_len = d.shape[1] - sum_cos = np.zeros(d.shape[0]) - for ii in range(0, d_len-1): - sum_cos += np.cos(d[:,ii:ii+1] - d[:,ii+1:]).sum(1) - - return 
sum_cos/((d_len-1)*d_len/2) - -def get_plv(d): - return np.abs((np.exp(d*1j)).mean(1)) - - - diff --git a/analysis/spk_phase_sync.py b/analysis/spk_phase_sync.py deleted file mode 100644 index 4b1df4d..0000000 --- a/analysis/spk_phase_sync.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Sat Sep 18 00:07:48 2021 - -@author: shni2598 -""" - - -import frequency_analysis as fqa -import firing_rate_analysis as fra -import numpy as np - -import get_onoff_cpts -#%% - - -def get_spk_phase_sync(spk_mat, spk_mat_phase, dura, onoff_bool_sens, onoff_bool_asso, ignTrans, passband): - - - #ignTrans = 200 - - supply_start = 5 # 10: ms, length of window used in ON-OFF states detection - supply_end = 5 - MUA_window = 1 - # bothON_ = np.logical_and(data_anly.onoff_sens.stim_noatt[0].onoff_bool[0], data_anly.onoff_asso.stim_noatt[0].onoff_bool[0]) - # bothON_ = np.concatenate([np.zeros(supply_start,dtype=bool), bothON_, np.zeros(supply_end,dtype=bool)]) - # bothOFF_ = np.logical_not(np.logical_or(onoff_bool_sens[t_i], onoff_bool_asso[t_i])) - # bothOFF_ = np.concatenate([np.zeros(supply_start,dtype=bool), bothOFF_, np.zeros(supply_end,dtype=bool)]) - - neu_n = spk_mat.shape[0] - spk_phase_on = [[[[] for _ in range(len(dura))] for __ in range(neu_n)] for ___ in range(len(passband))] - spk_phase_off = [[[[] for _ in range(len(dura))] for __ in range(neu_n)] for ___ in range(len(passband))] - - - for t_i, t in enumerate(dura): - print('trial: %d'%t_i) - MUA_forPhase = fra.get_spkcount_sum_sparmat(spk_mat_phase, t[0], t[1],\ - sample_interval = 0.1, window = MUA_window, dt = 0.1)/spk_mat_phase.shape[0]/(MUA_window/1000) - - - - - bothON_ = np.logical_and(onoff_bool_sens[t_i], onoff_bool_asso[t_i]) - bothON_ = np.concatenate([np.zeros(supply_start,dtype=bool), bothON_, np.zeros(supply_end,dtype=bool)]) - bothOFF_ = np.logical_not(np.logical_or(onoff_bool_sens[t_i], onoff_bool_asso[t_i])) - bothOFF_ = np.concatenate([np.zeros(supply_start,dtype=bool), bothOFF_, np.zeros(supply_end,dtype=bool)]) - - - on_t, off_t, onset_t_bON, offset_t_bON = get_onoff_cpts.get_onoff_cpts(bothON_) - - if offset_t_bON[0] < onset_t_bON[0]: - offset_t_bON = np.delete(offset_t_bON, 0) - if onset_t_bON[-1] > offset_t_bON[-1]: - onset_t_bON = np.delete(onset_t_bON, -1) - - onset_t_bON += ignTrans - offset_t_bON += ignTrans - - on_t, off_t, onset_t_bOFF, offset_t_bOFF = get_onoff_cpts.get_onoff_cpts(bothOFF_) - - if offset_t_bOFF[0] < onset_t_bOFF[0]: - offset_t_bOFF = np.delete(offset_t_bOFF, 0) - if onset_t_bOFF[-1] > offset_t_bOFF[-1]: - onset_t_bOFF = np.delete(onset_t_bOFF, -1) - - onset_t_bOFF += ignTrans - offset_t_bOFF += ignTrans - - - # _, MUA_2_filt_hil = fqa.get_filt_hilb_1(MUA_2, passband, Fs = 10000, filterOrder = 8) - # MUA_2_phase = np.angle(MUA_2_filt_hil) - # MUA_2_phase = np.concatenate([np.zeros(5), MUA_2_phase, np.zeros(4)]) - - for pb_i in range(passband.shape[0]): - - _, MUA_forPhase_filt_hil = fqa.get_filt_hilb_1(MUA_forPhase, passband[pb_i], Fs = 10000, filterOrder = 8) - MUA_phase = np.angle(MUA_forPhase_filt_hil) - MUA_phase = np.concatenate([np.zeros(5), MUA_phase, np.zeros(4)]); #print(MUA_phase.shape) - - for n_i in range(neu_n): - for ont, offt in zip(onset_t_bON, offset_t_bON): - spk_phase_on[pb_i][n_i][t_i].append(MUA_phase[ont*10 + spk_mat[n_i, (t[0]+ont)*10:(t[0]+offt)*10].nonzero()[1]]) - - if len(spk_phase_on[pb_i][n_i][t_i]) == 0: - spk_phase_on[pb_i][n_i][t_i] = np.array([]) - else: - spk_phase_on[pb_i][n_i][t_i] = 
np.concatenate(spk_phase_on[pb_i][n_i][t_i]) - - for ont, offt in zip(onset_t_bOFF, offset_t_bOFF): - spk_phase_off[pb_i][n_i][t_i].append(MUA_phase[ont*10 + spk_mat[n_i, (t[0]+ont)*10:(t[0]+offt)*10].nonzero()[1]]) - - if len(spk_phase_off[pb_i][n_i][t_i]) == 0: - spk_phase_off[pb_i][n_i][t_i] = np.array([]) - else: - spk_phase_off[pb_i][n_i][t_i] = np.concatenate(spk_phase_off[pb_i][n_i][t_i]) - - - ppc_bothON = np.zeros([passband.shape[0], neu_n]) - ppc_bothOFF = np.zeros([passband.shape[0], neu_n]) - - for pb_i in range(passband.shape[0]): - print('pass band:',passband[pb_i]) - for n_i in range(neu_n): - - ppc_bothON[pb_i][n_i] = get_ppc(spk_phase_on[pb_i][n_i]) - ppc_bothOFF[pb_i][n_i] = get_ppc(spk_phase_off[pb_i][n_i]) - - return ppc_bothON, ppc_bothOFF - - -def get_ppc(spk_phase): - trial_n = len(spk_phase) - ppc = 0 - nonzero_t_n = 0 - # n_phase = 0 - # for item in spk_phase: - # n_phase += len(item) - # print('n_phase:%d'%n_phase) - for ti in range(trial_n): - for tj in range(trial_n): - #print(ti, tj, nonzero_t_n) - if ti == tj: - continue - if spk_phase[ti].shape[0] == 0 or spk_phase[tj].shape[0] == 0: - continue - nonzero_t_n += 1 - - ppc += np.cos(spk_phase[ti].reshape(-1,1) - spk_phase[tj]).mean() - - ppc /= nonzero_t_n - return ppc
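
The trial-to-trial gain model in gain_fluctuation.py decomposes multi-unit activity as MUA ~= m(t)*mc + a(t)*ac, a multiplicative and an additive coupling vector each scaled by a per-trial fluctuation. A minimal sketch of the affine fit follows, reusing the toy vectors from the module's own commented-out test cells; np.linalg.lstsq stands in for the explicit normal-equation inverse of find_fluc_affine_exact, a numerically safer but otherwise equivalent closed form:

import numpy as np

def fit_affine_fluc(mc, ac, train):
    # Least-squares solution of train ~= m*mc + a*ac; equivalent to the
    # normal equations inv(C.T C) C.T train used by find_fluc_affine_exact.
    coup = np.column_stack((mc, ac))
    (m, a), *_ = np.linalg.lstsq(coup, train, rcond=None)
    return m, a

# Toy vectors from the module's commented-out tests.
mc = np.array([2., 3., 2., 4., 5.])
ac = np.array([2., 1., 3., 4., 5.])
train = np.array([1., 2., 5., 6., 4.]) * 2
m, a = fit_affine_fluc(mc, ac, train)
print(m, a)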
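sigCoherence_fft.py estimates coherence with the multitaper method: each analysis window is multiplied by a family of DPSS (Slepian) tapers for optimal spectral concentration, and cross- and auto-spectra are averaged over tapers and windows before the ratio is formed. A self-contained sketch under assumed toy parameters (1 kHz sampling, 200-sample windows, two noisy 40 Hz sinusoids); the deleted module instead received precomputed tapers through its tap_win argument:

import numpy as np
from scipy.signal.windows import dpss

fs, win = 1000, 200                     # Hz; samples per window
nw = 2.5                                # time-half-bandwidth, as in the module's comments
tapers = dpss(win, NW=nw, Kmax=round(2 * nw - 1))   # (K, win) Slepian tapers

t = np.arange(10 * win) / fs
rng = np.random.default_rng(0)
sig1 = np.sin(2 * np.pi * 40 * t) + rng.standard_normal(t.size)
sig2 = np.sin(2 * np.pi * 40 * t + 0.5) + rng.standard_normal(t.size)

xspect = spect1 = spect2 = 0.0
for k in range(0, t.size - win + 1, win):           # non-overlapping windows
    f1 = np.fft.rfft(tapers * sig1[k:k + win])      # (K, n_freq), one row per taper
    f2 = np.fft.rfft(tapers * sig2[k:k + win])
    xspect = xspect + (f1 * np.conj(f2)).mean(0)    # taper-averaged cross-spectrum
    spect1 = spect1 + (np.abs(f1) ** 2).mean(0)
    spect2 = spect2 + (np.abs(f2) ** 2).mean(0)

cohe = np.abs(xspect / np.sqrt(spect1 * spect2))    # peaks near 40 Hz
freq = np.fft.rfftfreq(win, d=1 / fs)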
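spk_mua_coherence and spk_lfp_coherence in spk_cohe.py follow the cited study ("Modulation of Oscillatory Neuronal Synchronization by Selective Visual Attention"): for every spike, a signal window centred on the spike time is Fourier-transformed, and coherence at each frequency is the power of the spike-triggered average divided by the mean power of the individual segments. A sketch on hypothetical data (a noisy 40 Hz field signal with random spike times):

import numpy as np

dt = 0.001                      # s; signal sample interval
hfwin = 150                     # samples; half window around each spike
rng = np.random.default_rng(1)

sig = np.sin(2 * np.pi * 40 * np.arange(5000) * dt) + rng.standard_normal(5000)
spk_t = np.sort(rng.choice(np.arange(hfwin, 5000 - hfwin), 300, replace=False))

st_pw = np.zeros(hfwin + 1)                  # running mean single-segment power
sta_coef = np.zeros(hfwin + 1, dtype=complex)
sta = np.zeros(2 * hfwin)                    # the spike-triggered average itself
for s in spk_t:
    seg = sig[s - hfwin:s + hfwin]
    coef = np.fft.rfft(seg - seg.mean()) / (2 * hfwin)
    st_pw += np.abs(coef) ** 2
    sta_coef += coef
    sta += seg

n = spk_t.size
cohe = np.abs(sta_coef / n) ** 2 / (st_pw / n)   # reaches 1 at perfect phase locking
freq = np.fft.rfftfreq(2 * hfwin, d=dt)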
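The three phase-coupling modules (spk_cohe.py, spk_continuousSig_coupling.py, spk_phase_sync.py) all reduce to the same two statistics on the distribution of signal phases at spike times: the phase-locking value, which is fast but biased upward at low spike counts, and the pairwise phase consistency, which is unbiased but quadratic in the number of spikes (the reason the docstrings offer PLV "for saving computing time"). A sketch with von Mises-distributed toy phases standing in for the wavelet angles the deleted code extracted at each spike:

import numpy as np

def plv(phases):
    # Phase-locking value: modulus of the mean unit phasor, as in get_plv.
    return np.abs(np.exp(1j * phases).mean())

def ppc(phases):
    # Pairwise phase consistency: mean cosine of all distinct phase
    # pairs, as in get_ppc; O(n^2) in the number of spikes.
    n = phases.shape[0]
    sum_cos = 0.0
    for ii in range(n - 1):
        sum_cos += np.cos(phases[ii] - phases[ii + 1:]).sum()
    return sum_cos / (n * (n - 1) / 2)

rng = np.random.default_rng(2)
spike_phases = rng.vonmises(mu=0.0, kappa=0.5, size=200)  # weak locking to phase 0
print(plv(spike_phases), ppc(spike_phases))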