-
Notifications
You must be signed in to change notification settings - Fork 10
/
autocorrect_tp.py
352 lines (319 loc) · 14.8 KB
/
autocorrect_tp.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
#
# Routines to try to correct total power issues
#
# History
# 2019-08-12 DG
# Initial start of history log. Fixed a problem in tp_bgnd() for
# nans in the data
# 2019-12-31 DG
# Check for how many good data points for a given frequency in autocorrect_tp(),
# and skip that frequency if less than 10% of time samples.
# 2020-10-27 DG
# Fix crash that occurred in tp_bgnd() where there were NO good data.
# 2021-08-02 DG
# Some minor tweaks to tp_bgnd() and created a new routine that does all
# antennas and polarizations, called tp_bgnd_all(), which is otherwise the
# same.
# 2021-09-25 DG
# Change minimum length for background subtraction (tp_bgnd, etc.) to 1200
# and allow it to shrink further if some data are missing.
#
import pipeline_cal as pc
import gaincal2 as gc
import attncal as ac
import numpy as np
from util import Time, nearest_val_idx, ant_str2list
import matplotlib.pylab as plt
from matplotlib.dates import DateFormatter
def autocorrect(out,ant_str='ant1-13',brange=[0,300]):
nt = len(out['time'])
nf = len(out['fghz'])
pfac1 = (out['p'][:,:,:,:-1] - out['p'][:,:,:,1:])/out['p'][:,:,:,:-1]
trange = Time(out['time'][[0,-1]],format='jd')
src_lev = gc.get_fem_level(trange) # Read FEM levels from SQL
# Match times with data
tidx = nearest_val_idx(out['time'],src_lev['times'].jd)
# Find attenuation changes
for ant in range(13):
for pol in range(2):
if pol == 0:
lev = src_lev['hlev'][ant,tidx]
else:
lev = src_lev['vlev'][ant,tidx]
jidx, = np.where(abs(lev[:-1] - lev[1:]) == 1)
for freq in range(nf):
idx, = np.where(np.logical_and(abs(pfac1[ant,pol,freq]) > 0.05,abs(pfac1[ant,pol,freq]) < 0.95))
for i in range(len(idx-1)):
if idx[i] in jidx or idx[i] in jidx-1:
out['p'][ant,pol,freq,idx[i]+1:] /= (1-pfac1[ant,pol,freq,idx[i]])
# Time of total power calibration is 20 UT on the date given
tptime = Time(np.floor(trange[0].mjd) + 20./24.,format='mjd')
calfac = pc.get_calfac(tptime)
tpcalfac = calfac['tpcalfac']
tpoffsun = calfac['tpoffsun']
hlev = src_lev['hlev'][:13,0]
vlev = src_lev['vlev'][:13,0]
attn_dict = ac.read_attncal(trange[0])[0] # Read GCAL attn from SQL
attn = np.zeros((13,2,nf))
for i in range(13):
attn[i,0] = attn_dict['attn'][hlev[i],0,0]
attn[i,1] = attn_dict['attn'][vlev[i],0,1]
print 'Ant',i+1,attn[i,0,20],attn[i,1,20]
attnfac = 10**(attn/10.)
for i in range(13): print attnfac[i,0,20],attnfac[i,1,20]
for i in range(nt):
out['p'][:13,:,:,i] = (out['p'][:13,:,:,i]*attnfac - tpoffsun)*tpcalfac
antlist = ant_str2list(ant_str)
bg = np.zeros_like(out['p'])
# Subtract background for each antenna/polarization
for ant in antlist:
for pol in range(2):
bg[ant,pol] = np.median(out['p'][ant,pol,:,brange[0]:brange[1]],1).repeat(nt).reshape(nf,nt)
#out['p'][ant,pol] -= bg
# Form median over antennas/pols
med = np.mean(np.median((out['p']-bg)[antlist],0),0)
# Do background subtraction once more for good measure
bgd = np.median(med[:,brange[0]:brange[1]],1).repeat(nt).reshape(nf,nt)
med -= bgd
pdata = np.log10(med)
f, ax = plt.subplots(1,1)
vmax = np.median(np.nanmax(pdata,1))
im = ax.pcolormesh(Time(out['time'],format='jd').plot_date,out['fghz'],pdata,vmin=1,vmax=vmax)
ax.axvspan(Time(out['time'][brange[0]],format='jd').plot_date,Time(out['time'][brange[1]],format='jd').plot_date,color='w',alpha=0.3)
cbar = plt.colorbar(im,ax=ax)
cbar.set_label('Log Flux Density [sfu]')
ax.xaxis_date()
ax.xaxis.set_major_formatter(DateFormatter("%H:%M"))
ax.set_ylim(out['fghz'][0], out['fghz'][-1])
ax.set_xlabel('Time [UT]')
ax.set_ylabel('Frequency [GHz]')
return {'caldata':out, 'med_sub':med, 'bgd':bg}
# def tp_bgnd(tpdata):
# ''' Create time-variable background from ROACH inlet temperature
# This may be a crude correction, but it has been seen to work.
# Inputs:
# tpdata dictionary returned by read_idb() NB: tpdata is not changed.
# Returns:
# bgnd The background fluctuation array of size (nf,nt) to be
# subtracted from any antenna's total power (or mean of
# antenna total powers)
# '''
# import dbutil as db
# import read_idb as ri
# from util import Time
# outfghz = tpdata['fghz']
# try:
# outtime = tpdata['time']
# trange = Time(outtime[[0,-1]],format='jd')
# except:
# outtime = tpdata['ut_mjd']
# trange = Time(outtime[[0,-1]],format='mjd')
# tstr = trange.lv.astype(int).astype(str)
# nt = len(outtime)
# nf = len(outfghz)
# outpd = Time(outtime,format='jd').plot_date
# cursor = db.get_cursor()
# version = db.find_table_version(cursor,int(tstr[0]))
# query = 'select * from fV'+version+'_vD8 where (Timestamp between '+tstr[0]+' and '+tstr[1]+')'
# data, msg = db.do_query(cursor, query)
# pd = Time(data['Timestamp'][::8].astype(int),format='lv').plot_date
# inlet = data['Sche_Data_Roac_TempInlet'].reshape(len(pd),8) # Inlet temperature variation
# sinlet = np.sum(inlet.astype(float),1)
# sint = np.interp(outpd,pd,sinlet)
# sint = np.roll(sint,-90) # Shift phase of variation by 90 s earlier
# sint -= np.mean(sint) # Remove offset, to provide zero-mean fluctuation
# bgnd = np.zeros((nf, nt), float)
# for i in range(nf):
# if 13.5 < outfghz[i] < 14.0:
# bgnd[i] = sint*2
# elif 14.0 < outfghz[i] < 15.0:
# bgnd[i] = sint*5
# elif outfghz[i] > 15.0:
# bgnd[i] = sint*3
# return bgnd
def smooth(x,window_len=11,window='hanning'):
    """smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.

    input:
        x: the input signal (1-d numpy array)
        window_len: the dimension of the smoothing window; should be an odd integer
        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
            flat window will produce a moving average smoothing.

    output:
        the smoothed signal

    example:
        t = np.arange(-2, 2, 0.1)
        x = np.sin(t) + np.random.randn(len(t))*0.1
        y = smooth(x)

    see also:
        numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
        scipy.signal.lfilter

    TODO: the window parameter could be the window itself if an array instead of a string
    NOTE: length(output) != length(input); the output has window_len-1 extra
          samples.  To correct this, return y[(window_len/2-1):-(window_len/2)]
          instead of just y.
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # Too small a window to smooth anything -- return input unchanged
        return x
    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Pad with reflected copies of the signal at both ends
    s = np.r_[x[window_len-1:0:-1], x, x[-2:-window_len-1:-1]]
    if window == 'flat':   # moving average
        w = np.ones(window_len,'d')
    else:
        # Look the window function up by name (safer than eval on a string)
        w = getattr(np, window)(window_len)
    # Normalize the window so the overall signal level is preserved
    y = np.convolve(w/w.sum(), s, mode='valid')
    return y
def tp_bgnd(tpdata):
    ''' Create time-variable background from ROACH inlet temperature
        This version is far superior to the earlier, crude version, but
        beware that it works best for a long timerange of data, especially
        when there is a flare in the data.
        Inputs:
          tpdata   dictionary returned by read_idb()  NB: tpdata is not changed.
                     Keys used: 'fghz', 'time' (JD) or 'ut_mjd' (MJD), and 'p',
                     indexed here as [freq, time], i.e. a single antenna/pol
                     (or a mean over antennas).  Must have >= 1200 time samples.
        Returns:
          bgnd     The background fluctuation array of size (nf,nt) to be
                     subtracted from any antenna's total power (or mean of
                     antenna total powers), or None if the timebase is too short.
                     Frequencies with too few good points are left as zeros.
    '''
    import dbutil as db
    from util import Time, nearest_val_idx
    outfghz = tpdata['fghz']
    # Accept either a 'time' key (JD) or a 'ut_mjd' key (MJD) for the timebase
    try:
        outtime = tpdata['time']
        trange = Time(outtime[[0,-1]],format='jd')
    except:
        outtime = tpdata['ut_mjd']
        trange = Time(outtime[[0,-1]],format='mjd')
    # LabVIEW-epoch timestamps as strings, for use in the SQL query below
    tstr = trange.lv.astype(int).astype(str)
    nt = len(outtime)
    if nt < 1200:
        print 'TP_BGND: Error, timebase too small. Must have at least 1200 time samples.'
        return None
    nf = len(outfghz)
    outpd = Time(outtime,format='jd').plot_date
    # Query the stateframe database for ROACH inlet temperatures over the range
    cursor = db.get_cursor()
    version = db.find_table_version(cursor,int(tstr[0]))
    query = 'select * from fV'+version+'_vD8 where (Timestamp between '+tstr[0]+' and '+tstr[1]+')'
    data, msg = db.do_query(cursor, query)
    # Records come 8 per timestamp (one per ROACH); take every 8th timestamp
    pd = Time(data['Timestamp'][::8].astype(int),format='lv').plot_date
    inlet = data['Sche_Data_Roac_TempInlet'].reshape(len(pd),8) # Inlet temperature variation
    # Sum the 8 ROACH inlet temperatures at each time
    sinlet = np.sum(inlet.astype(float),1)
    # Eliminate 0 values in sinlet by replacing with nearest good value
    bad, = np.where(sinlet == 0)
    good, = np.where(sinlet != 0)
    idx = nearest_val_idx(bad,good) # Find locations of nearest good values to bad ones
    sinlet[bad] = sinlet[good[idx]] # Overwrite bad values with good ones
    sinlet -= np.mean(sinlet) # Remove offset, to provide zero-mean fluctuation
    sinlet = np.roll(sinlet,-110) # Shift phase of variation by 110 s earlier (seems to be needed)
    # Interpolate sinlet values to the times in the data
    sint = np.interp(outpd,pd,sinlet)
    sdev = np.std(sint)
    # Mask of temperature samples within 2 sigma (used in the final quality check)
    sint_ok = np.abs(sint) < 2*sdev
    bgnd = np.zeros((nf, nt), float)
    for i in range(nf):
        # Smoothing window: as long as possible up to 2000 samples, forced even
        # so the slice below trims the convolution transients symmetrically.
        # NOTE: wlen/2 relies on Python 2 integer division.
        wlen = min(nt,2000)
        if wlen % 2 != 0:
            wlen -= 1
        # Subtract smooth trend from data
        sig = tpdata['p'][i] - smooth(tpdata['p'][i],wlen,'blackman')[wlen/2:-(wlen/2-1)]
        # Eliminate the worst outliers and repeat
        stdev = np.nanstd(sig)
        good, = np.where(np.abs(sig) < stdev)
        if len(good) > nt*0.1:
            # Enough good points (>10% of the timebase): redo the detrend on them
            wlen = min(len(good),2000)
            if wlen % 2 != 0:
                wlen -= 1
            # Subtract smooth trend from data
            sig = tpdata['p'][i,good] - smooth(tpdata['p'][i,good],wlen,'blackman')[wlen/2:-(wlen/2-1)]
            sint_i = sint[good]
            stdev = np.std(sig)
            # Final check for data quality
            # NOTE(review): sig < 2*stdev has no abs(), so only large positive
            # excursions are rejected -- possibly deliberate (flares are
            # positive), but confirm; sibling tp_bgnd_all() does the same.
            good, = np.where(np.logical_and(sig < 2*stdev, sint_ok[good]))
            if len(good) > nt*0.1:
                # Linear fit of detrended power vs. inlet-temperature fluctuation
                p = np.polyfit(sint_i[good],sig[good],1)
            else:
                p = [1.,0.]
            # Apply correction for this frequency
            bgnd[i] = sint*p[0] + p[1]
    return bgnd
def tp_bgnd_all(tpdata):
    ''' Create time-variable background from ROACH inlet temperature
        This version is far superior to the earlier, crude version, but
        beware that it works best for a long timerange of data, especially
        when there is a flare in the data.
        Same as tp_bgnd(), but handles all 13 antennas and both polarizations.
        Inputs:
          tpdata   dictionary returned by read_idb()  NB: tpdata is not changed.
                     Keys used: 'fghz', 'time' (JD) or 'ut_mjd' (MJD), and 'p',
                     indexed here as [ant, pol, freq, time].  Must have >= 1200
                     time samples.
        Returns:
          bgnd     The background fluctuation array of size (13,2,nf,nt) to be
                     subtracted from the total power, or None if the timebase
                     is too short.  Frequencies with too few good points are
                     left as zeros.
    '''
    import dbutil as db
    from util import Time, nearest_val_idx
    outfghz = tpdata['fghz']
    # Accept either a 'time' key (JD) or a 'ut_mjd' key (MJD) for the timebase
    try:
        outtime = tpdata['time']
        trange = Time(outtime[[0,-1]],format='jd')
    except:
        outtime = tpdata['ut_mjd']
        trange = Time(outtime[[0,-1]],format='mjd')
    nt = len(outtime)
    if nt < 1200:
        print 'TP_BGND: Error, timebase too small. Must have at least 1200 time samples.'
        return None
    nf = len(outfghz)
    outpd = Time(outtime,format='jd').plot_date
    # Read the stateframe ROACH records (dimension=8, one per ROACH) directly
    cursor = db.get_cursor()
    data = db.get_dbrecs(cursor, dimension=8, timestamp=trange)
    pd = Time(data['Timestamp'][:,0].astype(int),format='lv').plot_date
    inlet = data['Sche_Data_Roac_TempInlet']  # Inlet temperature variation
    # Sum the 8 ROACH inlet temperatures at each time
    sinlet = np.sum(inlet.astype(float),1)
    # Eliminate 0 values in sinlet by replacing with nearest good value
    bad, = np.where(sinlet == 0)
    good, = np.where(sinlet != 0)
    idx = nearest_val_idx(bad,good) # Find locations of nearest good values to bad ones
    sinlet[bad] = sinlet[good[idx]] # Overwrite bad values with good ones
    sinlet -= np.mean(sinlet) # Remove offset, to provide zero-mean fluctuation
    sinlet = np.roll(sinlet,-110) # Shift phase of variation by 110 s earlier (seems to be needed)
    # Interpolate sinlet values to the times in the data
    sint = np.interp(outpd,pd,sinlet)
    # sint = np.roll(sint,-90)  # Shift phase of variation by 90 s earlier
    # sint -= np.mean(sint)  # Remove offset, to provide zero-mean fluctuation
    sdev = np.std(sint)
    # Mask of temperature samples within 2 sigma (used in the final quality check)
    sint_ok = np.abs(sint) < 2*sdev
    bgnd = np.zeros((13, 2, nf, nt), float)
    for ant in range(13):
        for pol in range(2):
            for i in range(nf):
                # Subtract smooth trend from data
                # (nt is re-derived from the slice; same value as above)
                nt = len(tpdata['p'][ant,pol,i])
                # Window up to 2000 samples, forced even so the slice trims the
                # convolution transients symmetrically.
                # NOTE: wlen/2 relies on Python 2 integer division.
                wlen = min(nt,2000)
                if wlen % 2 != 0:
                    wlen -= 1
                sig = tpdata['p'][ant,pol,i] - smooth(tpdata['p'][ant,pol,i],wlen,'blackman')[wlen/2:-(wlen/2-1)]
                # Eliminate the worst outliers and repeat
                # NOTE(review): threshold here is 2*stdev, while tp_bgnd() uses
                # stdev at the same step -- confirm which is intended.
                stdev = np.nanstd(sig)
                good, = np.where(np.abs(sig) < 2*stdev)
                if len(good) > nt*0.1:
                    # Enough good points (>10% of timebase): redo the detrend on them
                    wlen = min(len(good),2000)
                    if wlen % 2 != 0:
                        wlen -= 1
                    sig = tpdata['p'][ant,pol,i,good] - smooth(tpdata['p'][ant,pol,i,good],wlen,'blackman')[wlen/2:-(wlen/2-1)]
                    sint_i = sint[good]
                    stdev = np.std(sig)
                    # Final check for data quality
                    # NOTE(review): no abs() on sig here -- only large positive
                    # excursions rejected; possibly deliberate (flares) -- confirm.
                    good, = np.where(np.logical_and(sig < 2*stdev, sint_ok[good]))
                    if len(good) > nt*0.1:
                        # Linear fit of detrended power vs. inlet-temperature fluctuation
                        p = np.polyfit(sint_i[good],sig[good],1)
                    else:
                        p = [1.,0.]
                    # Apply correction for this frequency
                    bgnd[ant,pol,i] = sint*p[0] + p[1]
    return bgnd