Merge pull request #7 from rcjackson/new_version
New version
rcjackson authored Jun 12, 2023
2 parents 79067aa + ef8f5fd commit e2d3513
Showing 12 changed files with 94 additions and 106 deletions.
31 changes: 31 additions & 0 deletions .github/workflows/python-app.yml
@@ -0,0 +1,31 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Python application

on:
push:
branches: [ master ]
pull_request:
branches: [ master ]

jobs:
build:

runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2
- name: Set up Python 3.10
uses: actions/setup-python@v2
with:
python-version: "3.10"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install flake8 pytest
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
pip install .
- name: Test with pytest
run: |
pytest
13 changes: 3 additions & 10 deletions LICENSE
@@ -1,25 +1,18 @@
Copyright © 2019, UChicago Argonne, LLC

All Rights Reserved

Software Name: HighIQ

Software Name: HighIQ SF-20-013
By: Argonne National Laboratory

OPEN SOURCE LICENSE

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

******************************************************************************************************

******************************************************************************************************
DISCLAIMER

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

***************************************************************************************************
***************************************************************************************************
8 changes: 4 additions & 4 deletions doc/source/conf.py
@@ -13,7 +13,7 @@
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

import sphinx_theme

# -- Project information -----------------------------------------------------

@@ -46,7 +46,7 @@
'gallery_dirs': 'source/auto_examples'
}

#Add any paths that contain templates here, relative to this directory.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
@@ -67,9 +67,9 @@
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_theme = 'neo_rtd_theme'
import sphinx_theme

html_theme_path = [sphinx_theme.get_html_theme_path()]

# Numpy autodoc attributes
numpydoc_show_class_members = True
autosummary_generate = True
autosummary_generate = True
2 changes: 1 addition & 1 deletion examples/plot_moments.py
@@ -25,4 +25,4 @@
my_ds["doppler_velocity_max_peak"].plot(ax=ax[1], y='range')
plt.show()

test_file.close()
test_file.close()
6 changes: 3 additions & 3 deletions examples/plot_power_spectra.py
@@ -17,8 +17,8 @@
# Plot the power spectra for a given time and height
my_time = datetime(2017, 8, 4, 0, 40, 59)
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
my_ds["power_spectra_normed_interp"].sel(time=my_time, range=350., method='nearest').plot(ax=ax[0])
my_ds["power_spectra_normed_interp"].sel(time=my_time, range=950., method='nearest').plot(ax=ax[1])
out_ds["power_spectral_density"].sel(time=my_time, range=350., method='nearest').plot(ax=ax[0])
out_ds["power_spectral_density"].sel(time=my_time, range=950., method='nearest').plot(ax=ax[1])
plt.show()

test_file.close()
test_file.close()
7 changes: 6 additions & 1 deletion examples/read_00_file.py
@@ -1,6 +1,11 @@
"""
Example on reading raw Halo Photonics data
------------------------------------------
"""
import highiq

ds = highiq.io.read_00_data('sgpdlacfC1.00.20220107.141001.raw.aet_Stare_107_20220107_14.raw',
'sgpdlprofcalC1.home_point')
my_spectra = highiq.calc.get_psd(ds)
print(ds)
print(ds)
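
For orientation, the updated example stops after computing the spectra and printing the raw dataset. A minimal, hedged sketch of the full processing chain this release exercises might look like the following; it reuses the file names from the example above and assumes that get_lidar_moments is exported from highiq.calc in the same way as get_psd.

import highiq

# Read the raw Halo Photonics autocorrelation data together with its
# calibration ("home point") file, as in read_00_file.py above.
ds = highiq.io.read_00_data('sgpdlacfC1.00.20220107.141001.raw.aet_Stare_107_20220107_14.raw',
                            'sgpdlprofcalC1.home_point')

# Convert the autocorrelation functions into Doppler power spectral densities.
psd = highiq.calc.get_psd(ds)

# Derive the lidar moments; the moment names follow the which_moments default
# shown in highiq/calc/moments.py later in this diff.
moments = highiq.calc.get_lidar_moments(psd, snr_thresh=0)
print(moments)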
24 changes: 6 additions & 18 deletions highiq/calc/moments.py
@@ -5,7 +5,6 @@

def _gpu_calc_power(psd, dV, block_size=200, normed=True):
shp = psd.shape

power = np.zeros((shp[0], shp[1]))
if len(shp) == 3:
gpu_array = tf.constant(psd, dtype=tf.float32)
@@ -25,7 +24,6 @@ def _gpu_calc_power(psd, dV, block_size=200, normed=True):

def _gpu_calc_velocity(psd, power, vel_bins, dV):
shp = psd.shape
times = shp[0]
gpu_array = tf.constant(psd, dtype=tf.float32)
power_array = tf.constant(power, dtype=tf.float32)
vel_bins_tiled = np.tile(vel_bins, (shp[0], shp[1], 1))
@@ -35,7 +33,6 @@ def _gpu_calc_velocity(psd, power, vel_bins, dV):


def _gpu_calc_velocity_dumb(psd, vel_bins):
shp = psd.shape
dV = np.diff(vel_bins)[0]
vel_min = vel_bins.min()
gpu_array = tf.constant(psd)
@@ -55,9 +52,8 @@ def _gpu_calc_spectral_width(psd, power, vel_bins, velocity, dV):

velocity_array = tf.transpose(np.tile(velocity, (shp[2], 1, 1)), [1, 2, 0])
vel_bins_tiled = np.tile(vel_bins, (times, shp[1], 1))
gpu_array = tf.math.sqrt(1 / power_array *
tf.math.reduce_sum(
(vel_bins_tiled - velocity_array)**2 * gpu_array, axis=2))
gpu_array = tf.math.sqrt(1 / power_array * tf.math.reduce_sum(
(vel_bins_tiled - velocity_array)**2 * gpu_array, axis=2))
specwidth = gpu_array.numpy()
return specwidth

@@ -73,16 +69,14 @@ def _gpu_calc_skewness(psd, power, vel_bins, velocity, spec_width, dV):
velocity_array = tf.transpose(np.tile(velocity, (shp[2], 1, 1)), [1, 2, 0])
vel_bins_tiled = np.tile(vel_bins, (times, shp[1], 1))
gpu_array = 1 / power_array * tf.math.reduce_sum(
(vel_bins_tiled - velocity_array)**3 * gpu_array, axis=2)
(vel_bins_tiled - velocity_array)**3 * gpu_array, axis=2)
skewness = gpu_array.numpy()
return skewness


def _gpu_calc_kurtosis(psd, power, vel_bins, velocity, spec_width, dV):
shp = psd.shape
times = shp[0]
kurtosis = np.zeros((shp[0], shp[1]))

gpu_array = tf.constant(psd.values, dtype=tf.float32)
power_array = tf.constant(power, dtype=tf.float32)
spec_width_array = tf.constant(spec_width, dtype=tf.float32)
@@ -121,8 +115,7 @@ def get_lidar_moments(spectra, snr_thresh=0, block_size_ratio=1.0, which_moments
The database with the Doppler lidar moments.
"""
if 'power_spectral_density' not in spectra.variables.keys():
raise ValueError("You must calculate the power spectra " +
"before calculating moments!")
raise ValueError("You must calculate the power spectra before calculating moments!")
if which_moments is None:
which_moments = ['snr', 'doppler_velocity', 'spectral_width',
'skewness', 'kurtosis']
@@ -137,13 +130,8 @@ def get_lidar_moments(spectra, snr_thresh=0, block_size_ratio=1.0, which_moments
linear_psd_0filled = linear_psd.fillna(0).values
power = _gpu_calc_power(
linear_psd_0filled, dV)
nyquist_range = spectra['vel_bins'].values.max() - spectra['vel_bins'].values.min()
if ('doppler_velocity' in which_moments or 'spectral_width' in which_moments or
'skewness' in which_moments or 'kurtosis' in which_moments):
velocity = _gpu_calc_velocity(
linear_psd_0filled, power,
spectra['vel_bins'].values, dV)

velocity = _gpu_calc_velocity(linear_psd_0filled, power, spectra['vel_bins'].values, dV)

if 'snr' in which_moments:
power_with_noise = dV * spectra['power_spectral_density'].sum(dim='vel_bins')
spectra['snr'] = power_with_noise / (dV * len(spectra['vel_bins']))
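
A short, hedged usage sketch of the simplified get_lidar_moments call follows; spectra_ds is a placeholder for the output of get_psd, and the moment names come from the which_moments default shown above.

from highiq.calc.moments import get_lidar_moments

# spectra_ds is assumed to hold 'power_spectral_density', as produced by get_psd.
moments_ds = get_lidar_moments(
    spectra_ds,
    snr_thresh=0,  # raise this to mask gates with low signal-to-noise
    which_moments=['snr', 'doppler_velocity', 'spectral_width'])
print(moments_ds['doppler_velocity'])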
76 changes: 30 additions & 46 deletions highiq/calc/spectra.py
@@ -9,13 +9,12 @@

def _fast_expand(complex_array, factor, num_per_block=200):
shp = complex_array.shape
times = shp[0]
expanded_array = np.zeros((shp[0], shp[1], shp[2] * factor))
weights = np.tile(np.arange(0, factor) / factor, (shp[0], shp[1], 1))
for l in range(shp[2]):
gpu_array = tf.constant(complex_array[:, :, l], dtype=tf.float32)
if l < shp[2] - 1:
gpu_array2 = tf.constant(complex_array[:, :, l + 1], dtype=tf.float32)
for i in range(shp[2]):
gpu_array = tf.constant(complex_array[:, :, i], dtype=tf.float32)
if i < shp[2] - 1:
gpu_array2 = tf.constant(complex_array[:, :, i + 1], dtype=tf.float32)
diff_array = gpu_array2 - gpu_array
else:
diff_array = tf.zeros((shp[0], shp[1]))
@@ -25,9 +24,10 @@ def _fast_expand(complex_array, factor, num_per_block=200):
diff_array = tf.transpose(
np.tile(diff_array, (factor, 1, 1)), [1, 2, 0])
temp_array = diff_array * weights + rep_array
expanded_array[:, :, factor * l:factor * (l + 1)] = temp_array.numpy()
expanded_array[:, :, factor * i:factor * (i + 1)] = temp_array.numpy()
return expanded_array


def get_psd(spectra, gate_resolution=60., wavelength=None, fs=None, nfft=1024, time_window=None,
acf_name='acf', acf_bkg_name='acf_bkg', block_size_ratio=1.0):
"""
@@ -68,45 +68,37 @@

Q_ = UnitRegistry().Quantity
if fs is None:
if not "sample_rate" in spectra.attrs:
raise KeyError("If sample frequency is not specified, then ACT Dataset must contain a sample_rate " +
"attribute!")
if "sample_rate" not in spectra.attrs:
raise KeyError("If sample frequency is not specified, then ACT Dataset must contain a sample_rate attribute!")

fs_pint = Q_(spectra.attrs["sample_rate"])
fs_pint = fs_pint.to("Hz")
fs = fs_pint.magnitude


if wavelength is None:
if not "wavelength" in spectra.attrs:
if "wavelength" not in spectra.attrs:
raise KeyError("If wavelength is not specified, then the dataset must contain a wavelength attribute!")
fs_pint = Q_(spectra.attrs["wavelength"])
fs_pint = fs_pint.to("m")
wavelength = fs_pint.magnitude

if time_window is not None:
spectra = spectra.resample(time='%ds' % int(time_window)).mean()
else:
spectra[acf_bkg_name] = xr.DataArray(np.ones(spectra[acf_name].shape),
dims=spectra[acf_name].dims) * spectra[acf_bkg_name]
dims=spectra[acf_name].dims) * spectra[acf_bkg_name]

num_gates = int(gate_resolution / 3)
complex_coeff_in = spectra[acf_name].isel(complex=0).values +\
spectra[acf_name].isel(complex=1).values * 1j
complex_coeff_in = spectra[acf_name].isel(complex=0).values + \
spectra[acf_name].isel(complex=1).values * 1j

complex_coeff = np.zeros(
(complex_coeff_in.shape[0], int(complex_coeff_in.shape[1] / num_gates),
complex_coeff_in.shape[2]), dtype=np.complex128)
(complex_coeff_in.shape[0], int(complex_coeff_in.shape[1] / num_gates),
complex_coeff_in.shape[2]), dtype=np.complex128)
for i in range(complex_coeff.shape[1]):
complex_coeff[:, i, :] = np.mean(complex_coeff_in[
:, (num_gates * i):(num_gates * i+1), :], axis=1)


complex_coeff[:, i, :] = np.mean(complex_coeff_in[:, (num_gates * i):(num_gates * i + 1), :], axis=1)
del complex_coeff_in

ntimes = complex_coeff.shape[0]
freq = np.fft.fftfreq(nfft) * fs

spectra.attrs['nyquist_velocity'] = "%f m s-1" % (wavelength / (4 * 1 / fs))
spectra['freq_bins'] = xr.DataArray(freq, dims=['freq'])
spectra['freq_bins'].attrs["long_name"] = "Doppler spectra bins in frequency units"
@@ -118,23 +110,21 @@ def get_psd(spectra, gate_resolution=60., wavelength=None, fs=None, nfft=1024, t
spectra['vel_bins'] = xr.DataArray(vel_bins, dims=('vel_bins'), attrs=attrs_dict)
spectra['freq_bins'] = spectra['freq_bins'][inds_sorted]

complex_coeff_bkg_in = (spectra[acf_bkg_name].isel(complex=0).values +
spectra[acf_bkg_name].isel(complex=1).values * 1j)
complex_coeff_bkg_in = (spectra[acf_bkg_name].isel(complex=0).values + spectra[acf_bkg_name].isel(complex=1).values * 1j)

complex_coeff_bkg = np.zeros(
(complex_coeff_bkg_in.shape[0], int(complex_coeff_bkg_in.shape[1] / num_gates),
complex_coeff_bkg_in.shape[2]), dtype=np.complex128)
(complex_coeff_bkg_in.shape[0], int(complex_coeff_bkg_in.shape[1] / num_gates),
complex_coeff_bkg_in.shape[2]), dtype=np.complex128)
for i in range(complex_coeff.shape[1]):
complex_coeff_bkg[:, i, :] = np.mean(complex_coeff_bkg_in[:,
(num_gates * i):(num_gates * i+1), :], axis=1)
complex_coeff_bkg[:, i, :] = np.mean(complex_coeff_bkg_in[:, (num_gates * i):(num_gates * i + 1), :], axis=1)
num_lags = complex_coeff_bkg_in.shape[2]
if nfft < num_lags:
raise RuntimeError("Number of points in FFT < number of lags in sample!")
pad_after = int((nfft - num_lags))
pad_before = 0
pad_lengths = tf.constant([[0, 0], [0, 0], [pad_before, pad_after]])
frames = tf.pad(complex_coeff, pad_lengths, mode='CONSTANT', constant_values=0)

power = np.abs(tf.signal.fft(frames).numpy())
attrs_dict = {'long_name': 'Range', 'units': 'm'}
spectra['range'] = xr.DataArray(
@@ -143,18 +133,15 @@ def get_psd(spectra, gate_resolution=60., wavelength=None, fs=None, nfft=1024, t
spectra['power'] = xr.DataArray(
power[:, :, inds_sorted], dims=(('time', 'range', 'vel_bins')))
frames = tf.pad(complex_coeff_bkg, pad_lengths, mode='CONSTANT', constant_values=0)

power = np.abs(tf.signal.fft(frames).numpy())

spectra['power_bkg'] = xr.DataArray(
power[:, :, inds_sorted], dims=('time', 'range', 'vel_bins'))

# Subtract background noise
spectra['power_spectral_density'] = spectra['power'] / spectra['power_bkg']
spectra['power_spectral_density'].attrs["long_name"] = "Power spectral density"
spectra['power_spectral_density'].attrs["units"] = ''


spectra['range'].attrs['long_name'] = "Range"
spectra['range'].attrs['units'] = 'm'
spectra['vel_bins'].attrs['long_name'] = "Doppler velocity"
@@ -186,40 +173,37 @@ def calc_num_peaks(my_spectra, **kwargs):
shp = my_array.shape
num_peaks = np.zeros((shp[0], shp[1]))
peak_loc = np.nan * np.ones((shp[0], shp[1], 5))

vel_bins = my_spectra['vel_bins'].values

if not 'prominence' in kwargs.keys():
if 'prominence' not in kwargs.keys():
prominence = 0.01
else:
prominence = kwargs.pop('prominence')

if not 'width' in kwargs.keys():
if 'width' not in kwargs.keys():
width = 8
else:
width = kwargs.pop('width')

if not 'height' in kwargs.keys():
if 'height' not in kwargs.keys():
height = 1.5
else:
height = kwargs.pop('height')

for i in range(shp[0]):
for j in range(shp[1]):
max_spec = spectra[i, j].max()
peaks = find_peaks(my_array[i, j], prominence=prominence, width=width, **kwargs)[0]
peaks = find_peaks(my_array[i, j], prominence=prominence, width=width, height=height, **kwargs)[0]
num_peaks[i, j] = len(peaks)
for k in range(len(peaks)):
if k > 4:
continue
peak_loc[i, j, k] = vel_bins[peaks[k]]


my_spectra['npeaks'] = xr.DataArray(num_peaks, dims=('time', 'range'))
my_spectra['npeaks'].attrs['long_name'] = "Number of peaks in Doppler spectra"
my_spectra['npeaks'].attrs['units'] = "1"

my_spectra['peak_velocities'] = xr.DataArray(peak_loc, dims=('time', 'range', 'peak_no'))
my_spectra['peak_velocities'] = xr.DataArray(peak_loc, dims=('time', 'range', 'peak_no'))
my_spectra['peak_velocities'].attrs['long_name'] = "Doppler velocity peaks"
my_spectra['peak_velocities'].attrs['units'] = 'm s-1'
return my_spectra
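
A hedged usage sketch of calc_num_peaks with the tunable peak-finding parameters shown above; my_spectra is a placeholder for a dataset that has already been through the spectra processing, and the function is assumed to be importable from highiq.calc.spectra, the module this diff edits.

from highiq.calc.spectra import calc_num_peaks

# The keyword values mirror the defaults in the code above; pass different
# prominence/width/height values to tune the scipy find_peaks call.
my_spectra = calc_num_peaks(my_spectra, prominence=0.01, width=8, height=1.5)
print(my_spectra['npeaks'])           # number of detected peaks per time/range gate
print(my_spectra['peak_velocities'])  # up to five peak Doppler velocities (m s-1)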
2 changes: 1 addition & 1 deletion highiq/io/__init__.py
@@ -13,4 +13,4 @@
load_arm_netcdf
"""
from .arm_data import load_arm_netcdf, read_00_data
from .arm_data import load_arm_netcdf, read_00_data