Skip to content

Commit

Permalink
Implement inertia_compensation_system.py analysis
Browse files Browse the repository at this point in the history
  • Loading branch information
b1quint committed Sep 14, 2023
1 parent a26a970 commit 93caea9
Showing 1 changed file with 250 additions and 25 deletions.
275 changes: 250 additions & 25 deletions python/lsst/summit/testing/analysis/m1m3/inertia_compensation_system.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,16 @@
#!/usr/bin/python
import logging
import numpy as np
import pandas as pd

from astropy import units as u
from astropy.time import Time
from datetime import timedelta

from lsst.summit.utils.efdUtils import getEfdData, makeEfdClient
from lsst.summit.utils.tmaUtils import TMAEventMaker, TMAEvent

# TODO @bquint: fix logger - not working now.

formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
formatter.datefmt = "%Y-%m-%d %H:%M:%S"

Expand All @@ -30,12 +36,163 @@ class InertiaCompensationSystemAnalysis:
Abstract representation of a slew event.
"""

# ToDo @bquint
def __init__(event: TMAEvent):
raise NotImplementedError
def __init__(
    self,
    event: TMAEvent,
    inner_pad: float = 0.0,
    outer_pad: float = 0.0,
    n_sigma: float = 1.0,
):
    """
    Analyze the M1M3 inertia compensation system during a TMA slew event.

    Parameters
    ----------
    event : TMAEvent
        The TMA slew event to analyze.
    inner_pad : float, optional
        Time in seconds trimmed from each edge of the stable window
        (stored as an astropy Quantity in seconds).
    outer_pad : float, optional
        Time in seconds added before and after the event when querying
        the EFD (stored as an astropy Quantity in seconds).
    n_sigma : float, optional
        Sigma threshold used when identifying the stable torque region.
    """
    self.event = event
    self.inner_pad = inner_pad * u.second
    self.outer_pad = outer_pad * u.second
    self.n_sigma = n_sigma
    self.client = makeEfdClient()

    # TODO: Find a better way to implement the logger inside the class
    self.logger = logger

    # Six hardpoints -> columns measuredForce0 .. measuredForce5
    self.number_of_hardpoints = 6
    self.measured_forces_topics = [
        f"measuredForce{i}" for i in range(self.number_of_hardpoints)
    ]

    # Populated later (see run/query_dataset/get_stats)
    self.df = None
    self.stats = None

async def run(self):
    """Run the analysis: query the dataset and compute the statistics."""
    self.df = self.query_dataset()
    self.stats = self.get_stats()

def query_dataset(self):
    """
    Query the relevant data, resample them to a common time base
    and merge them into a single dataframe.

    Returns
    -------
    pd.DataFrame
        Hardpoint measured forces merged (nearest-match within 1 s)
        with azimuth/elevation actual torque and velocity. Torques are
        scaled by 1e-3.
    """

    def query_mount_axis(topic, prefix):
        # Query one mount axis and normalize column names and time index.
        axis_df = getEfdData(
            self.client,
            topic,
            columns=["timestamp", "actualTorque", "actualVelocity"],
            event=self.event,
            prePadding=self.outer_pad,
            postPadding=self.outer_pad,
            warn=False,
        )
        axis_df = axis_df.rename(
            columns={
                "actualTorque": f"{prefix}_actual_torque",
                "actualVelocity": f"{prefix}_actual_velocity",
            }
        )
        # Timestamps are unix TAI; convert to UTC datetimes so the
        # index is comparable with the hardpoint data index.
        axis_df["timestamp"] = Time(
            axis_df["timestamp"], format="unix_tai", scale="utc"
        ).datetime
        axis_df.set_index("timestamp", inplace=True)
        axis_df.index = axis_df.index.tz_localize("UTC")
        return axis_df

    self.logger.info("Querying dataset: m1m3 hp measured forces")
    hp_measured_forces = getEfdData(
        self.client,
        "lsst.sal.MTM1M3.hardpointActuatorData",
        columns=self.measured_forces_topics,
        event=self.event,
        prePadding=self.outer_pad,
        postPadding=self.outer_pad,
        warn=False,
    )

    self.logger.info("Querying dataset: mtmount azimuth torque and velocity")
    tma_az = query_mount_axis("lsst.sal.MTMount.azimuth", "az")

    self.logger.info("Querying dataset: mtmount elevation torque and velocity")
    tma_el = query_mount_axis("lsst.sal.MTMount.elevation", "el")

    # Nearest-neighbour merge on the time index, tolerating up to 1 s
    merge_cfg = {
        "left_index": True,
        "right_index": True,
        "tolerance": timedelta(seconds=1),
        "direction": "nearest",
    }

    merged_df = pd.merge_asof(hp_measured_forces, tma_az, **merge_cfg)
    merged_df = pd.merge_asof(merged_df, tma_el, **merge_cfg)

    # Scale torques down by 1e-3 (e.g. N*m -> kN*m) for readability
    merged_df[["az_actual_torque", "el_actual_torque"]] = (
        1e-3 * merged_df[["az_actual_torque", "el_actual_torque"]]
    )

    return merged_df

def get_stats(self):
    """
    Calculate statistics for each hardpoint measured-force column.

    Returns
    -------
    pd.DataFrame
        One row per hardpoint force column. Columns combine the
        min/max/peak-to-peak values over the full slew with the
        mean/median/std values inside the stable time window.

    Notes
    -----
    The stable window comes from ``find_stable_region`` and is shrunk
    by ``inner_pad`` on each side before the stable statistics are
    computed.
    """
    cols = self.measured_forces_topics

    # Statistics over the whole (padded) slew
    full_slew_stats = pd.DataFrame(
        data=[self.get_slew_minmax(self.df[col]) for col in cols], index=cols
    )

    self.logger.info("Find stable time window")
    begin, end = self.find_stable_region()

    self.logger.debug("Update begin and end times")
    begin = begin + self.inner_pad
    end = end - self.inner_pad

    self.logger.debug("Calculate statistics in stable time window")
    stable_slew_stats = pd.DataFrame(
        data=[
            self.get_stats_in_torqueless_interval(
                self.df[col].loc[begin.isot : end.isot]
            )
            for col in cols
        ],
        index=cols,
    )

    self.logger.debug("Concatenate statistics")
    stats = pd.concat((full_slew_stats, stable_slew_stats), axis=1)

    return stats

def get_midppoint(self):
"""Returns the halfway point between begin and end"""
return self.df.index[len(self.df.index) // 2]

@staticmethod
def get_minmax_from_series(s):
def get_slew_minmax(s):
"""
Calculate minimum, maximum, and peak-to-peak values for a data-series.
Expand All @@ -47,8 +204,7 @@ def get_minmax_from_series(s):
Returns
-------
pandas.Series
A Series containing the following calculated values for the two
halves of the input Series:
A Series containing the following calculated values for the two halves of the input Series:
- min: Minimum value of the Series.
- max: Maximum value of the Series.
- ptp: Peak-to-peak (ptp) value of the Series (abs(max - min)).
Expand All @@ -61,23 +217,26 @@ def get_minmax_from_series(s):
return result

@staticmethod
def get_avgmedstd_from_series(s):
def get_stats_in_torqueless_interval(s):
"""
Calculate average, median, and standard deviation for a data-series.
Calculate statistical measures within a torqueless interval.
Parameters
----------
This static method computes descriptive statistics for a given pandas Series within
a torqueless interval. The torqueless interval represents a period of the data
analysis when no external torque is applied.
Parameters:
-----------
s : pandas.Series
The input pandas Series containing data.
A pandas Series containing data values for analysis.
Returns
-------
Returns:
--------
pandas.Series
A Series containing the following calculated values for the two
halves of the input Series:
- mean: The mean value of the Series.
- median: The median value of the Series.
- std: The standard deviation of the Series.
A pandas Series containing the following statistical measures:
- Mean: The arithmetic mean of the data.
- Median: The median value of the data.
- Standard Deviation (Std): The standard deviation of the data.
"""
result = pd.Series(
data=[s.mean(), s.median(), s.std()],
Expand All @@ -86,6 +245,56 @@ def get_avgmedstd_from_series(s):
)
return result

def find_stable_region(self):
    """
    Find the time window in which both mount torques are stable.

    A sample is considered stable when its torque deviates from the
    column mean by less than ``n_sigma`` standard deviations. The
    window is the overlap obtained by taking the latest region start
    and the earliest region end across azimuth and elevation.

    Returns
    -------
    tuple of astropy.time.Time
        Begin and end times (UTC) of the stable window.
    """
    stable_regions = []
    for column in ("az_actual_torque", "el_actual_torque"):
        torque = self.df[column]
        mask = np.abs(torque - torque.mean()) < self.n_sigma * torque.std()
        stable_regions.extend(find_adjacent_true_regions(mask))

    # Latest start / earliest end across all regions gives the overlap
    stable_begin = Time(max(reg[0] for reg in stable_regions), scale="utc")
    stable_end = Time(min(reg[-1] for reg in stable_regions), scale="utc")

    return stable_begin, stable_end


def find_adjacent_true_regions(series, min_adjacent=None):
    """
    Find regions in a boolean Series containing adjacent True values.

    Parameters
    ----------
    series : pd.Series
        The boolean Series to search for regions.
    min_adjacent : int, optional
        Minimum number of adjacent True values in a region.
        Defaults to half the size of the series.

    Returns
    -------
    list
        A list of tuples with the start and end indices of each region
        containing at least ``min_adjacent`` adjacent True values.
    """
    # Use an explicit None check so a caller-supplied 0 is honored
    # (the previous truthiness test silently replaced it with the default).
    if min_adjacent is None:
        min_adjacent = 0.5 * series.size

    regions = []
    # Label consecutive runs of equal values, then keep only True runs.
    for _, group in series.groupby((series != series.shift()).cumsum()):
        # Check the run's *value*: the previous code tested the run label,
        # which is always truthy, so long runs of False were also returned.
        if group.iloc[0] and len(group) >= min_adjacent:
            region_indices = group.index
            regions.append((region_indices.min(), region_indices.max()))
    return regions


def get_tma_slew_event(day_obs, seq_number):
"""
Expand All @@ -111,16 +320,16 @@ def get_tma_slew_event(day_obs, seq_number):
the specified day of observation (dayObs). The events are filtered to
include only those that start after 1 second before the specified start time
and end before 1 second after the specified end time.
Returns
-------
lsst.summit.utils.tmaUtils.TMAEvent
A TMA slew event that occurred within the specified time range.
A TMA slew event that occurred within the specified time range.
Raises
------
ValueError
If no events are found for the specified time range.
If no events are found for the specified time range.
ValueError
If more than one event is found for the specified time range.
"""
Expand All @@ -142,7 +351,7 @@ def get_tma_slew_event(day_obs, seq_number):
if len(single_event) == 0:
raise ValueError(f"Could not find any events for {day_obs}. ")

return single_event[0]
return single_event[0]


def evaluate_single_slew(day_obs, seq_number):
    """
    Evaluate the M1M3 inertia compensation system for a single slew.

    Parameters
    ----------
    day_obs : int
        Observation day (dayObs) identifying the night.
    seq_number : int
        Sequence number of the slew event within that day.
    """
    logger.info("Retrieving TMA slew event.")
    event = get_tma_slew_event(day_obs, seq_number)

    logger.info("Start inertia compensation system analysis.")
    performance_analysis = InertiaCompensationSystemAnalysis(
        event,
        inner_pad=1.0,
        outer_pad=1.0,
        n_sigma=1.0,
    )

    logger.info("Query datasets")
    performance_analysis.df = performance_analysis.query_dataset()

    logger.info("Calculate statistics")
    performance_analysis.stats = performance_analysis.get_stats()

    print(performance_analysis.stats)


if __name__ == "__main__":
logger.info("Start")
Expand Down

0 comments on commit 93caea9

Please sign in to comment.