@@ -55,6 +55,10 @@ class GroupedDataSubscriber(Subscriber):
     the subsecond distribution, some data will be downsampled. Downsampled data count is tracked and reported through the
     downsampled_count property.
 
+    Only a single one-second data buffer will be published at a time. If data cannot be processed within the one-second
+    window, a warning message will be displayed and the data will be skipped. The number of skipped data sets is tracked
+    and reported through the process_missed_count property.
+
     This example depends on a semi-accurate system clock to group data by timestamp. If the system clock is not accurate,
     data may not be grouped as expected.
     """
@@ -96,6 +100,11 @@ def __init__(self):
         self._downsampled_count_lock = threading.Lock()
         self._downsampled_count = 0
 
+        self._process_lock = threading.Lock()
+
+        self._process_missed_count_lock = threading.Lock()
+        self._process_missed_count = 0
+
         # Set up event handlers for STTP API
         self.set_subscriptionupdated_receiver(self._subscription_updated)
         self.set_newmeasurements_receiver(self._new_measurements)
@@ -118,7 +127,25 @@ def downsampled_count(self, value: np.int32):
 
         with self._downsampled_count_lock:
             self._downsampled_count = value
+
+    @property
+    def process_missed_count(self) -> int:
+        """
+        Gets the count of missed data processing.
+        """
+
+        with self._process_missed_count_lock:
+            return self._process_missed_count
 
+    @process_missed_count.setter
+    def process_missed_count(self, value: np.int32):
+        """
+        Sets the count of missed data processing.
+        """
+
+        with self._process_missed_count_lock:
+            self._process_missed_count = value
+
     def set_grouped_data_receiver(self, callback: Optional[Callable[[GroupedDataSubscriber, np.uint64, Dict[np.uint64, Dict[UUID, Measurement]]], None]]):
         """
         Defines the callback function that handles grouped data that has been received.
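Because process_missed_count exposes a lock-guarded setter as well as a getter, user code could reset the counter after reporting it. The snippet below is hypothetical usage, not part of this diff, and assumes a subscriber instance is in scope.

    # Hypothetical reset of the skip counter from user code, e.g. at the end of a
    # grouped-data callback; the setter above serializes access with its lock.
    missed = subscriber.process_missed_count

    if missed > 0:
        print(f"{missed:,} one-second buffers skipped since the last report")
        subscriber.process_missed_count = 0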
@@ -209,9 +236,8 @@ def _new_measurements(self, measurements: List[Measurement]):
                 if current_time - timestamp >= window_size:
                     grouped_data = self._grouped_data.pop(timestamp)
 
-                    # Call user defined data function handler with grouped data
-                    if self._grouped_data_receiver is not None:
-                        self._grouped_data_receiver(self, timestamp, grouped_data)
+                    # Call user defined data function handler with one-second grouped data buffer on a separate thread
+                    threading.Thread(target=self._publish_data, args=(timestamp, grouped_data), name="PublishDataThread").start()
 
             # Provide user feedback on data reception
             if time() - self._lastmessage < 5.0:
@@ -236,6 +262,40 @@ def _new_measurements(self, measurements: List[Measurement]):
         finally:
             self._lastmessage = time()
 
+    def _publish_data(self, timestamp: np.uint64, data_buffer: Dict[np.uint64, Dict[UUID, Measurement]]):
+        time_str = Ticks.to_shortstring(timestamp).split(".")[0]
+
+        if self._process_lock.acquire(False):
+            try:
+                process_started = time()
+
+                if self._grouped_data_receiver is not None:
+                    self._grouped_data_receiver(self, timestamp, data_buffer)
+
+                self.statusmessage(f"Data publication for buffer at {time_str} processed in {self._get_elapsed_time_str(time() - process_started)}.\n")
+            finally:
+                self._process_lock.release()
+        else:
+            with self._process_missed_count_lock:
+                self._process_missed_count += 1
+                self.errormessage(f"WARNING: Data publication missed for buffer at {time_str}, a previous data buffer is still processing. {self._process_missed_count:,} data sets missed so far...\n")
+
+    def _get_elapsed_time_str(self, elapsed: float) -> str:
+        hours, rem = divmod(elapsed, 3600)
+        minutes, seconds = divmod(rem, 60)
+        milliseconds = (elapsed - int(elapsed)) * 1000
+
+        if hours < 1.0:
+            if minutes < 1.0:
+                if seconds < 1.0:
+                    return f"{int(milliseconds):03}ms"
+
+                return f"{int(seconds):02}.{int(milliseconds):03}sec"
+
+            return f"{int(minutes):02}:{int(seconds):02}.{int(milliseconds):03}"
+
+        return f"{int(hours):02}:{int(minutes):02}:{int(seconds):02}.{int(milliseconds):03}"
+
     def _connection_terminated(self):
         # Call default implementation which will display a connection terminated message to stderr
         self.default_connectionterminated_receiver()
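_publish_data above drops a buffer whenever a previous buffer is still being handled, using a non-blocking lock acquire. The standalone sketch below demonstrates the same skip-if-busy pattern outside of STTP; every name in it (busy, slow_work, handle_buffer, skipped) is illustrative, and the real method additionally guards its counter with _process_missed_count_lock.

    import threading
    import time

    busy = threading.Lock()
    skipped = 0  # buffers dropped because a previous one was still being handled

    def slow_work(buffer) -> None:
        time.sleep(1.5)  # stand-in for processing that overruns the one-second window

    def handle_buffer(buffer) -> None:
        global skipped

        # acquire(False) returns immediately: True if the lock was free,
        # False if another thread is still inside the critical section.
        if not busy.acquire(False):
            skipped += 1  # skip this buffer instead of queuing it
            return

        try:
            slow_work(buffer)
        finally:
            busy.release()

    # Two buffers arriving one second apart: the second is skipped because the
    # first is still being processed on its worker thread.
    threading.Thread(target=handle_buffer, args=("buffer-1",), name="PublishDataThread").start()
    time.sleep(1.0)
    handle_buffer("buffer-2")
    print(f"Skipped buffers: {skipped}")  # prints 1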
@@ -277,6 +337,10 @@ def process_data(subscriber: GroupedDataSubscriber, timestamp: np.uint64, data_b
     """
     User defined callback function that handles grouped data that has been received.
 
+    Note: This function is called by the subscriber when grouped data is available for processing.
+    Normally the function is called once per second with a buffer of grouped data for the second.
+    The call frequency can be lower if the processing of the data takes longer than a second.
+
     Parameters:
         timestamp: The timestamp, at top of second, for the grouped data
         data_buffer: The grouped one second data buffer:
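The note above describes when process_data runs; the callback is wired up through set_grouped_data_receiver, whose signature appears earlier in this diff. The sketch below is a hypothetical illustration of that registration and of walking the two-level buffer (subsecond timestamp to signal UUID to Measurement); it assumes the example module's GroupedDataSubscriber and process_data are in scope and may not match the example's own setup code exactly.

    # Hypothetical wiring and traversal, matching the Callable signature of
    # set_grouped_data_receiver: (subscriber, timestamp, data_buffer).
    subscriber = GroupedDataSubscriber()
    subscriber.set_grouped_data_receiver(process_data)

    def count_buffer_values(data_buffer) -> int:
        total = 0

        # Outer key: subsecond timestamp within the one-second window;
        # inner dict: one Measurement per signal, keyed by UUID.
        for _subsecond_timestamp, measurements in data_buffer.items():
            total += len(measurements)

        return total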
@@ -328,7 +392,7 @@ def process_data(subscriber: GroupedDataSubscriber, timestamp: np.uint64, data_b
 
     average_frequency = frequency_sum / frequency_count
 
-    print(f"Average frequency for {frequency_count:,} values in second {Ticks.to_shortstring(timestamp)}: {average_frequency:.6f} Hz")
+    print(f"Average frequency for {frequency_count:,} values in second {Ticks.to_datetime(timestamp).second}: {average_frequency:.6f} Hz")
 
     if subscriber.downsampled_count > 0:
         print(f" Downsampled {subscriber.downsampled_count:,} measurements in last measurement set...")