@@ -114,8 +114,8 @@ def predict_loads(
         current_has_replicas = curr_prov.num_nodes() > 1
         next_has_replicas = next_prov.num_nodes() > 1

-        curr_writer_cpu_util = ctx.metrics.aurora_writer_cpu_avg / 100
-        curr_writer_cpu_util_denorm = curr_writer_cpu_util * aurora_num_cpus(curr_prov)
+        curr_writer_cpu_util = float(ctx.metrics.aurora_writer_cpu_avg / 100)
+        curr_writer_cpu_util_denorm = float(curr_writer_cpu_util * aurora_num_cpus(curr_prov))

         # We take a very conservative approach to query movement. If new queries
         # are added onto Aurora, we increase the load. But if queries are
@@ -209,7 +209,7 @@ def predict_loads(
             # We currently have read replicas.
             curr_num_read_replicas = curr_prov.num_nodes() - 1
             total_reader_cpu_denorm = (
-                (ctx.metrics.aurora_reader_cpu_avg / 100)
+                float(ctx.metrics.aurora_reader_cpu_avg / 100)
                 * aurora_num_cpus(curr_prov)
                 * curr_num_read_replicas
             )
@@ -277,11 +277,11 @@ def compute_direct_cpu_denorm(
             per_query_cpu_denorm = np.clip(
                 query_run_times * alpha, a_min=0.0, a_max=load_max
             )
-            total_denorm = np.dot(per_query_cpu_denorm, arrival_weights)
-            max_query_cpu_denorm = (per_query_cpu_denorm * arrival_weights).max()
+            total_denorm = np.dot(per_query_cpu_denorm, arrival_weights).item()
+            max_query_cpu_denorm = (per_query_cpu_denorm * arrival_weights).max().item()
         else:
             # Edge case: Query with 0 arrival count (used as a constraint).
-            total_denorm = np.zeros_like(query_run_times)
+            total_denorm = 0.0
             max_query_cpu_denorm = 0.0
         if debug_dict is not None:
             debug_dict["aurora_total_cpu_denorm"] = total_denorm
@@ -309,7 +309,7 @@ def query_movement_factor(
         total_next_latency = np.dot(
             curr_query_run_times, workload.get_arrival_counts_batch(query_indices)
         )
-        return total_next_latency / norm_factor
+        return total_next_latency.item() / norm_factor

     @classmethod
     def predict_query_latency_load_resources(
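The changes above all apply the same pattern: values produced by NumPy reductions (np.dot, .max()) or NumPy-backed metrics are converted to built-in Python floats via float() or .item() before being stored or returned. The sketch below is illustrative only (the array values are made up and not part of the commit); it shows the type difference the conversions remove, and why replacing np.zeros_like(query_run_times) with 0.0 in the zero-arrival branch keeps the result a scalar rather than an array.

import numpy as np

# Stand-in values; in the real code these come from the workload and metrics.
per_query_cpu_denorm = np.array([0.5, 1.2, 0.8])
arrival_weights = np.array([10.0, 2.0, 5.0])

total_np = np.dot(per_query_cpu_denorm, arrival_weights)         # numpy.float64
total_py = np.dot(per_query_cpu_denorm, arrival_weights).item()  # plain float

print(type(total_np).__name__)  # float64
print(type(total_py).__name__)  # float

# np.zeros_like(...) yields an ndarray, not a scalar, which is why 0.0 is the
# consistent choice for the edge case alongside the .item() conversions above.
print(type(np.zeros_like(per_query_cpu_denorm)).__name__)  # ndarray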