
Commit 0e9825e

Dashboard improvements (#69)
* Dashboard improvements
  Signed-off-by: Raul Sevilla <[email protected]>
* Remove pod template variable
  Signed-off-by: Raul Sevilla <[email protected]>
---------
Signed-off-by: Raul Sevilla <[email protected]>
1 parent 0e11cea commit 0e9825e

3 files changed: 54 additions & 81 deletions


templates/etcd-on-cluster-dashboard.jsonnet

Lines changed: 9 additions & 23 deletions
@@ -295,7 +295,7 @@ local keys = grafana.graphPanel.new(
   datasource='$datasource',
 ).addTarget(
   prometheus.target(
-    'etcd_debugging_mvcc_keys_total{namespace="openshift-etcd",pod=~"$pod"}',
+    'etcd_debugging_mvcc_keys_total{namespace="openshift-etcd"}',
     legendFormat='{{ pod }} Num keys',
   )
 );
@@ -305,7 +305,7 @@ local compacted_keys = grafana.graphPanel.new(
   datasource='$datasource',
 ).addTarget(
   prometheus.target(
-    'etcd_debugging_mvcc_db_compaction_keys_total{namespace="openshift-etcd",pod=~"$pod"}',
+    'etcd_debugging_mvcc_db_compaction_keys_total{namespace="openshift-etcd"}',
     legendFormat='{{ pod }} keys compacted',
   )
 );
@@ -315,12 +315,12 @@ local heartbeat_failures = grafana.graphPanel.new(
   datasource='$datasource',
 ).addTarget(
   prometheus.target(
-    'etcd_server_heartbeat_send_failures_total{namespace="openshift-etcd",pod=~"$pod"}',
-    legendFormat='{{ pod }} heartbeat failures',
+    'etcd_server_heartbeat_send_failures_total{namespace="openshift-etcd"}',
+    legendFormat='{{ pod }} heartbeat failures',
   )
 ).addTarget(
   prometheus.target(
-    'etcd_server_health_failures{namespace="openshift-etcd",pod=~"$pod"}',
+    'etcd_server_health_failures{namespace="openshift-etcd"}',
     legendFormat='{{ pod }} health failures',
   )
 );
@@ -343,12 +343,12 @@ local key_operations = grafana.graphPanel.new(
   ],
 }.addTarget(
   prometheus.target(
-    'rate(etcd_debugging_mvcc_put_total{namespace="openshift-etcd",pod=~"$pod"}[2m])',
+    'rate(etcd_mvcc_put_total{namespace="openshift-etcd"}[2m])',
     legendFormat='{{ pod }} puts/s',
   )
 ).addTarget(
   prometheus.target(
-    'rate(etcd_debugging_mvcc_delete_total{namespace="openshift-etcd",pod=~"$pod"}[2m])',
+    'rate(etcd_mvcc_delete_total{namespace="openshift-etcd"}[2m])',
     legendFormat='{{ pod }} deletes/s',
   )
 );
@@ -370,12 +370,12 @@ local slow_operations = grafana.graphPanel.new(
   ],
 }.addTarget(
   prometheus.target(
-    'delta(etcd_server_slow_apply_total{namespace="openshift-etcd",pod=~"$pod"}[2m])',
+    'delta(etcd_server_slow_apply_total{namespace="openshift-etcd"}[2m])',
     legendFormat='{{ pod }} slow applies',
   )
 ).addTarget(
   prometheus.target(
-    'delta(etcd_server_slow_read_indexes_total{namespace="openshift-etcd",pod=~"$pod"}[2m])',
+    'delta(etcd_server_slow_read_indexes_total{namespace="openshift-etcd"}[2m])',
     legendFormat='{{ pod }} slow read indexes',
   )
 );
@@ -474,20 +474,6 @@ grafana.dashboard.new(
   )
 )
 
-.addTemplate(
-  grafana.template.new(
-    'pod',
-    '$datasource',
-    'label_values({job="etcd"}, pod)',
-    refresh=1,
-  ) {
-    type: 'query',
-    multi: false,
-    includeAll: false,
-  }
-)
-
-
 .addPanel(
   grafana.row.new(title='General Resource Usage', collapse=true).addPanels(
     [
templates/hypershift-performance.jsonnet

Lines changed: 34 additions & 47 deletions
@@ -23,7 +23,7 @@ local genericGraphLegendPanel(title, datasource, format) = grafana.graphPanel.ne
 
 local hostedControlPlaneCPU = genericGraphLegendPanel('Hosted Control Plane CPU', 'Cluster Prometheus', 'percent').addTarget(
   prometheus.target(
-    'topk(10,irate(container_cpu_usage_seconds_total{namespace=~"$namespace",container!="POD",name!=""}[1m])*100)',
+    'topk(10,irate(container_cpu_usage_seconds_total{namespace=~"$namespace",container!="POD",name!=""}[2m])*100)',
     legendFormat='{{pod}}/{{container}}',
   )
 );
@@ -179,7 +179,7 @@ local top10ContMemHosted = genericGraphLegendPanel('Top 10 Hosted Clusters conta
 
 local top10ContCPUHosted = genericGraphLegendPanel('Top 10 Hosted Clusters container CPU', 'Cluster Prometheus', 'percent').addTarget(
   prometheus.target(
-    'topk(10,irate(container_cpu_usage_seconds_total{namespace=~"^ocm-.*",container!="POD",name!=""}[1m])*100)',
+    'topk(10,irate(container_cpu_usage_seconds_total{namespace=~"^ocm-.*",container!="POD",name!=""}[2m])*100)',
     legendFormat='{{ namespace }} - {{ name }}',
   )
 );
@@ -193,7 +193,7 @@ local top10ContMemManagement = genericGraphLegendPanel('Top 10 Management Cluste
 
 local top10ContCPUManagement = genericGraphLegendPanel('Top 10 Management Cluster container CPU', 'Cluster Prometheus', 'percent').addTarget(
   prometheus.target(
-    'topk(10,irate(container_cpu_usage_seconds_total{namespace!="",container!="POD",name!=""}[1m])*100)',
+    'topk(10,irate(container_cpu_usage_seconds_total{namespace!="",container!="POD",name!=""}[2m])*100)',
     legendFormat='{{ namespace }} - {{ name }}',
   )
 );
@@ -207,7 +207,7 @@ local top10ContMemOBOManagement = genericGraphLegendPanel('Top 10 Management Clu
 
 local top10ContCPUOBOManagement = genericGraphLegendPanel('Top 10 Management Cluster OBO NS Pods CPU', 'Cluster Prometheus', 'percent').addTarget(
   prometheus.target(
-    'topk(10,irate(container_cpu_usage_seconds_total{namespace="openshift-observability-operator",container!="POD",name!=""}[1m])*100)',
+    'topk(10,irate(container_cpu_usage_seconds_total{namespace="openshift-observability-operator",container!="POD",name!=""}[2m])*100)',
     legendFormat='{{ pod }}/{{ container }}',
   )
 );
@@ -221,7 +221,7 @@ local top10ContMemHypershiftManagement = genericGraphLegendPanel('Top 10 Managem
 
 local top10ContCPUHypershiftManagement = genericGraphLegendPanel('Top 10 Management Cluster Hypershift NS Pods CPU', 'Cluster Prometheus', 'percent').addTarget(
   prometheus.target(
-    'topk(10,irate(container_cpu_usage_seconds_total{namespace="hypershift",container!="POD",name!=""}[1m])*100)',
+    'topk(10,irate(container_cpu_usage_seconds_total{namespace="hypershift",container!="POD",name!=""}[2m])*100)',
     legendFormat='{{ pod }}/{{ container }}',
   )
 );
@@ -341,7 +341,7 @@ local request_duration_99th_quantile_by_resource = grafana.graphPanel.new(
   legend_hideZero=true,
 ).addTarget(
   prometheus.target(
-    'histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{namespace=~"$namespace",resource=~"$resource",subresource!="log",verb!~"WATCH|WATCHLIST|PROXY"}[1m])) by(resource, namespace, verb, le))',
+    'histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{namespace=~"$namespace",resource=~"$resource",subresource!="log",verb!~"WATCH|WATCHLIST|PROXY"}[2m])) by(resource, namespace, verb, le))',
     legendFormat='{{verb}}:{{resource}}/{{namespace}}',
   )
 );
@@ -665,7 +665,7 @@ local mgmt_heartbeat_failures = grafana.graphPanel.new(
   )
 ).addTarget(
   prometheus.target(
-    'etcd_server_health_failures{namespace=~"openshift-etcd",pod=~"$pod"}',
+    'etcd_server_health_failures{namespace=~"openshift-etcd"}',
     legendFormat='{{namespace}} - {{ pod }} health failures',
   )
 );
@@ -688,12 +688,12 @@ local mgmt_key_operations = grafana.graphPanel.new(
   ],
 }.addTarget(
   prometheus.target(
-    'rate(etcd_debugging_mvcc_put_total{namespace=~"openshift-etcd"}[2m])',
+    'rate(etcd_mvcc_put_total{namespace=~"openshift-etcd"}[2m])',
     legendFormat='{{namespace}} - {{ pod }} puts/s',
   )
 ).addTarget(
   prometheus.target(
-    'rate(etcd_debugging_mvcc_delete_total{namespace=~"openshift-etcd"}[2m])',
+    'rate(etcd_mvcc_delete_total{namespace=~"openshift-etcd"}[2m])',
     legendFormat='{{namespace}} - {{ pod }} deletes/s',
   )
 );
@@ -1095,7 +1095,7 @@ local keys = grafana.graphPanel.new(
   datasource='OBO',
 ).addTarget(
   prometheus.target(
-    'etcd_debugging_mvcc_keys_total{namespace=~"$namespace",pod=~"$pod"}',
+    'etcd_debugging_mvcc_keys_total{namespace=~"$namespace"}',
     legendFormat='{{namespace}} - {{ pod }} Num keys',
   )
 );
@@ -1105,7 +1105,7 @@ local compacted_keys = grafana.graphPanel.new(
   datasource='OBO',
 ).addTarget(
   prometheus.target(
-    'etcd_debugging_mvcc_db_compaction_keys_total{namespace=~"$namespace",pod=~"$pod"}',
+    'etcd_debugging_mvcc_db_compaction_keys_total{namespace=~"$namespace"}',
     legendFormat='{{namespace}} - {{ pod }} keys compacted',
   )
 );
@@ -1115,12 +1115,12 @@ local heartbeat_failures = grafana.graphPanel.new(
   datasource='OBO',
 ).addTarget(
   prometheus.target(
-    'etcd_server_heartbeat_send_failures_total{namespace=~"$namespace",pod=~"$pod"}',
+    'etcd_server_heartbeat_send_failures_total{namespace=~"$namespace"}',
     legendFormat='{{namespace}} - {{ pod }} heartbeat failures',
   )
 ).addTarget(
   prometheus.target(
-    'etcd_server_health_failures{namespace=~"$namespace",pod=~"$pod"}',
+    'etcd_server_health_failures{namespace=~"$namespace"}',
     legendFormat='{{namespace}} - {{ pod }} health failures',
   )
 );
@@ -1143,12 +1143,12 @@ local key_operations = grafana.graphPanel.new(
   ],
 }.addTarget(
   prometheus.target(
-    'rate(etcd_debugging_mvcc_put_total{namespace=~"$namespace",pod=~"$pod"}[2m])',
+    'rate(etcd_mvcc_put_total{namespace=~"$namespace"}[2m])',
     legendFormat='{{namespace}} - {{ pod }} puts/s',
   )
 ).addTarget(
   prometheus.target(
-    'rate(etcd_debugging_mvcc_delete_total{namespace=~"$namespace",pod=~"$pod"}[2m])',
+    'rate(etcd_mvcc_delete_total{namespace=~"$namespace"}[2m])',
     legendFormat='{{namespace}} - {{ pod }} deletes/s',
   )
 );
@@ -1170,12 +1170,12 @@ local slow_operations = grafana.graphPanel.new(
   ],
 }.addTarget(
   prometheus.target(
-    'delta(etcd_server_slow_apply_total{namespace=~"$namespace",pod=~"$pod"}[2m])',
+    'delta(etcd_server_slow_apply_total{namespace=~"$namespace"}[2m])',
     legendFormat='{{namespace}} - {{ pod }} slow applies',
   )
 ).addTarget(
   prometheus.target(
-    'delta(etcd_server_slow_read_indexes_total{namespace=~"$namespace",pod=~"$pod"}[2m])',
+    'delta(etcd_server_slow_read_indexes_total{namespace=~"$namespace"}[2m])',
     legendFormat='{{namespace}} - {{ pod }} slow read indexes',
   )
 );
@@ -1271,7 +1271,7 @@ local request_duration_99th_quantile = grafana.graphPanel.new(
   legend_hideZero=true,
 ).addTarget(
   prometheus.target(
-    'histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{namespace=~"$namespace",resource=~"$resource",subresource!="log",verb!~"WATCH|WATCHLIST|PROXY"}[1m])) by(verb,le))',
+    'histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{namespace=~"$namespace",resource=~"$resource",subresource!="log",verb!~"WATCH|WATCHLIST|PROXY"}[2m])) by(verb,le))',
     legendFormat='{{verb}}',
   )
 );
@@ -1289,7 +1289,7 @@ local request_rate_by_instance = grafana.graphPanel.new(
   legend_hideZero=true,
 ).addTarget(
   prometheus.target(
-    'sum(rate(apiserver_request_total{namespace=~"$namespace",resource=~"$resource",code=~"$code",verb=~"$verb"}[1m])) by(instance)',
+    'sum(rate(apiserver_request_total{namespace=~"$namespace",resource=~"$resource",code=~"$code",verb=~"$verb"}[2m])) by(instance)',
     legendFormat='{{instance}}',
   )
 );
@@ -1307,7 +1307,7 @@ local request_duration_99th_quantile_by_resource = grafana.graphPanel.new(
   legend_hideZero=true,
 ).addTarget(
   prometheus.target(
-    'histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{namespace=~"$namespace",resource=~"$resource",subresource!="log",verb!~"WATCH|WATCHLIST|PROXY"}[1m])) by(resource,le))',
+    'histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{namespace=~"$namespace",resource=~"$resource",subresource!="log",verb!~"WATCH|WATCHLIST|PROXY"}[2m])) by(resource,le))',
     legendFormat='{{resource}}',
   )
 );
@@ -1325,7 +1325,7 @@ local request_rate_by_resource = grafana.graphPanel.new(
   legend_hideZero=true,
 ).addTarget(
   prometheus.target(
-    'sum(rate(apiserver_request_total{namespace=~"$namespace",resource=~"$resource",code=~"$code",verb=~"$verb"}[1m])) by(resource)',
+    'sum(rate(apiserver_request_total{namespace=~"$namespace",resource=~"$resource",code=~"$code",verb=~"$verb"}[2m])) by(resource)',
     legendFormat='{{resource}}',
   )
 );
@@ -1335,12 +1335,12 @@ local request_duration_read_write = grafana.graphPanel.new(
   datasource='OBO',
 ).addTarget(
   prometheus.target(
-    'histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{namespace=~"$namespace",resource=~"$resource",verb=~"LIST|GET"}[1m])) by(le))',
+    'histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{namespace=~"$namespace",resource=~"$resource",verb=~"LIST|GET"}[2m])) by(le))',
     legendFormat='read',
   )
 ).addTarget(
   prometheus.target(
-    'histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{namespace=~"$namespace",resource=~"$resource",verb=~"POST|PUT|PATCH|UPDATE|DELETE"}[1m])) by(le))',
+    'histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{namespace=~"$namespace",resource=~"$resource",verb=~"POST|PUT|PATCH|UPDATE|DELETE"}[2m])) by(le))',
     legendFormat='write',
   )
 );
@@ -1351,12 +1351,12 @@ local request_rate_read_write = grafana.graphPanel.new(
   datasource='OBO',
 ).addTarget(
   prometheus.target(
-    'sum(rate(apiserver_request_total{namespace=~"$namespace",resource=~"$resource",verb=~"LIST|GET"}[1m]))',
+    'sum(rate(apiserver_request_total{namespace=~"$namespace",resource=~"$resource",verb=~"LIST|GET"}[2m]))',
     legendFormat='read',
   )
 ).addTarget(
   prometheus.target(
-    'sum(rate(apiserver_request_total{namespace=~"$namespace",resource=~"$resource",verb=~"POST|PUT|PATCH|UPDATE|DELETE"}[1m]))',
+    'sum(rate(apiserver_request_total{namespace=~"$namespace",resource=~"$resource",verb=~"POST|PUT|PATCH|UPDATE|DELETE"}[2m]))',
     legendFormat='write',
   )
 );
@@ -1368,7 +1368,7 @@ local requests_dropped_rate = grafana.graphPanel.new(
   description='Number of requests dropped with "Try again later" response',
 ).addTarget(
   prometheus.target(
-    'sum(rate(apiserver_dropped_requests_total{namespace=~"$namespace"}[1m])) by (requestKind)',
+    'sum(rate(apiserver_dropped_requests_total{namespace=~"$namespace"}[2m])) by (requestKind)',
   )
 );
 
@@ -1379,7 +1379,7 @@ local requests_terminated_rate = grafana.graphPanel.new(
   description='Number of requests which apiserver terminated in self-defense',
 ).addTarget(
   prometheus.target(
-    'sum(rate(apiserver_request_terminations_total{namespace=~"$namespace",resource=~"$resource",code=~"$code"}[1m])) by(component)',
+    'sum(rate(apiserver_request_terminations_total{namespace=~"$namespace",resource=~"$resource",code=~"$code"}[2m])) by(component)',
   )
 );
 
@@ -1396,7 +1396,7 @@ local requests_status_rate = grafana.graphPanel.new(
   legend_hideZero=true,
 ).addTarget(
   prometheus.target(
-    'sum(rate(apiserver_request_total{namespace=~"$namespace",resource=~"$resource",verb=~"$verb",code=~"$code"}[1m])) by(code)',
+    'sum(rate(apiserver_request_total{namespace=~"$namespace",resource=~"$resource",verb=~"$verb",code=~"$code"}[2m])) by(code)',
     legendFormat='{{code}}'
   )
 );
@@ -1443,7 +1443,7 @@ local pf_requests_rejected = grafana.graphPanel.new(
   description='Number of requests rejected by API Priority and Fairness system',
 ).addTarget(
   prometheus.target(
-    'sum(rate(apiserver_flowcontrol_rejected_requests_total{namespace=~"$namespace"}[1m])) by (reason)',
+    'sum(rate(apiserver_flowcontrol_rejected_requests_total{namespace=~"$namespace"}[2m])) by (reason)',
   )
 );
 
@@ -1461,7 +1461,7 @@ local response_size_99th_quartile = grafana.graphPanel.new(
   legend_hideZero=true,
 ).addTarget(
   prometheus.target(
-    'histogram_quantile(0.99, sum(rate(apiserver_response_sizes_bucket{namespace=~"$namespace",resource=~"$resource",verb=~"$verb"}[1m])) by(instance,le))',
+    'histogram_quantile(0.99, sum(rate(apiserver_response_sizes_bucket{namespace=~"$namespace",resource=~"$resource",verb=~"$verb"}[2m])) by(instance,le))',
     legendFormat='{{instance}}',
   )
 );
@@ -1480,7 +1480,7 @@ local pf_request_queue_length = grafana.graphPanel.new(
   legend_hideZero=true,
 ).addTarget(
   prometheus.target(
-    'histogram_quantile(0.99, sum(rate(apiserver_flowcontrol_request_queue_length_after_enqueue_bucket{namespace=~"$namespace"}[1m])) by(flowSchema, priorityLevel, le))',
+    'histogram_quantile(0.99, sum(rate(apiserver_flowcontrol_request_queue_length_after_enqueue_bucket{namespace=~"$namespace"}[2m])) by(flowSchema, priorityLevel, le))',
     legendFormat='{{flowSchema}}:{{priorityLevel}}',
   )
 );
@@ -1499,7 +1499,7 @@ local pf_request_wait_duration_99th_quartile = grafana.graphPanel.new(
   legend_hideZero=true,
 ).addTarget(
   prometheus.target(
-    'histogram_quantile(0.99, sum(rate(apiserver_flowcontrol_request_wait_duration_seconds_bucket{namespace=~"$namespace"}[1m])) by(flowSchema, priorityLevel, le))',
+    'histogram_quantile(0.99, sum(rate(apiserver_flowcontrol_request_wait_duration_seconds_bucket{namespace=~"$namespace"}[2m])) by(flowSchema, priorityLevel, le))',
     legendFormat='{{flowSchema}}:{{priorityLevel}}',
   )
 );
@@ -1518,7 +1518,7 @@ local pf_request_execution_duration = grafana.graphPanel.new(
   legend_hideZero=true,
 ).addTarget(
   prometheus.target(
-    'histogram_quantile(0.99, sum(rate(apiserver_flowcontrol_request_execution_seconds_bucket{namespace=~"$namespace"}[1m])) by(flowSchema, priorityLevel, le))',
+    'histogram_quantile(0.99, sum(rate(apiserver_flowcontrol_request_execution_seconds_bucket{namespace=~"$namespace"}[2m])) by(flowSchema, priorityLevel, le))',
     legendFormat='{{flowSchema}}:{{priorityLevel}}',
   )
 );
@@ -1537,7 +1537,7 @@ local pf_request_dispatch_rate = grafana.graphPanel.new(
   legend_hideZero=true,
 ).addTarget(
   prometheus.target(
-    'sum(rate(apiserver_flowcontrol_dispatched_requests_total{namespace=~"$namespace"}[1m])) by(flowSchema,priorityLevel)',
+    'sum(rate(apiserver_flowcontrol_dispatched_requests_total{namespace=~"$namespace"}[2m])) by(flowSchema,priorityLevel)',
     legendFormat='{{flowSchema}}:{{priorityLevel}}',
   )
 );
@@ -1599,19 +1599,6 @@ grafana.dashboard.new(
   },
 )
 
-.addTemplate(
-  grafana.template.new(
-    'pod',
-    'Cluster Prometheus',
-    'label_values({pod=~"etcd.*", namespace="$namespace"}, pod)',
-    refresh=1,
-  ) {
-    type: 'query',
-    multi: true,
-    includeAll: false,
-  }
-)
-
 .addTemplate(
   grafana.template.new(
     'resource',
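The other recurring change in this file widens the rate()/irate() range window from [1m] to [2m]. A sketch of the rationale, under the assumption of typical 30s-1m Prometheus scrape intervals: rate() needs at least two samples inside the window, so a window close to the scrape interval can leave panels empty, while a 2m window reliably covers two samples. The standalone example below is hypothetical and only illustrates the pattern used throughout the panels above.

// Illustrative only: a 2m window spans at least two scrape samples,
// so rate() always has data to compute a per-second value.
local example_request_rate = grafana.graphPanel.new(
  'API request rate (example)',  // hypothetical title
  datasource='OBO',
).addTarget(
  prometheus.target(
    'sum(rate(apiserver_request_total{namespace=~"$namespace"}[2m])) by (code)',
    legendFormat='{{code}}',
  )
);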
