#!/bin/sh
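#
# Setup script for the InterSCity experiment cluster on Google Kubernetes Engine.
# It creates/deletes the GKE cluster and its node pools, deploys the platform
# pods (Kong, RabbitMQ, MongoDB, etcd, Postgres with Stolon, Redis and the
# InterSCity services) and manages the simulator workload.
#
# Usage: ./setup {create-all|create-pods|delete-all|delete-pods|run|stop|copy}
#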
FILE_DIR=`perl -e 'use Cwd "abs_path";print abs_path(shift)' $0`
BASE_DIR=`dirname $FILE_DIR`/..
CLUSTER=cluster-xp
ZONE=us-central1-f
MACHINE_TYPE=n1-standard-2
NUM_NODES=25
MIN_NODES=25
MAX_NODES=25
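# verify MESSAGE: print MESSAGE and abort if the previous command failed
# (helper; not currently called by the functions below).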
verify () {
if [ $? != 0 ]; then
echo "$1"
exit 2
fi
}
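# Create the GKE cluster and the dedicated node pools for the simulator,
# MongoDB, Postgres and RabbitMQ, then fetch kubectl credentials for it.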
create_cluster () {
echo "Creating the cluster"
gcloud beta container clusters create $CLUSTER --zone $ZONE --machine-type $MACHINE_TYPE --num-nodes $NUM_NODES --preemptible --enable-autoscaling --min-nodes=$MIN_NODES --max-nodes=$MAX_NODES --disk-size=50
gcloud container node-pools create simulator-pool --machine-type=n1-highmem-16 --num-nodes=1 --cluster=$CLUSTER --preemptible --zone $ZONE --disk-size=30
#gcloud container node-pools create resource-discovery-pool --machine-type=$MACHINE_TYPE --num-nodes=11 --cluster=$CLUSTER --preemptible --zone $ZONE --disk-size=30
#gcloud container node-pools create resource-catalog-pool --machine-type=$MACHINE_TYPE --num-nodes=14 --cluster=$CLUSTER --preemptible --zone $ZONE --disk-size=30
#gcloud container node-pools create data-collector-pool --machine-type=$MACHINE_TYPE --num-nodes=14 --cluster=$CLUSTER --preemptible --zone $ZONE --disk-size=30
gcloud container node-pools create mongo-pool --machine-type=n1-highmem-2 --num-nodes=5 --cluster=$CLUSTER --preemptible --zone $ZONE --disk-size=40
gcloud container node-pools create postgres-pool --machine-type=n1-highmem-2 --num-nodes=5 --cluster=$CLUSTER --preemptible --zone $ZONE --disk-size=40
gcloud container node-pools create rabbitmq-pool --machine-type=n1-highmem-2 --num-nodes=1 --cluster=$CLUSTER --preemptible --zone $ZONE --disk-size=40
gcloud container clusters get-credentials $CLUSTER --zone $ZONE
#gcloud compute instances list --format='value[separator=","](name)'
}
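# Deploy the base infrastructure: Postgres and Kong (running the Kong database
# migration job first), RabbitMQ, MongoDB, etcd, Postgres managed by Stolon and
# Redis, then the InterSCity services, and finally enable autoscaling.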
create_pods () {
echo "Creating postgres service"
kubectl create -f kubernetes/postgres.yaml
pod_name=`kubectl get pods | grep 'postgres.*Running\|Running.*postgres' | head -1 | awk '{print $1}'`
while [ -z "$pod_name" ]
do
sleep 3
pod_name=`kubectl get pods | grep 'postgres.*Running\|Running.*postgres' | head -1 | awk '{print $1}'`
echo -n '.'
done
echo
echo "Postgres POD name: $pod_name"
echo "Preparing database"
kubectl create -f kubernetes/kong_migration_postgres.yaml
echo "Running job for database migration"
running=`kubectl get pods | grep kong-migration | wc -l`
while [ $running -eq 1 ]
do
sleep 3s
running=`kubectl get pods | grep kong-migration | wc -l`
echo -n '.'
done
echo
echo "Deleting job of database migration"
kubectl delete -f kubernetes/kong_migration_postgres.yaml
echo "Deploying Kong"
kubectl create -f kubernetes/kong_postgres.yaml
echo "Deploying RabbitMQ"
kubectl create secret generic rabbitmq-config --from-literal=erlang-cookie=c-is-for-cookie-thats-good-enough-for-me
kubectl create -f kubernetes/rabbitmq.yaml
echo "Deploying MongoDB"
kubectl apply -f kubernetes/googlecloud_ssd.yaml
kubectl apply -f kubernetes/mongo-statefulset.yaml
echo "Deploying etcd"
kubectl create -f kubernetes/etcd.yaml
sleep 3s
echo "Deploying Postgres with Stolon"
kubectl create -f kubernetes/postgres-stolon-interscity.yaml
sleep 10s
pod_name=`kubectl get pods -o go-template --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' | grep stolon-sentinel | head -1`
echo "Stolon sentinel POD name: $pod_name"
printf "yes\n" | kubectl exec $pod_name stolonctl -it -- --cluster-name=cluster-xp --store-backend=etcd --store-endpoints=http://etcd-client:2379 init
while [ $? != 0 ]
do
sleep 3
printf "yes\n" | kubectl exec $pod_name stolonctl -it -- --cluster-name=cluster-xp --store-backend=etcd --store-endpoints=http://etcd-client:2379 init
done
kubectl create -f kubernetes/redis.yaml
create_interscity_services
enable_autoscaling
echo "======== Current Status ==========="
kubectl get svc,pods
}
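# Deploy the InterSCity services (Resource Cataloguer, Data Collector and
# Resource Discoverer), running their migration/index jobs and restoring the
# MongoDB backup used by the Data Collector.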
create_interscity_services() {
echo "Preparing Resource Catalog database"
kubectl create -f kubernetes/resource-cataloguer-migration.yaml
echo "Running job for database migration"
running=`kubectl get pods | grep resource-cataloguer-migration | wc -l`
while [ $running -eq 1 ]
do
sleep 3s
running=`kubectl get pods | grep resource-cataloguer-migration | wc -l`
echo -n '.'
done
echo
echo "Deleting job of resource-cataloguer database migration"
kubectl delete -f kubernetes/resource-cataloguer-migration.yaml
echo "Deploying Resource Cataloguer"
kubectl create -f kubernetes/resource-cataloguer.yaml
#=====================================================================
#echo "Preparing Resource Adaptor database"
#kubectl create -f kubernetes/resource-adaptor-migration.yaml
#echo "Running job for database migration"
#running=`kubectl get pods | grep resource-adaptor-migration | wc -l`
#while [ $running -eq 1 ]
#do
# sleep 3s
# running=`kubectl get pods | grep resource-adaptor-migration | wc -l`
# echo -n '.'
#done
#echo
#echo "Deleting job of resource-adaptor database migration"
#kubectl delete -f kubernetes/resource-adaptor-migration.yaml
#echo "Deploying Resource Cataloguer"
#kubectl create -f kubernetes/resource-adaptor.yaml
#=====================================================================
echo "Restoring MongoDB for DataCollector"
kubectl exec -it mongo-0 -- apt-get update
kubectl exec -it mongo-0 -- apt-get install wget -y;
kubectl exec -it mongo-0 -- wget https://www.ime.usp.br/~esposte/documents/interscity-experiments/data-collector-backup.tar.gz -O /tmp/data-collector-backup.tar.gz;
kubectl exec -it mongo-0 -- tar -zxvf /tmp/data-collector-backup.tar.gz -C /tmp/;
kubectl exec -it mongo-0 -- mongorestore --drop --db data_collector_development /tmp/tmp/data_collector_development -h mongo
echo "Restoring MongoDB Cache Last Sensor Value collection"
kubectl exec -it mongo-cache-0 -- mongoexport --host mongo-0.mongo --db data_collector_development --collection last_sensor_values --out /tmp/last_sensor_values.json
kubectl exec -it mongo-cache-0 -- mongoimport --db data_collector_cache_development --file /tmp/last_sensor_values.json
echo "Preparing Data Collector indexes database"
kubectl create -f kubernetes/data-collector-index.yaml
echo "Running job for indexes creation"
running=`kubectl get pods | grep data-collector-index | wc -l`
while [ $running -eq 1 ]
do
sleep 3s
running=`kubectl get pods | grep data-collector-index | wc -l`
echo -n '.'
done
echo
echo "Deleting job of data-collector-index"
kubectl delete -f kubernetes/data-collector-index.yaml
echo "Deploying Data Collector"
kubectl create -f kubernetes/data-collector.yaml
#=====================================================================
echo "Deploying Resource Discoverer"
kubectl create -f kubernetes/resource-discoverer.yaml
}
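# Deploy the smart city simulator.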
create_simulator() {
echo "Deploying Simulator"
kubectl create -f kubernetes/simulator.yaml
}
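# Create horizontal pod autoscalers for the platform deployments.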
enable_autoscaling(){
kubectl autoscale deployment data-collector --cpu-percent=60 --min=4 --max=100
#kubectl autoscale deployment resource-adaptor --cpu-percent=70 --min=1 --max=20
kubectl autoscale deployment resource-discovery --cpu-percent=60 --min=4 --max=100
kubectl autoscale deployment resource-catalog --cpu-percent=60 --min=4 --max=100
kubectl autoscale deployment kong-rc --cpu-percent=60 --min=4 --max=100
}
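# Remove the horizontal pod autoscalers created by enable_autoscaling.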
disable_autoscaling(){
kubectl delete hpa data-collector
#kubectl delete hpa resource-adaptor
kubectl delete hpa resource-discovery
kubectl delete hpa resource-catalog
kubectl delete hpa kong-rc
}
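# Copy the simulator result files (response_time.csv and events.csv) from the
# simulator pod to /tmp on the local machine.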
copy_simulator_output(){
echo "Copying output to /tmp/response_time.csv"
pod_name=`kubectl get pods -o go-template --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' | grep simulator | head -1`
kubectl cp default/$pod_name:/src/mock-simulators/smart_city_model/output/response_time.csv /tmp/response_time.csv
kubectl cp default/$pod_name:/src/mock-simulators/smart_city_model/output/events.csv /tmp/events.csv
}
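# Save the simulator output and remove the simulator deployment.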
delete_simulator() {
copy_simulator_output
echo "Deploying Simulator"
kubectl delete -f kubernetes/simulator.yaml
}
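# Delete the node pools and the GKE cluster, then clean up leftover GCP
# resources (disks, addresses, forwarding rules, backend services, health
# checks, target pools and firewall rules). Note: the cleanup loops operate
# on every resource listed in the project, not only those of this cluster.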
delete_cluster() {
echo "Deleting the cluster"
yes | gcloud container clusters delete $CLUSTER --zone $ZONE
echo "Cluster deleted!"
echo "Deleting node-pools"
yes | gcloud container node-pools delete simulator-pool --cluster=$CLUSTER
yes | gcloud container node-pools delete mongo-pool --cluster=$CLUSTER
yes | gcloud container node-pools delete rabbitmq-pool --cluster=$CLUSTER
yes | gcloud container node-pools delete postgres-pool --cluster=$CLUSTER
yes | gcloud container node-pools delete resource-catalog-pool --cluster=$CLUSTER
yes | gcloud container node-pools delete resource-discovery-pool --cluster=$CLUSTER
yes | gcloud container node-pools delete data-collector-pool --cluster=$CLUSTER
echo "Deleting disks"
names=`gcloud compute disks list | awk 'NR>1 {print $1}'`
for name in $names; do
echo "Disk: $name"
yes | gcloud compute disks delete $name
done
echo "Deleting addresses"
names=`gcloud compute addresses list | awk 'NR>1 {print $1}'`
for name in $names; do
echo "Address: $name"
yes | gcloud compute addresses delete $name
done
echo "Deleting fowarding-rules"
names=`gcloud compute forwarding-rules list | sed -n '1!p' | while read -r instance; do printf $instance | awk '{print $1}'; done`
for name in $names; do
echo "Fowarding-rules: $name"
yes | gcloud compute forwarding-rules delete $name --region=us-central1
done
echo "Deleting backend-services"
names=`gcloud compute backend-services list | awk 'NR>1 {print $1}'`
for name in $names; do
echo "Backend-services: $name"
yes | gcloud compute backend-services delete $name --region=us-central1
done
echo "Deleting Health checks"
names=`gcloud compute health-checks list | awk 'NR>1 {print $1}'`
for name in $names; do
echo "health-checks: $name"
yes | gcloud compute health-checks delete $name
done
echo "Deleting target-pools"
names=`gcloud compute target-pools list | awk 'NR>1 {print $1}'`
for name in $names; do
echo "Target Pools: $name"
yes | gcloud compute target-pools delete $name
done
echo "Deleting firewall-rules"
names=`gcloud compute firewall-rules list | awk 'NR>1 {print $1}'`
for name in $names; do
echo "Firewall-rule: $name"
yes | gcloud compute firewall-rules delete $name
done
}
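# Remove all Kubernetes resources created by create_pods and
# create_interscity_services.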
delete_pods() {
disable_autoscaling
echo "Deleting postgres service"
kubectl delete -f kubernetes/postgres.yaml
echo "Deleting Kong"
kubectl delete -f kubernetes/kong_migration_postgres.yaml
kubectl delete -f kubernetes/kong_postgres.yaml
echo "Deleting RabbitMQ"
kubectl delete -f kubernetes/rabbitmq.yaml
echo "Deleting MongoDB"
kubectl delete -f kubernetes/mongo-statefulset.yaml
kubectl delete pvc -l role=mongo
kubectl delete pvc -l role=mongo-cache
echo "Deploying etcd"
kubectl delete -f kubernetes/etcd.yaml
echo "Deleting Postgres"
kubectl delete -f kubernetes/postgres-stolon-interscity.yaml
echo "Deleting Redis"
kubectl delete -f kubernetes/redis.yaml
echo "Deleting Resource Cataloguer"
kubectl delete -f kubernetes/resource-cataloguer-migration.yaml
kubectl delete -f kubernetes/resource-cataloguer.yaml
#echo "Deleting Resource Adaptor"
#kubectl delete -f kubernetes/resource-adaptor-migration.yaml
#kubectl delete -f kubernetes/resource-adaptor.yaml
echo "Deleting Data Collector"
kubectl delete -f kubernetes/data-collector-index.yaml
kubectl delete -f kubernetes/data-collector.yaml
echo "Deleting Resource Discoverer"
kubectl delete -f kubernetes/resource-discoverer.yaml
}
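# scale_mongo N: scale the mongo statefulset to N replicas
# (helper; not wired to the command dispatch below).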
scale_mongo() {
kubectl scale --replicas=$1 statefulset mongo
}
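# Scale the Stolon sentinel and proxy replication controllers to 3 replicas
# (helper; not wired to the command dispatch below).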
scale_postgres() {
kubectl scale --replicas=3 rc stolon-sentinel
kubectl scale --replicas=3 rc stolon-proxy
}
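# Command dispatch: the first argument selects the action.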
if [ "$1" = "create-all" ]; then
create_cluster
create_pods
fi
if [ "$1" = "create-pods" ]; then
create_pods
fi
if [ "$1" = "delete-all" ]; then
delete_pods
delete_cluster
fi
if [ "$1" = "delete-pods" ]; then
delete_pods
fi
if [ "$1" = "run" ]; then
create_simulator
file=/tmp/containers.txt
echo "Monitoring the number of containers (output to $file). Press Ctrl+C to stop monitoring."
if [ -e $file ]
then
rm $file
fi
while true; do echo "====="; date +"%T"; kubectl get pods | grep Running; sleep 60; done > $file
fi
if [ "$1" = "stop" ]; then
delete_simulator
fi
if [ "$1" = "copy" ]; then
copy_simulator_output
fi