# This file can be used directly by 'phd', see 'build-all.sh' in this
# directory for how it can be invoked. The only requirement is a list
# of nodes you'd like it to modify.
#
# The scope of each command-block is controlled by the preceding
# 'target' line.
#
# - target=all
#   The commands are executed on every node provided
#
# - target=local
# The commands are executed from the node hosting phd. When not
#   using phd, they should be run from some other independent host
# (such as the puppet master)
#
# - target=$PHD_ENV_nodes{N}
# The commands are executed on the Nth node provided.
#   For example, to run only on the first node, use target=$PHD_ENV_nodes1
#
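# For illustration, a minimal command-block (a hypothetical sketch
# following the conventions above, not taken from this scenario)
# would look like:
#
#   target=all
#   ....
#   echo "this command runs on every node"
#   ....
#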
# Tasks to be performed at this step include:
# - installing the glance and NFS utility packages
# - mounting the NFS share that backs the image store
# - configuring the glance API and registry services
# - creating the pacemaker resources and constraints that manage them
#################################
# Scenario Requirements Section #
#################################
= VARIABLES =
PHD_VAR_network_internal
PHD_VAR_network_hosts_rabbitmq
PHD_VAR_env_configdir
PHD_VAR_deployment
#################################
# Scenario Requirements Section #
#################################
= REQUIREMENTS =
nodes: 1
######################
# Deployment Scripts #
######################
= SCRIPTS =
target=all
....
yum install -y openstack-glance openstack-utils nfs-utils
....
target=all
....
# Unfortunately https://bugzilla.redhat.com/show_bug.cgi?id=1175005
# prevents NFS mounts from succeeding by default prior to a reboot
systemctl daemon-reload
systemctl start rpcbind.service
systemctl start rpc-statd.service
# Now mount /srv so that we can use $PHD_VAR_env_configdir further down
if grep -q /srv /etc/fstab; then
    echo "/srv is already present in /etc/fstab"
else
    mkdir -p /srv
    echo "${PHD_VAR_network_internal}.1:/srv /srv nfs defaults,v3 0 0" >> /etc/fstab
    mount /srv
fi
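# Optional sanity check (a minimal sketch): confirm the share really is
# mounted before later steps rely on $PHD_VAR_env_configdir
mountpoint -q /srv || echo "WARNING: /srv is not mounted"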
....
target=all
....
# Configure the API service
openstack-config --set /etc/glance/glance-api.conf database connection mysql://glance:glancetest@vip-mysql/glance
openstack-config --set /etc/glance/glance-api.conf database max_retries -1
openstack-config --set /etc/glance/glance-api.conf paste_deploy flavor keystone
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_host vip-keystone
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_tenant_name services
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_user glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_password glancetest
openstack-config --set /etc/glance/glance-api.conf DEFAULT notification_driver messaging
openstack-config --set /etc/glance/glance-api.conf DEFAULT rabbit_hosts ${PHD_VAR_network_hosts_rabbitmq}
openstack-config --set /etc/glance/glance-api.conf DEFAULT rabbit_ha_queues true
openstack-config --set /etc/glance/glance-api.conf DEFAULT registry_host vip-glance
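# Bind the API service to this node's eth1 address; the pipeline below
# extracts the dynamic inet address from the 'ip addr' output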
openstack-config --set /etc/glance/glance-api.conf DEFAULT bind_host $(ip addr show dev eth1 scope global | grep dynamic | sed -e 's#.*inet ##g' -e 's#/.*##g')
# Configure the registry service
openstack-config --set /etc/glance/glance-registry.conf database connection mysql://glance:glancetest@vip-mysql/glance
openstack-config --set /etc/glance/glance-registry.conf database max_retries -1
openstack-config --set /etc/glance/glance-registry.conf DEFAULT registry_host vip-glance
openstack-config --set /etc/glance/glance-registry.conf paste_deploy flavor keystone
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_host vip-keystone
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken admin_tenant_name services
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken admin_user glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken admin_password glancetest
openstack-config --set /etc/glance/glance-registry.conf DEFAULT bind_host $(ip addr show dev eth1 scope global | grep dynamic | sed -e 's#.*inet ##g' -e 's#/.*##g')
# create the NFS share mountpoint on the nfs server
mkdir -p $PHD_VAR_env_configdir/glance
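# Optional sanity check (a sketch): read one of the values back with
# openstack-config --get, so a mis-written option is caught here rather
# than at service start
openstack-config --get /etc/glance/glance-api.conf DEFAULT registry_host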
....
target=$PHD_ENV_nodes1
....
# populate the glance db entries
su glance -s /bin/sh -c "glance-manage db_sync"
# Now have pacemaker mount the NFS share as a service.
pcs resource create glance-fs Filesystem device="${PHD_VAR_network_internal}.1:$PHD_VAR_env_configdir/glance" directory="/var/lib/glance" fstype="nfs" options="v3" --clone
# wait for glance-fs to be started and running, then fix the ownership
# of the mountpoint
chown glance:nobody /var/lib/glance
pcs resource create glance-registry systemd:openstack-glance-registry --clone
pcs resource create glance-api systemd:openstack-glance-api --clone
pcs constraint order start glance-fs-clone then glance-registry-clone
pcs constraint colocation add glance-registry-clone with glance-fs-clone
pcs constraint order start glance-registry-clone then glance-api-clone
pcs constraint colocation add glance-api-clone with glance-registry-clone
if [ "${PHD_VAR_deployment}" = "collapsed" ]; then
    pcs constraint order start keystone-clone then glance-registry-clone
fi
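# Optional sanity check (a sketch): once the cluster settles, review the
# resources and the ordering/colocation constraints created above
pcs status resources
pcs constraint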
....