This repository has been archived by the owner on Feb 9, 2022. It is now read-only.

Merge pull request #16 from bennyz/add_proper_logs

Add a report about fail/successful VMs and templates

maorlipchuk authored Mar 1, 2018
2 parents eefb8f7 + ea3abb1 commit 0713a32
Showing 14 changed files with 222 additions and 95 deletions.
3 changes: 3 additions & 0 deletions defaults/main.yml
@@ -27,3 +27,6 @@ dr_clean_orphaned_vms: "True"

# Indicate whether to remove lun disks from the setup as part of engine setup.
dr_clean_orphaned_disks: "True"

# Indicate the default entities status report file name
dr_report_file: "report.log"
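Since dr_report_file is a role default, a consuming playbook can override it like any other variable; a minimal sketch, assuming a hypothetical playbook and role name that are not part of this commit:

# Hypothetical playbook snippet overriding the default report file name;
# the role name used here is an assumption.
- hosts: localhost
  roles:
    - role: ovirt-disaster-recovery
      vars:
        dr_report_file: "failover_report.log"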
11 changes: 11 additions & 0 deletions tasks/recover/add_fcp_domain.yml
@@ -12,3 +12,14 @@
auth: "{{ ovirt_auth }}"
data_center: "{{ fcp_storage['dr_' + dr_target_host + '_dc_name'] }}"
fcp: {}
register: result

- name: Log append to succeed_storage_domains
set_fact:
succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ fcp_storage['dr_' + dr_target_host + '_name']|default('') }}\" ]"
when: result is succeeded

- name: Log append to failed_storage_domains
set_fact:
failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ fcp_storage['dr_' + dr_target_host + '_name']|default('') }}\" ]"
when: result is failed
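The "{{ list }} + [ ... ]" form relies on Ansible re-evaluating the rendered string as a Jinja2 expression; native list concatenation expresses the same append more directly. A sketch of an equivalent task, assuming the same lists initialised in recover_engine.yml:

# Sketch only: equivalent append using list concatenation instead of string building.
- name: Log append to succeed_storage_domains
  set_fact:
    succeed_storage_domains: "{{ succeed_storage_domains + [ fcp_storage['dr_' + dr_target_host + '_name'] | default('') ] }}"
  when: result is succeeded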
11 changes: 11 additions & 0 deletions tasks/recover/add_glusterfs_domain.yml
@@ -11,3 +11,14 @@
glusterfs:
path: "{{ gluster_storage['dr_' + dr_target_host + '_path'] }}"
address: "{{ gluster_storage['dr_' + dr_target_host + '_address'] }}"
register: result

- name: Log append to succeed_storage_domains
set_fact:
succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ gluster_storage['dr_' + dr_target_host + '_name'] }}\" ]"
when: result is succeeded

- name: Log append to failed_storage_domains
set_fact:
failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ gluster_storage['dr_' + dr_target_host + '_name'] }}\" ]"
when: result is failed
92 changes: 50 additions & 42 deletions tasks/recover/add_iscsi_domain.yml
@@ -1,45 +1,53 @@
# TODO: Add support for connecting to multiple targets with the same LUN.
# Every connection should use a different IP.
- name: Login to iSCSI targets
ovirt_hosts:
state: iscsilogin
name: "{{ ovirt_hosts[0].name }}"
auth: "{{ ovirt_auth }}"
iscsi:
username: "{{ iscsi_storage['dr_' + dr_target_host + '_username']|default('') }}"
password: "{{ iscsi_storage['dr_' + dr_target_host + '_password']|default('') }}"
address: "{{ iscsi_storage['dr_' + dr_target_host + '_address'] }}"
target: "{{ dr_target }}"
# Make port optional
port: "{{ iscsi_storage['dr_' + dr_target_host + '_port']|default('3260'|int, true) }}"
with_items:
- "{{ iscsi_storage['dr_' + dr_target_host + '_target'] }}"
loop_control:
loop_var: dr_target
- block:
- name: Login to iSCSI targets
ovirt_hosts:
state: iscsilogin
name: "{{ ovirt_hosts[0].name }}"
auth: "{{ ovirt_auth }}"
iscsi:
username: "{{ iscsi_storage['dr_' + dr_target_host + '_username']|default('') }}"
password: "{{ iscsi_storage['dr_' + dr_target_host + '_password']|default('') }}"
address: "{{ iscsi_storage['dr_' + dr_target_host + '_address'] }}"
target: "{{ dr_target }}"
# Make port optional
port: "{{ iscsi_storage['dr_' + dr_target_host + '_port']|default('3260'|int, true) }}"
with_items:
- "{{ iscsi_storage['dr_' + dr_target_host + '_target'] }}"
loop_control:
loop_var: dr_target

- name: Import iSCSI storage domain
ovirt_storage_domains:
state: imported
id: "{{ iscsi_storage['dr_domain_id'] }}"
name: "{{ iscsi_storage['dr_' + dr_target_host + '_name']|default('') }}"
host: "{{ ovirt_hosts[0].name }}"
auth: "{{ ovirt_auth }}"
data_center: "{{ iscsi_storage['dr_' + dr_target_host + '_dc_name'] }}"
critical_space_action_blocker: "{{ iscsi_storage['dr_critical_space_action_blocker'] }}"
warning_low_space: "{{ iscsi_storage['dr_warning_low_space'] }}"
wipe_after_delete: "{{ iscsi_storage['dr_wipe_after_delete'] }}"
discard_after_delete: "{{ iscsi_storage['dr_discard_after_delete'] }}"
backup: "{{ iscsi_storage['dr_backup'] }}"
# TODO: For importing an iSCSI domain there is no need for the iscsi parameters
iscsi:
username: "{{ iscsi_storage['dr_' + dr_target_host + '_username']|default('') }}"
password: "{{ iscsi_storage['dr_' + dr_target_host + '_password']|default('') }}"
address: "{{ iscsi_storage['dr_' + dr_target_host + '_address'] }}"
# We use target since state imported in ovirt_storage_domains.py creates a storage domain
# which calls login, so we must provide a target although the targets were already connected before.
# Therefore we pass the first target in the list as a transient target.
target: "{{ dr_target }}"
with_items:
- "{{ iscsi_storage['dr_' + dr_target_host + '_target'] }}"
loop_control:
loop_var: dr_target
- name: Import iSCSI storage domain
ovirt_storage_domains:
state: imported
id: "{{ iscsi_storage['dr_domain_id'] }}"
name: "{{ iscsi_storage['dr_' + dr_target_host + '_name']|default('') }}"
host: "{{ ovirt_hosts[0].name }}"
auth: "{{ ovirt_auth }}"
data_center: "{{ iscsi_storage['dr_' + dr_target_host + '_dc_name'] }}"
critical_space_action_blocker: "{{ iscsi_storage['dr_critical_space_action_blocker'] }}"
warning_low_space: "{{ iscsi_storage['dr_warning_low_space'] }}"
wipe_after_delete: "{{ iscsi_storage['dr_wipe_after_delete'] }}"
discard_after_delete: "{{ iscsi_storage['dr_discard_after_delete'] }}"
backup: "{{ iscsi_storage['dr_backup'] }}"
# TODO: For importing an iSCSI domain there is no need for the iscsi parameters
iscsi:
username: "{{ iscsi_storage['dr_' + dr_target_host + '_username']|default('') }}"
password: "{{ iscsi_storage['dr_' + dr_target_host + '_password']|default('') }}"
address: "{{ iscsi_storage['dr_' + dr_target_host + '_address'] }}"
# We use target since state imported in ovirt_storage_domains.py creates a storage domain
# which calls login, so we must provide a target although the targets were already connected before.
# Therefore we pass the first target in the list as a transient target.
target: "{{ dr_target }}"
with_items:
- "{{ iscsi_storage['dr_' + dr_target_host + '_target'] }}"
loop_control:
loop_var: dr_target
- name: Log append to succeed_storage_domains
set_fact:
succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ iscsi_storage['dr_' + dr_target_host + '_name']|default('') }}\" ]"
rescue:
- name: Log append to failed_storage_domains
set_fact:
failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ iscsi_storage['dr_' + dr_target_host + '_name']|default('') }}\" ]"
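Unlike the single-task domain additions, the iSCSI flow wraps both the login and the import in a block, so any failure falls through to rescue and the domain name is recorded once; a stripped-down sketch of that pattern, with placeholder module and names:

# Minimal sketch of the block/rescue reporting pattern; the task body and domain name are placeholders.
- block:
    - name: Task that can fail
      command: /bin/true
    - name: Record success
      set_fact:
        succeed_storage_domains: "{{ succeed_storage_domains + [ 'example_domain' ] }}"
  rescue:
    - name: Record failure
      set_fact:
        failed_storage_domains: "{{ failed_storage_domains + [ 'example_domain' ] }}"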
34 changes: 21 additions & 13 deletions tasks/recover/add_nfs_domain.yml
@@ -1,13 +1,21 @@
- name: Add NFS storage domain
ovirt_storage_domains:
name: "{{ nfs_storage['dr_' + dr_target_host + '_name'] }}"
critical_space_action_blocker: "{{ nfs_storage['dr_critical_space_action_blocker'] }}"
warning_low_space: "{{ nfs_storage['dr_warning_low_space'] }}"
wipe_after_delete: "{{ nfs_storage['dr_wipe_after_delete'] }}"
backup: "{{ nfs_storage['dr_backup'] }}"
host: "{{ ovirt_hosts[0].name }}"
data_center: "{{ nfs_storage['dr_' + dr_target_host + '_dc_name'] }}"
auth: "{{ ovirt_auth }}"
nfs:
path: "{{ nfs_storage['dr_' + dr_target_host + '_path'] }}"
address: "{{ nfs_storage['dr_' + dr_target_host + '_address'] }}"
- block:
- name: Add NFS storage domain
ovirt_storage_domains:
name: "{{ nfs_storage['dr_' + dr_target_host + '_name'] }}"
critical_space_action_blocker: "{{ nfs_storage['dr_critical_space_action_blocker'] }}"
wipe_after_delete: "{{ nfs_storage['dr_wipe_after_delete'] }}"
backup: "{{ nfs_storage['dr_backup'] }}"
host: "{{ ovirt_hosts[0].name }}"
data_center: "{{ nfs_storage['dr_' + dr_target_host + '_dc_name'] }}"
auth: "{{ ovirt_auth }}"
nfs:
path: "{{ nfs_storage['dr_' + dr_target_host + '_path'] }}"
address: "{{ nfs_storage['dr_' + dr_target_host + '_address'] }}"
- name: Log append to succeed_storage_domains
set_fact:
succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ nfs_storage['dr_' + dr_target_host + '_name'] }}\" ]"

rescue:
- name: Log append to failed_storage_domains
set_fact:
failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ nfs_storage['dr_' + dr_target_host + '_name'] }}\" ]"
10 changes: 10 additions & 0 deletions tasks/recover/add_posixfs_domain.yml
@@ -12,4 +12,14 @@
vfs_type: "{{ posix_storage['dr_' + dr_target_host + '_vfs_type'] }}"
path: "{{ posix_storage['dr_' + dr_target_host + '_path'] }}"
address: "{{ posix_storage['dr_' + dr_target_host + '_address'] }}"
register: result

- name: Log append to succeed_storage_domains
set_fact:
succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ posix_storage['dr_' + dr_target_host + '_name'] }}\" ]"
when: result is succeeded

- name: Log append to failed_storage_domains
set_fact:
failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ posix_storage['dr_' + dr_target_host + '_name'] }}\" ]"
when: result is failed
7 changes: 7 additions & 0 deletions tasks/recover/print_info.yml
@@ -0,0 +1,7 @@
- name: Generate report log from template
template:
src: report_log_template.j2
dest: files/{{ dr_report_file }}

- name: Print report
shell: cat files/{{ dr_report_file }}
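Printing the report via `shell: cat` works when the play runs on the machine that rendered it; a debug task with the file lookup is a possible shell-free alternative, not part of this commit:

# Hypothetical alternative to "shell: cat" for printing the rendered report.
- name: Print report
  debug:
    msg: "{{ lookup('file', 'files/' + dr_report_file) }}"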
22 changes: 22 additions & 0 deletions tasks/recover/register_template.yml
@@ -0,0 +1,22 @@
- name: Register unregistered Template
ovirt_templates:
state: registered
storage_domain: "{{ storage.name }}"
id: "{{ unreg_template.id }}"
allow_partial_import: "{{ dr_partial_import }}"
auth: "{{ ovirt_auth }}"
cluster_mappings: "{{ dr_cluster_map }}"
domain_mappings: "{{ dr_domain_map }}"
vnic_profile_mappings: "{{ dr_network_map }}"
role_mappings: "{{ dr_role_map }}"
register: template_register_result

- name: Log append failed Template to failed_template_names
set_fact:
failed_template_names: "{{ failed_template_names }} + [ '{{ unreg_template.name }}' ]"
when: template_register_result | failed

- name: Log append succeed_template_names
set_fact:
succeed_template_names: "{{ succeed_template_names }} + [ '{{ unreg_template.name }}' ]"
when: template_register_result | succeeded
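The `| failed` / `| succeeded` filter form used here was still accepted by Ansible at the time of this commit, though the test form (`is failed`, as used in the storage-domain tasks above) is the spelling later releases require; the same conditions rewritten as a sketch:

# Sketch only: the same conditions written with Jinja2 tests instead of the filter form.
- name: Log append failed Template to failed_template_names
  set_fact:
    failed_template_names: "{{ failed_template_names + [ unreg_template.name ] }}"
  when: template_register_result is failed

- name: Log append succeed_template_names
  set_fact:
    succeed_template_names: "{{ succeed_template_names + [ unreg_template.name ] }}"
  when: template_register_result is succeeded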
20 changes: 1 addition & 19 deletions tasks/recover/register_templates.yml
@@ -5,25 +5,7 @@
storage_domain: "{{ storage.name }}"
auth: "{{ ovirt_auth }}"

- name: Register unregistered Template
ovirt_templates:
state: registered
storage_domain: "{{ storage.name }}"
id: "{{ unreg_template.id }}"
allow_partial_import: "{{ dr_partial_import }}"
auth: "{{ ovirt_auth }}"
cluster_mappings: "{{ dr_cluster_map }}"
domain_mappings: "{{ dr_domain_map }}"
vnic_profile_mappings: "{{ dr_network_map }}"
role_mappings: "{{ dr_role_map }}"

# TODO: We should set a flag
# - fail: msg="The execution has failed because of errors."
# when: flag == "failed"
#
# - name: Set flag
# set_fact: flag = failed
# when: "'FAILED' in command_result.stderr"
- include: tasks/recover/register_template.yml
# The main task already declared ignore_errors, so it might be redundant to put it here
# ignore_errors: "{{ ignore | default(yes) }}"
with_items: "{{ ovirt_storage_templates }}"
26 changes: 26 additions & 0 deletions tasks/recover/register_vm.yml
@@ -0,0 +1,26 @@
- block:
- name: register VMs
ovirt_vms:
state: registered
storage_domain: "{{ storage.name }}"
id: "{{ unreg_vm.id }}"
auth: "{{ ovirt_auth }}"
allow_partial_import: "{{ dr_partial_import }}"
cluster_mappings: "{{ dr_cluster_map }}"
domain_mappings: "{{ dr_domain_map }}"
role_mappings: "{{ dr_role_map }}"
affinity_group_mappings: "{{ dr_affinity_group_map }}"
affinity_label_mappings: "{{ dr_affinity_label_map }}"
vnic_profile_mappings: "{{ dr_network_map }}"
lun_mappings: "{{ dr_lun_map }}"
reassign_bad_macs: "{{ dr_reset_mac_pool }}"
register: vm_register_result
- name: Log append failed VM to failed_vm_names
set_fact:
failed_vm_names: "{{ failed_vm_names }} + [ '{{ unreg_vm.name }}' ]"
when: vm_register_result | failed

- name: Log append succeed_vm_names
set_fact:
succeed_vm_names: "{{ succeed_vm_names }} + [ '{{ unreg_vm.name }}' ]"
when: vm_register_result | succeeded
23 changes: 2 additions & 21 deletions tasks/recover/register_vms.yml
@@ -10,27 +10,8 @@
unreg_vms: "{{ unreg_vms|default([]) }} + {{ ovirt_storage_vms }}"

# TODO: We should filter out VMs which already exist in the setup (diskless VMs)
- name: Register unregistered VM
ovirt_vms:
state: registered
storage_domain: "{{ storage.name }}"
id: "{{ unreg_vm.id }}"
auth: "{{ ovirt_auth }}"
allow_partial_import: "{{ dr_partial_import }}"
cluster_mappings: "{{ dr_cluster_map }}"
domain_mappings: "{{ dr_domain_map }}"
role_mappings: "{{ dr_role_map }}"
affinity_group_mappings: "{{ dr_affinity_group_map }}"
affinity_label_mappings: "{{ dr_affinity_label_map }}"
vnic_profile_mappings: "{{ dr_network_map }}"
lun_mappings: "{{ dr_lun_map }}"
reassign_bad_macs: "{{ dr_reset_mac_pool }}"

# The main task already declared ignore errors so that might be redundant to put it here
# ignore_errors: "{{ ignore | default(yes) }}"
- include: tasks/recover/register_vm.yml
with_items: "{{ ovirt_storage_vms }}"
# We use loop_control so storage.name will not be overridden by the nested loop.
loop_control:
loop_var: unreg_vm


24 changes: 24 additions & 0 deletions tasks/recover/report_log_template.j2
@@ -0,0 +1,24 @@
{% if succeed_vm_names | length > 0 %}
The following VMs registered successfully: {{ succeed_vm_names | unique | join (", ") }}
{% endif %}
{% if failed_vm_names | length > 0 %}
The following VMs failed to be registered: {{ failed_vm_names | unique | join (", ") }}
{% endif %}
{% if succeed_template_names | length > 0 %}
The following Templates registered successfully: {{ succeed_template_names | unique | join (", ") }}
{% endif %}
{% if failed_template_names | length > 0 %}
The following Templates failed to be registered: {{ failed_template_names | unique | join (", ") }}
{% endif %}
{% if succeed_to_run_vms | length > 0 %}
The following VMs started successfully: {{ succeed_to_run_vms | unique | join (", ") }}
{% endif %}
{% if failed_to_run_vms | length > 0 %}
The following VMs failed to run: {{ failed_to_run_vms | unique | join (", ") }}
{% endif %}
{% if succeed_storage_domains | length > 0 %}
The following storage domains were successfully added: {{ succeed_storage_domains | unique | join (", ") }}
{% endif %}
{% if failed_storage_domains | length > 0 %}
The following storage domains were not added: {{ failed_storage_domains | unique | join (", ") }}
{% endif %}
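Only non-empty categories produce a line, and `unique` drops duplicate names collected across loop iterations; with hypothetical entity names, a rendered report might look like:

The following VMs registered successfully: vm_web_1, vm_db_1
The following VMs failed to be registered: vm_legacy
The following Templates registered successfully: golden_template
The following storage domains were successfully added: nfs_dr_domain, iscsi_dr_domain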
10 changes: 10 additions & 0 deletions tasks/recover/run_vms.yml
@@ -4,3 +4,13 @@
name: "{{ vms.name }}"
wait: False
auth: "{{ ovirt_auth }}"
register: result
- name: Log append succeed_to_run_vms
set_fact:
succeed_to_run_vms: "{{ succeed_to_run_vms }} + [ '{{ vms.name }}' ]"
when: result | succeeded

- name: Log append failed_to_run_vms
set_fact:
failed_to_run_vms: "{{ failed_to_run_vms }} + [ '{{ vms.name }}' ]"
when: result | failed
24 changes: 24 additions & 0 deletions tasks/recover_engine.yml
@@ -7,6 +7,29 @@
ca_file: "{{ vars['dr_sites_' + dr_target_host + '_ca_file'] }}"
ignore_errors: False

- name: Delete previous report log
shell: rm logs/{{ dr_report_file }}
ignore_errors: True

- file:
path: logs/
state: directory

- file:
path: logs/{{ dr_report_file }}
state: touch

- name: Init entity status list
set_fact:
failed_vm_names: []
succeed_vm_names: []
failed_template_names: []
succeed_template_names: []
failed_to_run_vms: []
succeed_to_run_vms: []
succeed_storage_domains: []
failed_storage_domains: []

# TODO: We should add a validation task that will validate whether
# all the hosts in the other site (primary or secondary) could not be connected
# and also set a timer that will wait at least 180 seconds until the first
@@ -158,6 +181,7 @@
ignore_errors: "{{ dr_ignore_error_recover }}"

- always:
- include_tasks: tasks/recover/print_info.yml
- name: Revoke the SSO token
ovirt_auth:
state: absent
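In recover_engine.yml the previous report is removed with `shell: rm` plus `ignore_errors`; the file module with `state: absent` would express the same cleanup without a shell call and without needing to ignore a missing file. A possible alternative, not what the commit does:

# Hypothetical equivalent of the "rm" cleanup using the file module.
- name: Delete previous report log
  file:
    path: logs/{{ dr_report_file }}
    state: absent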
