# ebs_snapshot_worker.py
# (forked from doximity/lambda-ebs-snapshots)
# Original code from:
# https://serverlesscode.com/post/lambda-schedule-ebs-snapshot-backups/
# http://blog.powerupcloud.com/2016/02/15/automate-ebs-snapshots-using-lambda-function/
# Rewritten to be configured on individual Volumes, not Instances.
# https://github.com/Brayyy/Lambda-EBS-Snapshot-Manager
# Updated for Python 3.6; added local-time handling and a '4/day' schedule, and required the EBS volume to be 'in-use'
# https://github.com/TacMechMonkey/Lambda_EBS_Backups-Python_3-6
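#
# A volume is picked up when it carries the backup tag described below (illustrative values;
# the tag keys themselves can be overridden via the BACKUP_KEY/RETENTION_KEY settings):
#   Backup    = 'Hourly' | '4/day' | 'Daily' | 'Weekly'   ('No', 'false' or '' disables backups)
#   Retention = '30'       (days to keep each snapshot; optional, defaults to RETENTION_DEFAULT)
#   Name      = 'db-data'  (optional, only used in the snapshot name and description)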
import datetime
import time
import os
import boto3
RETENTION_DEFAULT = 90
TIME_ZONE = 'Australia/Brisbane'
AWS_REGION = 'ap-southeast-2'
BACKUP_KEY = 'Backup'
RETENTION_KEY = 'Retention'
if 'RETENTION_DEFAULT' in os.environ:
    RETENTION_DEFAULT = int(os.environ['RETENTION_DEFAULT'])
if 'TIME_ZONE' in os.environ:
    TIME_ZONE = os.environ['TIME_ZONE']
if 'AWS_REGION' in os.environ:
    AWS_REGION = os.environ['AWS_REGION']
if 'BACKUP_KEY' in os.environ:
    BACKUP_KEY = os.environ['BACKUP_KEY']
if 'RETENTION_KEY' in os.environ:
    RETENTION_KEY = os.environ['RETENTION_KEY']
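# Illustrative Lambda environment overrides (example values, not requirements):
#   RETENTION_DEFAULT=30, TIME_ZONE=Australia/Sydney, BACKUP_KEY=Backup, RETENTION_KEY=Retention
# AWS_REGION is normally populated by the Lambda runtime itself.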
EC2_CLIENT = boto3.client('ec2', region_name=AWS_REGION)
os.environ['TZ'] = TIME_ZONE
def create_snapshot():
    """Snapshot every in-use volume whose backup tag says it is due this hour."""
    current_hour = int(datetime.datetime.now().strftime('%H'))
    volumes = EC2_CLIENT.describe_volumes(
        Filters=[
            {'Name': 'tag-key', 'Values': [BACKUP_KEY]},
            {'Name': 'status', 'Values': ['in-use']},
        ]
    ).get(
        'Volumes', []
    )
    print("Number of volumes with backup tag: %d" % len(volumes))
    for volume in volumes:
        vol_id = volume['VolumeId']
        vol_retention = RETENTION_DEFAULT
        snap_short_desc = vol_id  # fall back to the bare volume ID if there is no Name tag
        snap_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        for name in volume['Tags']:
            tag_key = name['Key']
            tag_val = name['Value']
            if tag_key == 'Name':
                snap_short_desc = vol_id + ' (' + tag_val + ')'
            if tag_key == RETENTION_KEY and tag_val.isdigit():
                vol_retention = int(tag_val)
            if tag_key == BACKUP_KEY:
                if tag_val in ('', 'No', 'false'):
                    continue
                elif tag_val == 'Weekly':
                    backup_mod = 168
                elif tag_val == 'Daily':
                    backup_mod = 24
                elif tag_val == '4/day':
                    backup_mod = 6
                elif tag_val == 'Hourly':
                    backup_mod = 1
                else:
                    print("Unknown backup schedule %s for volume %s" % (tag_val, vol_id))
                    continue
                # The '+ 10' offsets the Lambda clock (UTC) to local time (UTC+10 for Australia/Brisbane).
                if (current_hour + 10) % backup_mod != 0:
                    print("Backup of %s is not scheduled" % vol_id)
                    continue
                snap_name = 'Backup of ' + snap_short_desc
                snap_desc = 'Lambda backup ' + snap_date + ' of ' + snap_short_desc
                delete_ts = '%.0f' % ((vol_retention * 86400) + time.time())
                print("Backup of %s needed" % vol_id)
                snap = EC2_CLIENT.create_snapshot(
                    VolumeId=vol_id,
                    Description=snap_desc,
                )
                print("%s created" % snap['SnapshotId'])
                EC2_CLIENT.create_tags(
                    Resources=[snap['SnapshotId']],
                    Tags=[
                        {'Key': 'Name', 'Value': snap_name},
                        {'Key': 'Delete After', 'Value': str(delete_ts)}
                    ]
                )
def delete_old_backups(aws_account_ids):
    """Delete snapshots owned by these accounts whose 'Delete After' epoch time has passed."""
    filters = [
        {'Name': 'tag-key', 'Values': ['Delete After']}
    ]
    snapshot_response = EC2_CLIENT.describe_snapshots(
        OwnerIds=aws_account_ids,
        Filters=filters
    )
    for snap in snapshot_response['Snapshots']:
        for name in snap['Tags']:
            tag_key = name['Key']
            tag_val = name['Value']
            if tag_key == 'Delete After':
                if int(tag_val) < time.time():
                    print("%s is being deleted" % snap['SnapshotId'])
                    EC2_CLIENT.delete_snapshot(SnapshotId=snap['SnapshotId'])
                else:
                    print("%s is safe" % snap['SnapshotId'])
def lambda_handler(event, context):
    # The owning AWS account ID is the fifth field of the invoked function's ARN.
    aws_account_ids = [context.invoked_function_arn.split(":")[4]]
    create_snapshot()
    delete_old_backups(aws_account_ids)
    return "Successful"