migrate_to_cinder.py
#!/usr/bin/env python
# oVirt Storage Migration
# NFS -> Cinder/Ceph
# Author: Jordan Rodgers ([email protected])
# Written for KGCOE at RIT (https://www.rit.edu/kgcoe/)
from ovirtsdk.api import API
from ovirtsdk.xml import params
from cinderclient.v1 import client
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import os
import rados
import rbd
import time
import smtplib
import sys
def connect(ovirt_api_url, ovirt_username, ovirt_password, cinder_username, cinder_password, cinder_project,
cinder_auth_url, ceph_conf_file, ceph_client, ceph_pool):
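    """Open API connections to oVirt, Cinder, and Ceph.

    Returns the oVirt API handle, the Cinder client, the Rados cluster
    handle, and an open I/O context for the target Ceph pool.
    """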
VERSION = params.Version(major='4', minor='0')
ovirt_api = API(url=ovirt_api_url, username=ovirt_username, password=ovirt_password, insecure=True)
cinder_api = client.Client(cinder_username, cinder_password, cinder_project, cinder_auth_url, service_type="volume")
ceph_api = rados.Rados(conffile=ceph_conf_file, name="client.{}".format(ceph_client))
ceph_api.connect()
ceph_api_ioctx = ceph_api.open_ioctx(ceph_pool)
return ovirt_api, cinder_api, ceph_api, ceph_api_ioctx
def get_vms_to_migrate(ovirt_api, search_query):
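    """Return the list of oVirt VMs matching the migration search query."""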
vms_to_migrate = []
for vm in ovirt_api.vms.list(query=search_query):
print("'{}' is set to be migrated.".format(vm.name))
vms_to_migrate.append(vm)
return vms_to_migrate
def migrate_disks(ovirt_api, cinder_api, ceph_api_ioctx, vms_to_migrate, old_storage_id, new_storage_id, nfs_mount_dir,
                  migrate_tag, ceph_pool, ceph_client, ceph_conf_file, cinder_volume_type):
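    """Migrate every disk on the old NFS storage domain to Cinder/Ceph.

    For each VM: remove snapshots, deactivate the NFS disk, create a matching
    Cinder volume, convert the NFS image into the volume's RBD with qemu-img,
    register the new disk in oVirt, and swap the disks on the VM. VMs that end
    up with no disks on the old domain have their migration tag removed.
    Returns the lists of migrated and failed VM names.
    """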
completed_vms = []
failed_vms = []
for vm in vms_to_migrate:
print("Starting migration for '{}'.".format(vm.name))
remove_snapshots(vm)
print("[{}] Checking for disks to migrate...".format(vm.name))
disks = vm.disks.list()
for disk in disks:
for storage_domain in disk.storage_domains.storage_domain:
if storage_domain.id == old_storage_id:
print("[{}] '{}' needs to be migrated...".format(vm.name, disk.name))
try:
deactivate_disk(vm, disk)
print("[{}] Attempting to migrate '{}' to Cinder...".format(vm.name, disk.name))
cinder_disk_id = create_cinder_disk(cinder_api, disk, vm, cinder_volume_type)
delete_rbd(vm, disk, cinder_disk_id, ceph_api_ioctx)
print("[{}] Converting '{}' from NFS to RBD...".format(vm.name, disk.name))
image_path = find_image(old_storage_id, disk, nfs_mount_dir)
if image_path:
if os.system("qemu-img convert -p -O raw {} rbd:{}/volume-{}:id={}:conf={}".format(
image_path, ceph_pool, cinder_disk_id, ceph_client, ceph_conf_file)) == 0:
new_disk = register_disk(vm, disk, ovirt_api, disk.name, new_storage_id)
if new_disk:
attach_detach_disk(vm, disk, new_disk)
set_boot_order(vm)
print("[{}] Sucessfully migrated '{}'!".format(vm.name, disk.name))
else:
print("[{}] Could not register the Cinder volume in oVirt.".format(vm.name))
error_message(vm, disk, failed_vms)
else:
print("[{}] Failed to convert '{}' from NFS to RBD.".format(vm.name, disk.name))
error_message(vm, disk, failed_vms)
else:
print("[{}] Could not find the correct image file to convert.".format(vm.name))
error_message(vm, disk, failed_vms)
                    except Exception as exc:
                        print("[{}] Unexpected error: {}".format(vm.name, exc))
                        error_message(vm, disk, failed_vms)
done = check_vm(vm, old_storage_id)
if done:
remove_tag(vm, completed_vms, migrate_tag)
return completed_vms, failed_vms
def remove_snapshots(vm):
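    """Delete all non-active snapshots on the VM and wait for the deletions to finish."""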
print("[{}] Checking VM for snapshots...".format(vm.name))
snapshots = vm.snapshots.list()
if len(snapshots) > 1:
removed_snaps = 0
for snapshot in snapshots:
if snapshot.description != 'Active VM' and snapshot.description != 'Active VM snapshot':
print("[{}] Removing snapshot '{}'...".format(vm.name, snapshot.description))
snapshot.delete()
removed_snaps += 1
new_snapshots = vm.snapshots.list()
while len(new_snapshots) > len(snapshots) - removed_snaps:
time.sleep(3)
new_snapshots = vm.snapshots.list()
def deactivate_disk(vm, disk):
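    """Deactivate the disk on the VM so it can be safely migrated."""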
print("[{}] Deactivating '{}' for migration...".format(vm.name, disk.name))
    if disk.active:
        disk.deactivate()
        # Re-fetch the disk so the loop sees the engine's current status and
        # wait until the disk is reported as inactive before migrating it.
        while vm.disks.get(id=disk.id).active:
            time.sleep(3)
def create_cinder_disk(cinder_api, disk, vm, cinder_volume_type):
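    """Create a Cinder volume matching the oVirt disk and return its ID."""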
print("[{}] Creating a Cinder volume for {}...".format(vm.name, disk.name))
    # provisioned_size is in bytes; round up to whole GiB so the Cinder
    # volume is never smaller than the source disk.
    size_gb = (disk.provisioned_size + 1073741824 - 1) // 1073741824
    new_disk = cinder_api.volumes.create(display_name=disk.name, size=size_gb,
                                         volume_type=cinder_volume_type)
disk_id = new_disk.id
new_disk = cinder_api.volumes.get(disk_id)
print("[{}] Waiting for the volume to be created...".format(vm.name))
while new_disk.status != 'available':
time.sleep(3)
new_disk = cinder_api.volumes.get(disk_id)
return disk_id
def delete_rbd(vm, disk, cinder_disk_id, ceph_api_ioctx):
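    """Remove the RBD image backing the new Cinder volume.

    qemu-img recreates the image when it converts the NFS source in the next step.
    """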
print("[{}] Deleting the underlying RBD for the new '{}' Cinder volume...".format(vm.name, disk.name))
rbd_inst = rbd.RBD()
rbd_name = "volume-{}".format(cinder_disk_id)
rbd_inst.remove(ceph_api_ioctx, rbd_name)
def find_image(old_storage_id, disk, nfs_mount_dir):
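    """Return the path of the disk's image file on the NFS mount, or False.

    The image directory is expected to contain exactly three files: the image
    itself plus its .meta and .lease files.
    """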
image_path = "{}/{}/images/{}/".format(nfs_mount_dir, old_storage_id, disk.id)
image_dir_files = os.listdir(image_path)
if len(image_dir_files) == 3:
for filename in image_dir_files:
if '.meta' in filename or '.lease' in filename:
pass
else:
image_path += filename
return image_path
return False
def register_disk(vm, disk, ovirt_api, old_disk_name, new_storage_id):
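    """Register the matching unregistered Cinder disk in oVirt and return it, or False."""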
print("[{}] Registering '{}' in oVirt...".format(vm.name, disk.name))
new_storage = ovirt_api.storagedomains.get(id=new_storage_id)
unregistered_disks = new_storage.disks.list(unregistered=True)
if len(unregistered_disks) == 1:
if unregistered_disks[0].name == old_disk_name:
new_disk = new_storage.disks.add(unregistered_disks[0], unregistered=True)
return new_disk
    elif len(unregistered_disks) > 1:
        for unregistered_disk in unregistered_disks:
            if unregistered_disk.name == old_disk_name:
                new_disk = new_storage.disks.add(unregistered_disk, unregistered=True)
                return new_disk
    return False
def attach_detach_disk(vm, disk, new_disk):
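    """Attach the new Cinder disk to the VM and detach the old NFS disk."""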
print("[{}] Attaching the '{}' Cinder volume to the VM...".format(vm.name, disk.name))
vm.disks.add(params.Disk(id=new_disk.id, active=True))
print("[{}] Detaching the '{}' NFS volume from the VM...".format(vm.name, disk.name))
disk.delete(action=params.Action(detach=True))
def set_boot_order(vm):
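    """Ensure the VM boots from its hard disk after the disk swap."""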
vm.set_os(params.OperatingSystem(boot=[params.Boot(dev='hd')]))
vm.update()
def check_vm(vm, old_storage_id):
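    """Return True once the VM has no disks left on the old storage domain."""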
disks = vm.disks.list()
for disk in disks:
for storage_domain in disk.storage_domains.storage_domain:
if storage_domain.id == old_storage_id:
return False
return True
def remove_tag(vm, completed_vms, migrate_tag):
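    """Record the VM as migrated and remove its migration tag."""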
completed_vms.append(vm.name)
for tag in vm.tags.list():
if tag.name == migrate_tag:
tag.delete()
def error_message(vm, disk, failed_vms):
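    """Record the failed migration, warn the operator, and reactivate the original disk."""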
failed_vms.append("{} ({})".format(vm.name, disk.name))
print("[{}] ERROR: Could not migrate '{}'. Reactivating original disk. "
"Please manually clean up any remnants from this failed migration.".format(vm.name, disk.name))
disk.activate()
def email_report(completed_vms, failed_vms, mail_from, mail_to, mail_subject, mail_smtp_server):
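    """Email a summary of successful and failed migrations, if there is anything to report."""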
sender = mail_from
receivers = mail_to
msg = MIMEMultipart()
msg['From'] = sender
msg['To'] = receivers
msg['Subject'] = mail_subject
body = "Successful VM Migrations:\n" \
"{}\n\n" \
"Failed VM Migrations:\n" \
"{}".format("\n".join(completed_vms), "\n".join(failed_vms))
msg.attach(MIMEText(body, 'plain'))
text = msg.as_string()
if completed_vms or failed_vms:
try:
server = smtplib.SMTP(mail_smtp_server, 25)
server.starttls()
server.sendmail(sender, receivers, text)
server.quit()
print("Successfully sent email report.")
        except Exception as exc:
            print("ERROR: Unable to send email report: {}".format(exc))
if __name__ == "__main__":
if os.path.isfile('.ovirt_migration_lock'):
sys.exit("Lockfile exists. Exiting.")
else:
open('.ovirt_migration_lock', 'a').close()
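    # --- Configuration: fill in every empty value below before running. ---
    # 'Storage.name=' in search_query should name the NFS storage domain
    # that disks are being migrated away from.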
ovirt_api_url = 'https://ovirt.example.com/ovirt-engine/api/'
ovirt_username = ''
ovirt_password = ''
cinder_username = 'admin'
cinder_password = ''
cinder_project = 'admin'
cinder_auth_url = 'http://IP_OF_CINDER:35357/v2.0'
ceph_conf_file = '/etc/ceph/ceph.conf'
ceph_client = 'admin'
ceph_pool = 'rbd'
old_storage_id = ''
new_storage_id = ''
nfs_mount_dir = ''
migrate_tag = 'Migrate_to_Cinder'
cinder_volume_type = ''
search_query = 'Storage.name= Status=down Tag={}'.format(migrate_tag)
mail_from = ''
mail_to = ''
mail_subject = 'oVirt Cinder Migration Report'
mail_smtp_server = ''
ovirt_api, cinder_api, ceph_api, ceph_api_ioctx = connect(ovirt_api_url, ovirt_username, ovirt_password,
cinder_username, cinder_password, cinder_project,
cinder_auth_url, ceph_conf_file, ceph_client, ceph_pool)
vms_to_migrate = get_vms_to_migrate(ovirt_api, search_query)
    completed_vms, failed_vms = migrate_disks(ovirt_api, cinder_api, ceph_api_ioctx, vms_to_migrate, old_storage_id,
                                              new_storage_id, nfs_mount_dir, migrate_tag, ceph_pool, ceph_client,
                                              ceph_conf_file, cinder_volume_type)
print("No more VMs to migrate.")
email_report(completed_vms, failed_vms, mail_from, mail_to, mail_subject, mail_smtp_server)
ceph_api_ioctx.close()
ceph_api.shutdown()
os.remove('.ovirt_migration_lock')