Coverage for watcher/common/nova_helper.py: 89%
348 statements
coverage.py v7.8.2, created at 2025-06-17 12:22 +0000

# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS <jean-emile.dartois@b-com.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import time

from novaclient import api_versions
from oslo_log import log

import glanceclient.exc as glexceptions
import novaclient.exceptions as nvexceptions

from watcher.common import clients
from watcher.common import exception
from watcher import conf

LOG = log.getLogger(__name__)

CONF = conf.CONF


class NovaHelper(object):

    def __init__(self, osc=None):
        """:param osc: an OpenStackClients instance"""
        self.osc = osc if osc else clients.OpenStackClients()
        self.neutron = self.osc.neutron()
        self.cinder = self.osc.cinder()
        self.nova = self.osc.nova()
        self.glance = self.osc.glance()

    def get_compute_node_list(self):
        hypervisors = self.nova.hypervisors.list()
        # filter out baremetal nodes from hypervisors
        compute_nodes = [node for node in hypervisors if
                         node.hypervisor_type != 'ironic']
        return compute_nodes

    def get_compute_node_by_name(self, node_name, servers=False,
                                 detailed=False):
        """Search for a hypervisor (compute node) by hypervisor_hostname

        :param node_name: The hypervisor_hostname to search
        :param servers: If true, include information about servers per
            hypervisor
        :param detailed: If true, include information about the compute
            service per hypervisor (requires microversion 2.53)
        """
        return self.nova.hypervisors.search(node_name, servers=servers,
                                            detailed=detailed)

    def get_compute_node_by_hostname(self, node_hostname):
        """Get compute node by hostname

        :param node_hostname: Compute service hostname
        :returns: novaclient.v2.hypervisors.Hypervisor object if found
        :raises: ComputeNodeNotFound if no hypervisor is found for the
            compute service hostname or there was an error communicating
            with nova
        """
        try:
            # This is a fuzzy match on hypervisor_hostname so we could get
            # back more than one compute node. If so, match on the compute
            # service hostname.
            compute_nodes = self.get_compute_node_by_name(
                node_hostname, detailed=True)
            for cn in compute_nodes:
                if cn.service['host'] == node_hostname:
                    return cn
            raise exception.ComputeNodeNotFound(name=node_hostname)
        except Exception as exc:
            LOG.exception(exc)
            raise exception.ComputeNodeNotFound(name=node_hostname)
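
    # Usage sketch (illustrative only; the service hostname below is
    # hypothetical and an admin Nova connection is assumed): the fuzzy
    # hypervisor search above is narrowed to the exact compute service
    # host, or ComputeNodeNotFound is raised.
    #
    #   helper = NovaHelper()
    #   node = helper.get_compute_node_by_hostname('compute-01')
    #   LOG.debug("hypervisor %s backs compute service %s",
    #             node.hypervisor_hostname, node.service['host'])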

    def get_compute_node_by_uuid(self, node_uuid):
        """Get compute node by uuid

        :param node_uuid: hypervisor id as uuid after microversion 2.53
        :returns: novaclient.v2.hypervisors.Hypervisor object if found
        """
        return self.nova.hypervisors.get(node_uuid)

    def get_instance_list(self, filters=None, marker=None, limit=-1):
        """List servers for all tenants with details.

        This always gets servers with the all_tenants=True filter.

        :param filters: Dict of additional filters (optional).
        :param marker: Get servers that appear later in the server
            list than that represented by this server id (optional).
        :param limit: Maximum number of servers to return (optional).
            If limit == -1, all servers will be returned,
            note that limit == -1 will have a performance
            penalty. For details, please see:
            https://bugs.launchpad.net/watcher/+bug/1834679
        :returns: list of novaclient Server objects
        """
        search_opts = {'all_tenants': True}
        if filters:
            search_opts.update(filters)
        return self.nova.servers.list(search_opts=search_opts,
                                      marker=marker,
                                      limit=limit)
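
    # Usage sketch (illustrative only; the host name and page size are
    # hypothetical): because the method forces all_tenants=True, an admin
    # token is needed. Paging with marker/limit avoids the limit=-1
    # performance penalty referenced in the docstring.
    #
    #   helper = NovaHelper()
    #   page = helper.get_instance_list(filters={'host': 'compute-01'},
    #                                   limit=100)
    #   while page:
    #       last = page[-1]
    #       page = helper.get_instance_list(filters={'host': 'compute-01'},
    #                                       marker=last.id, limit=100)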

    def get_instance_by_uuid(self, instance_uuid):
        return [instance for instance in
                self.nova.servers.list(search_opts={"all_tenants": True,
                                                    "uuid": instance_uuid})]

    def get_instance_by_name(self, instance_name):
        return [instance for instance in
                self.nova.servers.list(search_opts={"all_tenants": True,
                                                    "name": instance_name})]

    def get_instances_by_node(self, host):
        return [instance for instance in
                self.nova.servers.list(search_opts={"all_tenants": True,
                                                    "host": host},
                                       limit=-1)]

    def get_flavor_list(self):
        return self.nova.flavors.list(**{'is_public': None})

    def get_service(self, service_id):
        return self.nova.services.find(id=service_id)

    def get_aggregate_list(self):
        return self.nova.aggregates.list()

    def get_aggregate_detail(self, aggregate_id):
        return self.nova.aggregates.get(aggregate_id)

    def get_availability_zone_list(self):
        return self.nova.availability_zones.list(detailed=True)

    def get_service_list(self):
        return self.nova.services.list(binary='nova-compute')

    def find_instance(self, instance_id):
        return self.nova.servers.get(instance_id)

    def confirm_resize(self, instance, previous_status, retry=60):
        instance.confirm_resize()
        instance = self.nova.servers.get(instance.id)
        while instance.status != previous_status and retry:
            instance = self.nova.servers.get(instance.id)
            retry -= 1
            time.sleep(1)
        if instance.status == previous_status:
            return True
        else:
            LOG.debug("confirm resize failed for the "
                      "instance %s", instance.id)
            return False

    def wait_for_volume_status(self, volume, status, timeout=60,
                               poll_interval=1):
        """Wait until volume reaches given status.

        :param volume: volume resource
        :param status: expected status of volume
        :param timeout: timeout in seconds
        :param poll_interval: poll interval in seconds
        """
        start_time = time.time()
        while time.time() - start_time < timeout:
            volume = self.cinder.volumes.get(volume.id)
            if volume.status == status:
                break
            time.sleep(poll_interval)
        else:
            raise Exception("Volume %s did not reach status %s after %d s"
                            % (volume.id, status, timeout))
        return volume.status == status
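
    # Usage sketch (illustrative only; the volume UUID is hypothetical):
    # the helper polls Cinder until the volume reaches the expected status
    # and raises once the timeout is exceeded.
    #
    #   helper = NovaHelper()
    #   volume = helper.cinder.volumes.get('<volume-uuid>')
    #   helper.wait_for_volume_status(volume, 'available',
    #                                 timeout=120, poll_interval=2)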

    def watcher_non_live_migrate_instance(self, instance_id, dest_hostname,
                                          retry=120):
        """This method migrates a given instance

        This method uses the Nova built-in migrate()
        action to do a migration of a given instance.
        For migrating to a given dest_hostname, the Nova API version
        must be 2.56 or higher.

        It returns True if the migration was successful,
        False otherwise.

        :param instance_id: the unique id of the instance to migrate.
        :param dest_hostname: the name of the destination compute node;
            if dest_hostname is None, the nova scheduler chooses
            the destination host
        """
        LOG.debug(
            "Trying a cold migrate of instance '%s' ", instance_id)

        # Looking for the instance to migrate
        instance = self.find_instance(instance_id)
        if not instance:
            LOG.debug("Instance %s not found !", instance_id)
            return False
        else:
            host_name = getattr(instance, "OS-EXT-SRV-ATTR:host")
            LOG.debug(
                "Instance %(instance)s found on host '%(host)s'.",
                {'instance': instance_id, 'host': host_name})

            previous_status = getattr(instance, 'status')
            instance.migrate(host=dest_hostname)
            instance = self.nova.servers.get(instance_id)

            while (getattr(instance, 'status') not in
                   ["VERIFY_RESIZE", "ERROR"] and retry):
                instance = self.nova.servers.get(instance.id)
                time.sleep(2)
                retry -= 1
            new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')

            if (host_name != new_hostname and
                    instance.status == 'VERIFY_RESIZE'):
                if not self.confirm_resize(instance, previous_status):
                    return False
                LOG.debug(
                    "cold migration succeeded : "
                    "instance %(instance)s is now on host '%(host)s'.",
                    {'instance': instance_id, 'host': new_hostname})
                return True
            else:
                LOG.debug(
                    "cold migration for instance %s failed", instance_id)
                return False
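
    # Usage sketch (illustrative only; the instance UUID and host name are
    # hypothetical): an explicit destination requires Nova API microversion
    # 2.56 or higher, while dest_hostname=None lets the scheduler pick the
    # target host.
    #
    #   helper = NovaHelper()
    #   moved = helper.watcher_non_live_migrate_instance(
    #       '<instance-uuid>', 'compute-02')
    #   if not moved:
    #       LOG.warning("cold migration did not complete")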

    def resize_instance(self, instance_id, flavor, retry=120):
        """This method resizes given instance with specified flavor.

        This method uses the Nova built-in resize()
        action to do a resize of a given instance.

        It returns True if the resize was successful,
        False otherwise.

        :param instance_id: the unique id of the instance to resize.
        :param flavor: the name or ID of the flavor to resize to.
        """
        LOG.debug(
            "Trying a resize of instance %(instance)s to "
            "flavor '%(flavor)s'",
            {'instance': instance_id, 'flavor': flavor})

        # Looking for the instance to resize
        instance = self.find_instance(instance_id)

        flavor_id = None

        try:
            flavor_id = self.nova.flavors.get(flavor).id
        except nvexceptions.NotFound:
            flavor_id = [f.id for f in self.nova.flavors.list() if
                         f.name == flavor][0]
        except nvexceptions.ClientException as e:
            LOG.debug("Nova client exception occurred while resizing "
                      "instance %s. Exception: %s", instance_id, e)

        if not flavor_id:
            LOG.debug("Flavor not found: %s", flavor)
            return False

        if not instance:
            LOG.debug("Instance not found: %s", instance_id)
            return False

        instance_status = getattr(instance, 'OS-EXT-STS:vm_state')
        LOG.debug(
            "Instance %(id)s is in '%(status)s' status.",
            {'id': instance_id, 'status': instance_status})

        instance.resize(flavor=flavor_id)
        while getattr(instance,
                      'OS-EXT-STS:vm_state') != 'resized' \
                and retry:
            instance = self.nova.servers.get(instance.id)
            LOG.debug('Waiting the resize of %s to %s', instance, flavor_id)
            time.sleep(1)
            retry -= 1

        instance_status = getattr(instance, 'status')
        if instance_status != 'VERIFY_RESIZE':
            return False

        instance.confirm_resize()

        LOG.debug("Resizing succeeded : instance %s is now on flavor "
                  "'%s'.", instance_id, flavor_id)

        return True
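
    # Usage sketch (illustrative only; the instance UUID and flavor name
    # are hypothetical): flavor may be given as a name or an ID, and the
    # helper confirms the resize itself once the server reaches
    # VERIFY_RESIZE.
    #
    #   helper = NovaHelper()
    #   resized = helper.resize_instance('<instance-uuid>', 'm1.medium')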

    def live_migrate_instance(self, instance_id, dest_hostname, retry=120):
        """This method does a live migration of a given instance

        This method uses the Nova built-in live_migrate()
        action to do a live migration of a given instance.

        It returns True if the migration was successful,
        False otherwise.

        :param instance_id: the unique id of the instance to migrate.
        :param dest_hostname: the name of the destination compute node;
            if dest_hostname is None, the nova scheduler chooses
            the destination host
        """
        LOG.debug(
            "Trying a live migrate instance %(instance)s ",
            {'instance': instance_id})

        # Looking for the instance to migrate
        instance = self.find_instance(instance_id)
        if not instance:
            LOG.debug("Instance not found: %s", instance_id)
            return False
        else:
            host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
            LOG.debug(
                "Instance %(instance)s found on host '%(host)s'.",
                {'instance': instance_id, 'host': host_name})

            # From nova api version 2.25 (Mitaka release), the default value
            # of block_migration is None, which is mapped to 'auto'.
            instance.live_migrate(host=dest_hostname)

            instance = self.nova.servers.get(instance_id)

            # NOTE: If the destination host is not specified for live
            # migration, let the nova scheduler choose the destination host.
            if dest_hostname is None:
                while (instance.status not in ['ACTIVE', 'ERROR'] and retry):
                    instance = self.nova.servers.get(instance.id)
                    LOG.debug('Waiting the migration of %s', instance.id)
                    time.sleep(1)
                    retry -= 1
                new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')

                if host_name != new_hostname and instance.status == 'ACTIVE':
                    LOG.debug(
                        "Live migration succeeded : "
                        "instance %(instance)s is now on host '%(host)s'.",
                        {'instance': instance_id, 'host': new_hostname})
                    return True
                else:
                    return False

            while getattr(instance,
                          'OS-EXT-SRV-ATTR:host') != dest_hostname \
                    and retry:
                instance = self.nova.servers.get(instance.id)
                if not getattr(instance, 'OS-EXT-STS:task_state'):
                    LOG.debug("Instance task state: %s is null", instance_id)
                    break
                LOG.debug('Waiting the migration of %s to %s',
                          instance,
                          getattr(instance, 'OS-EXT-SRV-ATTR:host'))
                time.sleep(1)
                retry -= 1

            host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
            if host_name != dest_hostname:
                return False

            LOG.debug(
                "Live migration succeeded : "
                "instance %(instance)s is now on host '%(host)s'.",
                {'instance': instance_id, 'host': host_name})

            return True
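
    # Usage sketch (illustrative only; the instance UUID and host names are
    # hypothetical): with an explicit destination the helper waits until
    # the server lands on that host; with dest_hostname=None it only waits
    # for the server to return to ACTIVE on a scheduler-chosen host.
    #
    #   helper = NovaHelper()
    #   ok = helper.live_migrate_instance('<instance-uuid>', 'compute-02')
    #   ok = helper.live_migrate_instance('<instance-uuid>', None)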

    def abort_live_migrate(self, instance_id, source, destination, retry=240):
        LOG.debug("Aborting live migration of instance %s", instance_id)
        migration = self.get_running_migration(instance_id)
        if migration:
            migration_id = getattr(migration[0], "id")
            try:
                self.nova.server_migrations.live_migration_abort(
                    server=instance_id, migration=migration_id)
            except Exception as e:
                # NOTE: Do not return from here; even if the abort request
                # is not accepted, the migration may still be going on.
                LOG.exception(e)
        else:
            LOG.debug(
                "No running migrations found for instance %s", instance_id)

        while retry:
            instance = self.nova.servers.get(instance_id)
            if (getattr(instance, 'OS-EXT-STS:task_state') is None and
                    getattr(instance, 'status') in ['ACTIVE', 'ERROR']):
                break
            time.sleep(2)
            retry -= 1
        instance_host = getattr(instance, 'OS-EXT-SRV-ATTR:host')
        instance_status = getattr(instance, 'status')

        # Abort live migration successful, the action is cancelled
        if instance_host == source and instance_status == 'ACTIVE':
            return True
        # Nova was unable to abort the live migration, the action succeeded
        elif instance_host == destination and instance_status == 'ACTIVE':
            return False
        else:
            raise Exception("Live migration execution and abort both failed "
                            "for the instance %s" % instance_id)
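
    # Usage sketch (illustrative only; the identifiers are hypothetical):
    # the return value encodes where the instance ended up. True means the
    # abort worked and the instance stayed on the source host, False means
    # the migration completed on the destination anyway, and any other
    # outcome raises an exception.
    #
    #   helper = NovaHelper()
    #   aborted = helper.abort_live_migrate('<instance-uuid>',
    #                                       source='compute-01',
    #                                       destination='compute-02')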

    def enable_service_nova_compute(self, hostname):
        if float(CONF.nova_client.api_version) < 2.53:
            status = self.nova.services.enable(
                host=hostname, binary='nova-compute').status == 'enabled'
        else:
            service_uuid = self.nova.services.list(
                host=hostname, binary='nova-compute')[0].id
            status = self.nova.services.enable(
                service_uuid=service_uuid).status == 'enabled'

        return status

    def disable_service_nova_compute(self, hostname, reason=None):
        if float(CONF.nova_client.api_version) < 2.53:
            status = self.nova.services.disable_log_reason(
                host=hostname,
                binary='nova-compute',
                reason=reason).status == 'disabled'
        else:
            service_uuid = self.nova.services.list(
                host=hostname, binary='nova-compute')[0].id
            status = self.nova.services.disable_log_reason(
                service_uuid=service_uuid,
                reason=reason).status == 'disabled'

        return status
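
    # Usage sketch (illustrative only; the host name and reason are
    # hypothetical): which novaclient call is used depends on
    # CONF.nova_client.api_version. Before microversion 2.53 the service
    # is addressed by host and binary, from 2.53 on by its service UUID.
    #
    #   helper = NovaHelper()
    #   helper.disable_service_nova_compute('compute-01',
    #                                       reason='maintenance')
    #   # and once the work is done:
    #   helper.enable_service_nova_compute('compute-01')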

    def create_image_from_instance(self, instance_id, image_name,
                                   metadata={"reason": "instance_migrate"}):
        """This method creates a new image from a given instance.

        It waits for this image to be in 'active' state before returning.
        It returns the unique UUID of the created image if successful,
        None otherwise.

        :param instance_id: the unique id of
            the instance to backup as an image.
        :param image_name: the name of the image to create.
        :param metadata: a dictionary containing the list of
            key-value pairs to associate to the image as metadata.
        """
        LOG.debug(
            "Trying to create an image from instance %s ...", instance_id)

        # Looking for the instance
        instance = self.find_instance(instance_id)

        if not instance:
            LOG.debug("Instance not found: %s", instance_id)
            return None
        else:
            host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
            LOG.debug(
                "Instance %(instance)s found on host '%(host)s'.",
                {'instance': instance_id, 'host': host_name})

            # We need to wait for an appropriate status
            # of the instance before we can build an image from it
            if self.wait_for_instance_status(instance, ('ACTIVE', 'SHUTOFF'),
                                             5, 10):
                image_uuid = self.nova.servers.create_image(instance_id,
                                                            image_name,
                                                            metadata)

                image = self.glance.images.get(image_uuid)
                if not image:
                    return None

                # Waiting for the new image to be officially in ACTIVE state
                # in order to make sure it can be used
                status = image.status
                retry = 10
                while status != 'active' and status != 'error' and retry:
                    time.sleep(5)
                    retry -= 1
                    # Retrieve the image again so the status field updates
                    image = self.glance.images.get(image_uuid)
                    if not image:
                        break
                    status = image.status
                    LOG.debug("Current image status: %s", status)

                if not image:
                    LOG.debug("Image not found: %s", image_uuid)
                else:
                    LOG.debug(
                        "Image %(image)s successfully created for "
                        "instance %(instance)s",
                        {'image': image_uuid, 'instance': instance_id})
                    return image_uuid
        return None
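
    # Usage sketch (illustrative only; the instance UUID and image name
    # are hypothetical): the call blocks until the Glance image becomes
    # active (or errors out) and returns the image UUID, or None on
    # failure.
    #
    #   helper = NovaHelper()
    #   image_uuid = helper.create_image_from_instance(
    #       '<instance-uuid>', 'pre-migration-snapshot')
    #   if image_uuid is None:
    #       LOG.warning("snapshot could not be created")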

    def delete_instance(self, instance_id):
        """This method deletes a given instance.

        :param instance_id: the unique id of the instance to delete.
        """
        LOG.debug("Trying to remove instance %s ...", instance_id)

        instance = self.find_instance(instance_id)

        if not instance:
            LOG.debug("Instance not found: %s", instance_id)
            return False
        else:
            self.nova.servers.delete(instance_id)
            LOG.debug("Instance %s removed.", instance_id)
            return True

    def stop_instance(self, instance_id):
        """This method stops a given instance.

        :param instance_id: the unique id of the instance to stop.
        """
        LOG.debug("Trying to stop instance %s ...", instance_id)

        instance = self.find_instance(instance_id)

        if not instance:
            LOG.debug("Instance not found: %s", instance_id)
            return False
        elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped":
            LOG.debug("Instance has been stopped: %s", instance_id)
            return True
        else:
            self.nova.servers.stop(instance_id)

            if self.wait_for_instance_state(instance, "stopped", 8, 10):
                LOG.debug("Instance %s stopped.", instance_id)
                return True
            else:
                return False

    def wait_for_instance_state(self, server, state, retry, sleep):
        """Waits for server to be in a specific state

        The state can be one of the following:
        active, stopped

        :param server: server object.
        :param state: the state we are waiting for
        :param retry: how many times to retry
        :param sleep: seconds to sleep between the retries
        """
        if not server:
            return False

        while getattr(server, 'OS-EXT-STS:vm_state') != state and retry:
            time.sleep(sleep)
            server = self.nova.servers.get(server)
            retry -= 1
        return getattr(server, 'OS-EXT-STS:vm_state') == state

    def wait_for_instance_status(self, instance, status_list, retry, sleep):
        """Waits for instance to be in a specific status

        The status can be one of the following:
        BUILD, ACTIVE, ERROR, VERIFY_RESIZE, SHUTOFF

        :param instance: instance object.
        :param status_list: tuple containing the list of
            statuses we are waiting for
        :param retry: how many times to retry
        :param sleep: seconds to sleep between the retries
        """
        if not instance:
            return False

        while instance.status not in status_list and retry:
            LOG.debug("Current instance status: %s", instance.status)
            time.sleep(sleep)
            instance = self.nova.servers.get(instance.id)
            retry -= 1
        LOG.debug("Current instance status: %s", instance.status)
        return instance.status in status_list

    def create_instance(self, node_id, inst_name="test", image_id=None,
                        flavor_name="m1.tiny",
                        sec_group_list=["default"],
                        network_names_list=["demo-net"], keypair_name="mykeys",
                        create_new_floating_ip=True,
                        block_device_mapping_v2=None):
        """This method creates a new instance

        It also creates, if requested, a new floating IP and associates
        it with the new instance.
        It returns the unique id of the created instance.
        """
        LOG.debug(
            "Trying to create new instance '%(inst)s' "
            "from image '%(image)s' with flavor '%(flavor)s' ...",
            {'inst': inst_name, 'image': image_id, 'flavor': flavor_name})

        try:
            self.nova.keypairs.findall(name=keypair_name)
        except nvexceptions.NotFound:
            LOG.debug("Key pair '%s' not found ", keypair_name)
            return

        try:
            image = self.glance.images.get(image_id)
        except glexceptions.NotFound:
            LOG.debug("Image '%s' not found ", image_id)
            return

        try:
            flavor = self.nova.flavors.find(name=flavor_name)
        except nvexceptions.NotFound:
            LOG.debug("Flavor '%s' not found ", flavor_name)
            return

        # Make sure all security groups exist
        for sec_group_name in sec_group_list:
            group_id = self.get_security_group_id_from_name(sec_group_name)

            if not group_id:
                LOG.debug("Security group '%s' not found ", sec_group_name)
                return

        net_list = list()

        for network_name in network_names_list:
            nic_id = self.get_network_id_from_name(network_name)

            if not nic_id:
                LOG.debug("Network '%s' not found ", network_name)
                return
            net_obj = {"net-id": nic_id}
            net_list.append(net_obj)

        # get availability zone of destination host
        azone = self.nova.services.list(host=node_id,
                                        binary='nova-compute')[0].zone
        instance = self.nova.servers.create(
            inst_name, image,
            flavor=flavor,
            key_name=keypair_name,
            security_groups=sec_group_list,
            nics=net_list,
            block_device_mapping_v2=block_device_mapping_v2,
            availability_zone="%s:%s" % (azone, node_id))

        # Poll at 5 second intervals, until the status is no longer 'BUILD'
        if instance:
            if self.wait_for_instance_status(instance,
                                             ('ACTIVE', 'ERROR'), 5, 10):
                instance = self.nova.servers.get(instance.id)

                if create_new_floating_ip and instance.status == 'ACTIVE':
                    LOG.debug(
                        "Creating a new floating IP"
                        " for instance '%s'", instance.id)
                    # Creating floating IP for the new instance
                    floating_ip = self.nova.floating_ips.create()

                    instance.add_floating_ip(floating_ip)

                    LOG.debug(
                        "Instance %(instance)s associated to "
                        "Floating IP '%(ip)s'",
                        {'instance': instance.id, 'ip': floating_ip.ip})

        return instance
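
    # Usage sketch (illustrative only; every name below is hypothetical and
    # must already exist, otherwise the method logs the missing resource
    # and returns None):
    #
    #   helper = NovaHelper()
    #   server = helper.create_instance(
    #       node_id='compute-01',
    #       inst_name='watcher-test',
    #       image_id='<image-uuid>',
    #       flavor_name='m1.tiny',
    #       network_names_list=['demo-net'],
    #       keypair_name='mykeys',
    #       create_new_floating_ip=False)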

    def get_security_group_id_from_name(self, group_name="default"):
        """This method returns the security group id of the provided group name"""
        security_groups = self.neutron.list_security_groups(name=group_name)

        security_group_id = security_groups['security_groups'][0]['id']

        return security_group_id

    def get_network_id_from_name(self, net_name="private"):
        """This method returns the unique id of the provided network name"""
        networks = self.neutron.list_networks(name=net_name)

        # LOG.debug(networks)
        network_id = networks['networks'][0]['id']

        return network_id

    def get_hostname(self, instance):
        return str(getattr(instance, 'OS-EXT-SRV-ATTR:host'))

    def get_running_migration(self, instance_id):
        return self.nova.server_migrations.list(server=instance_id)

    def swap_volume(self, old_volume, new_volume,
                    retry=120, retry_interval=10):
        """Swap old_volume for new_volume"""
        attachments = old_volume.attachments
        instance_id = attachments[0]['server_id']
        # do volume update
        self.nova.volumes.update_server_volume(
            instance_id, old_volume.id, new_volume.id)
        while getattr(new_volume, 'status') != 'in-use' and retry:
            new_volume = self.cinder.volumes.get(new_volume.id)
            LOG.debug('Waiting volume update to %s', new_volume)
            time.sleep(retry_interval)
            retry -= 1
            LOG.debug("retry count: %s", retry)
        if getattr(new_volume, 'status') != "in-use":
            LOG.error("Volume update retry timeout or error")
            return False

        host_name = getattr(new_volume, "os-vol-host-attr:host")
        LOG.debug(
            "Volume update succeeded : "
            "Volume %(volume)s is now on host '%(host)s'.",
            {'volume': new_volume.id, 'host': host_name})
        return True

    def _check_nova_api_version(self, client, version):
        api_version = api_versions.APIVersion(version_str=version)
        try:
            api_versions.discover_version(client, api_version)
            return True
        except nvexceptions.UnsupportedVersion as e:
            LOG.exception(e)
            return False
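
    # Usage sketch (illustrative only; the instance UUID and host name are
    # hypothetical): this lets novaclient negotiate whether the endpoint
    # supports a given microversion before relying on it, e.g. the 2.56
    # requirement for targeted cold migrations.
    #
    #   helper = NovaHelper()
    #   if helper._check_nova_api_version(helper.nova, '2.56'):
    #       helper.watcher_non_live_migrate_instance('<instance-uuid>',
    #                                                'compute-02')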