Coverage for watcher/decision_engine/strategy/strategies/zone_migration.py: 88%

404 statements  

coverage.py v7.8.2, created at 2025-06-17 12:22 +0000

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from dateutil.parser import parse

from oslo_log import log

from cinderclient.v3.volumes import Volume
from novaclient.v2.servers import Server
from watcher._i18n import _
from watcher.common import cinder_helper
from watcher.common import nova_helper
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base

LOG = log.getLogger(__name__)

INSTANCE = "instance"
VOLUME = "volume"
ACTIVE = "active"
PAUSED = 'paused'
STOPPED = "stopped"
status_ACTIVE = 'ACTIVE'
status_PAUSED = 'PAUSED'
status_SHUTOFF = 'SHUTOFF'
AVAILABLE = "available"
IN_USE = "in-use"

class ZoneMigration(base.ZoneMigrationBaseStrategy):
    """Zone migration using instance and volume migration

    This strategy migrates many instances and volumes efficiently,
    with minimum downtime, for hardware maintenance.
    """

    def __init__(self, config, osc=None):
        super(ZoneMigration, self).__init__(config, osc)
        self._nova = None
        self._cinder = None

        self.live_count = 0
        self.planned_live_count = 0
        self.cold_count = 0
        self.planned_cold_count = 0
        self.volume_count = 0
        self.planned_volume_count = 0
        self.volume_update_count = 0
        self.planned_volume_update_count = 0

    @classmethod
    def get_name(cls):
        return "zone_migration"

    @classmethod
    def get_display_name(cls):
        return _("Zone migration")

    @classmethod
    def get_translatable_display_name(cls):
        return "Zone migration"

    @classmethod
    def get_schema(cls):
        return {
            "properties": {
                "compute_nodes": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "src_node": {
                                "description": "Compute node from which"
                                               " instances migrate",
                                "type": "string"
                            },
                            "dst_node": {
                                "description": "Compute node to which"
                                               " instances migrate",
                                "type": "string"
                            }
                        },
                        "required": ["src_node"],
                        "additionalProperties": False
                    }
                },
                "storage_pools": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "src_pool": {
                                "description": "Storage pool from which"
                                               " volumes migrate",
                                "type": "string"
                            },
                            "dst_pool": {
                                "description": "Storage pool to which"
                                               " volumes migrate",
                                "type": "string"
                            },
                            "src_type": {
                                "description": "Volume type from which"
                                               " volumes migrate",
                                "type": "string"
                            },
                            "dst_type": {
                                "description": "Volume type to which"
                                               " volumes migrate",
                                "type": "string"
                            }
                        },
                        "required": ["src_pool", "src_type", "dst_type"],
                        "additionalProperties": False
                    }
                },
                "parallel_total": {
                    "description": "The number of actions to be run in"
                                   " parallel in total",
                    "type": "integer", "minimum": 0, "default": 6
                },
                "parallel_per_node": {
                    "description": "The number of actions to be run in"
                                   " parallel per compute node",
                    "type": "integer", "minimum": 0, "default": 2
                },
                "parallel_per_pool": {
                    "description": "The number of actions to be run in"
                                   " parallel per storage pool",
                    "type": "integer", "minimum": 0, "default": 2
                },
                "priority": {
                    "description": "Lists that prioritize instances and"
                                   " volumes",
                    "type": "object",
                    "properties": {
                        "project": {
                            "type": "array", "items": {"type": "string"}
                        },
                        "compute_node": {
                            "type": "array", "items": {"type": "string"}
                        },
                        "storage_pool": {
                            "type": "array", "items": {"type": "string"}
                        },
                        "compute": {
                            "enum": ["vcpu_num", "mem_size", "disk_size",
                                     "created_at"]
                        },
                        "storage": {
                            "enum": ["size", "created_at"]
                        }
                    },
                    "additionalProperties": False
                },
                "with_attached_volume": {
                    "description": "Whether an instance is migrated just"
                                   " after its attached volumes",
                    "type": "boolean", "default": False
                },
            },
            "additionalProperties": False
        }
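For illustration, a hypothetical audit parameter set that should validate against this schema; all node, pool, and type names here are invented, with the pool and type names borrowed from the docstring examples below:

    parameters = {
        "compute_nodes": [
            {"src_node": "compute1", "dst_node": "compute2"}
        ],
        "storage_pools": [
            {"src_pool": "src1@back1#pool1",
             "dst_pool": "dst1@back1#pool1",
             "src_type": "src1_type",
             "dst_type": "dst1_type"}
        ],
        "parallel_total": 6,
        "parallel_per_node": 2,
        "parallel_per_pool": 2,
        "priority": {
            "project": ["pj1"],
            "compute": "vcpu_num",
            "storage": "size"
        },
        "with_attached_volume": False
    }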

    @property
    def migrate_compute_nodes(self):
        """Get compute nodes from input_parameters

        :returns: compute nodes
                  e.g. [{"src_node": "w012", "dst_node": "w022"},
                        {"src_node": "w013", "dst_node": "w023"}]
        """
        return self.input_parameters.get('compute_nodes')

    @property
    def migrate_storage_pools(self):
        """Get storage pools from input_parameters

        :returns: storage pools
                  e.g. [{"src_pool": "src1@back1#pool1",
                         "dst_pool": "dst1@back1#pool1",
                         "src_type": "src1_type",
                         "dst_type": "dst1_type"},
                        {"src_pool": "src1@back2#pool1",
                         "dst_pool": "dst1@back2#pool1",
                         "src_type": "src1_type",
                         "dst_type": "dst1_type"}]
        """
        return self.input_parameters.get('storage_pools')

    @property
    def parallel_total(self):
        return self.input_parameters.get('parallel_total')

    @property
    def parallel_per_node(self):
        return self.input_parameters.get('parallel_per_node')

    @property
    def parallel_per_pool(self):
        return self.input_parameters.get('parallel_per_pool')

    @property
    def priority(self):
        """Get priority from input_parameters

        :returns: priority map
                  e.g. {"project": ["pj1"],
                        "compute_node": ["compute1", "compute2"],
                        "compute": ["vcpu_num"],
                        "storage_pool": ["pool1", "pool2"],
                        "storage": ["size", "created_at"]}
        """
        return self.input_parameters.get('priority')

    @property
    def with_attached_volume(self):
        return self.input_parameters.get('with_attached_volume')

    @property
    def nova(self):
        if self._nova is None:
            self._nova = nova_helper.NovaHelper(osc=self.osc)
        return self._nova

    @property
    def cinder(self):
        if self._cinder is None:
            self._cinder = cinder_helper.CinderHelper(osc=self.osc)
        return self._cinder

    def get_available_compute_nodes(self):
        default_node_scope = [element.ServiceState.ENABLED.value,
                              element.ServiceState.DISABLED.value]
        return {uuid: cn for uuid, cn in
                self.compute_model.get_all_compute_nodes().items()
                if cn.state == element.ServiceState.ONLINE.value and
                cn.status in default_node_scope}

    def get_available_storage_nodes(self):
        default_node_scope = [element.ServiceState.ENABLED.value,
                              element.ServiceState.DISABLED.value]
        return {uuid: cn for uuid, cn in
                self.storage_model.get_all_storage_nodes().items()
                if cn.state == element.ServiceState.ONLINE.value and
                cn.status in default_node_scope}

    def pre_execute(self):
        self._pre_execute()
        LOG.debug(self.storage_model.to_string())

    def do_execute(self, audit=None):
        """Strategy execution phase"""
        filtered_targets = self.filtered_targets()
        self.set_migration_count(filtered_targets)

        total_limit = self.parallel_total
        per_node_limit = self.parallel_per_node
        per_pool_limit = self.parallel_per_pool
        action_counter = ActionCounter(total_limit,
                                       per_pool_limit, per_node_limit)

        for k, targets in iter(filtered_targets.items()):
            if k == VOLUME:
                self.volumes_migration(targets, action_counter)
            elif k == INSTANCE:
                if self.volume_count == 0 and self.volume_update_count == 0:
                    # If with_attached_volume is True, instances with
                    # attached volumes have already been migrated, so
                    # migrate only instances without attached volumes.
                    if self.with_attached_volume:
                        targets = self.instances_no_attached(targets)
                    self.instances_migration(targets, action_counter)
                else:
                    self.instances_migration(targets, action_counter)

        LOG.debug("action total: %s, pools: %s, nodes: %s",
                  action_counter.total_count,
                  action_counter.per_pool_count,
                  action_counter.per_node_count)

    def post_execute(self):
        """Post-execution phase

        This can be used to compute the global efficacy.
        """
        self.solution.set_efficacy_indicators(
            live_migrate_instance_count=self.live_count,
            planned_live_migrate_instance_count=self.planned_live_count,
            cold_migrate_instance_count=self.cold_count,
            planned_cold_migrate_instance_count=self.planned_cold_count,
            volume_migrate_count=self.volume_count,
            planned_volume_migrate_count=self.planned_volume_count,
            volume_update_count=self.volume_update_count,
            planned_volume_update_count=self.planned_volume_update_count
        )

    def set_migration_count(self, targets):
        """Set migration counts

        :param targets: dict of instance object list and volume object list;
                        the keys of the dict are "instance" and "volume"
        """
        for instance in targets.get('instance', []):
            if self.is_live(instance):
                self.live_count += 1
            elif self.is_cold(instance):
                self.cold_count += 1
        for volume in targets.get('volume', []):
            if self.is_available(volume):
                self.volume_count += 1
            elif self.is_in_use(volume):
                self.volume_update_count += 1

    def is_live(self, instance):
        status = getattr(instance, 'status')
        state = getattr(instance, 'OS-EXT-STS:vm_state')
        return (status == status_ACTIVE and state == ACTIVE
                ) or (status == status_PAUSED and state == PAUSED)

    def is_cold(self, instance):
        status = getattr(instance, 'status')
        state = getattr(instance, 'OS-EXT-STS:vm_state')
        return status == status_SHUTOFF and state == STOPPED

    def is_available(self, volume):
        return getattr(volume, 'status') == AVAILABLE

    def is_in_use(self, volume):
        return getattr(volume, 'status') == IN_USE

    def instances_no_attached(self, instances):
        return [i for i in instances
                if not getattr(i, "os-extended-volumes:volumes_attached")]

    def get_host_by_pool(self, pool):
        """Get host name from pool name

        Utility method to get the host name from a pool name,
        which is formatted as host@backend#pool.

        :param pool: pool name
        :returns: host name
        """
        return pool.split('@')[0]
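A quick example of the pool-name split, using the host@backend#pool format from the docstring:

    "src1@back1#pool1".split('@')[0]  # -> "src1"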

    def get_dst_node(self, src_node):
        """Get the destination node from self.migrate_compute_nodes

        :param src_node: compute node name
        :returns: destination node name
        """
        for node in self.migrate_compute_nodes:
            if node.get("src_node") == src_node:
                return node.get("dst_node")

    def get_dst_pool_and_type(self, src_pool, src_type):
        """Get the destination pool and type from self.migrate_storage_pools

        :param src_pool: storage pool name
        :param src_type: storage volume type
        :returns: tuple of destination pool name and volume type name
        """
        for pool in self.migrate_storage_pools:
            if pool.get("src_pool") == src_pool:
                return (pool.get("dst_pool", None),
                        pool.get("dst_type"))

    def volumes_migration(self, volumes, action_counter):
        for volume in volumes:
            if action_counter.is_total_max():
                LOG.debug('total reached limit')
                break

            pool = getattr(volume, 'os-vol-host-attr:host')
            if action_counter.is_pool_max(pool):
                LOG.debug("%s has objects to be migrated, but it has"
                          " reached the limit of parallelization.", pool)
                continue

            src_type = volume.volume_type
            dst_pool, dst_type = self.get_dst_pool_and_type(pool, src_type)
            LOG.debug(src_type)
            LOG.debug("%s %s", dst_pool, dst_type)

            if self.is_available(volume):
                if src_type == dst_type:
                    self._volume_migrate(volume, dst_pool)
                else:
                    self._volume_retype(volume, dst_type)
            elif self.is_in_use(volume):
                self._volume_update(volume, dst_type)

                # If with_attached_volume is True, also migrate the
                # instances the volume is attached to.
                if self.with_attached_volume:
                    instances = [self.nova.find_instance(dic.get('server_id'))
                                 for dic in volume.attachments]
                    self.instances_migration(instances, action_counter)

            action_counter.add_pool(pool)

    def instances_migration(self, instances, action_counter):
        for instance in instances:
            src_node = getattr(instance, 'OS-EXT-SRV-ATTR:host')

            if action_counter.is_total_max():
                LOG.debug('total reached limit')
                break

            if action_counter.is_node_max(src_node):
                LOG.debug("%s has objects to be migrated, but it has"
                          " reached the limit of parallelization.", src_node)
                continue

            dst_node = self.get_dst_node(src_node)
            if self.is_live(instance):
                self._live_migration(instance, src_node, dst_node)
            elif self.is_cold(instance):
                self._cold_migration(instance, src_node, dst_node)

            action_counter.add_node(src_node)

    def _live_migration(self, instance, src_node, dst_node):
        parameters = {"migration_type": "live",
                      "source_node": src_node,
                      "resource_name": instance.name}
        if dst_node:
            # if dst_node is None, do not add it to the parameters for the
            # migration action, and let Nova figure out the destination node
            parameters["destination_node"] = dst_node
        self.solution.add_action(
            action_type="migrate",
            resource_id=instance.id,
            input_parameters=parameters)
        self.planned_live_count += 1

    def _cold_migration(self, instance, src_node, dst_node):
        parameters = {"migration_type": "cold",
                      "source_node": src_node,
                      "resource_name": instance.name}
        if dst_node:
            # if dst_node is None, do not add it to the parameters for the
            # migration action, and let Nova figure out the destination node
            parameters["destination_node"] = dst_node
        self.solution.add_action(
            action_type="migrate",
            resource_id=instance.id,
            input_parameters=parameters)
        self.planned_cold_count += 1

    def _volume_update(self, volume, dst_type):
        parameters = {"migration_type": "swap",
                      "destination_type": dst_type,
                      "resource_name": volume.name}
        self.solution.add_action(
            action_type="volume_migrate",
            resource_id=volume.id,
            input_parameters=parameters)
        self.planned_volume_update_count += 1

    def _volume_migrate(self, volume, dst_pool):
        parameters = {"migration_type": "migrate",
                      "destination_node": dst_pool,
                      "resource_name": volume.name}
        self.solution.add_action(
            action_type="volume_migrate",
            resource_id=volume.id,
            input_parameters=parameters)
        self.planned_volume_count += 1

    def _volume_retype(self, volume, dst_type):
        parameters = {"migration_type": "retype",
                      "destination_type": dst_type,
                      "resource_name": volume.name}
        self.solution.add_action(
            action_type="volume_migrate",
            resource_id=volume.id,
            input_parameters=parameters)
        self.planned_volume_count += 1

    def get_src_node_list(self):
        """Get src nodes from migrate_compute_nodes

        :returns: src node name list
        """
        if not self.migrate_compute_nodes:
            return None

        return [v for dic in self.migrate_compute_nodes
                for k, v in dic.items() if k == "src_node"]

    def get_src_pool_list(self):
        """Get src pools from migrate_storage_pools

        :returns: src pool name list
        """
        return [v for dic in self.migrate_storage_pools
                for k, v in dic.items() if k == "src_pool"]

    def get_instances(self):
        """Get migration target instances

        :returns: list of instances on src nodes and in the compute scope
        """
        src_node_list = self.get_src_node_list()

        if not src_node_list:
            return None

        return [i for i in self.nova.get_instance_list()
                if getattr(i, 'OS-EXT-SRV-ATTR:host') in src_node_list and
                self.compute_model.get_instance_by_uuid(i.id)]

    def get_volumes(self):
        """Get migration target volumes

        :returns: list of volumes on src pools and in the storage scope
        """
        src_pool_list = self.get_src_pool_list()

        return [i for i in self.cinder.get_volume_list()
                if getattr(i, 'os-vol-host-attr:host') in src_pool_list and
                self.storage_model.get_volume_by_uuid(i.id)]

    def filtered_targets(self):
        """Filter targets

        Prioritize instances and volumes based on the priorities
        given in the input parameters.

        :returns: prioritized targets
        """
        result = {}

        if self.migrate_compute_nodes:
            result["instance"] = self.get_instances()

        if self.migrate_storage_pools:
            result["volume"] = self.get_volumes()

        if not self.priority:
            return result

        filter_actions = self.get_priority_filter_list()
        LOG.debug(filter_actions)

        # apply all filters set in the input parameters
        for action in list(reversed(filter_actions)):
            LOG.debug(action)
            result = action.apply_filter(result)

        return result

    def get_priority_filter_list(self):
        """Get priority filters

        :returns: list of filter objects with arguments from self.priority
        """
        filter_list = []
        priority_filter_map = self.get_priority_filter_map()

        for k, v in iter(self.priority.items()):
            if k in priority_filter_map:
                filter_list.append(priority_filter_map[k](v))

        return filter_list

    def get_priority_filter_map(self):
        """Get priority filter map

        :returns: filter map; each key is a key of the priority input
                  parameter, and each value is the filter class used for
                  prioritizing.
        """
        return {
            "project": ProjectSortFilter,
            "compute_node": ComputeHostSortFilter,
            "storage_pool": StorageHostSortFilter,
            "compute": ComputeSpecSortFilter,
            "storage": StorageSpecSortFilter,
        }
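A sketch of how a priority parameter turns into a filter chain, with invented values (the filter classes are defined below):

    priority = {"project": ["pj1"], "compute": "vcpu_num"}
    # get_priority_filter_list builds, in dict order, roughly:
    #     [ProjectSortFilter(["pj1"]), ComputeSpecSortFilter("vcpu_num")]
    # filtered_targets applies the filters in reverse, so the first key
    # in the priority map is applied last and its ordering ends up at the
    # front, i.e. it takes the highest precedence.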

class ActionCounter(object):
    """Manage the number of actions run in parallel"""

    def __init__(self, total_limit=6, per_pool_limit=2, per_node_limit=2):
        """Initialize the per-pool and per-node action counters

        :param total_limit: total number of actions
        :param per_pool_limit: the number of migrate actions per storage pool
        :param per_node_limit: the number of migrate actions per compute node
        """
        self.total_limit = total_limit
        self.per_pool_limit = per_pool_limit
        self.per_node_limit = per_node_limit
        self.per_pool_count = {}
        self.per_node_count = {}
        self.total_count = 0

    def add_pool(self, pool):
        """Increment the number of actions on the pool and the total count

        :param pool: storage pool
        :returns: True if incremented, False otherwise
        """
        if pool not in self.per_pool_count:
            self.per_pool_count[pool] = 0

        if not self.is_total_max() and not self.is_pool_max(pool):
            self.per_pool_count[pool] += 1
            self.total_count += 1
            LOG.debug("total: %s, per_pool: %s",
                      self.total_count, self.per_pool_count)
            return True
        return False

    def add_node(self, node):
        """Increment the number of actions on the node and the total count

        :param node: compute node
        :returns: True if incremented, False otherwise
        """
        if node not in self.per_node_count:
            self.per_node_count[node] = 0

        if not self.is_total_max() and not self.is_node_max(node):
            self.per_node_count[node] += 1
            self.total_count += 1
            LOG.debug("total: %s, per_node: %s",
                      self.total_count, self.per_node_count)
            return True
        return False

    def is_total_max(self):
        """Check if the total count has reached its limit

        :returns: True if the total count has reached the limit,
                  False otherwise
        """
        return self.total_count >= self.total_limit

    def is_pool_max(self, pool):
        """Check if the per-pool count has reached its limit

        :returns: True if the count has reached the limit, False otherwise
        """
        if pool not in self.per_pool_count:
            self.per_pool_count[pool] = 0
        LOG.debug("the number of parallel actions on pool %s is %s",
                  pool, self.per_pool_count[pool])
        LOG.debug("per pool limit is %s", self.per_pool_limit)
        return self.per_pool_count[pool] >= self.per_pool_limit

    def is_node_max(self, node):
        """Check if the per-node count has reached its limit

        :returns: True if the count has reached the limit, False otherwise
        """
        if node not in self.per_node_count:
            self.per_node_count[node] = 0
        return self.per_node_count[node] >= self.per_node_limit
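A minimal usage sketch of ActionCounter with small, made-up limits:

    counter = ActionCounter(total_limit=3, per_pool_limit=1,
                            per_node_limit=2)
    counter.add_node("compute1")   # True  (total 1, compute1: 1)
    counter.add_node("compute1")   # True  (total 2, compute1: 2)
    counter.add_node("compute1")   # False (per-node limit of 2 reached)
    counter.add_pool("pool1")      # True  (total 3, pool1: 1)
    counter.is_total_max()         # True  (total limit of 3 reached)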

class BaseFilter(object):
    """Base class for filters"""

    apply_targets = ('ALL',)

    def __init__(self, values=[], **kwargs):
        """Initialization

        :param values: priority values
        """
        if not isinstance(values, list):
            values = [values]

        self.condition = values

    def apply_filter(self, targets):
        """Apply the filter to targets

        :param targets: dict of instance object list and volume object list;
                        the keys of the dict are "instance" and "volume"
        """
        if not targets:
            return {}

        for cond in list(reversed(self.condition)):
            for k, v in iter(targets.items()):
                if not self.is_allowed(k):
                    continue
                LOG.debug("filter: %s with the key: %s", cond, k)
                targets[k] = self.exec_filter(v, cond)

        LOG.debug(targets)
        return targets

    def is_allowed(self, key):
        return (key in self.apply_targets) or ('ALL' in self.apply_targets)

    def exec_filter(self, items, sort_key):
        """Implemented by subclasses"""
        return items

class SortMovingToFrontFilter(BaseFilter):
    """Move items to the front of the list when a condition holds"""

    def exec_filter(self, items, sort_key):
        return self.sort_moving_to_front(items,
                                         sort_key,
                                         self.compare_func)

    def sort_moving_to_front(self, items, sort_key=None, compare_func=None):
        if not compare_func or not sort_key:
            return items

        for item in list(reversed(items)):
            if compare_func(item, sort_key):
                items.remove(item)
                items.insert(0, item)
        return items

    def compare_func(self, item, sort_key):
        return True
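A toy subclass, invented purely for illustration, showing the move-to-front behavior on plain strings:

    class StartswithFilter(SortMovingToFrontFilter):
        # hypothetical compare_func: match items starting with sort_key
        def compare_func(self, item, sort_key):
            return item.startswith(sort_key)

    f = StartswithFilter(values=["b"])
    f.exec_filter(["apple", "banana", "cherry", "blueberry"], "b")
    # -> ['banana', 'blueberry', 'apple', 'cherry']
    # Matching items move to the front, keeping their relative order.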

class ProjectSortFilter(SortMovingToFrontFilter):
    """ProjectSortFilter"""

    apply_targets = ('instance', 'volume')

    def __init__(self, values=[], **kwargs):
        super(ProjectSortFilter, self).__init__(values, **kwargs)

    def compare_func(self, item, sort_key):
        """Compare the project id of the item with sort_key

        :param item: instance object or volume object
        :param sort_key: project id
        :returns: True if the project id of the item equals sort_key,
                  False otherwise
        """
        project_id = self.get_project_id(item)
        LOG.debug("project_id: %s, sort_key: %s", project_id, sort_key)
        return project_id == sort_key

    def get_project_id(self, item):
        """Get the project id of the item

        :param item: instance object or volume object
        :returns: project id
        """
        if isinstance(item, Volume):
            return getattr(item, 'os-vol-tenant-attr:tenant_id')
        elif isinstance(item, Server):
            return item.tenant_id

class ComputeHostSortFilter(SortMovingToFrontFilter):
    """ComputeHostSortFilter"""

    apply_targets = ('instance',)

    def __init__(self, values=[], **kwargs):
        super(ComputeHostSortFilter, self).__init__(values, **kwargs)

    def compare_func(self, item, sort_key):
        """Compare the compute host of the item with sort_key

        :param item: instance object
        :param sort_key: compute host name
        :returns: True if the host on which the instance runs equals
                  sort_key, False otherwise
        """
        host = self.get_host(item)
        LOG.debug("host: %s, sort_key: %s", host, sort_key)
        return host == sort_key

    def get_host(self, item):
        """Get the hostname on which the item runs

        :param item: instance object
        :returns: hostname on which the item runs
        """
        return getattr(item, 'OS-EXT-SRV-ATTR:host')

class StorageHostSortFilter(SortMovingToFrontFilter):
    """StorageHostSortFilter"""

    apply_targets = ('volume',)

    def compare_func(self, item, sort_key):
        """Compare the pool name of the item with sort_key

        :param item: volume object
        :param sort_key: storage pool name
        :returns: True if the pool hosting the volume equals sort_key,
                  False otherwise
        """
        host = self.get_host(item)
        LOG.debug("host: %s, sort_key: %s", host, sort_key)
        return host == sort_key

    def get_host(self, item):
        return getattr(item, 'os-vol-host-attr:host')

class ComputeSpecSortFilter(BaseFilter):
    """ComputeSpecSortFilter"""

    apply_targets = ('instance',)
    accept_keys = ['vcpu_num', 'mem_size', 'disk_size', 'created_at']

    def __init__(self, values=[], **kwargs):
        super(ComputeSpecSortFilter, self).__init__(values, **kwargs)
        self._nova = None

    @property
    def nova(self):
        if self._nova is None:
            self._nova = nova_helper.NovaHelper()
        return self._nova

    def exec_filter(self, items, sort_key):
        result = items

        if sort_key not in self.accept_keys:
            LOG.warning("Invalid key is specified: %s", sort_key)
        else:
            result = self.get_sorted_items(items, sort_key)

        return result

    def get_sorted_items(self, items, sort_key):
        """Sort items by sort_key

        :param items: instances
        :param sort_key: sort key
        :returns: items sorted by sort_key
        """
        result = items
        flavors = self.nova.get_flavor_list()

        if sort_key == 'mem_size':
            result = sorted(items,
                            key=lambda x: float(self.get_mem_size(x, flavors)),
                            reverse=True)
        elif sort_key == 'vcpu_num':
            result = sorted(items,
                            key=lambda x: float(self.get_vcpu_num(x, flavors)),
                            reverse=True)
        elif sort_key == 'disk_size':
            result = sorted(items,
                            key=lambda x: float(
                                self.get_disk_size(x, flavors)),
                            reverse=True)
        elif sort_key == 'created_at':
            result = sorted(items,
                            key=lambda x: parse(getattr(x, sort_key)),
                            reverse=False)

        return result

    def get_mem_size(self, item, flavors):
        """Get the memory size of the item

        :param item: instance
        :param flavors: flavors
        :returns: memory size of the item
        """
        LOG.debug("item: %s, flavors: %s", item, flavors)
        for flavor in flavors:
            LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor)
            if item.flavor.get('id') == flavor.id:
                LOG.debug("flavor.ram: %s", flavor.ram)
                return flavor.ram

    def get_vcpu_num(self, item, flavors):
        """Get the vcpu number of the item

        :param item: instance
        :param flavors: flavors
        :returns: vcpu number of the item
        """
        LOG.debug("item: %s, flavors: %s", item, flavors)
        for flavor in flavors:
            LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor)
            if item.flavor.get('id') == flavor.id:
                LOG.debug("flavor.vcpus: %s", flavor.vcpus)
                return flavor.vcpus

    def get_disk_size(self, item, flavors):
        """Get the disk size of the item

        :param item: instance
        :param flavors: flavors
        :returns: disk size of the item
        """
        LOG.debug("item: %s, flavors: %s", item, flavors)
        for flavor in flavors:
            LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor)
            if item.flavor.get('id') == flavor.id:
                LOG.debug("flavor.disk: %s", flavor.disk)
                return flavor.disk
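The flavor lookup and sort above can be sketched standalone with stand-in objects; the namedtuples here are hypothetical substitutes for the nova Flavor and Server objects, and all values are invented:

    from collections import namedtuple

    Flavor = namedtuple("Flavor", "id ram vcpus disk")
    flavors = [Flavor("f1", 2048, 2, 20), Flavor("f2", 8192, 4, 80)]

    Instance = namedtuple("Instance", "name flavor")
    items = [Instance("small", {"id": "f1"}), Instance("big", {"id": "f2"})]

    # equivalent of get_sorted_items(items, "mem_size"): largest RAM first
    result = sorted(items,
                    key=lambda x: float(next(f.ram for f in flavors
                                             if x.flavor.get('id') == f.id)),
                    reverse=True)
    # -> [Instance("big", ...), Instance("small", ...)]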

class StorageSpecSortFilter(BaseFilter):
    """StorageSpecSortFilter"""

    apply_targets = ('volume',)
    accept_keys = ['size', 'created_at']

    def exec_filter(self, items, sort_key):
        result = items

        if sort_key not in self.accept_keys:
            LOG.warning("Invalid key is specified: %s", sort_key)
            return result

        if sort_key == 'created_at':
            result = sorted(items,
                            key=lambda x: parse(getattr(x, sort_key)),
                            reverse=False)
        else:
            result = sorted(items,
                            key=lambda x: float(getattr(x, sort_key)),
                            reverse=True)
        LOG.debug(result)
        return result