diff -Nru nova-17.0.0/AUTHORS nova-17.0.1/AUTHORS --- nova-17.0.0/AUTHORS 2018-02-28 11:35:39.000000000 +0000 +++ nova-17.0.1/AUTHORS 2018-03-07 23:35:18.000000000 +0000 @@ -391,6 +391,7 @@ Eric Guo Eric Harney Eric Harney +Eric M Gonzalez Eric Windisch Eric Windisch Erik Berg @@ -1074,6 +1075,7 @@ Shih-Hao Li Shilla Saebi Shlomi Sasson +Shoham Peller Shraddha Pandhe Shraddha Pandhe Shuangtai Tian diff -Nru nova-17.0.0/ChangeLog nova-17.0.1/ChangeLog --- nova-17.0.0/ChangeLog 2018-02-28 11:35:37.000000000 +0000 +++ nova-17.0.1/ChangeLog 2018-03-07 23:35:15.000000000 +0000 @@ -1,9 +1,23 @@ CHANGES ======= +17.0.1 +------ + +* Allow 'network' in RequestContext service\_catalog +* Check for multiattach before removing connections +* Pass user context to virt driver when detaching volume +* Handle spawning error on unshelving +* Imported Translations from Zanata +* compute: Cleans up allocations after failed resize +* Update noVNC deployment docs to mention non-US keymap fix in 1.0.0 +* [placement] Add functional tests for traits API +* [placement] Add sending global request ID in put (3) + 17.0.0 ------ +* libvirt: disconnect volume from host during detach * Ensure attachment\_id always exists for block device mapping * Add functional test for deleting BFV server with old attach flow * Update plugs Contrail methods to work with privsep @@ -15,7 +29,11 @@ * Add functional tests to ensure BDM removal on delete * Store block device mappings in cell0 * Drop extra loop which modifies Cinder volume status +* Check quota before creating volume snapshots +* Add the ability to get absolute limits from Cinder +* Add resource\_class to fields in ironic node cache * Lazy-load instance attributes with read\_deleted=yes +* unquiesce instance on volume snapshot failure 17.0.0.0rc2 ----------- diff -Nru nova-17.0.0/debian/changelog nova-17.0.1/debian/changelog --- nova-17.0.0/debian/changelog 2018-02-28 17:44:34.000000000 +0000 +++ nova-17.0.1/debian/changelog 2018-03-12 16:43:39.000000000 +0000 @@ -1,3 +1,17 @@ +nova (2:17.0.1-0ubuntu1) bionic; urgency=medium + + * New upstream point release for OpenStack Queens. + + -- Corey Bryant Mon, 12 Mar 2018 12:43:39 -0400 + +nova (2:17.0.0-0ubuntu2) bionic; urgency=medium + + * Add Depends nova-compute-kvm -> ipxe-qemu{-256k-compat-efi-roms} + to ensure that required ROM's are installed for all architectures + (LP: #1754015). + + -- James Page Mon, 12 Mar 2018 16:06:06 +0000 + nova (2:17.0.0-0ubuntu1) bionic; urgency=medium * New upstream release for OpenStack Queens. diff -Nru nova-17.0.0/debian/control nova-17.0.1/debian/control --- nova-17.0.0/debian/control 2018-02-28 17:44:34.000000000 +0000 +++ nova-17.0.1/debian/control 2018-03-12 16:43:39.000000000 +0000 @@ -308,6 +308,8 @@ Package: nova-compute-kvm Architecture: all Depends: + ipxe-qemu, + ipxe-qemu-256k-compat-efi-roms, nova-compute-libvirt (= ${binary:Version}), qemu-kvm | qemu-system (>= 1.3.0) | kvm, ${misc:Depends}, diff -Nru nova-17.0.0/doc/source/admin/remote-console-access.rst nova-17.0.1/doc/source/admin/remote-console-access.rst --- nova-17.0.0/doc/source/admin/remote-console-access.rst 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/doc/source/admin/remote-console-access.rst 2018-03-07 23:31:34.000000000 +0000 @@ -334,6 +334,11 @@ # apt-get install nova-novncproxy +.. note:: + + If using non-US key mappings, then you need at least noVNC 1.0.0 for `a fix + `_. + The service starts automatically on installation. 
To restart the service, run: diff -Nru nova-17.0.0/nova/compute/api.py nova-17.0.1/nova/compute/api.py --- nova-17.0.0/nova/compute/api.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/compute/api.py 2018-03-07 23:31:42.000000000 +0000 @@ -2855,9 +2855,45 @@ if instance.root_device_name: properties['root_device_name'] = instance.root_device_name + bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( + context, instance.uuid) + + mapping = [] # list of BDM dicts that can go into the image properties + # Do some up-front filtering of the list of BDMs from + # which we are going to create snapshots. + volume_bdms = [] + for bdm in bdms: + if bdm.no_device: + continue + if bdm.is_volume: + # These will be handled below. + volume_bdms.append(bdm) + else: + mapping.append(bdm.get_image_mapping()) + + # Check limits in Cinder before creating snapshots to avoid going over + # quota in the middle of a list of volumes. This is a best-effort check + # but concurrently running snapshot requests from the same project + # could still fail to create volume snapshots if they go over limit. + if volume_bdms: + limits = self.volume_api.get_absolute_limits(context) + total_snapshots_used = limits['totalSnapshotsUsed'] + max_snapshots = limits['maxTotalSnapshots'] + # -1 means there is unlimited quota for snapshots + if (max_snapshots > -1 and + len(volume_bdms) + total_snapshots_used > max_snapshots): + LOG.debug('Unable to create volume snapshots for instance. ' + 'Currently has %s snapshots, requesting %s new ' + 'snapshots, with a limit of %s.', + total_snapshots_used, len(volume_bdms), + max_snapshots, instance=instance) + raise exception.OverQuota(overs='snapshots') + quiesced = False if instance.vm_state == vm_states.ACTIVE: try: + LOG.info("Attempting to quiesce instance before volume " + "snapshot.", instance=instance) self.compute_rpcapi.quiesce_instance(context, instance) quiesced = True except (exception.InstanceQuiesceNotSupported, @@ -2871,17 +2907,10 @@ {'reason': err}, instance=instance) - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid) - @wrap_instance_event(prefix='api') def snapshot_instance(self, context, instance, bdms): - mapping = [] - for bdm in bdms: - if bdm.no_device: - continue - - if bdm.is_volume: + try: + for bdm in volume_bdms: # create snapshot based on volume_id volume = self.volume_api.get(context, bdm.volume_id) # NOTE(yamahata): Should we wait for snapshot creation? @@ -2889,20 +2918,28 @@ # short time, it doesn't matter for now. name = _('snapshot for %s') % image_meta['name'] LOG.debug('Creating snapshot from volume %s.', - volume['id'], - instance=instance) + volume['id'], instance=instance) snapshot = self.volume_api.create_snapshot_force( - context, volume['id'], name, - volume['display_description']) + context, volume['id'], + name, volume['display_description']) mapping_dict = block_device.snapshot_from_bdm( - snapshot['id'], - bdm) + snapshot['id'], bdm) mapping_dict = mapping_dict.get_image_mapping() - else: - mapping_dict = bdm.get_image_mapping() - - mapping.append(mapping_dict) - return mapping + mapping.append(mapping_dict) + return mapping + # NOTE(tasker): No error handling is done in the above for loop. + # This means that if the snapshot fails and throws an exception + # the traceback will skip right over the unquiesce needed below. 
+ # Here, catch any exception, unquiesce the instance, and raise the + # error so that the calling function can do what it needs to in + # order to properly treat a failed snap. + except Exception: + with excutils.save_and_reraise_exception(): + if quiesced: + LOG.info("Unquiescing instance after volume snapshot " + "failure.", instance=instance) + self.compute_rpcapi.unquiesce_instance( + context, instance, mapping) self._record_action_start(context, instance, instance_actions.CREATE_IMAGE) diff -Nru nova-17.0.0/nova/compute/manager.py nova-17.0.1/nova/compute/manager.py --- nova-17.0.0/nova/compute/manager.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/compute/manager.py 2018-03-07 23:31:42.000000000 +0000 @@ -697,7 +697,7 @@ cn_uuid = compute_nodes[migration.source_node] if not scheduler_utils.remove_allocation_from_compute( - instance, cn_uuid, self.reportclient): + context, instance, cn_uuid, self.reportclient): LOG.error("Failed to clean allocation of evacuated instance " "on the source node %s", cn_uuid, instance=instance) @@ -2902,7 +2902,7 @@ # on the same host (not evacuate) uses the NopClaim which will # not raise ComputeResourcesUnavailable. rt.delete_allocation_for_evacuated_instance( - instance, scheduled_node, node_type='destination') + context, instance, scheduled_node, node_type='destination') self._notify_instance_rebuild_error(context, instance, e, bdms) raise exception.BuildAbortException( instance_uuid=instance.uuid, reason=e.format_message()) @@ -2916,7 +2916,8 @@ self._set_migration_status(migration, 'failed') if recreate or scheduled_node is not None: rt.delete_allocation_for_evacuated_instance( - instance, scheduled_node, node_type='destination') + context, instance, scheduled_node, + node_type='destination') self._notify_instance_rebuild_error(context, instance, e, bdms) raise else: @@ -3832,7 +3833,7 @@ # any shared providers in the case of a confirm_resize operation and # the source host and shared providers for a revert_resize operation.. if not scheduler_utils.remove_allocation_from_compute( - instance, cn_uuid, self.reportclient, flavor): + context, instance, cn_uuid, self.reportclient, flavor): LOG.error("Failed to save manipulated allocation", instance=instance) @@ -4213,6 +4214,15 @@ This is initiated from the destination host's ``prep_resize`` routine and runs on the source host. """ + try: + self._resize_instance(context, instance, image, migration, + instance_type, clean_shutdown) + except Exception: + with excutils.save_and_reraise_exception(): + self._revert_allocation(context, instance, migration) + + def _resize_instance(self, context, instance, image, + migration, instance_type, clean_shutdown): with self._error_out_instance_on_exception(context, instance), \ errors_out_migration_ctxt(migration): network_info = self.network_api.get_instance_nw_info(context, @@ -4438,6 +4448,20 @@ new host machine. """ + try: + self._finish_resize_helper(context, disk_info, image, instance, + migration) + except Exception: + with excutils.save_and_reraise_exception(): + self._revert_allocation(context, instance, migration) + + def _finish_resize_helper(self, context, disk_info, image, instance, + migration): + """Completes the migration process. + + The caller must revert the instance's allocations if the migration + process failed. + """ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) @@ -4914,8 +4938,11 @@ # instance claim will not remove the allocations. 
rt.reportclient.delete_allocation_for_instance(context, instance.uuid) - # FIXME: Umm, shouldn't we be rolling back volume connections - # and port bindings? + # FIXME: Umm, shouldn't we be rolling back port bindings too? + self._terminate_volume_connections(context, instance, bdms) + # The reverts_task_state decorator on unshelve_instance will + # eventually save these updates. + self._nil_out_instance_obj_host_and_node(instance) if image: instance.image_ref = shelved_image_ref @@ -6294,7 +6321,7 @@ # attempt to clean up any doubled per-instance allocation rt = self._get_resource_tracker() rt.delete_allocation_for_migrated_instance( - instance, source_node) + ctxt, instance, source_node) def _consoles_enabled(self): """Returns whether a console is enable.""" diff -Nru nova-17.0.0/nova/compute/resource_tracker.py nova-17.0.1/nova/compute/resource_tracker.py --- nova-17.0.0/nova/compute/resource_tracker.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/compute/resource_tracker.py 2018-03-07 23:31:42.000000000 +0000 @@ -894,7 +894,7 @@ # that the resource provider exists in the tree and has had its # cached traits refreshed. self.reportclient.set_traits_for_provider( - compute_node.uuid, traits) + context, compute_node.uuid, traits) if self.pci_tracker: self.pci_tracker.save(context) @@ -1316,27 +1316,30 @@ "host that might need to be removed: %s.", instance_uuid, instance.host, instance.node, alloc) - def delete_allocation_for_evacuated_instance(self, instance, node, + def delete_allocation_for_evacuated_instance(self, context, instance, node, node_type='source'): self._delete_allocation_for_moved_instance( - instance, node, 'evacuated', node_type) + context, instance, node, 'evacuated', node_type) - def delete_allocation_for_migrated_instance(self, instance, node): - self._delete_allocation_for_moved_instance(instance, node, 'migrated') + def delete_allocation_for_migrated_instance(self, context, instance, node): + self._delete_allocation_for_moved_instance(context, instance, node, + 'migrated') def _delete_allocation_for_moved_instance( - self, instance, node, move_type, node_type='source'): + self, context, instance, node, move_type, node_type='source'): # Clean up the instance allocation from this node in placement cn_uuid = self.compute_nodes[node].uuid if not scheduler_utils.remove_allocation_from_compute( - instance, cn_uuid, self.reportclient): + context, instance, cn_uuid, self.reportclient): LOG.error("Failed to clean allocation of %s " "instance on the %s node %s", move_type, node_type, cn_uuid, instance=instance) - def delete_allocation_for_failed_resize(self, instance, node, flavor): + def delete_allocation_for_failed_resize(self, context, instance, node, + flavor): """Delete instance allocations for the node during a failed resize + :param context: The request context. :param instance: The instance being resized/migrated. :param node: The node provider on which the instance should have allocations to remove. 
If this is a resize to the same host, then @@ -1345,7 +1348,7 @@ """ cn = self.compute_nodes[node] if not scheduler_utils.remove_allocation_from_compute( - instance, cn.uuid, self.reportclient, flavor): + context, instance, cn.uuid, self.reportclient, flavor): if instance.instance_type_id == flavor.id: operation = 'migration' else: diff -Nru nova-17.0.0/nova/conductor/tasks/live_migrate.py nova-17.0.1/nova/conductor/tasks/live_migrate.py --- nova-17.0.0/nova/conductor/tasks/live_migrate.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/conductor/tasks/live_migrate.py 2018-03-07 23:31:34.000000000 +0000 @@ -376,8 +376,8 @@ # allocated for the given (destination) node. self.scheduler_client.reportclient.\ remove_provider_from_instance_allocation( - self.instance.uuid, compute_node.uuid, self.instance.user_id, - self.instance.project_id, resources) + self.context, self.instance.uuid, compute_node.uuid, + self.instance.user_id, self.instance.project_id, resources) def _check_not_over_max_retries(self, attempted_hosts): if CONF.migrate_max_retries == -1: diff -Nru nova-17.0.0/nova/context.py nova-17.0.1/nova/context.py --- nova-17.0.0/nova/context.py 2018-02-28 11:32:01.000000000 +0000 +++ nova-17.0.1/nova/context.py 2018-03-07 23:31:42.000000000 +0000 @@ -119,7 +119,7 @@ # Only include required parts of service_catalog self.service_catalog = [s for s in service_catalog if s.get('type') in ('image', 'block-storage', 'volumev3', - 'key-manager', 'placement')] + 'key-manager', 'placement', 'network')] else: # if list is empty or none self.service_catalog = [] diff -Nru nova-17.0.0/nova/locale/cs/LC_MESSAGES/nova.po nova-17.0.1/nova/locale/cs/LC_MESSAGES/nova.po --- nova-17.0.0/nova/locale/cs/LC_MESSAGES/nova.po 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/locale/cs/LC_MESSAGES/nova.po 2018-03-07 23:31:42.000000000 +0000 @@ -12,7 +12,7 @@ msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-01-03 23:41+0000\n" +"POT-Creation-Date: 2018-02-28 15:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -21,7 +21,7 @@ "Language: cs\n" "Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n" "Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" +"X-Generator: Zanata 4.3.3\n" "Language-Team: Czech\n" #, python-format @@ -97,18 +97,6 @@ msgstr "Hypervizor %(type)s nepodporuje zařízení PCI" #, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s musí být <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s musí být >= %(min_value)d" - -#, python-format -msgid "%(value_name)s must be an integer" -msgstr "%(value_name)s musí být celé číslo" - -#, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "" "Hodnota %(worker_name)s ve %(workers)s je neplatná, musí být větší než 0" @@ -1319,10 +1307,6 @@ msgstr "Hypervizor s ID !%s! nemohl být nalezen." #, python-format -msgid "Hypervisor: %s" -msgstr "Hypervizor: %s" - -#, python-format msgid "IP allocation over quota in pool %s." msgstr "Přidělení IP adres přesahující kvótu v zásobě %s." @@ -3675,9 +3659,6 @@ msgid "fping utility is not found." msgstr "nástroj fping nenalezen." 
-msgid "host" -msgstr "Hostitel" - #, python-format msgid "href %s does not contain version" msgstr "href %s neobsahuje verzi" @@ -3902,6 +3883,3 @@ msgid "you can not pass project if the scope is private" msgstr "nemůžete předat projekt, pokud je zaměření soukromé" - -msgid "zone" -msgstr "Zóna" diff -Nru nova-17.0.0/nova/locale/de/LC_MESSAGES/nova.po nova-17.0.1/nova/locale/de/LC_MESSAGES/nova.po --- nova-17.0.0/nova/locale/de/LC_MESSAGES/nova.po 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/locale/de/LC_MESSAGES/nova.po 2018-03-07 23:31:42.000000000 +0000 @@ -15,7 +15,7 @@ msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-01-03 23:41+0000\n" +"POT-Creation-Date: 2018-02-28 15:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -25,7 +25,7 @@ "Language-Team: German\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" "Generated-By: Babel 2.2.0\n" -"X-Generator: Zanata 3.9.6\n" +"X-Generator: Zanata 4.3.3\n" #, python-format msgid "%(address)s is not a valid IP address." @@ -116,18 +116,6 @@ msgstr "%(type)s Hypervisor unterstützt PCI Gerät nicht" #, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s muss <= %(max_value)d sein" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s muss >= %(min_value)d sein" - -#, python-format -msgid "%(value_name)s must be an integer" -msgstr "%(value_name)s muss eine Ganzzahl sein" - -#, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "" "Wert %(worker_name)s von %(workers)s ist ungültig; muss größer als 0 sein" @@ -1515,10 +1503,6 @@ msgstr "Hypervisor mit ID '%s' konnte nicht gefunden werden. " #, python-format -msgid "Hypervisor: %s" -msgstr "Hypervisor: %s" - -#, python-format msgid "IP allocation over quota in pool %s." msgstr "IP-Zuordnung über Quote in Pool %s." @@ -4155,9 +4139,6 @@ msgid "fping utility is not found." msgstr "fping-Dienstprogramm wurde nicht gefunden." 
-msgid "host" -msgstr "Host" - #, python-format msgid "href %s does not contain version" msgstr "Hyperlink %s enthält Version nicht" @@ -4428,6 +4409,3 @@ msgid "you can not pass project if the scope is private" msgstr "Sie können das Projekt nicht übergeben, wenn der Bereich privat ist" - -msgid "zone" -msgstr "Zone" diff -Nru nova-17.0.0/nova/locale/es/LC_MESSAGES/nova.po nova-17.0.1/nova/locale/es/LC_MESSAGES/nova.po --- nova-17.0.0/nova/locale/es/LC_MESSAGES/nova.po 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/locale/es/LC_MESSAGES/nova.po 2018-03-07 23:31:42.000000000 +0000 @@ -16,7 +16,7 @@ msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-01-03 23:41+0000\n" +"POT-Creation-Date: 2018-02-28 15:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -25,7 +25,7 @@ "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" +"X-Generator: Zanata 4.3.3\n" "Language-Team: Spanish\n" #, python-format @@ -116,18 +116,6 @@ msgstr "El hipervisor %(type)s no soporta dispositivos PCI" #, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s debe ser <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s debe ser >= %(min_value)d" - -#, python-format -msgid "%(value_name)s must be an integer" -msgstr "%(value_name)s debe ser un entero" - -#, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "" "El valor %(worker_name)s de %(workers)s es inválido, debe ser mayor que 0." @@ -1472,10 +1460,6 @@ msgstr "El hipervisor con el ID '%s' no se ha podido encontrar. " #, python-format -msgid "Hypervisor: %s" -msgstr "Hipervisor: %s" - -#, python-format msgid "IP allocation over quota in pool %s." msgstr "La asignación IP excede la capacidad en pool %s." @@ -4080,9 +4064,6 @@ msgid "fping utility is not found." msgstr "No se encuentra el programa de utilidad fping." 
-msgid "host" -msgstr "host" - #, python-format msgid "href %s does not contain version" msgstr "href %s no contiene la versión" @@ -4354,6 +4335,3 @@ msgid "you can not pass project if the scope is private" msgstr "No puede aprobar el proyecto si el alcance es privado" - -msgid "zone" -msgstr "zona" diff -Nru nova-17.0.0/nova/locale/fr/LC_MESSAGES/nova.po nova-17.0.1/nova/locale/fr/LC_MESSAGES/nova.po --- nova-17.0.0/nova/locale/fr/LC_MESSAGES/nova.po 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/locale/fr/LC_MESSAGES/nova.po 2018-03-07 23:31:42.000000000 +0000 @@ -28,7 +28,7 @@ msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-01-03 23:41+0000\n" +"POT-Creation-Date: 2018-02-28 15:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -37,7 +37,7 @@ "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" +"X-Generator: Zanata 4.3.3\n" "Language-Team: French\n" #, python-format @@ -126,18 +126,6 @@ msgstr "L'hyperviseur %(type)s ne supporte pas les périphériques PCI" #, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s doit etre <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)sdoit être supérieur à %(min_value)d" - -#, python-format -msgid "%(value_name)s must be an integer" -msgstr "%(value_name)s doit etre un entier." - -#, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "" "La valeur %(worker_name)s de %(workers)s est invalide, elle doit être " @@ -1492,10 +1480,6 @@ msgstr "L'hyperviseur avec l'ID '%s' est introuvable." #, python-format -msgid "Hypervisor: %s" -msgstr "Hyperviseur: %s" - -#, python-format msgid "IP allocation over quota in pool %s." msgstr "L'allocation IP dépasse le quota dans le pool %s." @@ -4092,9 +4076,6 @@ msgid "fping utility is not found." msgstr "L'utilitaire fping est introuvable." 
-msgid "host" -msgstr "host" - #, python-format msgid "href %s does not contain version" msgstr "href %s ne contient pas de version" @@ -4368,6 +4349,3 @@ msgid "you can not pass project if the scope is private" msgstr "Vous ne pouvez passer un projet si le périmètre est privé" - -msgid "zone" -msgstr "zone" diff -Nru nova-17.0.0/nova/locale/it/LC_MESSAGES/nova.po nova-17.0.1/nova/locale/it/LC_MESSAGES/nova.po --- nova-17.0.0/nova/locale/it/LC_MESSAGES/nova.po 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/locale/it/LC_MESSAGES/nova.po 2018-03-07 23:31:42.000000000 +0000 @@ -13,7 +13,7 @@ msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-01-03 23:41+0000\n" +"POT-Creation-Date: 2018-02-28 15:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -22,7 +22,7 @@ "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" +"X-Generator: Zanata 4.3.3\n" "Language-Team: Italian\n" #, python-format @@ -114,18 +114,6 @@ msgstr "l'hypervisor %(type)s non supporta i dispositivi PCI" #, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s deve essere <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s deve essere >= %(min_value)d" - -#, python-format -msgid "%(value_name)s must be an integer" -msgstr "%(value_name)s deve essere un numero intero" - -#, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "" "Il valore %(worker_name)s di %(workers)s non è valido, deve essere maggiore " @@ -1470,10 +1458,6 @@ msgstr "Impossibile trovare hypervisor con ID '%s'." #, python-format -msgid "Hypervisor: %s" -msgstr "Hypervisor: %s" - -#, python-format msgid "IP allocation over quota in pool %s." msgstr "L'allocazione IP supera la quota nel pool %s." @@ -4054,9 +4038,6 @@ msgid "fping utility is not found." msgstr "l'utilità fping non è stata trovata." 
-msgid "host" -msgstr "host" - #, python-format msgid "href %s does not contain version" msgstr "href %s non contiene la versione" @@ -4323,6 +4304,3 @@ msgid "you can not pass project if the scope is private" msgstr "non è possibile passare il progetto se l'ambito è privato" - -msgid "zone" -msgstr "Zona" diff -Nru nova-17.0.0/nova/locale/ja/LC_MESSAGES/nova.po nova-17.0.1/nova/locale/ja/LC_MESSAGES/nova.po --- nova-17.0.0/nova/locale/ja/LC_MESSAGES/nova.po 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/locale/ja/LC_MESSAGES/nova.po 2018-03-07 23:31:42.000000000 +0000 @@ -13,7 +13,7 @@ msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-01-03 23:41+0000\n" +"POT-Creation-Date: 2018-02-28 15:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -22,7 +22,7 @@ "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" +"X-Generator: Zanata 4.3.3\n" "Language-Team: Japanese\n" #, python-format @@ -113,18 +113,6 @@ msgstr "%(type)s ハイパーバイザーは PCI デバイスをサポートしていません" #, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s は <= %(max_value)d 以下でなければなりません" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s は %(min_value)d 以上でなければなりません" - -#, python-format -msgid "%(value_name)s must be an integer" -msgstr "%(value_name)s は整数でなければなりません" - -#, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "" "%(workers)s の %(worker_name)s 値が無効です。0 より大きい値にしなければなりま" @@ -1454,10 +1442,6 @@ msgstr "ID '%s' のハイパーバイザーが見つかりませんでした。" #, python-format -msgid "Hypervisor: %s" -msgstr "ハイパーバイザー: %s" - -#, python-format msgid "IP allocation over quota in pool %s." msgstr "IP の割り当て量がプール %s 内のクォータを超えています。" @@ -4032,9 +4016,6 @@ msgid "fping utility is not found." msgstr "fping ユーティリティーが見つかりません。" -msgid "host" -msgstr "ホスト" - #, python-format msgid "href %s does not contain version" msgstr "href %s にバージョンが含まれていません" @@ -4298,6 +4279,3 @@ msgid "you can not pass project if the scope is private" msgstr "スコープがプライベートである場合、プロジェクトを渡すことはできません" - -msgid "zone" -msgstr "ゾーン" diff -Nru nova-17.0.0/nova/locale/ko_KR/LC_MESSAGES/nova.po nova-17.0.1/nova/locale/ko_KR/LC_MESSAGES/nova.po --- nova-17.0.0/nova/locale/ko_KR/LC_MESSAGES/nova.po 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/locale/ko_KR/LC_MESSAGES/nova.po 2018-03-07 23:31:42.000000000 +0000 @@ -14,16 +14,16 @@ msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-01-03 23:41+0000\n" +"POT-Creation-Date: 2018-02-28 15:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-03-02 12:00+0000\n" "Last-Translator: Ian Y. Choi \n" -"Language: ko-KR\n" +"Language: ko_KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" +"X-Generator: Zanata 4.3.3\n" "Language-Team: Korean (South Korea)\n" #, python-format @@ -111,18 +111,6 @@ msgstr "%(type)s 하이퍼바이저가 PCI 디바이스를 지원하지 않음" #, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s은(는) %(max_value)d보다 작거나 같아야 함" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s은(는) >= %(min_value)d이어야 함. 
" - -#, python-format -msgid "%(value_name)s must be an integer" -msgstr "%(value_name)s은(는) 정수여야 함" - -#, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "" "%(workers)s의 %(worker_name)s 값이 올바르지 않습니다. 해당 값은 0보다 커야 합" @@ -1444,10 +1432,6 @@ msgstr "ID가 '%s'인 하이퍼바이저를 찾을 수 없습니다. " #, python-format -msgid "Hypervisor: %s" -msgstr "하이퍼바이저: %s" - -#, python-format msgid "IP allocation over quota in pool %s." msgstr "%s 풀에서 IP 할당이 할당량을 초과했습니다." @@ -4007,9 +3991,6 @@ msgid "fping utility is not found." msgstr "fping 유틸리티를 찾을 수 없습니다. " -msgid "host" -msgstr "호스트" - #, python-format msgid "href %s does not contain version" msgstr "href %s에 버전이 없음" @@ -4263,6 +4244,3 @@ msgid "you can not pass project if the scope is private" msgstr "개인용 범위인 경우 프로젝트를 전달할 수 없음" - -msgid "zone" -msgstr "영역" diff -Nru nova-17.0.0/nova/locale/pt_BR/LC_MESSAGES/nova.po nova-17.0.1/nova/locale/pt_BR/LC_MESSAGES/nova.po --- nova-17.0.0/nova/locale/pt_BR/LC_MESSAGES/nova.po 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/locale/pt_BR/LC_MESSAGES/nova.po 2018-03-07 23:31:42.000000000 +0000 @@ -19,16 +19,16 @@ msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-01-03 23:41+0000\n" +"POT-Creation-Date: 2018-02-28 15:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:08+0000\n" "Last-Translator: Copied by Zanata \n" -"Language: pt-BR\n" +"Language: pt_BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" +"X-Generator: Zanata 4.3.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format @@ -117,18 +117,6 @@ msgstr "O hypervisor %(type)s não suporta dispositivos PCI" #, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s deve ser <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s deve ser >= %(min_value)d" - -#, python-format -msgid "%(value_name)s must be an integer" -msgstr "%(value_name)s deve ser um número inteiro" - -#, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "%(worker_name)s valor de %(workers)s é inválido, deve ser maior que 0" @@ -1456,10 +1444,6 @@ msgstr "O hypervisor com o ID '%s' não pôde ser localizado." #, python-format -msgid "Hypervisor: %s" -msgstr "Hypervisor: %s" - -#, python-format msgid "IP allocation over quota in pool %s." msgstr "Alocação de IP de cota no conjunto %s." @@ -4022,9 +4006,6 @@ msgid "fping utility is not found." msgstr "utilitário fping não localizado." 
-msgid "host" -msgstr "host" - #, python-format msgid "href %s does not contain version" msgstr "href %s não contém versão" @@ -4296,6 +4277,3 @@ msgid "you can not pass project if the scope is private" msgstr "não será possível aprovar o projeto se o escopo for privado" - -msgid "zone" -msgstr "zona" diff -Nru nova-17.0.0/nova/locale/ru/LC_MESSAGES/nova.po nova-17.0.1/nova/locale/ru/LC_MESSAGES/nova.po --- nova-17.0.0/nova/locale/ru/LC_MESSAGES/nova.po 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/locale/ru/LC_MESSAGES/nova.po 2018-03-07 23:31:42.000000000 +0000 @@ -16,7 +16,7 @@ msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-01-03 23:41+0000\n" +"POT-Creation-Date: 2018-02-28 15:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -27,7 +27,7 @@ "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" +"X-Generator: Zanata 4.3.3\n" "Language-Team: Russian\n" #, python-format @@ -117,18 +117,6 @@ msgstr "Гипервизор %(type)s не поддерживает устройства PCI" #, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s должно быть <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s должен быть >= %(min_value)d" - -#, python-format -msgid "%(value_name)s must be an integer" -msgstr "%(value_name)s должен быть целым числом" - -#, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "" "Значение %(worker_name)s, равное %(workers)s, является недопустимым. " @@ -1462,10 +1450,6 @@ msgstr "Гипервизор с ИД '%s' не найден." #, python-format -msgid "Hypervisor: %s" -msgstr "Гипервизор: %s" - -#, python-format msgid "IP allocation over quota in pool %s." msgstr "Превышение квоты выделения IP-адресов в пуле %s." @@ -3994,9 +3978,6 @@ msgid "fping utility is not found." msgstr "Утилита fping не найдена." 
-msgid "host" -msgstr "Узел" - #, python-format msgid "href %s does not contain version" msgstr "href %s не содержит версию" @@ -4264,6 +4245,3 @@ msgid "you can not pass project if the scope is private" msgstr "Нельзя запустить проект, если область является частной" - -msgid "zone" -msgstr "Зона" diff -Nru nova-17.0.0/nova/locale/tr_TR/LC_MESSAGES/nova.po nova-17.0.1/nova/locale/tr_TR/LC_MESSAGES/nova.po --- nova-17.0.0/nova/locale/tr_TR/LC_MESSAGES/nova.po 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/locale/tr_TR/LC_MESSAGES/nova.po 2018-03-07 23:31:42.000000000 +0000 @@ -10,16 +10,16 @@ msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-01-03 23:41+0000\n" +"POT-Creation-Date: 2018-02-28 15:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:09+0000\n" "Last-Translator: Copied by Zanata \n" -"Language: tr-TR\n" +"Language: tr_TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" +"X-Generator: Zanata 4.3.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format @@ -70,18 +70,6 @@ msgstr "%(type)s hipervizörü PCI aygıtlarını desteklemiyor" #, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s <= %(max_value)d olmalı" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s >= %(min_value)d olmalı" - -#, python-format -msgid "%(value_name)s must be an integer" -msgstr "%(value_name)s tam sayı olmalı" - -#, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "%(workers)s in %(worker_name)s değeri geçersiz, 0'dan büyük olmalı" @@ -1121,10 +1109,6 @@ msgstr "'%s' kimlikli hipervizör bulunamadı." #, python-format -msgid "Hypervisor: %s" -msgstr "Hipervizör: %s" - -#, python-format msgid "IP allocation over quota in pool %s." msgstr "%s havuzundaki IP ayırma kota üzerinde." @@ -3239,9 +3223,6 @@ msgid "fping utility is not found." msgstr "fping aracı bulunamadı." 
-msgid "host" -msgstr "Host" - #, python-format msgid "href %s does not contain version" msgstr "%s referansı versiyon içermiyor" @@ -3459,6 +3440,3 @@ msgid "you can not pass project if the scope is private" msgstr "kapsam özel ise proje geçiremezsiniz" - -msgid "zone" -msgstr "Bölge" diff -Nru nova-17.0.0/nova/locale/zh_CN/LC_MESSAGES/nova.po nova-17.0.1/nova/locale/zh_CN/LC_MESSAGES/nova.po --- nova-17.0.0/nova/locale/zh_CN/LC_MESSAGES/nova.po 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/locale/zh_CN/LC_MESSAGES/nova.po 2018-03-07 23:31:42.000000000 +0000 @@ -35,17 +35,17 @@ msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-01-03 23:41+0000\n" +"POT-Creation-Date: 2018-02-28 15:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-06-24 04:35+0000\n" "Last-Translator: blkart \n" -"Language: zh-CN\n" +"Language: zh_CN\n" "Language-Team: Chinese (China)\n" "Plural-Forms: nplurals=1; plural=0\n" "Generated-By: Babel 2.2.0\n" -"X-Generator: Zanata 3.9.6\n" +"X-Generator: Zanata 4.3.3\n" msgid "\"Look for the VDIs failed" msgstr "查找VDI失败" @@ -134,18 +134,6 @@ msgstr "%(type)s监测器不支持PCI设备" #, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s 必须小于或等于 %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s 必须大于或等于 %(min_value)d" - -#, python-format -msgid "%(value_name)s must be an integer" -msgstr "%(value_name)s 必须为整数" - -#, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "工作线程%(worker_name)s的数量%(workers)s非法,必须大于0" @@ -1442,10 +1430,6 @@ msgstr "找不到具有标识“%s”的管理程序。" #, python-format -msgid "Hypervisor: %s" -msgstr "监测器:%s" - -#, python-format msgid "IP allocation over quota in pool %s." msgstr "IP分配操作池%s的配额。" @@ -3976,9 +3960,6 @@ msgid "fping utility is not found." 
msgstr "找不到 fping 实用程序。" -msgid "host" -msgstr "主机" - #, python-format msgid "href %s does not contain version" msgstr "href %s 不包含版本" @@ -4232,6 +4213,3 @@ msgid "you can not pass project if the scope is private" msgstr "如果范围是私有的,您不能跨过这个项目" - -msgid "zone" -msgstr "域" diff -Nru nova-17.0.0/nova/locale/zh_TW/LC_MESSAGES/nova.po nova-17.0.1/nova/locale/zh_TW/LC_MESSAGES/nova.po --- nova-17.0.0/nova/locale/zh_TW/LC_MESSAGES/nova.po 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/locale/zh_TW/LC_MESSAGES/nova.po 2018-03-07 23:31:42.000000000 +0000 @@ -12,16 +12,16 @@ msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-01-03 23:41+0000\n" +"POT-Creation-Date: 2018-02-28 15:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:10+0000\n" "Last-Translator: Copied by Zanata \n" -"Language: zh-TW\n" +"Language: zh_TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" +"X-Generator: Zanata 4.3.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format @@ -108,18 +108,6 @@ msgstr "%(type)s Hypervisor 不支援 PCI 裝置" #, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s 必須 <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s 必須 >= %(min_value)d" - -#, python-format -msgid "%(value_name)s must be an integer" -msgstr "%(value_name)s 必須是整數" - -#, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "%(workers)s 的 %(worker_name)s 值無效,必須大於 0" @@ -1326,10 +1314,6 @@ msgstr "找不到 ID 為 '%s' 的 Hypervisor。" #, python-format -msgid "Hypervisor: %s" -msgstr "Hypervisor:%s" - -#, python-format msgid "IP allocation over quota in pool %s." msgstr "IP 配置超過儲存區 %s 中的配額。" @@ -3735,9 +3719,6 @@ msgid "fping utility is not found." msgstr "找不到 fping 公用程式。" -msgid "host" -msgstr "主機" - #, python-format msgid "href %s does not contain version" msgstr "href %s 不包含版本" @@ -3984,6 +3965,3 @@ msgid "you can not pass project if the scope is private" msgstr "如果範圍是專用的,則您無法傳遞專案" - -msgid "zone" -msgstr "區域" diff -Nru nova-17.0.0/nova/scheduler/client/report.py nova-17.0.1/nova/scheduler/client/report.py --- nova-17.0.0/nova/scheduler/client/report.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/scheduler/client/report.py 2018-03-07 23:31:42.000000000 +0000 @@ -1090,9 +1090,10 @@ self._delete_inventory(context, rp_uuid) @safe_connect - def _ensure_traits(self, traits): + def _ensure_traits(self, context, traits): """Make sure all specified traits exist in the placement service. + :param context: The security context :param traits: Iterable of trait strings to ensure exist. :raises: TraitCreationFailed if traits contains a trait that did not exist in placement, and couldn't be created. When this @@ -1118,7 +1119,8 @@ # Might be neat to have a batch create. But creating multiple # traits will generally happen once, at initial startup, if at all. 
for trait in traits_to_create: - resp = self.put('/traits/' + trait, None, version='1.6') + resp = self.put('/traits/' + trait, None, version='1.6', + global_request_id=context.global_id) if not resp: raise exception.TraitCreationFailed(name=trait, error=resp.text) @@ -1136,11 +1138,12 @@ raise exception.TraitRetrievalFailed(error=resp.text) @safe_connect - def set_traits_for_provider(self, rp_uuid, traits): + def set_traits_for_provider(self, context, rp_uuid, traits): """Replace a provider's traits with those specified. The provider must exist - this method does not attempt to create it. + :param context: The security context :param rp_uuid: The UUID of the provider whose traits are to be updated :param traits: Iterable of traits to set on the provider :raises: ResourceProviderUpdateConflict if the provider's generation @@ -1158,7 +1161,7 @@ if not self._provider_tree.have_traits_changed(rp_uuid, traits): return - self._ensure_traits(traits) + self._ensure_traits(context, traits) url = '/resource_providers/%s/traits' % rp_uuid # NOTE(efried): Don't use the DELETE API when traits is empty, because @@ -1170,7 +1173,8 @@ 'resource_provider_generation': generation, 'traits': traits, } - resp = self.put(url, payload, version='1.6') + resp = self.put(url, payload, version='1.6', + global_request_id=context.global_id) if resp.status_code == 200: json = resp.json() @@ -1201,11 +1205,12 @@ raise exception.ResourceProviderUpdateFailed(url=url, error=resp.text) @safe_connect - def set_aggregates_for_provider(self, rp_uuid, aggregates): + def set_aggregates_for_provider(self, context, rp_uuid, aggregates): """Replace a provider's aggregates with those specified. The provider must exist - this method does not attempt to create it. + :param context: The security context :param rp_uuid: The UUID of the provider whose aggregates are to be updated. :param aggregates: Iterable of aggregates to set on the provider. 
@@ -1214,7 +1219,8 @@ # TODO(efried): Handle generation conflicts when supported by placement url = '/resource_providers/%s/aggregates' % rp_uuid aggregates = list(aggregates) if aggregates else [] - resp = self.put(url, aggregates, version='1.1') + resp = self.put(url, aggregates, version='1.1', + global_request_id=context.global_id) if resp.status_code == 200: placement_aggs = resp.json()['aggregates'] @@ -1376,7 +1382,7 @@ return allocations.get( rp_uuid, {}).get('resources', {}) - def _allocate_for_instance(self, rp_uuid, instance): + def _allocate_for_instance(self, context, rp_uuid, instance): my_allocations = _instance_to_allocations_dict(instance) current_allocations = self.get_allocations_for_consumer_by_provider( rp_uuid, instance.uuid) @@ -1390,8 +1396,9 @@ LOG.debug('Sending allocation for instance %s', my_allocations, instance=instance) - res = self.put_allocations(rp_uuid, instance.uuid, my_allocations, - instance.project_id, instance.user_id) + res = self.put_allocations(context, rp_uuid, instance.uuid, + my_allocations, instance.project_id, + instance.user_id) if res: LOG.info('Submitted allocation for instance', instance=instance) @@ -1491,8 +1498,8 @@ return r.status_code == 204 @safe_connect - def remove_provider_from_instance_allocation(self, consumer_uuid, rp_uuid, - user_id, project_id, + def remove_provider_from_instance_allocation(self, context, consumer_uuid, + rp_uuid, user_id, project_id, resources): """Grabs an allocation for a particular consumer UUID, strips parts of the allocation that refer to a supplied resource provider UUID, and @@ -1508,6 +1515,7 @@ subtract resources from the single allocation to ensure we do not exceed the reserved or max_unit amounts for the resource on the host. + :param context: The security context :param consumer_uuid: The instance/consumer UUID :param rp_uuid: The UUID of the provider whose resources we wish to remove from the consumer's allocation @@ -1580,7 +1588,8 @@ LOG.debug("Sending updated allocation %s for instance %s after " "removing resources for %s.", new_allocs, consumer_uuid, rp_uuid) - r = self.put(url, payload, version='1.10') + r = self.put(url, payload, version='1.10', + global_request_id=context.global_id) if r.status_code != 204: LOG.warning("Failed to save allocation for %s. Got HTTP %s: %s", consumer_uuid, r.status_code, r.text) @@ -1656,8 +1665,8 @@ @safe_connect @retries - def put_allocations(self, rp_uuid, consumer_uuid, alloc_data, project_id, - user_id): + def put_allocations(self, context, rp_uuid, consumer_uuid, alloc_data, + project_id, user_id): """Creates allocation records for the supplied instance UUID against the supplied resource provider. @@ -1665,6 +1674,7 @@ Once shared storage and things like NUMA allocations are a reality, this will change to allocate against multiple providers. + :param context: The security context :param rp_uuid: The UUID of the resource provider to allocate against. :param consumer_uuid: The instance's UUID. 
:param alloc_data: Dict, keyed by resource class, of amounts to @@ -1688,7 +1698,8 @@ 'user_id': user_id, } url = '/allocations/%s' % consumer_uuid - r = self.put(url, payload, version='1.8') + r = self.put(url, payload, version='1.8', + global_request_id=context.global_id) if r.status_code == 406: # microversion 1.8 not available so try the earlier way # TODO(melwitt): Remove this when we can be sure all placement @@ -1734,7 +1745,7 @@ def update_instance_allocation(self, context, compute_node, instance, sign): if sign > 0: - self._allocate_for_instance(compute_node.uuid, instance) + self._allocate_for_instance(context, compute_node.uuid, instance) else: self.delete_allocation_for_instance(context, instance.uuid) diff -Nru nova-17.0.0/nova/scheduler/utils.py nova-17.0.1/nova/scheduler/utils.py --- nova-17.0.0/nova/scheduler/utils.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/scheduler/utils.py 2018-03-07 23:31:42.000000000 +0000 @@ -797,10 +797,11 @@ user_id, allocation_request_version=allocation_request_version) -def remove_allocation_from_compute(instance, compute_node_uuid, reportclient, - flavor=None): +def remove_allocation_from_compute(context, instance, compute_node_uuid, + reportclient, flavor=None): """Removes the instance allocation from the compute host. + :param context: The request context :param instance: the instance object owning the allocation :param compute_node_uuid: the UUID of the compute node where the allocation needs to be removed @@ -817,5 +818,5 @@ my_resources = resources_from_flavor(instance, flavor) return reportclient.remove_provider_from_instance_allocation( - instance.uuid, compute_node_uuid, instance.user_id, + context, instance.uuid, compute_node_uuid, instance.user_id, instance.project_id, my_resources) diff -Nru nova-17.0.0/nova/tests/functional/api/openstack/placement/gabbits/traits.yaml nova-17.0.1/nova/tests/functional/api/openstack/placement/gabbits/traits.yaml --- nova-17.0.0/nova/tests/functional/api/openstack/placement/gabbits/traits.yaml 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/functional/api/openstack/placement/gabbits/traits.yaml 2018-03-07 23:31:34.000000000 +0000 @@ -28,6 +28,12 @@ response_strings: - 'The trait is invalid. 
A valid trait must be no longer than 255 characters, start with the prefix \"CUSTOM_\" and use following characters: \"A\"-\"Z\", \"0\"-\"9\" and \"_\"' +- name: create a trait earlier version + PUT: /traits/CUSTOM_TRAIT_1 + request_headers: + openstack-api-version: placement 1.5 + status: 404 + - name: create a trait PUT: /traits/CUSTOM_TRAIT_1 status: 201 @@ -47,6 +53,12 @@ response_forbidden_headers: - content-type +- name: get a trait earlier version + GET: /traits/CUSTOM_TRAIT_1 + request_headers: + openstack-api-version: placement 1.5 + status: 404 + - name: get a trait GET: /traits/CUSTOM_TRAIT_1 status: 204 @@ -60,6 +72,12 @@ GET: /traits/NON_EXISTED status: 404 +- name: delete a trait earlier version + DELETE: /traits/CUSTOM_TRAIT_1 + request_headers: + openstack-api-version: placement 1.5 + status: 404 + - name: delete a trait DELETE: /traits/CUSTOM_TRAIT_1 status: 204 @@ -107,6 +125,12 @@ - MISC_SHARES_VIA_AGGREGATE - HW_CPU_X86_SHA +- name: list traits earlier version + GET: /traits + request_headers: + openstack-api-version: placement 1.5 + status: 404 + - name: list traits with invalid format of name parameter GET: /traits?name=in_abc status: 400 @@ -213,6 +237,12 @@ response_forbidden_headers: - content-type +- name: list traits for resource provider earlier version + GET: /resource_providers/$ENVIRON['RP_UUID']/traits + request_headers: + openstack-api-version: placement 1.5 + status: 404 + - name: list traits for resource provider without traits GET: /resource_providers/$ENVIRON['RP_UUID']/traits status: 200 @@ -224,6 +254,13 @@ - cache-control - last-modified +- name: set traits for resource provider earlier version + PUT: /resource_providers/$ENVIRON['RP_UUID']/traits + request_headers: + content-type: application/json + openstack-api-version: placement 1.5 + status: 404 + - name: set traits for resource provider PUT: /resource_providers/$ENVIRON['RP_UUID']/traits request_headers: @@ -270,7 +307,19 @@ - CUSTOM_TRAIT_1 - CUSTOM_TRAIT_2 response_strings: - - CUSTOM_TRAIT_1 + - "'resource_provider_generation' is a required property" + +- name: set traits for resource provider with invalid resource provider generation + PUT: /resource_providers/$ENVIRON['RP_UUID']/traits + request_headers: + content-type: application/json + status: 400 + data: + traits: + - CUSTOM_TRAIT_1 + resource_provider_generation: invalid_generation + response_strings: + - "'invalid_generation' is not of type 'integer'" - name: set traits for resource provider with conflict generation PUT: /resource_providers/$ENVIRON['RP_UUID']/traits @@ -300,6 +349,31 @@ - NON_EXISTED_TRAIT1 - NON_EXISTED_TRAIT2 +- name: set traits for resource provider with invalid type of traits + PUT: /resource_providers/$ENVIRON['RP_UUID']/traits + request_headers: + content-type: application/json + status: 400 + data: + traits: invalid_type + resource_provider_generation: 1 + response_strings: + - "'invalid_type' is not of type 'array'" + +- name: set traits for resource provider with additional properties + PUT: /resource_providers/$ENVIRON['RP_UUID']/traits + request_headers: + content-type: application/json + status: 400 + data: + traits: + - CUSTOM_TRAIT_1 + - CUSTOM_TRAIT_2 + resource_provider_generation: 1 + additional: additional + response_strings: + - 'Additional properties are not allowed' + - name: set traits for non_existed resource provider PUT: /resource_providers/non_existed/traits request_headers: @@ -336,6 +410,12 @@ response_strings: - No resource provider with uuid non_existed found +- name: delete traits for 
resource provider earlier version + DELETE: /resource_providers/$ENVIRON['RP_UUID']/traits + request_headers: + openstack-api-version: placement 1.5 + status: 404 + - name: delete traits for resource provider DELETE: /resource_providers/$ENVIRON['RP_UUID']/traits status: 204 diff -Nru nova-17.0.0/nova/tests/functional/api/openstack/placement/test_report_client.py nova-17.0.1/nova/tests/functional/api/openstack/placement/test_report_client.py --- nova-17.0.0/nova/tests/functional/api/openstack/placement/test_report_client.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/functional/api/openstack/placement/test_report_client.py 2018-03-07 23:31:42.000000000 +0000 @@ -331,7 +331,8 @@ self.client.update_compute_node(self.context, self.compute_node) # The compute node is associated with two of the shared storages self.client.set_aggregates_for_provider( - self.compute_uuid, set([uuids.agg_disk_1, uuids.agg_disk_2])) + self.context, self.compute_uuid, + set([uuids.agg_disk_1, uuids.agg_disk_2])) # Register two SR-IOV PFs with VF and bandwidth inventory for x in (1, 2): @@ -357,10 +358,11 @@ }, }, parent_provider_uuid=self.compute_uuid) # They're associated with an IP address aggregate - self.client.set_aggregates_for_provider(uuid, [uuids.agg_ip]) + self.client.set_aggregates_for_provider(self.context, uuid, + [uuids.agg_ip]) # Set some traits on 'em self.client.set_traits_for_provider( - uuid, ['CUSTOM_PHYSNET_%d' % x]) + self.context, uuid, ['CUSTOM_PHYSNET_%d' % x]) # Register three shared storage pools with disk inventory for x in (1, 2, 3): @@ -379,11 +381,12 @@ }) # Mark as a sharing provider self.client.set_traits_for_provider( - uuid, ['MISC_SHARES_VIA_AGGREGATE']) + self.context, uuid, ['MISC_SHARES_VIA_AGGREGATE']) # Associate each with its own aggregate. The compute node is # associated with the first two (agg_disk_1 and agg_disk_2). agg = getattr(uuids, 'agg_disk_%d' % x) - self.client.set_aggregates_for_provider(uuid, [agg]) + self.client.set_aggregates_for_provider(self.context, uuid, + [agg]) # Register a shared IP address provider with IP address inventory self.client.set_inventory_for_provider( @@ -399,9 +402,11 @@ }) # Mark as a sharing provider, and add another trait self.client.set_traits_for_provider( - uuids.sip, set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO'])) + self.context, uuids.sip, + set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO'])) # It's associated with the same aggregate as both PFs - self.client.set_aggregates_for_provider(uuids.sip, [uuids.agg_ip]) + self.client.set_aggregates_for_provider(self.context, uuids.sip, + [uuids.agg_ip]) # Register a shared network bandwidth provider self.client.set_inventory_for_provider( @@ -417,9 +422,10 @@ }) # Mark as a sharing provider self.client.set_traits_for_provider( - uuids.sbw, ['MISC_SHARES_VIA_AGGREGATE']) + self.context, uuids.sbw, ['MISC_SHARES_VIA_AGGREGATE']) # It's associated with some other aggregate. - self.client.set_aggregates_for_provider(uuids.sbw, [uuids.agg_bw]) + self.client.set_aggregates_for_provider(self.context, uuids.sbw, + [uuids.agg_bw]) # Setup is done. 
Grab the ProviderTree prov_tree = self.client.get_provider_tree_and_ensure_root( diff -Nru nova-17.0.0/nova/tests/functional/test_servers.py nova-17.0.1/nova/tests/functional/test_servers.py --- nova-17.0.0/nova/tests/functional/test_servers.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/functional/test_servers.py 2018-03-07 23:31:42.000000000 +0000 @@ -2728,9 +2728,10 @@ allocations = self._get_allocations_by_server_uuid(server['id']) self.assertIn(source_rp_uuid, allocations) - def test_resize_to_same_host_prep_resize_fails(self): + def _test_resize_to_same_host_instance_fails(self, failing_method, + event_name): """Tests that when we resize to the same host and resize fails in - the prep_resize method, we cleanup the allocations before rescheduling. + the given method, we cleanup the allocations before rescheduling. """ # make sure that the test only uses a single host compute2_service_id = self.admin_api.get_services( @@ -2742,16 +2743,17 @@ server = self._boot_and_check_allocations(self.flavor1, hostname) - def fake_prep_resize(*args, **kwargs): + def fake_resize_method(*args, **kwargs): # Ensure the allocations are doubled now before we fail. usages = self._get_provider_usages(rp_uuid) self.assertFlavorsMatchAllocation( self.flavor1, self.flavor2, usages) - raise test.TestingException('Simulated _prep_resize failure.') + raise test.TestingException('Simulated resize failure.') # Yes this isn't great in a functional test, but it's simple. - self.stub_out('nova.compute.manager.ComputeManager._prep_resize', - fake_prep_resize) + self.stub_out( + 'nova.compute.manager.ComputeManager.%s' % failing_method, + fake_resize_method) self.flags(allow_resize_to_same_host=True) resize_req = { @@ -2762,7 +2764,7 @@ self.api.post_server_action(server['id'], resize_req) self._wait_for_action_fail_completion( - server, instance_actions.RESIZE, 'compute_prep_resize') + server, instance_actions.RESIZE, event_name) # Ensure the allocation records still exist on the host. source_rp_uuid = self._get_provider_uuid_by_host(hostname) @@ -2771,6 +2773,18 @@ # allocation which just leaves us with the original flavor. self.assertFlavorMatchesAllocation(self.flavor1, source_usages) + def test_resize_to_same_host_prep_resize_fails(self): + self._test_resize_to_same_host_instance_fails( + '_prep_resize', 'compute_prep_resize') + + def test_resize_instance_fails_allocation_cleanup(self): + self._test_resize_to_same_host_instance_fails( + '_resize_instance', 'compute_resize_instance') + + def test_finish_resize_fails_allocation_cleanup(self): + self._test_resize_to_same_host_instance_fails( + '_finish_resize', 'compute_finish_resize') + def _test_resize_reschedule_uses_host_lists(self, fails, num_alts=None): """Test that when a resize attempt fails, the retry comes from the supplied host_list, and does not call the scheduler. 
diff -Nru nova-17.0.0/nova/tests/unit/api/openstack/compute/test_server_actions.py nova-17.0.1/nova/tests/unit/api/openstack/compute/test_server_actions.py --- nova-17.0.0/nova/tests/unit/api/openstack/compute/test_server_actions.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/api/openstack/compute/test_server_actions.py 2018-03-07 23:31:42.000000000 +0000 @@ -982,6 +982,10 @@ snapshot = dict(id=_fake_id('d')) with test.nested( + mock.patch.object( + self.controller.compute_api.volume_api, 'get_absolute_limits', + return_value={'totalSnapshotsUsed': 0, + 'maxTotalSnapshots': 10}), mock.patch.object(self.controller.compute_api.compute_rpcapi, 'quiesce_instance', side_effect=exception.InstanceQuiesceNotSupported( @@ -991,7 +995,7 @@ mock.patch.object(self.controller.compute_api.volume_api, 'create_snapshot_force', return_value=snapshot), - ) as (mock_quiesce, mock_vol_get, mock_vol_create): + ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create): if mock_vol_create_side_effect: mock_vol_create.side_effect = mock_vol_create_side_effect @@ -1086,6 +1090,10 @@ snapshot = dict(id=_fake_id('d')) with test.nested( + mock.patch.object( + self.controller.compute_api.volume_api, 'get_absolute_limits', + return_value={'totalSnapshotsUsed': 0, + 'maxTotalSnapshots': 10}), mock.patch.object(self.controller.compute_api.compute_rpcapi, 'quiesce_instance', side_effect=exception.InstanceQuiesceNotSupported( @@ -1095,7 +1103,7 @@ mock.patch.object(self.controller.compute_api.volume_api, 'create_snapshot_force', return_value=snapshot), - ) as (mock_quiesce, mock_vol_get, mock_vol_create): + ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create): response = self.controller._action_create_image(self.req, FAKE_UUID, body=body) diff -Nru nova-17.0.0/nova/tests/unit/compute/test_compute_api.py nova-17.0.1/nova/tests/unit/compute/test_compute_api.py --- nova-17.0.0/nova/tests/unit/compute/test_compute_api.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/compute/test_compute_api.py 2018-03-07 23:31:42.000000000 +0000 @@ -3043,7 +3043,8 @@ instance) def _test_snapshot_volume_backed(self, quiesce_required, quiesce_fails, - vm_state=vm_states.ACTIVE): + vm_state=vm_states.ACTIVE, + snapshot_fails=False, limits=None): fake_sys_meta = {'image_min_ram': '11', 'image_min_disk': '22', 'image_container_format': 'ami', @@ -3089,6 +3090,8 @@ return {'id': volume_id, 'display_description': ''} def fake_volume_create_snapshot(context, volume_id, name, description): + if snapshot_fails: + raise exception.OverQuota(overs="snapshots") return {'id': '%s-snapshot' % volume_id} def fake_quiesce_instance(context, instance): @@ -3100,6 +3103,11 @@ def fake_unquiesce_instance(context, instance, mapping=None): quiesced[1] = True + def fake_get_absolute_limits(context): + if limits is not None: + return limits + return {"totalSnapshotsUsed": 0, "maxTotalSnapshots": 10} + self.stub_out('nova.objects.BlockDeviceMappingList' '.get_by_instance_uuid', fake_bdm_list_get_by_instance_uuid) @@ -3148,13 +3156,24 @@ 'destination_type': 'volume', 'delete_on_termination': False, 'tag': None}) + limits_patcher = mock.patch.object( + self.compute_api.volume_api, 'get_absolute_limits', + side_effect=fake_get_absolute_limits) + limits_patcher.start() + self.addCleanup(limits_patcher.stop) + with test.nested( mock.patch.object(compute_api.API, '_record_action_start'), mock.patch.object(compute_utils, 'EventReporter')) as ( mock_record, mock_event): # All the db_only fields and the 
volume ones are removed - self.compute_api.snapshot_volume_backed( - self.context, instance, 'test-snapshot') + if snapshot_fails: + self.assertRaises(exception.OverQuota, + self.compute_api.snapshot_volume_backed, + self.context, instance, "test-snapshot") + else: + self.compute_api.snapshot_volume_backed( + self.context, instance, 'test-snapshot') self.assertEqual(quiesce_expected, quiesced[0]) self.assertEqual(quiesce_expected, quiesced[1]) @@ -3187,7 +3206,9 @@ 'guest_format': 'swap', 'delete_on_termination': True, 'tag': None}) instance_bdms.append(bdm) - expect_meta['properties']['block_device_mapping'].append( + # The non-volume image mapping will go at the front of the list + # because the volume BDMs are processed separately. + expect_meta['properties']['block_device_mapping'].insert(0, {'guest_format': 'swap', 'boot_index': -1, 'no_device': False, 'image_id': None, 'volume_id': None, 'disk_bus': None, 'volume_size': None, 'source_type': 'blank', @@ -3204,8 +3225,13 @@ mock_record, mock_event): # Check that the mappings from the image properties are not # included - self.compute_api.snapshot_volume_backed( - self.context, instance, 'test-snapshot') + if snapshot_fails: + self.assertRaises(exception.OverQuota, + self.compute_api.snapshot_volume_backed, + self.context, instance, "test-snapshot") + else: + self.compute_api.snapshot_volume_backed( + self.context, instance, 'test-snapshot') self.assertEqual(quiesce_expected, quiesced[0]) self.assertEqual(quiesce_expected, quiesced[1]) @@ -3223,6 +3249,29 @@ def test_snapshot_volume_backed_with_quiesce(self): self._test_snapshot_volume_backed(True, False) + def test_snapshot_volume_backed_with_quiesce_create_snap_fails(self): + self._test_snapshot_volume_backed(quiesce_required=True, + quiesce_fails=False, + snapshot_fails=True) + + def test_snapshot_volume_backed_unlimited_quota(self): + """Tests that there is unlimited quota on volume snapshots so we + don't perform a quota check. + """ + limits = {'maxTotalSnapshots': -1, 'totalSnapshotsUsed': 0} + self._test_snapshot_volume_backed( + quiesce_required=False, quiesce_fails=False, limits=limits) + + def test_snapshot_volume_backed_over_quota_before_snapshot(self): + """Tests that the up-front check on quota fails before actually + attempting to snapshot any volumes. 
+ """ + limits = {'maxTotalSnapshots': 1, 'totalSnapshotsUsed': 1} + self.assertRaises(exception.OverQuota, + self._test_snapshot_volume_backed, + quiesce_required=False, quiesce_fails=False, + limits=limits) + def test_snapshot_volume_backed_with_quiesce_skipped(self): self._test_snapshot_volume_backed(False, True) diff -Nru nova-17.0.0/nova/tests/unit/compute/test_compute_mgr.py nova-17.0.1/nova/tests/unit/compute/test_compute_mgr.py --- nova-17.0.0/nova/tests/unit/compute/test_compute_mgr.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/compute/test_compute_mgr.py 2018-03-07 23:31:42.000000000 +0000 @@ -725,7 +725,7 @@ self.compute.init_host() mock_remove_allocation.assert_called_once_with( - deleted_instance.uuid, uuids.our_node_uuid, + self.context, deleted_instance.uuid, uuids.our_node_uuid, deleted_instance.user_id, deleted_instance.project_id, mock.sentinel.my_resources) @@ -3595,8 +3595,8 @@ get_node.assert_called_once_with( self.context, our_host, migration.source_node) remove_allocation.assert_called_once_with( - instance_2.uuid, uuids.our_node_uuid, uuids.user_id, - uuids.project_id, mock.sentinel.resources) + self.context, instance_2.uuid, uuids.our_node_uuid, + uuids.user_id, uuids.project_id, mock.sentinel.resources) def test_destroy_evacuated_instances_node_deleted(self): our_host = self.compute.host @@ -3672,8 +3672,8 @@ # but only instance_2 is deallocated as the compute node for # instance_1 is already deleted remove_allocation.assert_called_once_with( - instance_2.uuid, uuids.our_node_uuid, uuids.user_id, - uuids.project_id, mock.sentinel.resources) + self.context, instance_2.uuid, uuids.our_node_uuid, + uuids.user_id, uuids.project_id, mock.sentinel.resources) self.assertEqual(2, get_node.call_count) @@ -3923,10 +3923,13 @@ self.assertFalse( rt.delete_allocation_for_evacuated_instance.called) + @mock.patch('nova.context.RequestContext.elevated') @mock.patch('nova.compute.utils.add_instance_fault_from_exc') @mock.patch.object(manager.ComputeManager, '_error_out_instance_on_exception') - def test_rebuild_driver_error_evacuate(self, mock_error, mock_aiffe): + def test_rebuild_driver_error_evacuate(self, mock_error, mock_aiffe, + mock_elevated): + mock_elevated.return_value = self.context instance = fake_instance.fake_instance_obj(self.context) ex = test.TestingException('foo') with mock.patch.object(self.compute, '_get_resource_tracker') as mrt: @@ -3935,7 +3938,7 @@ recreate=True, scheduled_node='foo') rt = mrt.return_value delete_alloc = rt.delete_allocation_for_evacuated_instance - delete_alloc.assert_called_once_with(instance, 'foo', + delete_alloc.assert_called_once_with(self.context, instance, 'foo', node_type='destination') @mock.patch('nova.context.RequestContext.elevated') @@ -4018,7 +4021,7 @@ mock_validate_policy.assert_called_once_with( elevated_context, instance, {'group': [uuids.group]}) mock_delete_allocation.assert_called_once_with( - instance, 'fake-node', node_type='destination') + elevated_context, instance, 'fake-node', node_type='destination') mock_notify.assert_called_once_with( elevated_context, instance, 'fake-mini', action='rebuild', bdms=None, exception=exc, phase='error') @@ -5994,6 +5997,7 @@ expected_attrs=['metadata', 'system_metadata', 'info_cache']) self.migration = objects.Migration( context=self.context.elevated(), + uuid=mock.sentinel.uuid, instance_uuid=self.instance.uuid, new_instance_type_id=7, dest_compute=None, @@ -6393,7 +6397,7 @@ rt.get_node_uuid.assert_called_once_with(mock.sentinel.node) remove = 
mock_rc.remove_provider_from_instance_allocation remove.assert_called_once_with( - instance.uuid, rt.get_node_uuid.return_value, + self.context, instance.uuid, rt.get_node_uuid.return_value, instance.user_id, instance.project_id, mock_resources.return_value) do_it() @@ -7022,7 +7026,8 @@ # ...so we should have called the old style delete mock_delete.assert_not_called() fn = mock_rt.return_value.delete_allocation_for_migrated_instance - fn.assert_called_once_with(self.instance, self.instance.node) + fn.assert_called_once_with(self.context, self.instance, + self.instance.node) def test_post_live_migration_legacy(self): # We have no migrate_data... @@ -7044,7 +7049,8 @@ # ...so we should have called the old style delete mock_delete.assert_not_called() fn = mock_rt.return_value.delete_allocation_for_migrated_instance - fn.assert_called_once_with(self.instance, self.instance.node) + fn.assert_called_once_with(self.context, self.instance, + self.instance.node) def test_post_live_migration_cinder_v3_api(self): # Because live migration has succeeded, _post_live_migration diff -Nru nova-17.0.0/nova/tests/unit/compute/test_compute.py nova-17.0.1/nova/tests/unit/compute/test_compute.py --- nova-17.0.0/nova/tests/unit/compute/test_compute.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/compute/test_compute.py 2018-03-07 23:31:42.000000000 +0000 @@ -4622,6 +4622,7 @@ # ensure that task_state is reverted after a failed operation. migration = objects.Migration(context=self.context.elevated()) migration.instance_uuid = 'b48316c5-71e8-45e4-9884-6c78055b9b13' + migration.uuid = mock.sentinel.uuid migration.new_instance_type_id = '1' instance_type = objects.Flavor() diff -Nru nova-17.0.0/nova/tests/unit/compute/test_resource_tracker.py nova-17.0.1/nova/tests/unit/compute/test_resource_tracker.py --- nova-17.0.0/nova/tests/unit/compute/test_resource_tracker.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/compute/test_resource_tracker.py 2018-03-07 23:31:34.000000000 +0000 @@ -1315,6 +1315,7 @@ self.rt._update(mock.sentinel.ctx, new_compute) rc.set_traits_for_provider.assert_called_once_with( + mock.sentinel.ctx, new_compute.uuid, mock.sentinel.traits, ) @@ -2842,13 +2843,15 @@ mock_resource_from_flavor.return_value = mock_resource instance = _INSTANCE_FIXTURES[0].obj_clone() instance.uuid = uuids.inst0 + ctxt = context.get_admin_context() - self.rt.delete_allocation_for_evacuated_instance(instance, _NODENAME) + self.rt.delete_allocation_for_evacuated_instance( + ctxt, instance, _NODENAME) rc = self.rt.reportclient mock_remove_allocation = rc.remove_provider_from_instance_allocation mock_remove_allocation.assert_called_once_with( - instance.uuid, self.rt.compute_nodes[_NODENAME].uuid, + ctxt, instance.uuid, self.rt.compute_nodes[_NODENAME].uuid, instance.user_id, instance.project_id, mock_resource) diff -Nru nova-17.0.0/nova/tests/unit/compute/test_shelve.py nova-17.0.1/nova/tests/unit/compute/test_shelve.py --- nova-17.0.0/nova/tests/unit/compute/test_shelve.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/compute/test_shelve.py 2018-03-07 23:31:42.000000000 +0000 @@ -454,6 +454,91 @@ self.mock_get_allocs.assert_called_once_with(instance.uuid) mock_get_power_state.assert_called_once_with(self.context, instance) + @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid') + @mock.patch('nova.compute.utils.notify_about_instance_action') + @mock.patch.object(nova.compute.resource_tracker.ResourceTracker, + 'instance_claim') + 
@mock.patch.object(neutron_api.API, 'setup_instance_network_on_host') + @mock.patch.object(nova.virt.fake.SmallFakeDriver, 'spawn', + side_effect=test.TestingException('oops!')) + @mock.patch.object(nova.compute.manager.ComputeManager, + '_prep_block_device', return_value='fake_bdm') + @mock.patch.object(nova.compute.manager.ComputeManager, + '_notify_about_instance_usage') + @mock.patch('nova.utils.get_image_from_system_metadata') + @mock.patch.object(nova.compute.manager.ComputeManager, + '_terminate_volume_connections') + def test_unshelve_spawn_fails_cleanup_volume_connections( + self, mock_terminate_volume_connections, mock_image_meta, + mock_notify_instance_usage, mock_prep_block_device, mock_spawn, + mock_setup_network, mock_instance_claim, + mock_notify_instance_action, mock_get_bdms): + """Tests error handling when a instance fails to unshelve and makes + sure that volume connections are cleaned up from the host + and that the host/node values are unset on the instance. + """ + mock_bdms = mock.Mock() + mock_get_bdms.return_value = mock_bdms + instance = self._create_fake_instance_obj() + node = test_compute.NODENAME + limits = {} + filter_properties = {'limits': limits} + instance.task_state = task_states.UNSHELVING + instance.save() + image_meta = {'properties': {'base_image_ref': uuids.image_id}} + mock_image_meta.return_value = image_meta + + tracking = {'last_state': instance.task_state} + + def fake_claim(context, instance, node, limits): + instance.host = self.compute.host + instance.node = node + requests = objects.InstancePCIRequests(requests=[]) + return claims.Claim(context, instance, node, + self.rt, _fake_resources(), + requests, limits=limits) + mock_instance_claim.side_effect = fake_claim + + def check_save(expected_task_state=None): + if tracking['last_state'] == task_states.UNSHELVING: + # This is before we've failed. + self.assertEqual(task_states.SPAWNING, instance.task_state) + tracking['last_state'] = instance.task_state + elif tracking['last_state'] == task_states.SPAWNING: + # This is after we've failed. 
+ self.assertIsNone(instance.host) + self.assertIsNone(instance.node) + self.assertIsNone(instance.task_state) + tracking['last_state'] = instance.task_state + else: + self.fail('Unexpected save!') + + with mock.patch.object(instance, 'save') as mock_save: + mock_save.side_effect = check_save + self.assertRaises(test.TestingException, + self.compute.unshelve_instance, + self.context, instance, image=None, + filter_properties=filter_properties, node=node) + + mock_notify_instance_action.assert_called_once_with( + self.context, instance, 'fake-mini', action='unshelve', + phase='start', bdms=mock_bdms) + mock_notify_instance_usage.assert_called_once_with( + self.context, instance, 'unshelve.start') + mock_prep_block_device.assert_called_once_with( + self.context, instance, mock_bdms) + mock_setup_network.assert_called_once_with(self.context, instance, + self.compute.host) + mock_instance_claim.assert_called_once_with(self.context, instance, + test_compute.NODENAME, + limits) + mock_spawn.assert_called_once_with( + self.context, instance, test.MatchType(objects.ImageMeta), + injected_files=[], admin_password=None, + allocations={}, network_info=[], block_device_info='fake_bdm') + mock_terminate_volume_connections.assert_called_once_with( + self.context, instance, mock_bdms) + @mock.patch.object(objects.InstanceList, 'get_by_filters') def test_shelved_poll_none_offloaded(self, mock_get_by_filters): # Test instances are not offloaded when shelved_offload_time is -1 diff -Nru nova-17.0.0/nova/tests/unit/scheduler/client/test_report.py nova-17.0.1/nova/tests/unit/scheduler/client/test_report.py --- nova-17.0.0/nova/tests/unit/scheduler/client/test_report.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/scheduler/client/test_report.py 2018-03-07 23:31:42.000000000 +0000 @@ -266,11 +266,14 @@ consumer_uuid = mock.sentinel.consumer data = {"MEMORY_MB": 1024} expected_url = "/allocations/%s" % consumer_uuid - resp = self.client.put_allocations(rp_uuid, consumer_uuid, data, + resp = self.client.put_allocations(self.context, rp_uuid, + consumer_uuid, data, mock.sentinel.project_id, mock.sentinel.user_id) self.assertTrue(resp) - mock_put.assert_called_once_with(expected_url, mock.ANY, version='1.8') + mock_put.assert_called_once_with( + expected_url, mock.ANY, version='1.8', + global_request_id=self.context.global_id) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put') def test_put_allocations_fail_fallback_succeeds(self, mock_put): @@ -285,12 +288,14 @@ consumer_uuid = mock.sentinel.consumer data = {"MEMORY_MB": 1024} expected_url = "/allocations/%s" % consumer_uuid - resp = self.client.put_allocations(rp_uuid, consumer_uuid, data, + resp = self.client.put_allocations(self.context, rp_uuid, + consumer_uuid, data, mock.sentinel.project_id, mock.sentinel.user_id) self.assertTrue(resp) # Should fall back to earlier way if 1.8 fails. 
- call1 = mock.call(expected_url, mock.ANY, version='1.8') + call1 = mock.call(expected_url, mock.ANY, version='1.8', + global_request_id=self.context.global_id) call2 = mock.call(expected_url, mock.ANY) self.assertEqual(2, mock_put.call_count) mock_put.assert_has_calls([call1, call2]) @@ -304,11 +309,14 @@ consumer_uuid = mock.sentinel.consumer data = {"MEMORY_MB": 1024} expected_url = "/allocations/%s" % consumer_uuid - resp = self.client.put_allocations(rp_uuid, consumer_uuid, data, + resp = self.client.put_allocations(self.context, rp_uuid, + consumer_uuid, data, mock.sentinel.project_id, mock.sentinel.user_id) self.assertFalse(resp) - mock_put.assert_called_once_with(expected_url, mock.ANY, version='1.8') + mock_put.assert_called_once_with( + expected_url, mock.ANY, version='1.8', + global_request_id=self.context.global_id) log_msg = mock_warn.call_args[0][0] self.assertIn("Unable to submit allocation for instance", log_msg) @@ -328,13 +336,14 @@ consumer_uuid = mock.sentinel.consumer data = {"MEMORY_MB": 1024} expected_url = "/allocations/%s" % consumer_uuid - resp = self.client.put_allocations(rp_uuid, consumer_uuid, data, + resp = self.client.put_allocations(self.context, rp_uuid, + consumer_uuid, data, mock.sentinel.project_id, mock.sentinel.user_id) self.assertTrue(resp) mock_put.assert_has_calls([ - mock.call(expected_url, mock.ANY, version='1.8'), - mock.call(expected_url, mock.ANY, version='1.8')]) + mock.call(expected_url, mock.ANY, version='1.8', + global_request_id=self.context.global_id)] * 2) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put') def test_put_allocations_retry_gives_up(self, mock_put): @@ -349,14 +358,14 @@ consumer_uuid = mock.sentinel.consumer data = {"MEMORY_MB": 1024} expected_url = "/allocations/%s" % consumer_uuid - resp = self.client.put_allocations(rp_uuid, consumer_uuid, data, + resp = self.client.put_allocations(self.context, rp_uuid, + consumer_uuid, data, mock.sentinel.project_id, mock.sentinel.user_id) self.assertFalse(resp) mock_put.assert_has_calls([ - mock.call(expected_url, mock.ANY, version='1.8'), - mock.call(expected_url, mock.ANY, version='1.8'), - mock.call(expected_url, mock.ANY, version='1.8')]) + mock.call(expected_url, mock.ANY, version='1.8', + global_request_id=self.context.global_id)] * 3) def test_claim_resources_success_with_old_version(self): get_resp_mock = mock.Mock(status_code=200) @@ -898,7 +907,8 @@ project_id = uuids.project_id user_id = uuids.user_id res = self.client.remove_provider_from_instance_allocation( - consumer_uuid, uuids.source, user_id, project_id, mock.Mock()) + self.context, consumer_uuid, uuids.source, user_id, project_id, + mock.Mock()) expected_url = "/allocations/%s" % consumer_uuid # New allocations should only include the destination... @@ -928,7 +938,7 @@ self.assertEqual(expected_allocations, actual_allocations) self.ks_adap_mock.put.assert_called_once_with( expected_url, microversion='1.10', json=mock.ANY, raise_exc=False, - headers={}) + headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertTrue(res) @@ -971,7 +981,8 @@ project_id = uuids.project_id user_id = uuids.user_id res = self.client.remove_provider_from_instance_allocation( - consumer_uuid, uuids.source, user_id, project_id, mock.Mock()) + self.context, consumer_uuid, uuids.source, user_id, project_id, + mock.Mock()) expected_url = "/allocations/%s" % consumer_uuid # New allocations should only include the destination... 
@@ -1009,7 +1020,7 @@ self.assertEqual(expected_allocations, actual_allocations) self.ks_adap_mock.put.assert_called_once_with( expected_url, microversion='1.10', json=mock.ANY, raise_exc=False, - headers={}) + headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertTrue(res) @@ -1043,7 +1054,8 @@ project_id = uuids.project_id user_id = uuids.user_id res = self.client.remove_provider_from_instance_allocation( - consumer_uuid, uuids.source, user_id, project_id, mock.Mock()) + self.context, consumer_uuid, uuids.source, user_id, project_id, + mock.Mock()) self.ks_adap_mock.get.assert_called() self.ks_adap_mock.put.assert_not_called() @@ -1061,7 +1073,8 @@ project_id = uuids.project_id user_id = uuids.user_id res = self.client.remove_provider_from_instance_allocation( - consumer_uuid, uuids.source, user_id, project_id, mock.Mock()) + self.context, consumer_uuid, uuids.source, user_id, project_id, + mock.Mock()) self.ks_adap_mock.get.assert_called() self.ks_adap_mock.put.assert_not_called() @@ -1989,11 +2002,12 @@ self.assertEqual(set(), self.client._provider_tree.data(uuids.rp).aggregates) - self.client.set_aggregates_for_provider(uuids.rp, aggs) + self.client.set_aggregates_for_provider(self.context, uuids.rp, aggs) self.ks_adap_mock.put.assert_called_once_with( '/resource_providers/%s/aggregates' % uuids.rp, json=aggs, - raise_exc=False, microversion='1.1', headers={}) + raise_exc=False, microversion='1.1', + headers={'X-Openstack-Request-Id': self.context.global_id}) # Cache was updated self.assertEqual(set(aggs), self.client._provider_tree.data(uuids.rp).aggregates) @@ -2002,7 +2016,8 @@ self.ks_adap_mock.put.return_value = mock.Mock(status_code=503) self.assertRaises( exception.ResourceProviderUpdateFailed, - self.client.set_aggregates_for_provider, uuids.rp, []) + self.client.set_aggregates_for_provider, + self.context, uuids.rp, []) class TestAggregates(SchedulerReportClientTestCase): @@ -2107,18 +2122,20 @@ # Request all traits; custom traits need to be created get_mock.json.return_value = {'traits': standard_traits} - self.client._ensure_traits(all_traits) + self.client._ensure_traits(self.context, all_traits) self.ks_adap_mock.get.assert_called_once_with( '/traits?name=in:' + ','.join(all_traits), **self.trait_api_kwargs) self.ks_adap_mock.put.assert_has_calls( - [mock.call('/traits/' + trait, headers={}, **self.trait_api_kwargs) + [mock.call('/traits/' + trait, + headers={'X-Openstack-Request-Id': self.context.global_id}, + **self.trait_api_kwargs) for trait in custom_traits], any_order=True) self.ks_adap_mock.reset_mock() # Request standard traits; no traits need to be created get_mock.json.return_value = {'traits': standard_traits} - self.client._ensure_traits(standard_traits) + self.client._ensure_traits(self.context, standard_traits) self.ks_adap_mock.get.assert_called_once_with( '/traits?name=in:' + ','.join(standard_traits), **self.trait_api_kwargs) @@ -2127,8 +2144,8 @@ self.ks_adap_mock.reset_mock() # Request no traits - short circuit - self.client._ensure_traits(None) - self.client._ensure_traits([]) + self.client._ensure_traits(self.context, None) + self.client._ensure_traits(self.context, []) self.ks_adap_mock.get.assert_not_called() self.ks_adap_mock.put.assert_not_called() @@ -2136,7 +2153,8 @@ self.ks_adap_mock.get.return_value = mock.Mock(status_code=400) self.assertRaises(exception.TraitRetrievalFailed, - self.client._ensure_traits, ['FOO']) + self.client._ensure_traits, + self.context, ['FOO']) self.ks_adap_mock.get.assert_called_once_with( 
'/traits?name=in:FOO', **self.trait_api_kwargs) @@ -2151,12 +2169,15 @@ self.ks_adap_mock.put.return_value = put_mock self.assertRaises(exception.TraitCreationFailed, - self.client._ensure_traits, ['FOO']) + self.client._ensure_traits, + self.context, ['FOO']) self.ks_adap_mock.get.assert_called_once_with( '/traits?name=in:FOO', **self.trait_api_kwargs) self.ks_adap_mock.put.assert_called_once_with( - '/traits/FOO', headers={}, **self.trait_api_kwargs) + '/traits/FOO', + headers={'X-Openstack-Request-Id': self.context.global_id}, + **self.trait_api_kwargs) def test_set_traits_for_provider(self): traits = ['HW_NIC_OFFLOAD_UCS', 'HW_NIC_OFFLOAD_RDMA'] @@ -2176,7 +2197,7 @@ self.ks_adap_mock.put.return_value = put_mock # Invoke - self.client.set_traits_for_provider(uuids.rp, traits) + self.client.set_traits_for_provider(self.context, uuids.rp, traits) # Verify API calls self.ks_adap_mock.get.assert_called_once_with( @@ -2184,7 +2205,8 @@ self.ks_adap_mock.put.assert_called_once_with( '/resource_providers/%s/traits' % uuids.rp, json={'traits': traits, 'resource_provider_generation': 0}, - headers={}, **self.trait_api_kwargs) + headers={'X-Openstack-Request-Id': self.context.global_id}, + **self.trait_api_kwargs) # And ensure the provider tree cache was updated appropriately self.assertFalse( @@ -2205,7 +2227,8 @@ get_mock.status_code = 400 self.assertRaises( exception.TraitRetrievalFailed, - self.client.set_traits_for_provider, uuids.rp, traits) + self.client.set_traits_for_provider, + self.context, uuids.rp, traits) self.ks_adap_mock.put.assert_not_called() get_mock.status_code = 200 @@ -2215,13 +2238,15 @@ self.ks_adap_mock.put.return_value = mock.Mock(status_code=409) self.assertRaises( exception.ResourceProviderUpdateConflict, - self.client.set_traits_for_provider, uuids.rp, traits) + self.client.set_traits_for_provider, + self.context, uuids.rp, traits) # Other error self.ks_adap_mock.put.return_value = mock.Mock(status_code=503) self.assertRaises( exception.ResourceProviderUpdateFailed, - self.client.set_traits_for_provider, uuids.rp, traits) + self.client.set_traits_for_provider, + self.context, uuids.rp, traits) class TestAssociations(SchedulerReportClientTestCase): @@ -3512,7 +3537,8 @@ self.client.update_instance_allocation(self.context, cn, inst, 1) mock_put.assert_called_once_with( '/allocations/%s' % inst.uuid, - expected, version='1.8') + expected, version='1.8', + global_request_id=self.context.global_id) self.assertTrue(mock_get.called) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
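Editor's note: every hunk in the report-client test module above asserts the same new behaviour: placement requests now carry the caller's global request ID, either as a global_request_id keyword on put() or as an X-Openstack-Request-Id header on the keystoneauth adapter call. Here is a minimal sketch of that propagation, assuming a requests-style session; PLACEMENT_URL and put_provider_traits are illustrative names, not nova APIs.

# Illustrative only: attach the context's global request ID, when present,
# to a placement PUT so the request can be traced across services.
import requests

PLACEMENT_URL = 'http://placement.example.org'  # assumed endpoint


def put_provider_traits(session, context, rp_uuid, traits, generation):
    headers = {}
    if getattr(context, 'global_id', None):
        headers['X-Openstack-Request-Id'] = context.global_id
    return session.put(
        '%s/resource_providers/%s/traits' % (PLACEMENT_URL, rp_uuid),
        json={'traits': traits, 'resource_provider_generation': generation},
        headers=headers)


# Usage: put_provider_traits(requests.Session(), context, rp_uuid,
#                            ['CUSTOM_PHYSNET_1'], generation=0)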
diff -Nru nova-17.0.0/nova/tests/unit/test_context.py nova-17.0.1/nova/tests/unit/test_context.py --- nova-17.0.0/nova/tests/unit/test_context.py 2018-02-28 11:32:01.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/test_context.py 2018-03-07 23:31:42.000000000 +0000 @@ -103,6 +103,7 @@ {u'type': u's3', u'name': u's3'}, {u'type': u'image', u'name': u'glance'}, {u'type': u'volumev3', u'name': u'cinderv3'}, + {u'type': u'network', u'name': u'neutron'}, {u'type': u'ec2', u'name': u'ec2'}, {u'type': u'object-store', u'name': u'swift'}, {u'type': u'identity', u'name': u'keystone'}, @@ -112,6 +113,7 @@ volume_catalog = [{u'type': u'image', u'name': u'glance'}, {u'type': u'volumev3', u'name': u'cinderv3'}, + {u'type': u'network', u'name': u'neutron'}, {u'type': u'block-storage', u'name': u'cinder'}] ctxt = context.RequestContext('111', '222', service_catalog=service_catalog) diff -Nru nova-17.0.0/nova/tests/unit/virt/hyperv/test_driver.py nova-17.0.1/nova/tests/unit/virt/hyperv/test_driver.py --- nova-17.0.0/nova/tests/unit/virt/hyperv/test_driver.py 2018-02-28 11:32:01.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/virt/hyperv/test_driver.py 2018-03-07 23:31:34.000000000 +0000 @@ -204,8 +204,8 @@ def test_detach_volume(self): mock_instance = fake_instance.fake_instance_obj(self.context) self.driver.detach_volume( - mock.sentinel.connection_info, mock_instance, - mock.sentinel.mountpoint, mock.sentinel.encryption) + mock.sentinel.context, mock.sentinel.connection_info, + mock_instance, mock.sentinel.mountpoint, mock.sentinel.encryption) self.driver._volumeops.detach_volume.assert_called_once_with( mock.sentinel.connection_info, diff -Nru nova-17.0.0/nova/tests/unit/virt/libvirt/test_driver.py nova-17.0.1/nova/tests/unit/virt/libvirt/test_driver.py --- nova-17.0.0/nova/tests/unit/virt/libvirt/test_driver.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/virt/libvirt/test_driver.py 2018-03-07 23:31:42.000000000 +0000 @@ -6745,6 +6745,84 @@ mock_encryptor.detach_volume.called_once_with(self.context, **encryption) + @mock.patch.object(libvirt_driver.LibvirtDriver, '_detach_encryptor') + @mock.patch('nova.objects.InstanceList.get_uuids_by_host') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver') + @mock.patch('nova.volume.cinder.API.get') + def test_disconnect_multiattach_single_connection( + self, mock_volume_get, mock_get_volume_driver, + mock_get_instances, mock_detach_encryptor): + + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + mock_volume_driver = mock.MagicMock( + spec=volume_drivers.LibvirtBaseVolumeDriver) + mock_get_volume_driver.return_value = mock_volume_driver + + attachments = ( + [('70ab645f-6ffc-406a-b3d2-5007a0c01b82', + {'mountpoint': u'/dev/vdb', + 'attachment_id': u'9402c249-99df-4f72-89e7-fd611493ee5d'}), + ('00803490-f768-4049-aa7d-151f54e6311e', + {'mountpoint': u'/dev/vdb', + 'attachment_id': u'd6128a7b-19c8-4a3e-8036-011396df95ac'})]) + + mock_volume_get.return_value = ( + {'attachments': OrderedDict(attachments), 'multiattach': True, + 'id': 'd30559cf-f092-4693-8589-0d0a1e7d9b1f'}) + + fake_connection_info = { + 'multiattach': True, + 'volume_id': 'd30559cf-f092-4693-8589-0d0a1e7d9b1f'} + fake_instance_1 = fake_instance.fake_instance_obj( + self.context, + host='fake-host-1') + + mock_get_instances.return_value = ( + ['00803490-f768-4049-aa7d-151f54e6311e']) + drvr._disconnect_volume( + self.context, fake_connection_info, fake_instance_1) + mock_volume_driver.disconnect_volume.assert_called_once_with( + 
fake_connection_info, fake_instance_1) + + @mock.patch.object(libvirt_driver.LibvirtDriver, '_detach_encryptor') + @mock.patch('nova.objects.InstanceList.get_uuids_by_host') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver') + @mock.patch('nova.volume.cinder.API.get') + def test_disconnect_multiattach_multi_connection( + self, mock_volume_get, mock_get_volume_driver, + mock_get_instances, mock_detach_encryptor): + + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + mock_volume_driver = mock.MagicMock( + spec=volume_drivers.LibvirtBaseVolumeDriver) + mock_get_volume_driver.return_value = mock_volume_driver + + attachments = ( + [('70ab645f-6ffc-406a-b3d2-5007a0c01b82', + {'mountpoint': u'/dev/vdb', + 'attachment_id': u'9402c249-99df-4f72-89e7-fd611493ee5d'}), + ('00803490-f768-4049-aa7d-151f54e6311e', + {'mountpoint': u'/dev/vdb', + 'attachment_id': u'd6128a7b-19c8-4a3e-8036-011396df95ac'})]) + + mock_volume_get.return_value = ( + {'attachments': OrderedDict(attachments), 'multiattach': True, + 'id': 'd30559cf-f092-4693-8589-0d0a1e7d9b1f'}) + + fake_connection_info = { + 'multiattach': True, + 'volume_id': 'd30559cf-f092-4693-8589-0d0a1e7d9b1f'} + fake_instance_1 = fake_instance.fake_instance_obj( + self.context, + host='fake-host-1') + + mock_get_instances.return_value = ( + ['00803490-f768-4049-aa7d-151f54e6311e', + '70ab645f-6ffc-406a-b3d2-5007a0c01b82']) + drvr._disconnect_volume( + self.context, fake_connection_info, fake_instance_1) + mock_volume_driver.disconnect_volume.assert_not_called() + def test_attach_invalid_volume_type(self): self.create_fake_libvirt_mock() libvirt_driver.LibvirtDriver._conn.lookupByUUIDString \ @@ -6901,7 +6979,8 @@ for state in (power_state.RUNNING, power_state.PAUSED): mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678] mock_get_domain.return_value = mock_dom - drvr.detach_volume(connection_info, instance, '/dev/vdc') + drvr.detach_volume( + self.context, connection_info, instance, '/dev/vdc') mock_get_domain.assert_called_with(instance) mock_dom.detachDeviceFlags.assert_called_with( @@ -6911,10 +6990,12 @@ """, flags=flags) mock_disconnect_volume.assert_called_with( - None, connection_info, instance, encryption=None) + self.context, connection_info, instance, encryption=None) + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume') @mock.patch('nova.virt.libvirt.host.Host._get_domain') - def test_detach_volume_disk_not_found(self, mock_get_domain): + def test_detach_volume_disk_not_found(self, mock_get_domain, + mock_disconnect_volume): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) mock_xml_without_disk = """ @@ -6930,10 +7011,42 @@ mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2, 1234, 5678] mock_get_domain.return_value = mock_dom - self.assertRaises(exception.DiskNotFound, drvr.detach_volume, - connection_info, instance, '/dev/vdc') + + drvr.detach_volume( + self.context, connection_info, instance, '/dev/vdc') mock_get_domain.assert_called_once_with(instance) + mock_disconnect_volume.assert_called_once_with( + self.context, connection_info, instance, encryption=None) + + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume') + @mock.patch('nova.virt.libvirt.host.Host._get_domain') + def test_detach_volume_disk_not_found_encryption(self, mock_get_domain, + mock_disconnect_volume, + mock_get_encryptor): + drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = objects.Instance(**self.test_instance) + mock_xml_without_disk = """ + + +""" + mock_dom = mock.MagicMock(return_value=mock_xml_without_disk) + encryption = mock.MagicMock() + + connection_info = {"driver_volume_type": "fake", + "data": {"device_path": "/fake", + "access_mode": "rw"}} + + mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2, 1234, + 5678] + mock_get_domain.return_value = mock_dom + + drvr.detach_volume(self.context, connection_info, instance, + '/dev/vdc', encryption) + + mock_disconnect_volume.assert_called_once_with( + self.context, connection_info, instance, encryption=encryption) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor') @@ -6965,8 +7078,9 @@ "data": {"device_path": "/fake", "access_mode": "rw"}} encryption = {"provider": "NoOpEncryptor"} - drvr.detach_volume(connection_info, instance, '/dev/vdc', - encryption=encryption) + drvr.detach_volume( + self.context, connection_info, instance, '/dev/vdc', + encryption=encryption) mock_order.assert_has_calls([ mock.call.detach_volume(), @@ -15093,10 +15207,11 @@ mock.patch.object(drvr, '_disconnect_volume') ) as (_get_domain, _disconnect_volume): connection_info = {'driver_volume_type': 'fake'} - drvr.detach_volume(connection_info, instance, '/dev/sda') + drvr.detach_volume( + self.context, connection_info, instance, '/dev/sda') _get_domain.assert_called_once_with(instance) - _disconnect_volume.assert_called_once_with(None, connection_info, - instance, encryption=None) + _disconnect_volume.assert_called_once_with( + self.context, connection_info, instance, encryption=None) def _test_attach_detach_interface_get_config(self, method_name): """Tests that the get_config() method is properly called in diff -Nru nova-17.0.0/nova/tests/unit/virt/test_block_device.py nova-17.0.1/nova/tests/unit/virt/test_block_device.py --- nova-17.0.0/nova/tests/unit/virt/test_block_device.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/virt/test_block_device.py 2018-03-07 23:31:42.000000000 +0000 @@ -555,7 +555,7 @@ test.TestingException) if driver_attach: self.virt_driver.detach_volume( - expected_conn_info, instance, + self.context, expected_conn_info, instance, bdm_dict['device_name'], encryption=enc_data).AndReturn(None) self.volume_api.terminate_connection( diff -Nru nova-17.0.0/nova/tests/unit/virt/test_virt_drivers.py nova-17.0.1/nova/tests/unit/virt/test_virt_drivers.py --- nova-17.0.0/nova/tests/unit/virt/test_virt_drivers.py 2018-02-28 11:32:01.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/virt/test_virt_drivers.py 2018-03-07 23:31:42.000000000 +0000 @@ -481,7 +481,8 @@ self.connection.attach_volume(None, connection_info, instance_ref, '/dev/sda')) self.assertIsNone( - self.connection.detach_volume(connection_info, instance_ref, + self.connection.detach_volume(mock.sentinel.context, + connection_info, instance_ref, '/dev/sda')) @catch_notimplementederror @@ -542,7 +543,8 @@ driver_block_device.DriverVolumeBlockDevice, 'save'): self.connection.power_on( self.ctxt, instance_ref, network_info, bdm) - self.connection.detach_volume(connection_info, + self.connection.detach_volume(mock.sentinel.context, + connection_info, instance_ref, '/dev/sda') diff -Nru nova-17.0.0/nova/tests/unit/virt/vmwareapi/test_driver_api.py nova-17.0.1/nova/tests/unit/virt/vmwareapi/test_driver_api.py --- 
nova-17.0.0/nova/tests/unit/virt/vmwareapi/test_driver_api.py 2018-02-28 11:32:01.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/virt/vmwareapi/test_driver_api.py 2018-03-07 23:31:34.000000000 +0000 @@ -1750,7 +1750,8 @@ self._create_vm() connection_info = self._test_vmdk_connection_info('vmdk') mount_point = '/dev/vdc' - self.conn.detach_volume(connection_info, self.instance, mount_point, + self.conn.detach_volume(mock.sentinel.context, connection_info, + self.instance, mount_point, encryption=None) mock_detach_volume_vmdk.assert_called_once_with(connection_info, self.instance) @@ -1797,7 +1798,8 @@ with mock.patch.object(volumeops.VMwareVolumeOps, 'detach_volume') as detach_volume: - self.conn.detach_volume(connection_info, self.instance, + self.conn.detach_volume(mock.sentinel.context, connection_info, + self.instance, '/dev/vdc', encryption=None) detach_volume.assert_called_once_with(connection_info, self.instance) @@ -1819,7 +1821,8 @@ self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') mount_point = '/dev/vdc' - self.conn.detach_volume(connection_info, self.instance, mount_point, + self.conn.detach_volume(mock.sentinel.context, connection_info, + self.instance, mount_point, encryption=None) mock_detach_volume_iscsi.assert_called_once_with(connection_info, self.instance) @@ -1903,7 +1906,8 @@ device = 'fake_device' mock_get_rdm_disk.return_value = device - self.conn.detach_volume(connection_info, self.instance, mount_point, + self.conn.detach_volume(mock.sentinel.context, connection_info, + self.instance, mount_point, encryption=None) mock_iscsi_get_target.assert_called_once_with(connection_info['data']) diff -Nru nova-17.0.0/nova/tests/unit/volume/test_cinder.py nova-17.0.1/nova/tests/unit/volume/test_cinder.py --- nova-17.0.0/nova/tests/unit/volume/test_cinder.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/tests/unit/volume/test_cinder.py 2018-03-07 23:31:34.000000000 +0000 @@ -15,6 +15,7 @@ from cinderclient import api_versions as cinder_api_versions from cinderclient import exceptions as cinder_exception +from cinderclient.v2 import limits as cinder_limits from keystoneauth1 import loading as ks_loading from keystoneclient import exceptions as keystone_exception import mock @@ -753,6 +754,36 @@ self.api.update, self.ctx, '', '') @mock.patch('nova.volume.cinder.cinderclient') + def test_get_absolute_limits_forbidden(self, cinderclient): + """Tests to make sure we gracefully handle a Forbidden error raised + from python-cinderclient when getting limits. 
+ """ + cinderclient.return_value.limits.get.side_effect = ( + cinder_exception.Forbidden(403)) + self.assertRaises( + exception.Forbidden, self.api.get_absolute_limits, self.ctx) + + @mock.patch('nova.volume.cinder.cinderclient') + def test_get_absolute_limits(self, cinderclient): + """Tests the happy path of getting the absolute limits.""" + expected_limits = { + "totalSnapshotsUsed": 0, + "maxTotalBackups": 10, + "maxTotalVolumeGigabytes": 1000, + "maxTotalSnapshots": 10, + "maxTotalBackupGigabytes": 1000, + "totalBackupGigabytesUsed": 0, + "maxTotalVolumes": 10, + "totalVolumesUsed": 0, + "totalBackupsUsed": 0, + "totalGigabytesUsed": 0 + } + limits_obj = cinder_limits.Limits(None, {'absolute': expected_limits}) + cinderclient.return_value.limits.get.return_value = limits_obj + actual_limits = self.api.get_absolute_limits(self.ctx) + self.assertDictEqual(expected_limits, actual_limits) + + @mock.patch('nova.volume.cinder.cinderclient') def test_get_snapshot(self, mock_cinderclient): snapshot_id = 'snapshot_id' mock_volume_snapshots = mock.MagicMock() diff -Nru nova-17.0.0/nova/virt/block_device.py nova-17.0.1/nova/virt/block_device.py --- nova-17.0.0/nova/virt/block_device.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/virt/block_device.py 2018-03-07 23:31:42.000000000 +0000 @@ -296,7 +296,7 @@ encryption = encryptors.get_encryption_metadata(context, volume_api, volume_id, connection_info) - virt_driver.detach_volume(connection_info, instance, mp, + virt_driver.detach_volume(context, connection_info, instance, mp, encryption=encryption) except exception.DiskNotFound as err: LOG.warning('Ignoring DiskNotFound exception while ' diff -Nru nova-17.0.0/nova/virt/driver.py nova-17.0.1/nova/virt/driver.py --- nova-17.0.0/nova/virt/driver.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/virt/driver.py 2018-03-07 23:31:42.000000000 +0000 @@ -466,7 +466,7 @@ """Attach the disk to the instance at mountpoint using info.""" raise NotImplementedError() - def detach_volume(self, connection_info, instance, mountpoint, + def detach_volume(self, context, connection_info, instance, mountpoint, encryption=None): """Detach the disk attached to the instance.""" raise NotImplementedError() diff -Nru nova-17.0.0/nova/virt/fake.py nova-17.0.1/nova/virt/fake.py --- nova-17.0.0/nova/virt/fake.py 2018-02-28 11:32:01.000000000 +0000 +++ nova-17.0.1/nova/virt/fake.py 2018-03-07 23:31:34.000000000 +0000 @@ -309,7 +309,7 @@ self._mounts[instance_name] = {} self._mounts[instance_name][mountpoint] = connection_info - def detach_volume(self, connection_info, instance, mountpoint, + def detach_volume(self, context, connection_info, instance, mountpoint, encryption=None): """Detach the disk attached to the instance.""" try: diff -Nru nova-17.0.0/nova/virt/hyperv/driver.py nova-17.0.1/nova/virt/hyperv/driver.py --- nova-17.0.0/nova/virt/hyperv/driver.py 2018-02-28 11:32:01.000000000 +0000 +++ nova-17.0.1/nova/virt/hyperv/driver.py 2018-03-07 23:31:34.000000000 +0000 @@ -183,7 +183,7 @@ return self._volumeops.attach_volume(connection_info, instance.name) - def detach_volume(self, connection_info, instance, mountpoint, + def detach_volume(self, context, connection_info, instance, mountpoint, encryption=None): return self._volumeops.detach_volume(connection_info, instance.name) diff -Nru nova-17.0.0/nova/virt/ironic/driver.py nova-17.0.1/nova/virt/ironic/driver.py --- nova-17.0.0/nova/virt/ironic/driver.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/virt/ironic/driver.py 2018-03-07 
23:31:42.000000000 +0000 @@ -76,7 +76,7 @@ _NODE_FIELDS = ('uuid', 'power_state', 'target_power_state', 'provision_state', 'target_provision_state', 'last_error', 'maintenance', - 'properties', 'instance_uuid', 'traits') + 'properties', 'instance_uuid', 'traits', 'resource_class') # Console state checking interval in seconds _CONSOLE_STATE_CHECKING_INTERVAL = 1 diff -Nru nova-17.0.0/nova/virt/libvirt/driver.py nova-17.0.1/nova/virt/libvirt/driver.py --- nova-17.0.0/nova/virt/libvirt/driver.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/virt/libvirt/driver.py 2018-03-07 23:31:42.000000000 +0000 @@ -1237,11 +1237,53 @@ self._attach_encryptor(context, connection_info, encryption, allow_native_luks) + def _should_disconnect_target(self, context, connection_info, instance): + connection_count = 0 + + # NOTE(jdg): Multiattach is a special case (not to be confused + # with shared_targets). With multiattach we may have a single volume + # attached multiple times to *this* compute node (ie Server-1 and + # Server-2). So, if we receive a call to delete the attachment for + # Server-1 we need to take special care to make sure that the Volume + # isn't also attached to another Server on this Node. Otherwise we + # will indiscriminantly delete the connection for all Server and that's + # no good. So check if it's attached multiple times on this node + # if it is we skip the call to brick to delete the connection. + if connection_info.get('multiattach', False): + volume = self._volume_api.get( + context, + driver_block_device.get_volume_id(connection_info)) + attachments = volume.get('attachments', {}) + if len(attachments) > 1: + # First we get a list of all Server UUID's associated with + # this Host (Compute Node). We're going to use this to + # determine if the Volume being detached is also in-use by + # another Server on this Host, ie just check to see if more + # than one attachment.server_id for this volume is in our + # list of Server UUID's for this Host + servers_this_host = objects.InstanceList.get_uuids_by_host( + context, instance.host) + + # NOTE(jdg): nova.volume.cinder translates the + # volume['attachments'] response into a dict which includes + # the Server UUID as the key, so we're using that + # here to check against our server_this_host list + for server_id, data in attachments.items(): + if server_id in servers_this_host: + connection_count += 1 + return (False if connection_count > 1 else True) + def _disconnect_volume(self, context, connection_info, instance, encryption=None): self._detach_encryptor(context, connection_info, encryption=encryption) - vol_driver = self._get_volume_driver(connection_info) - vol_driver.disconnect_volume(connection_info, instance) + if self._should_disconnect_target(context, connection_info, instance): + vol_driver = self._get_volume_driver(connection_info) + vol_driver.disconnect_volume(connection_info, instance) + else: + LOG.info("Detected multiple connections on this host for volume: " + "%s, skipping target disconnect.", + driver_block_device.get_volume_id(connection_info), + instance=instance) def _extend_volume(self, connection_info, instance): vol_driver = self._get_volume_driver(connection_info) @@ -1539,7 +1581,7 @@ block_device_info=block_device_info) return xml - def detach_volume(self, connection_info, instance, mountpoint, + def detach_volume(self, context, connection_info, instance, mountpoint, encryption=None): disk_dev = mountpoint.rpartition("/")[2] try: @@ -1563,7 +1605,11 @@ LOG.warning("During detach_volume, instance 
disappeared.", instance=instance) except exception.DeviceNotFound: - raise exception.DiskNotFound(location=disk_dev) + # We should still try to disconnect logical device from + # host, an error might have happened during a previous + # call. + LOG.info("Device %s not found in instance.", + disk_dev, instance=instance) except libvirt.libvirtError as ex: # NOTE(vish): This is called to cleanup volumes after live # migration, so we should still disconnect even if @@ -1576,11 +1622,7 @@ else: raise - # NOTE(lyarwood): We can provide None as the request context here as we - # already have the encryption metadata dict from the compute layer. - # This avoids the need to add the request context to the signature of - # detach_volume requiring changes across all drivers. - self._disconnect_volume(None, connection_info, instance, + self._disconnect_volume(context, connection_info, instance, encryption=encryption) def extend_volume(self, connection_info, instance): diff -Nru nova-17.0.0/nova/virt/vmwareapi/driver.py nova-17.0.1/nova/virt/vmwareapi/driver.py --- nova-17.0.0/nova/virt/vmwareapi/driver.py 2018-02-28 11:32:01.000000000 +0000 +++ nova-17.0.1/nova/virt/vmwareapi/driver.py 2018-03-07 23:31:34.000000000 +0000 @@ -396,7 +396,7 @@ """Attach volume storage to VM instance.""" return self._volumeops.attach_volume(connection_info, instance) - def detach_volume(self, connection_info, instance, mountpoint, + def detach_volume(self, context, connection_info, instance, mountpoint, encryption=None): """Detach volume storage to VM instance.""" return self._volumeops.detach_volume(connection_info, instance) diff -Nru nova-17.0.0/nova/virt/xenapi/driver.py nova-17.0.1/nova/virt/xenapi/driver.py --- nova-17.0.0/nova/virt/xenapi/driver.py 2018-02-28 11:32:01.000000000 +0000 +++ nova-17.0.1/nova/virt/xenapi/driver.py 2018-03-07 23:31:34.000000000 +0000 @@ -445,7 +445,7 @@ instance['name'], mountpoint) - def detach_volume(self, connection_info, instance, mountpoint, + def detach_volume(self, context, connection_info, instance, mountpoint, encryption=None): """Detach volume storage from VM instance.""" self._volumeops.detach_volume(connection_info, diff -Nru nova-17.0.0/nova/volume/cinder.py nova-17.0.1/nova/volume/cinder.py --- nova-17.0.0/nova/volume/cinder.py 2018-02-28 11:32:10.000000000 +0000 +++ nova-17.0.1/nova/volume/cinder.py 2018-03-07 23:31:34.000000000 +0000 @@ -543,6 +543,23 @@ def update(self, context, volume_id, fields): raise NotImplementedError() + @translate_cinder_exception + def get_absolute_limits(self, context): + """Returns quota limit and usage information for the given tenant + + See the /v3/{project_id}/limits API reference for details. + + :param context: The nova RequestContext for the user request. Note + that the limit information returned from Cinder is specific to + the project_id within this context. + :returns: dict of absolute limits + """ + # cinderclient returns a generator of AbsoluteLimit objects, so iterate + # over the generator and return a dictionary which is easier for the + # nova client-side code to handle. 
+ limits = cinderclient(context).limits.get().absolute + return {limit.name: limit.value for limit in limits} + @translate_snapshot_exception def get_snapshot(self, context, snapshot_id): item = cinderclient(context).volume_snapshots.get(snapshot_id) diff -Nru nova-17.0.0/nova.egg-info/pbr.json nova-17.0.1/nova.egg-info/pbr.json --- nova-17.0.0/nova.egg-info/pbr.json 2018-02-28 11:35:39.000000000 +0000 +++ nova-17.0.1/nova.egg-info/pbr.json 2018-03-07 23:35:18.000000000 +0000 @@ -1 +1 @@ -{"git_version": "a4a53bf", "is_release": true} \ No newline at end of file +{"git_version": "9d8de62", "is_release": true} \ No newline at end of file diff -Nru nova-17.0.0/nova.egg-info/PKG-INFO nova-17.0.1/nova.egg-info/PKG-INFO --- nova-17.0.0/nova.egg-info/PKG-INFO 2018-02-28 11:35:39.000000000 +0000 +++ nova-17.0.1/nova.egg-info/PKG-INFO 2018-03-07 23:35:18.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: nova -Version: 17.0.0 +Version: 17.0.1 Summary: Cloud computing fabric controller Home-page: https://docs.openstack.org/nova/latest/ Author: OpenStack diff -Nru nova-17.0.0/PKG-INFO nova-17.0.1/PKG-INFO --- nova-17.0.0/PKG-INFO 2018-02-28 11:35:41.000000000 +0000 +++ nova-17.0.1/PKG-INFO 2018-03-07 23:35:22.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: nova -Version: 17.0.0 +Version: 17.0.1 Summary: Cloud computing fabric controller Home-page: https://docs.openstack.org/nova/latest/ Author: OpenStack
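Editor's note: two behavioural changes in this point release are easy to lose in the test churn above, so here are hedged sketches of each. First, the libvirt multiattach guard (_should_disconnect_target): the host-level volume connection is only torn down if no other server on the same host still holds an attachment. Below is a standalone re-statement of that counting logic, using plain dicts in place of the Cinder volume and the per-host instance list.

# Illustrative re-statement of the multiattach guard: keep the host-level
# connection if another server on this host is still attached to the volume.
def should_disconnect_target(volume, servers_on_this_host):
    if not volume.get('multiattach', False):
        return True
    attachments = volume.get('attachments', {})
    if len(attachments) <= 1:
        return True
    # attachments is keyed by server UUID, matching what nova.volume.cinder
    # returns for volume['attachments'].
    local_connections = sum(
        1 for server_id in attachments if server_id in servers_on_this_host)
    return local_connections <= 1


# The volume is attached to two servers that both live on this host, so the
# target must not be disconnected yet.
volume = {'multiattach': True,
          'attachments': {'server-1': {'attachment_id': 'a1'},
                          'server-2': {'attachment_id': 'a2'}}}
print(should_disconnect_target(volume, ['server-1', 'server-2']))  # False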
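Second, the new Cinder helper get_absolute_limits() flattens the limits response into a plain dict; the keys are the ones asserted in test_cinder.py above (maxTotalSnapshots, totalSnapshotsUsed, and so on), and the unit tests treat -1 as unlimited. The following is a hedged sketch of the kind of snapshot headroom check those keys enable; the limits dict here is hand-written, not a live Cinder response.

# Illustrative headroom check against Cinder absolute limits. The dict shape
# mirrors the keys asserted in the tests above; -1 means unlimited quota.
def snapshot_quota_headroom_ok(limits, requested_snapshots):
    max_snapshots = limits['maxTotalSnapshots']
    used = limits['totalSnapshotsUsed']
    if max_snapshots == -1:
        return True  # unlimited quota, skip the up-front check
    return used + requested_snapshots <= max_snapshots


limits = {'maxTotalSnapshots': 10, 'totalSnapshotsUsed': 9}
print(snapshot_quota_headroom_ok(limits, 1))  # True: lands exactly on the limit
print(snapshot_quota_headroom_ok(limits, 2))  # False: would exceed the quota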