Comment 1 for bug 1427060

Revision history for this message
jichenjc (jichenjc) wrote :

If we take a look at the following code, lines 3023-3024 can be moved to before the preceding call that reserves the bdm,
because they only perform a status check. I will try to submit a patch and see whether there are any
concurrency traps there.

3007 def _attach_volume(self, context, instance, volume_id, device,
3008 disk_bus, device_type):
3009 """Attach an existing volume to an existing instance.
3010
3011 This method is separated to make it possible for cells version
3012 to override it.
3013 """
3014 # NOTE(vish): This is done on the compute host because we want
3015 # to avoid a race where two devices are requested at
3016 # the same time. When db access is removed from
3017 # compute, the bdm will be created here and we will
3018 # have to make sure that they are assigned atomically.
3019 volume_bdm = self.compute_rpcapi.reserve_block_device_name(
3020 context, instance, device, volume_id, disk_bus=disk_bus,
3021 device_type=device_type)
3022 try:
3023 volume = self.volume_api.get(context, volume_id)
3024 self.volume_api.check_attach(context, volume, instance=instance)
3025 self.volume_api.reserve_volume(context, volume_id)
3026 self.compute_rpcapi.attach_volume(context, instance=instance,
3027 volume_id=volume_id, mountpoint=device, bdm=volume_bdm)
3028 except Exception:
3029 with excutils.save_and_reraise_exception():
3030 volume_bdm.destroy()
3031