在上一篇 Openstack Nova 源碼分析 — Create instances (nova-conductor階段) 中,記錄了 nova-api 接收到創建虛擬機的請求后,在 nova-conductor 中的執行流程。最終 nova-conductor 通過調用 nova-compute 的 RPC 接口函數 compute_rpcapi.build_and_run_instance() 將創建虛擬機的請求,通過 Queue 傳遞給 nova-compute 。本篇繼續往下看看 Openstack 創建一個虛擬機時,程序流在 nova-compute 和 Virt Driver 階段的執行過程。而且本篇使用 VCDriver 作為 Virt Driver Type 。
NOTE:下面的代碼塊大多為節選。
nova-compute & vCenter
在之前的文章 VMware 接入 Openstack — 使用 Openstack 創建 vCenter 虛擬機 已經記錄過如何將 VMware 接入 Openstack ,本質是通過 nova-compute 和 vCenter 中的 Cluster 一一對應來進行管理。
緊接著,當 nova-conductor 調用了 nova-compute 的 RPC 接口后,相應接口的具體操作函數在 nova.compute.manager 中實現。
# nova/compute/manager.py
1841 def build_and_run_instance(self, context, instance, image, request_spec,
1 filter_properties, admin_password=None,
2 injected_files=None, requested_networks=None,
3 security_groups=None, block_device_mapping=None,
4 node=None, limits=None):
5
6 @utils.synchronized(instance.uuid)
7 def _locked_do_build_and_run_instance(*args, **kwargs):
8 # NOTE(danms): We grab the semaphore with the instance uuid
9 # locked because we could wait in line to build this instance
10 # for a while and we want to make sure that nothing else tries
11 # to do anything with this instance while we wait.
12 with self._build_semaphore:
13 self._do_build_and_run_instance(*args, **kwargs)
14
15 # NOTE(danms): We spawn here to return the RPC worker thread back to
16 # the pool. Since what follows could take a really long time, we don't
17 # want to tie up RPC workers.
18 utils.spawn_n(_locked_do_build_and_run_instance,
19 context, instance, image, request_spec,
20 filter_properties, admin_password, injected_files,
21 requested_networks, security_groups,
22 block_device_mapping, node, limits)
在上述的 nova.compute.manager.ComputeManager:build_and_run_instance()中調用了
_do_build_and_run_instance() 函數。
# nova/compute/manager.py
1870 def _do_build_and_run_instance(self, context, instance, image,
1 request_spec, filter_properties, admin_password, injected_files,
2 requested_networks, security_groups, block_device_mapping,
3 node=None, limits=None):
...
1901 try:
1 self._build_and_run_instance(context, instance, image,
2 decoded_files, admin_password, requested_networks,
3 security_groups, block_device_mapping, node, limits,
4 filter_properties)
5 return build_results.ACTIVE
再跳轉到
_build_and_run_instance(),這個函數非常重要。
# nova/compute/manager.py
93 from nova.virt import driver
667 class ComputeManager(manager.Manager):
677 def __init__(self, compute_driver=None, *args, **kwargs):
679 self.virtapi = ComputeVirtAPI(self)
718 self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
# 加載 Driver, 過程如下:
# nova.virt.driver:load_compute_driver()
# ==> oslo_utils.importutils:import_object_ns()
# ==> nova.utils:check_isinstance()
# Return: 一個由 (compute_driver = CONF.compute_driver) 決定的 ComputeDriver 實例化對象 driver
1979 def _build_and_run_instance(self, context, instance, image, injected_files,
1 admin_password, requested_networks, security_groups,
2 block_device_mapping, node, limits, filter_properties):
########## Get Image
1983 image_name = image.get('name')
1 self._notify_about_instance_usage(context, instance, 'create.start',
2 extra_usage_info={'image_name': image_name})
########## Update DB
1986 try:
1 rt = self._get_resource_tracker(node)
2 with rt.instance_claim(context, instance, limits):
########## Change the Instance status
1997 instance.vm_state = vm_states.BUILDING
1 instance.task_state = task_states.SPAWNING
2 # NOTE(JoshNang) This also saves the changes to the
3 # instance from _allocate_network_async, as they aren't
4 # saved in that function to prevent races.
5 instance.save(expected_task_state=
6 task_states.BLOCK_DEVICE_MAPPING)
########## Block Storage
2004 block_device_info = resources['block_device_info']
########## Network
2005 network_info = resources['network_info']
########## Create Instance
# 由 ComputeDriver 實例化對象 driver 調用 spawn() 函數來進行虛擬機的創建
2006 self.driver.spawn(context, instance, image,
1 injected_files, admin_password,
2 network_info=network_info,
3 block_device_info=block_device_info)
因為我希望使用 VCDriver 驅動類型, 所以在 Nova 的配置文件 /etc/nova/nova.conf 中設置選項
compute_driver=vmwareapi.VMwareVCDriver。
這樣的話,通過執行代碼 compute_driver = CONF.compute_driver 就可以獲得 VCDriver 的 driver 對象。當我們使用這個 driver 對象來創建虛擬機時,程序流會進入到nova/virt/vmwareapi/ 再通過調用 VMware 提供的 API 接口 (nova.virt.vmwareapi:spawn())來最終實現虛擬機的創建。
# nova/virt/vmwareapi/driver.py
46 from nova.virt.vmwareapi import vmops
125 class VMwareVCDriver(driver.ComputeDriver):
148 def __init__(self, virtapi, scheme="https"):
166 self._session = VMwareAPISession(scheme=scheme)
# 實例化了一個 vmops.VMwareVMOps 對象
186 self._vmops = vmops.VMwareVMOps(self._session,
1 virtapi,
2 self._volumeops,
3 self._cluster_ref,
4 datastore_regex=self._datastore_regex)
401 def spawn(self, context, instance, image_meta, injected_files,
1 admin_password, network_info=None, block_device_info=None):
2 """Create VM instance."""
3 image_meta = objects.ImageMeta.from_dict(image_meta)
4 self._vmops.spawn(context, instance, image_meta, injected_files,
5 admin_password, network_info, block_device_info)
# _vmops 為 vmops.VMwareVMOps 的實例化對象
跳轉到 nova.virt.vmwareapi.vmops.VMwareVMOps:spawn()
# nova/virt/vmwareapi/vmops.py
62 from nova.virt.vmwareapi import vm_util
# 這個類封裝了對 vCenter 的虛擬機的操作函數,EG. _get_base_folder/_extend_virtual_disk/_delete_datastore_file/build_virtual_machine 等
149 class VMwareVMOps(object):
1 """Management class for VM-related tasks."""
152 def __init__(self, session, virtapi, volumeops, cluster=None,
1 datastore_regex=None):
677 def spawn(self, context, instance, image_meta, injected_files,
1 admin_password, network_info, block_device_info=None):
# 這里調用了 VMwareVMOps:build_virtual_machine() 函數, 該函數的實現如下。 接收 return ==> Task Result
691 vm_ref = self.build_virtual_machine(instance,
1 image_info,
2 vi.dc_info,
3 vi.datastore,
4 network_info,
5 extra_specs,
6 metadata)
277 def build_virtual_machine(self, instance, image_info,
1 dc_info, datastore, network_info, extra_specs,
2 metadata):
3 vif_infos = vmwarevif.get_vif_info(self._session,
4 self._cluster,
5 utils.is_neutron(),
6 image_info.vif_model,
7 network_info)
8
9 if extra_specs.storage_policy:
10 profile_spec = vm_util.get_storage_profile_spec(
11 self._session, extra_specs.storage_policy)
12 else:
13 profile_spec = None
14 # Get the create vm config spec
15 client_factory = self._session.vim.client.factory
16 config_spec = vm_util.get_vm_create_spec(client_factory,
17 instance,
18 datastore.name,
19 vif_infos,
20 extra_specs,
21 image_info.os_type,
22 profile_spec=profile_spec,
23 metadata=metadata)
24 # Create the VM
# create_vm() 會返回 Task 的 Result ,并賦值給 vm_ref。 vm_ref 被用于創建虛擬機后的一系列數據更新 。
25 vm_ref = vm_util.create_vm(self._session, instance, dc_info.vmFolder,
26 config_spec, self._root_resource_pool)
27 return vm_ref
在 VMwareVMOps:build_virtual_machine() 函數中又調用了 nova.virt.vmwareapi.vm_util:get_vm_create_spec() 函數來獲取創建虛擬機所需要的參數信息。同時也調用了nova.virt.vmwareapi.vm_util:create_vm() 來 Create the VM 。所以我們先轉到 nova.virt.vmwareapi.vm_util Module 去看看具體的 Return 。
# nova/virt/vmwareapi/vm_util.py
"""
The VMware API VM utility module to build SOAP object specs.
"""
1287 def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
1 """Create VM on ESX host."""
2 LOG.debug("Creating VM on the ESX host", instance=instance)
# session 是 nova.virt.vmwareapi.driver.VMwareAPISession 的實例化對象
3 vm_create_task = session._call_method(
4 session.vim,
5 "CreateVM_Task", vm_folder,
6 config=config_spec, pool=res_pool_ref)
7 try:
8 task_info = session._wait_for_task(vm_create_task)
9 except vexc.VMwareDriverException:
10 # An invalid guestId will result in an error with no specific fault
11 # type and the generic error 'A specified parameter was not correct'.
12 # As guestId is user-editable, we try to help the user out with some
13 # additional information if we notice that guestId isn't in our list of
14 # known-good values.
15 # We don't check this in advance or do anything more than warn because
16 # we can't guarantee that our list of known-good guestIds is complete.
17 # Consequently, a value which we don't recognise may in fact be valid.
18 with excutils.save_and_reraise_exception():
19 if config_spec.guestId not in constants.VALID_OS_TYPES:
20 LOG.warning(_LW('vmware_ostype from image is not recognised: '
21 '\'%(ostype)s\'. An invalid os type may be '
22 'one cause of this instance creation failure'),
23 {'ostype': config_spec.guestId})
24 LOG.debug("Created VM on the ESX host", instance=instance)
25 return task_info.result
# 這個函數的最終返回 Task 的執行結果
到此為止關于虛擬機的創建就完成了,需要注意的是:在我們創建完虛擬機之后其實還有許多的事情是需要做的,EG. 更新數據庫/開啟虛擬機。
所以,在 nova.virt.vmwareapi.vm_util:create_vm() 中得到了創建虛擬機的 Task Result task_info.result 之后,需要使用這一 Return 來進行一系列的操作。當然,這一系列的操作會在 VM 相關任務管理類: nova.virt.vmwareapi.vmops.VMwareVMOps 中實現。
# nova/virt/vmwareapi/vmops.py
# 下面的操作,都是在成功創建了虛擬機并接收 vm_ref (vm_ref = vm_util.create_vm())返回值之后執行。
2 # Cache the vm_ref. This saves a remote call to the VC. This uses the
1 # instance uuid.
701 vm_util.vm_ref_cache_update(instance.uuid, vm_ref)
2 # Set the machine.id parameter of the instance to inject
1 # the NIC configuration inside the VM
708 if CONF.flat_injected:
1 self._set_machine_id(client_factory, instance, network_info,
2 vm_ref=vm_ref)
2 # Set the vnc configuration of the instance, vnc port starts from 5900
1 if CONF.vnc.enabled:
714 self._get_and_set_vnc_config(client_factory, instance, vm_ref)
6 if instance.image_ref:
5 self._imagecache.enlist_image(
4 image_info.image_id, vi.datastore, vi.dc_info.ref)
3 self._fetch_image_if_missing(context, vi)
2
1 if image_info.is_iso:
727 self._use_iso_image(vm_ref, vi)
1 elif image_info.linked_clone:
2 self._use_disk_image_as_linked_clone(vm_ref, vi)
3 else:
4 self._use_disk_image_as_full_clone(vm_ref, vi)
1 # Create ephemeral disks
758 self._create_ephemeral(block_device_info, instance, vm_ref,
1 vi.dc_info, vi.datastore, instance.uuid,
2 vi.ii.adapter_type)
3 self._create_swap(block_device_info, instance, vm_ref, vi.dc_info,
4 vi.datastore, instance.uuid, vi.ii.adapter_type)
764 if configdrive.required_by(instance):
1 self._configure_config_drive(
2 instance, vm_ref, vi.dc_info, vi.datastore,
3 injected_files, admin_password, network_info)
# 將虛擬機上電 (Power On)
769 vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)