Revision 7107282a
src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb

--- a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
@@ -52 +52 @@
     ############################################################################
     ############################################################################

-    # clone from template attrs
-    attr_accessor :vi_client
-    attr_accessor :vm_prefix
+    # VM XMLElement
     attr_accessor :one_item
-    # attr_accessor :dfile
+
+    # OpenNebula host
     attr_accessor :host

-    # (used in clone_vm)
+    # Target Datastore VMware reference (must be defined when VM is new)
+    attr_accessor :target_ds_ref
+
+    # Cached vi_client
+    # @return VIClient instance
+    def vi_client
+        if !@vi_client
+            @vi_client = @item._connection
+        end
+        @vi_client
+    end
+
+    # vi_client setter (must be used when the VM does not exist in vCenter)
+    def vi_client=(vi_client)
+        @vi_client = vi_client
+    end
+
+    # Cached one_item
+    def one_item
+        # TODO: fetch one_item if it doesn't exist
+        @one_item
+    end
+
+    # one_item setter (must be used when the VM does not exist in vCenter)
+    def one_item=(one_item)
+        @one_item = one_item
+    end
+
+    # Cached cluster
     # @return ClusterComputeResource
     def cluster
         if @cluster.nil?
-            ccr_ref = @host['TEMPLATE/VCENTER_CCR']
-            @cluster = ClusterComputeResource.new_from_ref(@vi_client, ccr_ref)
+            ccr_ref = @host['TEMPLATE/VCENTER_CCR_REF']
+            @cluster = ClusterComputeResource.new_from_ref(vi_client, ccr_ref)
         end
         @cluster
     end
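Note: the accessors added above share one memoization pattern: the first call computes and stores the object in an instance variable, and later calls return the cached copy. A minimal sketch of the same idea, with hypothetical names:

    class CachedLookup
        # Compute the value once, then reuse it on every later call.
        def cluster
            @cluster ||= expensive_cluster_lookup
        end

        private

        # Hypothetical stand-in for the vCenter round-trip.
        def expensive_cluster_lookup
            "cluster-object"
        end
    end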

-    # (used in clone_vm)
+    ############################################################################
+    ############################################################################
+
+    # @return Boolean whether the VM exists in vCenter
+    def is_new?
+        vm_id = self['config.extraConfig'].select do |o|
+            o.key == "opennebula.vm.id"
+        end.first.value rescue nil
+
+        !vm_id
+    end
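Note: is_new? relies on clone_vm stamping an opennebula.vm.id key into the VM's vCenter extraConfig (see extraconfig_vmid below), so a VM without that key was never created by OpenNebula. A standalone sketch of the same check, assuming extra_config is an array of objects exposing #key and #value as config.extraConfig does:

    # Returns true when the extraConfig carries no OpenNebula VM id.
    def new_vm?(extra_config)
        entry = extra_config.find { |o| o.key == "opennebula.vm.id" }
        entry.nil? || entry.value.nil?
    end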

+    ############################################################################
+    # Getters
+    ############################################################################
+
     # @return RbVmomi::VIM::ResourcePool
     def get_rp
         req_rp = one_item['USER_TEMPLATE/RESOURCE_POOL']

-        if @vi_client.rp_confined?
-            if req_rp && req_rp != @vi_client.rp
-                raise "Available resource pool in host [#{@vi_client.rp}]"\
+        if vi_client.rp_confined?
+            if req_rp && req_rp != vi_client.rp
+                raise "Available resource pool in host [#{vi_client.rp}]"\
                       " does not match requested resource pool"\
                       " [#{req_rp}]"
             end

-            return @vi_client.rp
+            return vi_client.rp
         else
-
             if req_rp
                 rps = cluster.resource_pools.select{|r| r._ref == req_rp }

@@ -93 +134 @@
                     return rps.first
                 end
             else
-                return cluster.item.resourcePool
+                return cluster['resourcePool']
             end
         end
     end

-    # (used in clone_vm)
     # @return RbVmomi::VIM::Datastore or nil
     def get_ds
         req_ds = one_item['USER_TEMPLATE/VCENTER_DATASTORE']

@@ -118 +158 @@
         end
     end

-    # (used in clone_vm)
     # @return Customization or nil
     def get_customization
         xpath = "USER_TEMPLATE/VCENTER_CUSTOMIZATION_SPEC"

@@ -129 +168 @@
         end

         begin
-            custom_spec = @vi_client.vim
+            custom_spec = vi_client.vim
                               .serviceContent
                               .customizationSpecManager
                               .GetCustomizationSpec(:name => customization.text)

@@ -144 +183 @@
         end
     end

-    def clone_vm
-        vm_prefix = @host['TEMPLATE/VM_PREFIX']
-        vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty?
-        vm_prefix.gsub!("$i", one_item['ID'])
-
-        vc_template_ref = one_item['USER_TEMPLATE/VCENTER_REF']
-        vc_template = RbVmomi::VIM::VirtualMachine(@vi_client.vim, vc_template_ref)
-
-        vcenter_name = vm_prefix + one_item['NAME']
+    # @return String image name
+    def get_img_name(disk)
+        if disk["PERSISTENT"] == "YES"
+            return disk["SOURCE"]
+        else
+            image_id = disk["IMAGE_ID"]
+            disk_id  = disk["DISK_ID"]
+            vm_id    = one_item['ID']

-        # Relocate spec
-        relocate_spec_params = {}
+            return "one-#{image_id}-#{vm_id}-#{disk_id}.vmdk"
+        end
+    end
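Note: a worked example of the non-persistent naming scheme above, with illustrative ids:

    image_id, vm_id, disk_id = 5, 12, 0
    "one-#{image_id}-#{vm_id}-#{disk_id}.vmdk"   # => "one-5-12-0.vmdk"

Persistent disks keep their SOURCE path instead, so the existing image file is reused as-is.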

-        relocate_spec_params[:pool] = get_rp
+    # @return VCenterDriver::Datastore datastore where the disk will live under
+    def get_effective_ds(disk)
+        if disk["PERSISTENT"] == "YES"
+            ds_ref = disk["VCENTER_DS_REF"]
+        else
+            ds_ref = @target_ds_ref

-        ds = get_ds
-        if ds
-            relocate_spec_params[:datastore] = ds
-            relocate_spec_params[:diskMoveType] = :moveChildMostDiskBacking
+            if ds_ref.nil?
+                raise "target_ds_ref must be defined on this object."
+            end
         end

-        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
-                                                        relocate_spec_params)
+        VCenterDriver::Datastore.new_from_ref(vi_client, ds_ref)
+    end

-        # Running flag - prevents spurious poweroff states in the VM
-        running_flag = [{ :key => "opennebula.vm.running", :value => "no"}]
+    # @return String vcenter name
+    def get_vcenter_name
+        vm_prefix = @host['TEMPLATE/VM_PREFIX']
+        vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty?
+        vm_prefix.gsub!("$i", one_item['ID'])

-        running_flag_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
-                                                {:extraConfig => running_flag})
+        vm_prefix + one_item['NAME']
+    end

-        clone_parameters = {
-            :location => relocate_spec,
-            :powerOn  => false,
-            :template => false,
-            :config   => running_flag_spec
-        }
+    ############################################################################
+    # Create and reconfigure VM related methods
+    ############################################################################

-        cs = get_customization
-        clone_parameters[:customization] = cs if cs
+    # When creating a new VM these instance variables must be set beforehand:
+    #   @vi_client
+    #   @one_item
+    #   @host
+    #
+    # This function creates a new VM from the @one_item XML and returns the
+    # VMware ref
+    def clone_vm
+        vcenter_name = get_vcenter_name

-        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(clone_parameters)
+        vc_template_ref = one_item['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
+        vc_template = RbVmomi::VIM::VirtualMachine(vi_client.vim, vc_template_ref)

-        # TODO storpod (L2575 vcenter_driver.rb)
+        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(spec_hash_clone)

+        vm = nil
         begin
             vm = vc_template.CloneVM_Task(
                 :folder => vc_template.parent,
@@ -217 +269 @@

             # @item is populated

-        @item = item
+        @item = vm

         reconfigure
         poweron
@@ -226 +278 @@
         return @item._ref
     end

-    ############################################################################
-    ############################################################################
-    # these have @item populated
-
-
-    # spec_hash
-    #   :device_change
-    #   :numCPUs
-    #   :memoryMB
-    #   :extraconfig
-    #     :vmid
-    #     :context
-    #     :vnc
-    #     :opennebula.hotplugged_nics
+
+    # @return clone parameters spec hash
+    def spec_hash_clone
+        # Relocate spec
+        relocate_spec_params = {}
+
+        relocate_spec_params[:pool] = get_rp
+
+        ds = get_ds
+
+        if ds
+            relocate_spec_params[:datastore] = ds
+            relocate_spec_params[:diskMoveType] = :moveChildMostDiskBacking
+        end
+
+        # TODO storpod (L2575 vcenter_driver.rb)
+
+        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
+                                                         relocate_spec_params)
+
+        # Running flag - prevents spurious poweroff states in the VM
+        running_flag = [{ :key => "opennebula.vm.running", :value => "no"}]
+
+        running_flag_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
+            { :extraConfig => running_flag }
+        )
+
+        clone_parameters = {
+            :location => relocate_spec,
+            :powerOn  => false,
+            :template => false,
+            :config   => running_flag_spec
+        }
+
+        cs = get_customization
+        clone_parameters[:customization] = cs if cs
+
+        clone_parameters
+    end
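Note: spec_hash_clone returns a plain hash that clone_vm wraps in RbVmomi::VIM.VirtualMachineCloneSpec. A sketch of the shape it produces, with placeholder values standing in for the real spec objects:

    relocate_spec     = :relocate_spec_placeholder     # a VirtualMachineRelocateSpec
    running_flag_spec = :running_flag_spec_placeholder # a VirtualMachineConfigSpec
    clone_parameters = {
        :location => relocate_spec,     # pool + optional datastore
        :powerOn  => false,             # the VM is powered on later, after reconfigure
        :template => false,
        :config   => running_flag_spec  # seeds opennebula.vm.running = "no"
    }
    # :customization is added only when VCENTER_CUSTOMIZATION_SPEC is set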
+
+
     def reconfigure
-        spec_hash = {}
-        extraconfig = []
+        extraconfig   = []
+        device_change = []

         # get vmid
-        extraconfig << spec_hash_vmid
+        extraconfig += extraconfig_vmid

         # get token
-        extraconfig << spec_hash_context
+        extraconfig += extraconfig_context

         # vnc configuration (for config_array hash)
-        extraconfig << spec_hash_vnc
-
-        # extraconfig
-        spec_hash.merge({:extraConfig => extraconfig})
+        extraconfig += extraconfig_vnc

         # device_change hash (nics)
-        spec_hash.merge(spec_hash_nics)
+        device_change += device_change_nics

         # device_change hash (disks)
-        spec_hash.merge(spec_hash_disks)
+        device_change += device_change_disks

-        binding.pry
-        #
+        num_cpus = one_item["TEMPLATE/VCPU"] || 1
+
+        spec_hash = {
+            :numCPUs     => num_cpus.to_i,
+            :memoryMB    => one_item["TEMPLATE/MEMORY"],
+            :extraConfig => extraconfig
+        }
+
+        spec_hash[:deviceChange] = device_change if !device_change.empty?
+
+        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
+
+        self.item.ReconfigVM_Task(:spec => spec).wait_for_completion
     end
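Note: reconfigure now folds every change into a single VirtualMachineConfigSpec instead of the old scattered merges. A runnable sketch of the final hash handed to ReconfigVM_Task, with illustrative values:

    device_change = []                       # filled by the device_change_* helpers
    spec_hash = {
        :numCPUs     => 2,                   # from TEMPLATE/VCPU, defaults to 1
        :memoryMB    => "2048",              # from TEMPLATE/MEMORY
        :extraConfig => [
            { :key => "opennebula.vm.id", :value => "12" }
            # ... context, token and VNC entries follow
        ]
    }
    spec_hash[:deviceChange] = device_change unless device_change.empty?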

-    def spec_hash_vmid
-        { :key => "opennebula.vm.id", :value => one_item['ID'] }
+    def extraconfig_vmid
+        [
+            { :key => "opennebula.vm.id", :value => one_item['ID'] }
+        ]
     end

-    def spec_hash_context
+    def extraconfig_context
         # TODO: migrator to 5.4 (create token.sh)
         context_text = "# Context variables generated by OpenNebula\n"
         one_item.each('TEMPLATE/CONTEXT/*') do |context_element|
@@ -293 +382 @@
         ]
     end

-    def spec_hash_vnc
+    def extraconfig_vnc
         vnc_port   = one_item["TEMPLATE/GRAPHICS/PORT"]
         vnc_listen = one_item["TEMPLATE/GRAPHICS/LISTEN"] || "0.0.0.0"
         vnc_keymap = one_item["TEMPLATE/GRAPHICS/KEYMAP"]
@@ -308 +397 @@
         conf
     end

-    def spec_hash_nics
+    def device_change_nics
+        # Final list of changes to be applied in vCenter
+        device_change = []
+
+        # List of interfaces from the OpenNebula template
         nics = []
         one_item.each("TEMPLATE/NIC") { |nic| nics << nic }

+        # Remove detached nics in poweroff
         if !is_new?
-            # TODO: review
-            nic_array = []
+            # To be included in device_change
+            detach_nic_array = []

             # Get MACs from NICs inside VM template
             one_mac_addresses = []
+            nics.each do |nic|
+                one_mac_addresses << nic["MAC"]
+            end rescue nil

-            nics.each{|nic|
-                one_mac_addresses << nic.elements["MAC"].text
-            } rescue nil
-
-            # B4897 - Get mac of NICs that were hot-plugged from vCenter extraConfig
+            # B4897 - Get mac of NICs that were hot-plugged from vCenter
+            # extraConfig
+            # Get opennebula.hotplugged_nics attribute from the vCenter object
             hotplugged_nics = []
-            extraconfig_nics = vm.config.extraConfig.select do |val|
+            extraconfig_nics = self["config.extraConfig"].select do |val|
                 val[:key] == "opennebula.hotplugged_nics"
             end

@@ -333 +428 @@
                 hotplugged_nics = extraconfig_nics[0][:value].to_s.split(";")
             end

-            vm.config.hardware.device.each{ |dv|
+            self["config.hardware.device"].each do |dv|
                 if is_nic?(dv)
-                    nics.each{|nic|
-                        if nic.elements["MAC"].text == dv.macAddress
-                            nics.delete(nic)
-                        end
-                    } rescue nil
-
-                    # B4897 - Remove detached NICs from vCenter that were unplugged in POWEROFF
-                    if !one_mac_addresses.include?(dv.macAddress) && hotplugged_nics.include?(dv.macAddress)
-                        nic_array << { :operation => :remove, :device => dv}
-                        hotplugged_nics.delete(dv.macAddress)
-                        config_array << {
-                            :key => 'opennebula.hotplugged_nics',
-                            :value => hotplugged_nics.join(";")
-                        }
+                    # nics array will contain the list of nics to be attached
+                    nics.each do |nic|
+                        if nic["MAC"] == dv.macAddress
+                            nics.delete(nic)
+                        end
+                    end
+
+                    # if the nic is in the list opennebula.hotplugged_nics and
+                    # not in the list of the OpenNebula NICs we can remove it.
+                    # B4897 - Remove detached NICs from vCenter that were
+                    # unplugged in POWEROFF
+                    if !one_mac_addresses.include?(dv.macAddress) &&
+                        hotplugged_nics.include?(dv.macAddress)
+
+                        detach_nic_array << {
+                            :operation => :remove,
+                            :device    => dv
+                        }
+
+                        hotplugged_nics.delete(dv.macAddress)
+                        config_array << {
+                            :key   => 'opennebula.hotplugged_nics',
+                            :value => hotplugged_nics.join(";")
+                        }
+                    end
+                end
+            end
+
+            device_change += detach_nic_array
+        end
+
+        return if nics.empty?
+
+        # Attach new nics (nics now contains only the interfaces not present
+        # in the VM in vCenter)
+        attach_nic_array = []
+        nics.each do |nic|
+            attach_nic_array << calculate_add_nic_spec(nic)
+        end
+
+        attach_nic_array
+    end
+
+    # Returns an array of actions to be included in :deviceChange
+    def calculate_add_nic_spec(nic)
+        mac     = nic["MAC"]
+        bridge  = nic["BRIDGE"]
+        model   = nic["MODEL"]
+        backing = nil
+
+        limit_in  = nic["INBOUND_PEAK_BW"]
+        limit_out = nic["OUTBOUND_PEAK_BW"]
+        limit     = nil
+
+        if limit_in && limit_out
+            limit = ([limit_in.to_i, limit_out.to_i].min / 1024) * 8
+        end
+
+        rsrv_in  = nic["INBOUND_AVG_BW"]
+        rsrv_out = nic["OUTBOUND_AVG_BW"]
+        rsrv     = nil
+
+        if rsrv_in || rsrv_out
+            rsrv = ([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
+        end
+
+        network = self["runtime.host.network"].select do |n|
+            n.name == bridge
+        end
+
+        if network.empty?
+            raise "Network #{bridge} not found in host #{self['runtime.host.name']}"
+        else
+            network = network.first
+        end
+
+        card_num = 1 # start in one, we want the next available id
+
+        self["config.hardware.device"].each do |dv|
+            card_num += 1 if is_nic?(dv)
+        end
+
+        nic_card = case model
+                   when "virtuale1000", "e1000"
+                       RbVmomi::VIM::VirtualE1000
+                   when "virtuale1000e", "e1000e"
+                       RbVmomi::VIM::VirtualE1000e
+                   when "virtualpcnet32", "pcnet32"
+                       RbVmomi::VIM::VirtualPCNet32
+                   when "virtualsriovethernetcard", "sriovethernetcard"
+                       RbVmomi::VIM::VirtualSriovEthernetCard
+                   when "virtualvmxnetm", "vmxnetm"
+                       RbVmomi::VIM::VirtualVmxnetm
+                   when "virtualvmxnet2", "vmnet2"
+                       RbVmomi::VIM::VirtualVmxnet2
+                   when "virtualvmxnet3", "vmxnet3"
+                       RbVmomi::VIM::VirtualVmxnet3
+                   else # If none matches, use VirtualE1000
+                       RbVmomi::VIM::VirtualE1000
                    end
+
+        if network.class == RbVmomi::VIM::Network
+            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
+                :deviceName => bridge,
+                :network    => network)
+        else
+            port = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
+                :switchUuid   =>
+                    network.config.distributedVirtualSwitch.uuid,
+                :portgroupKey => network.key)
+            backing =
+                RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
+                    :port => port)
+        end
+
+        card_spec = {
+            :key         => 0,
+            :deviceInfo  => {
+                :label   => "net" + card_num.to_s,
+                :summary => bridge
+            },
+            :backing     => backing,
+            :addressType => mac ? 'manual' : 'generated',
+            :macAddress  => mac
+        }
+
+        if (limit || rsrv) && (limit > 0)
+            ra_spec = {}
+            rsrv = limit if rsrv > limit
+            ra_spec[:limit] = limit if limit
+            ra_spec[:reservation] = rsrv if rsrv
+            ra_spec[:share] = RbVmomi::VIM.SharesInfo({
+                :level  => RbVmomi::VIM.SharesLevel("normal"),
+                :shares => 0
+            })
+            card_spec[:resourceAllocation] =
+                RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
+        end
+
+        {
+            :operation => :add,
+            :device    => nic_card.new(card_spec)
+        }
+    end
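Note: the bandwidth math above appears to convert OpenNebula's kilobyte-per-second NIC attributes into the megabit-per-second values vCenter's resource allocation fields take; treat those units as an assumption. A quick check of the arithmetic:

    limit_in  = 10240                          # 10240 KB/s = 10 MB/s
    limit_out = 20480
    ([limit_in, limit_out].min / 1024) * 8     # => 80 (Mbit/s)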

+    # Checks if a RbVmomi::VIM::VirtualDevice is a network interface
+    def is_nic?(device)
+        !device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil?
+    end
+
+    def device_change_disks
+        disks = []
+        one_item.each("TEMPLATE/DISK") { |disk| disks << disk }
+
+        if !is_new?
+            self["config.hardware.device"].each do |d|
+                if is_disk_or_cdrom?(d)
+                    disks.each do |disk|
+                        if d.backing.respond_to?(:fileName) &&
+                            get_img_name(disk) == d.backing.fileName
+
+                            disks.delete(disk)
+                        end
+                    end
                 end
+            end
+        end
+
+        return if disks.nil?
+
+        position = 0
+        attach_disk_array = []
+        disks.each do |disk|
+            attach_disk_array << calculate_add_disk_spec(disk)
+            position += 1
+        end
+
+        attach_disk_array
+    end
+
+    def calculate_add_disk_spec(disk, position=0)
+        img_name = get_img_name(disk)
+        ds       = get_effective_ds(disk)
+
+        ds_name = ds['name']
+        type    = disk["TYPE"]
+
+        # TODO: size_kb = 0 ??
+        size_kb = 0
+
+        controller, new_number = find_free_controller(position)
+
+        if type == "CDROM"
+            vmdk_backing = RbVmomi::VIM::VirtualCdromIsoBackingInfo(
+                :datastore => ds.item,
+                :fileName  => "[#{ds_name}] #{img_name}"
+            )
+
+            cd = self['config.hardware.device'].select do |hw|
+                hw.class == RbVmomi::VIM::VirtualCdrom
+            end.first
+
+            # If no CDROM drive present, we need to add it
+            if !cd
+                controller, _ = find_free_controller
+
+                device = RbVmomi::VIM::VirtualCdrom(
+                    :backing       => vmdk_backing,
+                    :key           => -1,
+                    :controllerKey => 15000,
+                    :unitNumber    => 0,
+
+                    :connectable => RbVmomi::VIM::VirtualDeviceConnectInfo(
+                        :startConnected    => true,
+                        :connected         => true,
+                        :allowGuestControl => true
+                    )
+                )
+
+                return {
+                    :operation => :add,
+                    :device    => device
+                }
+            else
+                device = RbVmomi::VIM::VirtualCdrom(
+                    backing:       vmdk_backing,
+                    key:           cd.key,
+                    controllerKey: cd.controllerKey,
+                    connectable: RbVmomi::VIM::VirtualDeviceConnectInfo(
+                        startConnected:    true,
+                        connected:         true,
+                        allowGuestControl: true
+                    )
+                )
+
+                return {
+                    :operation => :edit,
+                    :device    => device
+                }
+            end
+        else
+            # TYPE is regular disk (not CDROM)
+            vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
+                :datastore => ds.item,
+                :diskMode  => 'persistent',
+                :fileName  => "[#{ds_name}] #{img_name}"
+            )
+
+            device = RbVmomi::VIM::VirtualDisk(
+                :backing       => vmdk_backing,
+                :capacityInKB  => size_kb,
+                :controllerKey => controller.key,
+                :key           => -1,
+                :unitNumber    => new_number
+            )
+
+            {
+                :operation => :add,
+                :device    => device
             }
+        end
+    end
+
+    # Checks if a RbVmomi::VIM::VirtualDevice is a disk
+    def is_disk_or_cdrom?(device)
+        is_disk  = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
+        is_cdrom = !(device.class.ancestors.index(RbVmomi::VIM::VirtualCdrom)).nil?
+        is_disk || is_cdrom
+    end

-        device_change += nic_array
+    def find_free_controller(position=0)
+        free_scsi_controllers = []
+        available_controller  = nil
+        scsi_schema           = {}
+
+        used_numbers      = []
+        available_numbers = []
+
+        self["config.hardware.device"].each do |dev|
+            if dev.is_a? RbVmomi::VIM::VirtualSCSIController
+                if scsi_schema[dev.key].nil?
+                    scsi_schema[dev.key] = {}
+                end
+
+                used_numbers << dev.scsiCtlrUnitNumber
+                scsi_schema[dev.key][:device] = dev
+            end
+
+            next if dev.class != RbVmomi::VIM::VirtualDisk
+            used_numbers << dev.unitNumber
+        end
+
+        15.times do |scsi_id|
+            available_numbers << scsi_id if used_numbers.grep(scsi_id).length <= 0
         end

-        return if nics.nil?
+        scsi_schema.keys.each do |controller|
+            free_scsi_controllers << scsi_schema[controller][:device].deviceInfo.label
+        end
+
+        if free_scsi_controllers.length > 0
+            available_controller_label = free_scsi_controllers[0]
+        else
+            add_new_scsi(scsi_schema)
+            return find_free_controller
+        end

-        if nics
+        controller = nil

+        self['config.hardware.device'].each do |device|
+            if device.deviceInfo.label == available_controller_label
+                controller = device
+                break
+            end
         end
+
+        new_unit_number = available_numbers.sort[position]
+
+        return controller, new_unit_number
     end
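Note: find_free_controller scans the existing SCSI controllers and disk unit numbers, then returns the first controller with a free slot plus the unit number to use. A compact sketch of the slot selection, with illustrative inputs:

    used_numbers      = [0, 1, 7]           # unit numbers already taken
    available_numbers = (0...15).to_a - used_numbers
    available_numbers.sort[0]               # => 2, the next free unit number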

-    def spec_hash_disks
-        if is_new?
+    def add_new_scsi(scsi_schema)
+        controller = nil
+
+        if scsi_schema.keys.length >= 4
+            raise "Cannot add a new controller, maximum is 4."
+        end
+
+        if scsi_schema.keys.length == 0
+            scsi_key    = 0
+            scsi_number = 0
+        elsif scsi_schema.keys.length < 4
+            scsi_key    = scsi_schema.keys.sort[-1] + 1
+            scsi_number = scsi_schema[scsi_schema.keys.sort[-1]][:device].busNumber + 1
         end
+
+        controller_device = RbVmomi::VIM::VirtualLsiLogicController(
+            :key       => scsi_key,
+            :busNumber => scsi_number,
+            :sharedBus => :noSharing
+        )
+
+        device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
+            :device    => controller_device,
+            :operation => :add
+        )
+
+        vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
+            :deviceChange => [device_config_spec]
+        )
+
+        self.item.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion
+
+        self["config.hardware.device"].each do |device|
+            if device.class == RbVmomi::VIM::VirtualLsiLogicController &&
+                device.key == scsi_key
+
+                controller = device.deviceInfo.label
+            end
+        end
+
+        return controller
     end

+    ############################################################################
+    # actions
+    ############################################################################
+
     def poweron
+        self.item.PowerOnVM_Task.wait_for_completion
     end

     def set_running(state)
-    end
-
-    def one_item
-        # TODO: fetch one_item if it doesn't exist
-        @one_item
-    end
+        value = state ? "yes" : "no"

-    def is_new?
-        vm_id = vm['config.extraConfig'].select do |o|
-            o.key == "opennebula.vm.id"
-        end.first.value rescue nil
+        config_array = [
+            { :key => "opennebula.vm.running", :value => value }
+        ]
+        spec = RbVmomi::VIM.VirtualMachineConfigSpec(
+            { :extraConfig => config_array }
+        )

-        !vm_id
+        self.item.ReconfigVM_Task(:spec => spec).wait_for_completion
     end
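Note: set_running rewrites the opennebula.vm.running flag that spec_hash_clone seeds to "no", so monitoring can tell an intentional poweroff from a VM that is still being deployed. Usage sketch, assuming vm is a VCenterDriver::VirtualMachine instance:

    vm.set_running(true)    # stores opennebula.vm.running = "yes" in vCenter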

     ############################################################################
+    # monitoring
     ############################################################################

     # @param vm CachedItem (of RbVmomi::VIM::VirtualMachine)
@@ -600 +1029 @@

     end

-        ########################################################################
     # Generates a OpenNebula IM Driver valid string with the monitor info
-        ########################################################################
     def info
         return 'STATE=d' if @state == 'd'

@@ -665 +1092 @@
         }
     end

-        ########################################################################
     # Converts the VI string state to OpenNebula state convention
     # Guest states are:
     #  - poweredOff   The virtual machine is currently powered off.
     #  - poweredOn    The virtual machine is currently powered on.
     #  - suspended    The virtual machine is currently suspended.
-        ########################################################################
     def state_to_c(state)
         case state
         when 'poweredOn'
@@ -689 +1114 @@
     def self.new_from_ref(vi_client, ref)
         self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref))
     end
+
 end # class VirtualMachine

 end # module VCenterDriver