Statistics
| Branch: | Tag: | Revision:

one / src / vmm_mad / remotes / lib / vcenter_driver / virtual_machine.rb @ efcd7598

History | View | Annotate | Download (108 KB)

1
module VCenterDriver
2
require 'digest'
3
class VirtualMachineFolder
    attr_accessor :item, :items

    def initialize(item)
        @item = item
        @items = {}
    end

    ########################################################################
    # Builds a hash with VM-Ref / VirtualMachine to be used as a cache
    # @return [Hash] in the form
    #   { vm_ref [Symbol] => VirtualMachine object }
    ########################################################################
    def fetch!
        VIClient.get_entities(@item, "VirtualMachine").each do |item|
            item_name = item._ref
            @items[item_name.to_sym] = VirtualMachine.new(item)
        end
    end

    # Same as fetch! but caches only items marked as templates in vCenter,
    # wrapped as Template objects.
    def fetch_templates!
        VIClient.get_entities(@item, "VirtualMachine").each do |item|
            if item.config.template
                item_name = item._ref
                @items[item_name.to_sym] = Template.new(item)
            end
        end
    end

    ########################################################################
    # Returns a VirtualMachine. Uses the cache if available.
    # @param ref [Symbol] the vcenter ref
    # @return VirtualMachine
    ########################################################################
    def get(ref)
        if !@items[ref.to_sym]
            # FIX: this folder caches VirtualMachine objects (see fetch!),
            # but the original built RbVmomi::VIM::Datastore / Datastore
            # here -- an apparent copy-paste from DatastoreFolder that mixed
            # Datastore objects into a VirtualMachine cache.
            rbvmomi_vm = RbVmomi::VIM::VirtualMachine.new(@item._connection, ref)
            @items[ref.to_sym] = VirtualMachine.new(rbvmomi_vm)
        end

        @items[ref.to_sym]
    end
end # class VirtualMachineFolder
46

    
47
class Template
48

    
49
    attr_accessor :item
50

    
51
    include Memoize
52

    
53
    # Wraps a vCenter VM/Template managed object.
    #
    # @param item      [RbVmomi::VIM::VirtualMachine, nil] managed object
    # @param vi_client [VCenterDriver::VIClient, nil] connection wrapper
    def initialize(item = nil, vi_client = nil)
        @item      = item
        @vi_client = vi_client
        @locking   = true # serialize import operations via lock/unlock
    end
58

    
59
    # Locking function. Similar to flock
60
    # Acquire the global importer mutex: an exclusive flock on a well-known
    # file under /tmp. No-op when @locking is disabled.
    def lock
        return unless @locking

        @locking_file = File.open("/tmp/vcenter-importer-lock", "w")
        @locking_file.flock(File::LOCK_EX)
    end
66

    
67
    # Unlock driver execution mutex
68
    # Release the importer mutex; closing the file also drops the flock.
    #
    # Robustness fix: guard against @locking_file being nil (unlock is
    # called from ensure blocks, and lock may have raised before opening
    # the file) -- the original raised NoMethodError on nil.close.
    def unlock
        if @locking
            @locking_file.close if @locking_file
        end
    end
73

    
74
    # Walks up the vCenter inventory tree from @item until the enclosing
    # Datacenter managed object is reached.
    #
    # @return [Datacenter] wrapper around the parent datacenter
    # @raise [RuntimeError] if the tree is exhausted before a Datacenter
    def get_dc
        ancestor = @item

        until ancestor.instance_of?(RbVmomi::VIM::Datacenter)
            ancestor = ancestor.parent
            raise "Could not find the parent Datacenter" if ancestor.nil?
        end

        Datacenter.new(ancestor)
    end
86

    
87
    def delete_template
88
        @item.Destroy_Task.wait_for_completion
89
    end
90

    
91
    # @return [String, nil] the vCenter instance UUID, or nil when the
    #   client is unset or the lookup fails for any (Standard)Error
    def get_vcenter_instance_uuid
        @vi_client.vim.serviceContent.about.instanceUuid
    rescue StandardError
        nil
    end
94

    
95
    # Clones this VM into a new VM named template_name (or "one-<name>"
    # when empty) that will serve as the template copy. On a
    # 'DuplicateName' collision the pre-existing VM with that name is
    # destroyed and the clone is retried once.
    #
    # Fixes over the original:
    #  * rescue StandardError instead of Exception (never swallow
    #    SignalException / SystemExit / NoMemoryError)
    #  * the retry's failure message now interpolates the *delete/retry*
    #    error instead of the outer DuplicateName error that was still
    #    bound to `e`
    #
    # @param template_name [String] desired clone name ("" -> derived name)
    # @return [Array(String|nil, String|nil)] error message (nil on
    #   success) and the clone's vCenter _ref (nil on failure)
    def create_template_copy(template_name)
        error = nil
        template_ref = nil

        template_name = "one-#{self['name']}" if template_name.empty?

        relocate_spec_params = {}
        relocate_spec_params[:pool] = get_rp
        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params)

        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec({
            :location => relocate_spec,
            :powerOn  => false,
            :template => false
        })

        template = nil
        begin
            template = @item.CloneVM_Task(:folder => @item.parent,
                                          :name   => template_name,
                                          :spec   => clone_spec).wait_for_completion
            template_ref = template._ref
        rescue StandardError => e
            if !e.message.start_with?('DuplicateName')
                error = "Could not create the template clone. Reason: #{e.message}"
                return error, nil
            end

            # A VM with that name already exists: locate it through the
            # datacenter's VM folder, destroy it and retry the clone once.
            dc = get_dc
            vm_folder = dc.vm_folder
            vm_folder.fetch!
            vm = vm_folder.items
                    .select{|k,v| v.item.name == template_name}
                    .values.first.item rescue nil

            if vm
                begin
                    vm.Destroy_Task.wait_for_completion
                    template = @item.CloneVM_Task(:folder => @item.parent,
                                                  :name   => template_name,
                                                  :spec   => clone_spec).wait_for_completion
                    template_ref = template._ref
                rescue StandardError => delete_error
                    error = "Could not delete the existing template, please remove it manually from vCenter. Reason: #{delete_error.message}"
                end
            else
                error = "Could not create the template clone. Reason: #{e.message}"
            end
        end

        return error, template_ref
    end
147

    
148
    # Linked Clone over existing template
149
    # Prepares this template for linked clones: every disk without a
    # snapshot is replaced in-place by a new delta disk whose backing
    # parent is the original disk. The template is temporarily converted
    # to a VirtualMachine while reconfiguring, then marked back as a
    # template.
    #
    # Fix over the original: rescue StandardError instead of Exception so
    # signals / exit are never swallowed. The duplicated success tails of
    # the if/else were also collapsed into a single return.
    #
    # @return [Array(String|nil, Boolean)] error message (nil on success)
    #   and whether linked clones can be used
    def create_delta_disks

        begin
            disks = @item['config.hardware.device'].grep(RbVmomi::VIM::VirtualDisk)
            disk_without_snapshots = disks.select { |x| x.backing.parent.nil? }
        rescue StandardError
            error = "Cannot extract existing disks on template."
            use_linked_clones = false
            return error, use_linked_clones
        end

        if !disk_without_snapshots.empty?

            begin
                # Reconfiguring disks requires a VM, not a template
                if self['config.template']
                    @item.MarkAsVirtualMachine(:pool => get_rp, :host => self['runtime.host'])
                end
            rescue StandardError => e
                @item.MarkAsTemplate()
                error = "Cannot mark the template as a VirtualMachine. Not using linked clones. Reason: #{e.message}/#{e.backtrace}"
                use_linked_clones = false
                return error, use_linked_clones
            end

            begin
                spec = {}
                spec[:deviceChange] = []

                disk_without_snapshots.each do |disk|
                    # Remove the flat disk and re-add a copy whose backing
                    # chain points at the original disk (delta disk)
                    remove_disk_spec = { :operation => :remove, :device => disk }
                    spec[:deviceChange] << remove_disk_spec

                    add_disk_spec = { :operation => :add,
                                    :fileOperation => :create,
                                    :device => disk.dup.tap { |x|
                                            x.backing = x.backing.dup
                                            x.backing.fileName = "[#{disk.backing.datastore.name}]"
                                            x.backing.parent = disk.backing
                                    }
                    }
                    spec[:deviceChange] << add_disk_spec
                end

                @item.ReconfigVM_Task(:spec => spec).wait_for_completion if !spec[:deviceChange].empty?
            rescue StandardError => e
                error = "Cannot create the delta disks on top of the template. Reason: #{e.message}."
                use_linked_clones = false
                return error, use_linked_clones
            end

            begin
                @item.MarkAsTemplate()
            rescue StandardError
                error = "Cannot mark the VirtualMachine as a template. Not using linked clones."
                use_linked_clones = false
                return error, use_linked_clones
            end
        end

        # Either the delta disks were just created or the template already
        # had them -- linked clones can be used.
        return nil, true
    end
217

    
218
    # Imports this template's disks into OpenNebula, allocating an Image
    # for each disk whose datastore has already been imported.
    #
    # @param vc_uuid     [String]  vCenter instance uuid
    # @param dpool       [OpenNebula::DatastorePool] datastore pool
    # @param ipool       [OpenNebula::ImagePool] image pool
    # @param sunstone    [Boolean] return Sunstone-friendly hashes instead
    #                              of a raw OpenNebula template string
    # @param template_id [String, nil] OpenNebula template id, forwarded to
    #                              the image-template builder
    #
    # @return [Array(String, String|Array, Array)] error ("" on success),
    #   disk info (template string, or array of hashes when sunstone) and
    #   the allocated Image objects (for caller-side rollback)
    #
    # Fixes over the original: rescue StandardError instead of Exception;
    # allocated_images declared before the begin so the ensure-time
    # rollback can always reference it safely.
    def import_vcenter_disks(vc_uuid, dpool, ipool, sunstone=false, template_id=nil)
        disk_info = ""
        error = ""
        sunstone_disk_info = []

        # Track allocated images for rollback; declared here so the ensure
        # block sees it even when lock/get_dc raise early.
        allocated_images = []

        begin
            lock #Lock import operation, to avoid concurrent creation of images

            dc = get_dc
            dc_ref = dc.item._ref

            #Get disks and info required
            vc_disks = get_vcenter_disks

            vc_disks.each do |disk|
                datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_dc(disk[:datastore]._ref,
                                                                                        dc_ref,
                                                                                        vc_uuid,
                                                                                        dpool)
                if datastore_found.nil?
                    error = "\n    ERROR: datastore #{disk[:datastore].name}: has to be imported first as an image datastore!\n"

                    #Rollback delete disk images
                    allocated_images.each do |i|
                        i.delete
                    end

                    break
                end

                image_import = VCenterDriver::Datastore.get_image_import_template(disk[:datastore].name,
                                                                                  disk[:path],
                                                                                  disk[:type],
                                                                                  disk[:prefix],
                                                                                  ipool,
                                                                                  template_id)
                #Image is already in the datastore
                if image_import[:one]
                    # This is the disk info
                    disk_tmp = ""
                    disk_tmp << "DISK=[\n"
                    disk_tmp << "IMAGE_ID=\"#{image_import[:one]["ID"]}\",\n"
                    disk_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
                    disk_tmp << "]\n"
                    if sunstone
                        sunstone_disk = {}
                        sunstone_disk[:type] = "EXISTING_DISK"
                        sunstone_disk[:image_tmpl] = disk_tmp
                        sunstone_disk_info << sunstone_disk
                    else
                        disk_info << disk_tmp
                    end

                elsif !image_import[:template].empty?

                    if sunstone
                        sunstone_disk = {}
                        sunstone_disk[:type] = "NEW_DISK"
                        sunstone_disk[:image_tmpl] = image_import[:template]
                        sunstone_disk[:ds_id] = datastore_found['ID'].to_i
                        sunstone_disk_info << sunstone_disk
                    else
                        # Then the image is created as it's not in the datastore
                        one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image)
                        allocated_images << one_i
                        rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i)

                        if OpenNebula.is_error?(rc)
                            error = "    Error creating disk from template: #{rc.message}\n"
                            break
                        end

                        #Add info for One template
                        one_i.info
                        disk_info << "DISK=[\n"
                        disk_info << "IMAGE_ID=\"#{one_i["ID"]}\",\n"
                        disk_info << "OPENNEBULA_MANAGED=\"NO\"\n"
                        disk_info << "]\n"
                    end
                end
            end

        rescue StandardError => e
            error = "\n    There was an error trying to create an image for disk in vcenter template. Reason: #{e.message}\n#{e.backtrace}"
        ensure
            unlock
            if !error.empty? && allocated_images
                #Rollback delete disk images
                allocated_images.each do |i|
                    i.delete
                end
            end
        end

        return error, sunstone_disk_info, allocated_images if sunstone

        return error, disk_info, allocated_images
    end
321

    
322
    # Imports this template's/VM's NICs into OpenNebula, creating a
    # Virtual Network for each vCenter port group not yet represented.
    #
    # @param vc_uuid               [String] vCenter instance uuid
    # @param npool                 [OpenNebula::VirtualNetworkPool]
    # @param hpool                 [OpenNebula::HostPool]
    # @param vcenter_instance_name [String] vCenter display name
    # @param template_ref         [String]  this template's vCenter _ref
    # @param wild                 [Boolean] importing from a wild VM?
    # @param sunstone             [Boolean] return Sunstone hashes instead
    #                                       of a raw template string
    # @param vm_name/vm_id/dc_name optional naming context forwarded to
    #                                       the vnet template builder
    #
    # @return [Array(String, String|Array, Array)] error ("" on success),
    #   nic info and the allocated VirtualNetworks (for rollback)
    #
    # Fixes over the original: rescue StandardError instead of Exception;
    # allocated_networks declared before the begin so the ensure-time
    # rollback can always reference it safely.
    def import_vcenter_nics(vc_uuid, npool, hpool, vcenter_instance_name,
                            template_ref, wild, sunstone=false, vm_name=nil, vm_id=nil, dc_name=nil)
        nic_info = ""
        error = ""
        sunstone_nic_info = []

        # Track allocated networks for rollback (visible to the ensure block)
        allocated_networks = []

        begin
            lock #Lock import operation, to avoid concurrent creation of networks

            if !dc_name
                dc = get_dc
                dc_name = dc.item.name
                dc_ref  = dc.item._ref
            end
            # NOTE(review): when dc_name IS provided by the caller, dc_ref
            # stays nil and nil is forwarded to Network.to_one_template
            # below -- confirm callers always rely on the nil-dc_ref path.

            ccr_ref  = self["runtime.host.parent._ref"]
            ccr_name = self["runtime.host.parent.name"]

            #Get NICs and info required
            vc_nics = get_vcenter_nics

            # Track port groups duplicated in this VM
            duplicated_networks = []

            vc_nics.each do |nic|
                # Check if the network already exists
                network_found = VCenterDriver::Network.get_unmanaged_vnet_by_ref(nic[:net_ref],
                                                                                 template_ref,
                                                                                 vc_uuid,
                                                                                 npool)
                #Network is already in OpenNebula
                if network_found

                    # This is the existing nic info
                    nic_tmp = ""
                    nic_tmp << "NIC=[\n"
                    nic_tmp << "NETWORK_ID=\"#{network_found["ID"]}\",\n"
                    nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
                    nic_tmp << "]\n"

                    if sunstone
                        sunstone_nic = {}
                        sunstone_nic[:type] = "EXISTING_NIC"
                        sunstone_nic[:network_tmpl] = nic_tmp
                        sunstone_nic_info << sunstone_nic
                    else
                        nic_info << nic_tmp
                    end
                else
                    # Then the network has to be created as it's not in OpenNebula
                    one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork)

                    # We're importing unmanaged nics
                    unmanaged = true

                    # Let's get the OpenNebula host associated to the cluster reference
                    one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
                                                                  "TEMPLATE/VCENTER_CCR_REF",
                                                                   ccr_ref,
                                                                   vc_uuid,
                                                                   hpool)

                    # Let's get the CLUSTER_ID from the OpenNebula host
                    if !one_host || !one_host['CLUSTER_ID']
                        cluster_id = -1
                    else
                        cluster_id = one_host['CLUSTER_ID']
                    end

                    # We have to know if we're importing nics from a wild vm
                    # or from a template
                    if wild
                        unmanaged = "wild"
                    else
                        unmanaged = "template"
                    end

                    # Prepare the Virtual Network template
                    one_vnet = VCenterDriver::Network.to_one_template(nic[:net_name],
                                                                      nic[:net_ref],
                                                                      nic[:pg_type],
                                                                      ccr_ref,
                                                                      ccr_name,
                                                                      vc_uuid,
                                                                      vcenter_instance_name,
                                                                      dc_name,
                                                                      cluster_id,
                                                                      nil,
                                                                      unmanaged,
                                                                      template_ref,
                                                                      dc_ref,
                                                                      vm_name,
                                                                      vm_id)

                    # By default add an ethernet range to network size 255
                    ar_str = ""
                    ar_str << "AR=[\n"
                    ar_str << "TYPE=\"ETHER\",\n"
                    ar_str << "SIZE=\"255\"\n"
                    ar_str << "]\n"
                    one_vnet[:one] << ar_str

                    if sunstone
                        if !duplicated_networks.include?(nic[:net_name])
                            sunstone_nic = {}
                            sunstone_nic[:type] = "NEW_NIC"
                            sunstone_nic[:network_name] = nic[:net_name]
                            sunstone_nic[:network_tmpl] = one_vnet[:one]
                            sunstone_nic[:one_cluster_id] = cluster_id.to_i
                            sunstone_nic_info << sunstone_nic
                            duplicated_networks << nic[:net_name]
                        else
                            sunstone_nic = {}
                            sunstone_nic[:type] = "DUPLICATED_NIC"
                            sunstone_nic[:network_name] = nic[:net_name]
                            sunstone_nic_info << sunstone_nic
                        end
                    else
                        # Allocate the Virtual Network
                        allocated_networks << one_vn
                        rc = one_vn.allocate(one_vnet[:one], cluster_id.to_i)

                        if OpenNebula.is_error?(rc)
                            error = "\n    ERROR: Could not allocate virtual network due to #{rc.message}\n"
                            break
                        end

                        # Add info for One template
                        one_vn.info
                        nic_info << "NIC=[\n"
                        nic_info << "NETWORK_ID=\"#{one_vn["ID"]}\",\n"
                        nic_info << "OPENNEBULA_MANAGED=\"NO\"\n"
                        nic_info << "]\n"

                        # Refresh npool
                        npool.info_all
                    end
                end
            end

        rescue StandardError => e
            error = "\n    There was an error trying to create a virtual network to repesent a vCenter network for a VM or VM Template. Reason: #{e.message}"
        ensure
            unlock
            #Rollback, delete virtual networks
            if !error.empty? && allocated_networks
                allocated_networks.each do |n|
                    n.delete
                end
            end
        end

        return error, sunstone_nic_info, allocated_networks if sunstone

        return error, nic_info, allocated_networks
    end
481

    
482
    # Looks up the vCenter device key of the disk/ISO device attached at
    # the given controller/unit position.
    #
    # @param unit_number    [Integer] device unitNumber
    # @param controller_key [Integer] controllerKey of the owning controller
    # @return [Integer, nil] the device key, or nil when no match
    def get_vcenter_disk_key(unit_number, controller_key)
        matching = @item["config.hardware.device"].find do |device|
            is_disk_or_iso?(device) &&
                device.controllerKey == controller_key &&
                device.unitNumber == unit_number
        end

        matching ? matching.key : nil
    end
502

    
503
    # Collects the disk/ISO devices of this VM as plain hashes with the
    # info needed by the importers (:device, :datastore, :path,
    # :path_wo_ds, :type, :key and a :prefix derived from the controller).
    #
    # @return [Array<Hash>] one hash per disk or ISO-backed cdrom
    def get_vcenter_disks
        disks = []
        ide_keys  = []
        sata_keys = []
        scsi_keys = []

        @item["config.hardware.device"].each do |device|
            # Remember which device keys hang off each controller type so
            # the OpenNebula device prefix can be chosen below
            ide_keys.concat(device.device)  if device.is_a? RbVmomi::VIM::VirtualIDEController
            sata_keys.concat(device.device) if device.is_a? RbVmomi::VIM::VirtualSATAController
            scsi_keys.concat(device.device) if device.is_a? RbVmomi::VIM::VirtualSCSIController

            next unless is_disk_or_iso?(device)

            disk = {
                :device     => device,
                :datastore  => device.backing.datastore,
                :path       => device.backing.fileName,
                :path_wo_ds => device.backing.fileName.sub(/^\[(.*?)\] /, ""),
                :type       => is_disk?(device) ? "OS" : "CDROM",
                :key        => device.key
            }
            disk[:prefix] = "hd" if ide_keys.include?(device.key)
            disk[:prefix] = "sd" if scsi_keys.include?(device.key)
            disk[:prefix] = "sd" if sata_keys.include?(device.key)
            disks << disk
        end

        disks
    end
541

    
542
    # Collects the NIC devices of this VM as plain hashes with the network
    # name, network _ref and port group type. A NIC whose backing cannot
    # be resolved is silently skipped (best effort, as in the original).
    #
    # @return [Array<Hash>] one hash per resolvable NIC
    def get_vcenter_nics
        nics = []

        @item["config.hardware.device"].each do |device|
            next unless is_nic?(device)

            begin
                nics << {
                    :net_name => device.backing.network.name,
                    :net_ref  => device.backing.network._ref,
                    :pg_type  => VCenterDriver::Network.get_network_type(device)
                }
            rescue StandardError
                # ignore NICs whose backing network cannot be read
            end
        end

        nics
    end
558

    
559
    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom
560
    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom
    def is_disk_or_cdrom?(device)
        ancestry = device.class.ancestors
        is_disk  = ancestry.include?(RbVmomi::VIM::VirtualDisk)
        is_cdrom = ancestry.include?(RbVmomi::VIM::VirtualCdrom)
        is_disk || is_cdrom
    end
565

    
566
    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file
567
    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file
    def is_disk_or_iso?(device)
        is_disk = device.class.ancestors.include?(RbVmomi::VIM::VirtualDisk)
        is_iso  = device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo
        is_disk || is_iso
    end
572

    
573
    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk
574
    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk
    def is_disk?(device)
        device.class.ancestors.include?(RbVmomi::VIM::VirtualDisk)
    end
577

    
578
    #  Checks if a RbVmomi::VIM::VirtualDevice is a network interface
579
    #  Checks if a RbVmomi::VIM::VirtualDevice is a network interface
    def is_nic?(device)
        device.class.ancestors.include?(RbVmomi::VIM::VirtualEthernetCard)
    end
582

    
583
    # @return RbVmomi::VIM::ResourcePool, first resource pool in cluster
584
    def get_rp
585
        self['runtime.host.parent.resourcePool']
586
    end
587

    
588
    # Builds the OpenNebula template string used to import this (wild) VM.
    # Reads the data previously cached in @vm_info (string-keyed vCenter
    # properties plus :vc_uuid / :cluster_ref) and @state.
    #
    # @param vm_name [String] name for the resulting OpenNebula VM
    # @return [String] OpenNebula template definition
    #
    # Fix over the original: the extraConfig scan used #select purely for
    # its side effects and discarded the result -- replaced with #each.
    def vm_to_one(vm_name)

        str = "NAME   = \"#{vm_name}\"\n"\
              "CPU    = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
              "vCPU   = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
              "MEMORY = \"#{@vm_info["config.hardware.memoryMB"]}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "CONTEXT = [\n"\
              "    NETWORK = \"YES\",\n"\
              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
              "]\n"\
              "VCENTER_INSTANCE_ID =\"#{@vm_info[:vc_uuid]}\"\n"\
              "VCENTER_CCR_REF =\"#{@vm_info[:cluster_ref]}\"\n"

        str << "IMPORT_VM_ID =\"#{self["_ref"]}\"\n"
        str << "IMPORT_STATE =\"#{@state}\"\n"

        # Pick up an existing VNC port/keymap from the VM's extraConfig
        vnc_port = nil
        keymap = nil

        @vm_info["config.extraConfig"].each do |xtra|
            if xtra[:key].downcase=="remotedisplay.vnc.port"
                vnc_port = xtra[:value]
            end

            if xtra[:key].downcase=="remotedisplay.vnc.keymap"
                keymap = xtra[:value]
            end
        end

        if !@vm_info["config.extraConfig"].empty?
            str << "GRAPHICS = [\n"\
                   "  TYPE     =\"vnc\",\n"
            str << "  PORT     =\"#{vnc_port}\",\n" if vnc_port
            str << "  KEYMAP   =\"#{keymap}\",\n" if keymap
            str << "  LISTEN   =\"0.0.0.0\"\n"
            str << "]\n"
        end

        if !@vm_info["config.annotation"] || @vm_info["config.annotation"].empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
                " from Cluster #{@vm_info["cluster_name"]}\"\n"
        else
            # Escape backslashes and double quotes for the template parser
            notes = @vm_info["config.annotation"].gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        # Pick a Sunstone logo matching the guest OS
        case @vm_info["guest.guestFullName"]
            when /CentOS/i
                str << "LOGO=images/logos/centos.png\n"
            when /Debian/i
                str << "LOGO=images/logos/debian.png\n"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png\n"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png\n"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png\n"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png\n"
            when /Linux/i
                str << "LOGO=images/logos/linux.png\n"
        end

        return str
    end
654

    
655
    # Builds the OpenNebula template string used to import a vCenter
    # template. Unlike #vm_to_one this is a class method that fetches the
    # vCenter properties directly from the given Template wrapper.
    #
    # @param template    [Template] wrapper whose .item is queried
    # @param vc_uuid     [String] vCenter instance uuid
    # @param ccr_ref     [String] cluster _ref
    # @param ccr_name    [String] cluster name (used in the description)
    # @param import_name [String] NAME for the OpenNebula template
    # @param host_id     NOTE(review): accepted but never used in this body
    # @return [String] OpenNebula template definition
    def self.template_to_one(template, vc_uuid, ccr_ref, ccr_name, import_name, host_id)

        # Single round-trip for the four properties we need
        num_cpu, memory, annotation, guest_fullname = template.item.collect("config.hardware.numCPU","config.hardware.memoryMB","config.annotation","guest.guestFullName")

        str = "NAME   = \"#{import_name}\"\n"\
              "CPU    = \"#{num_cpu}\"\n"\
              "vCPU   = \"#{num_cpu}\"\n"\
              "MEMORY = \"#{memory}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "CONTEXT = [\n"\
              "    NETWORK = \"YES\",\n"\
              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
              "]\n"\
              "VCENTER_INSTANCE_ID =\"#{vc_uuid}\"\n"

        str << "VCENTER_TEMPLATE_REF =\"#{template["_ref"]}\"\n"
        str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n"

        str << "GRAPHICS = [\n"\
               "  TYPE     =\"vnc\",\n"
        str << "  LISTEN   =\"0.0.0.0\"\n"
        str << "]\n"

        if annotation.nil? || annotation.empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
                " from Cluster #{ccr_name}\"\n"
        else
            # Escape backslashes and double quotes for the template parser
            notes = annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        # Pick a Sunstone logo matching the guest OS
        case guest_fullname
            when /CentOS/i
                str << "LOGO=images/logos/centos.png\n"
            when /Debian/i
                str << "LOGO=images/logos/debian.png\n"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png\n"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png\n"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png\n"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png\n"
            when /Linux/i
                str << "LOGO=images/logos/linux.png\n"
        end

        return str
    end
705

    
706
    # Builds the importer hash (:name, :template_name, :rp/:rp_list, the
    # OpenNebula template string in :one, etc.) describing one vCenter
    # template. rp_cache is mutated in place to memoize the resource-pool
    # list per cluster across calls.
    #
    # NOTE(review): any failure is swallowed by the blanket rescue at the
    # bottom and turned into a silent nil return -- callers cannot tell
    # why an import candidate vanished.
    # NOTE(review): get_dc is an *instance* method of Template, but this
    # is a class method; when dc_name is nil the call below raises
    # NameError, which the blanket rescue converts into nil. Confirm all
    # callers pass dc_name.
    #
    # @return [Hash, nil] importer hash, or nil on any error
    def self.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name=nil, dc_name=nil, rp_cache={})

        begin
            template_ref      = template['_ref']
            template_name     = template["name"]
            template_ccr      = template['runtime.host.parent']
            template_ccr_ref  = template_ccr._ref
            template_ccr_name = template_ccr.name

            # Set vcenter instance name
            vcenter_instance_name = vi_client.vim.host if !vcenter_instance_name

            # Get datacenter info (see NOTE above: get_dc is not reachable
            # from this class-method context)
            if !dc_name
                dc = get_dc
                dc_name = dc.item.name
            end

            #Get resource pools and generate a list
            if !rp_cache[template_ccr_name]
                tmp_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(template_ccr_ref, vi_client)
                rp_list = tmp_cluster.get_resource_pool_list
                rp = ""
                if !rp_list.empty?
                    rp_name_list = []
                    rp_list.each do |rp_hash|
                        rp_name_list << rp_hash[:name]
                    end
                    # USER_INPUTS-style list question for the template
                    rp =  "O|list|Which resource pool you want this VM to run in? "
                    rp << "|#{rp_name_list.join(",")}" #List of RP
                    rp << "|#{rp_name_list.first}" #Default RP
                end
                rp_cache[template_ccr_name] = {}
                rp_cache[template_ccr_name][:rp] = rp
                rp_cache[template_ccr_name][:rp_list] = rp_list
            end
            rp      = rp_cache[template_ccr_name][:rp]
            rp_list = rp_cache[template_ccr_name][:rp_list]


            # Determine the location path for the template
            vcenter_template = VCenterDriver::VirtualMachine.new_from_ref(template_ref, vi_client)
            item = vcenter_template.item
            folders = []
            while !item.instance_of? RbVmomi::VIM::Datacenter
                item = item.parent
                if !item.instance_of? RbVmomi::VIM::Datacenter
                    folders << item.name if item.name != "vm"
                end
                raise "Could not find the templates parent location" if item.nil?
            end
            location = folders.reverse.join("/")
            location = "/" if location.empty?

            # Generate a crypto hash for the template name and take the first 12 chars
            sha256            = Digest::SHA256.new
            full_name         = "#{template_name} - #{template_ccr_name} [#{vcenter_instance_name} - #{dc_name}]_#{location}"
            template_hash     = sha256.hexdigest(full_name)[0..11]
            # Strip DEL (0x7F) characters before composing the import name
            template_name     = template_name.tr("\u007F", "")
            template_ccr_name = template_ccr_name.tr("\u007F", "")
            import_name       = "#{template_name} - #{template_ccr_name} #{template_hash}"

            # Prepare the Hash that will be used by importers to display
            # the object being imported
            one_tmp = {}
            one_tmp[:name]                  = import_name
            one_tmp[:template_name]         = template_name
            one_tmp[:sunstone_template_name]= "#{template_name} [ Cluster: #{template_ccr_name} - Template location: #{location} ]"
            one_tmp[:template_hash]         = template_hash
            one_tmp[:template_location]     = location
            one_tmp[:vcenter_ccr_ref]       = template_ccr_ref
            one_tmp[:vcenter_ref]           = template_ref
            one_tmp[:vcenter_instance_uuid] = vcenter_uuid
            one_tmp[:cluster_name]          = template_ccr_name
            one_tmp[:rp]                    = rp
            one_tmp[:rp_list]               = rp_list
            one_tmp[:template]              = template
            one_tmp[:import_disks_and_nics] = true # By default we import disks and nics

            # Get the host ID of the OpenNebula host which represents the vCenter Cluster
            host_id = nil
            one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
                                                           "TEMPLATE/VCENTER_CCR_REF",
                                                           template_ccr_ref,
                                                           vcenter_uuid)
            host_id    = one_host["ID"]
            cluster_id = one_host["CLUSTER_ID"]
            raise "Could not find the host's ID associated to template being imported" if !host_id

            # Get the OpenNebula's template hash
            one_tmp[:one] = template_to_one(template, vcenter_uuid, template_ccr_ref, template_ccr_name, import_name, host_id)
            return one_tmp
        rescue
            return nil
        end
    end
802

    
803
    # TODO check with uuid
804
    def self.new_from_ref(ref, vi_client)
805
        self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
806
    end
807

    
808
end
809

    
810
class VirtualMachine < Template
811
    VM_PREFIX_DEFAULT = "one-$i-"
812

    
813
    POLL_ATTRIBUTE    = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
814
    VM_STATE          = OpenNebula::VirtualMachine::Driver::VM_STATE
815

    
816
    VM_SHUTDOWN_TIMEOUT = 600 #10 minutes til poweroff hard
817

    
818
    attr_accessor :item
819

    
820
    attr_accessor :vm_info
821

    
822
    include Memoize
823

    
824
    # @param item [RbVmomi::VIM::VirtualMachine, nil] existing vCenter VM
    # @param vi_client [VCenterDriver::VIClient, nil] vCenter connection
    def initialize(item=nil, vi_client=nil)
        @item      = item
        @vi_client = vi_client
        @locking   = true # lock operations on this VM by default
        @vm_info   = nil  # no cached info yet
    end
830

    
831
    ############################################################################
    ############################################################################

    # Attributes that must be defined when the VM does not exist in vCenter
    attr_accessor :vi_client

    # These have their own getter (if they aren't set, we can set them
    # dynamically)
    attr_writer :one_item, :host, :target_ds_ref
842

    
843
    ############################################################################
844
    ############################################################################
845

    
846
    # The OpenNebula VM (lazily resolved from the vCenter ref and cached)
    # @return OpenNebula::VirtualMachine or XMLElement
    # @raise [RuntimeError] when no OpenNebula VM id can be resolved
    def one_item
        return @one_item if @one_item

        vm_id = get_vm_id
        raise "Unable to find vm_id." if vm_id.nil?

        @one_item = VIHelper.one_item(OpenNebula::VirtualMachine, vm_id)
    end
859

    
860
    # The OpenNebula host from the VM's last history record (cached)
    # @return OpenNebula::Host or XMLElement
    # @raise [RuntimeError] when one_item is unset or has no host id
    def host
        return @host unless @host.nil?

        if one_item.nil?
            raise "'one_item' must be previously set to be able to " <<
                  "access the OpenNebula host."
        end

        hid = one_item["HISTORY_RECORDS/HISTORY[last()]/HID"]
        raise "No valid host_id found." if hid.nil?

        @host = VIHelper.one_item(OpenNebula::Host, hid)
    end
877

    
878
    # Target Datastore VMware reference getter (resolved from the last
    # history record's datastore and cached)
    # @return [String, nil] the datastore's TEMPLATE/VCENTER_DS_REF
    # @raise [RuntimeError] when one_item is unset or has no DS id
    def target_ds_ref
        return @target_ds_ref unless @target_ds_ref.nil?

        if one_item.nil?
            raise "'one_item' must be previously set to be able to " <<
                  "access the target Datastore."
        end

        ds_id = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
        raise "No valid target_ds_id found." if ds_id.nil?

        one_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore,
                                                  ds_id)

        @target_ds_ref = one_ds['TEMPLATE/VCENTER_DS_REF']
    end
898

    
899
    # Cached cluster, resolved from the host's VCENTER_CCR_REF
    # @return ClusterComputeResource
    def cluster
        @cluster ||= ClusterComputeResource.new_from_ref(
            host['TEMPLATE/VCENTER_CCR_REF'], vi_client)
    end
909

    
910
    ############################################################################
911
    ############################################################################
912

    
913
    # @return [Boolean] true when no OpenNebula VM is registered for this
    #   vCenter VM (i.e. the VM does not exist in vCenter yet)
    def is_new?
        get_vm_id.nil?
    end
917

    
918
    # Look up the OpenNebula VM id whose DEPLOY_ID matches this vCenter VM.
    # @return [String, nil] the vm_id, or nil when unmatched / no ref yet
    def get_vm_id
        vm_ref = self['_ref']
        return nil unless vm_ref

        vc_uuid = get_vcenter_instance_uuid

        found = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualMachinePool,
                                                    "DEPLOY_ID",
                                                    vm_ref,
                                                    vc_uuid)
        found ? found["ID"] : nil
    end
933

    
934
    def get_vcenter_instance_uuid
935
        @vi_client.vim.serviceContent.about.instanceUuid
936
    end
937

    
938
    # Collect the "opennebula.disk.*" entries from the VM's extraConfig.
    # These map OpenNebula disk IDs to vCenter device keys.
    # @return [Hash] extraConfig key [String] => value
    def get_unmanaged_keys
        @item.config.extraConfig.each_with_object({}) do |opt, keys|
            keys[opt[:key]] = opt[:value] if opt[:key].include?("opennebula.disk")
        end
    end
947

    
948
    ############################################################################
949
    # Getters
950
    ############################################################################
951

    
952
    # Resolve the resource pool this VM should use. Honors
    # VCENTER_RESOURCE_POOL from the VM (or its USER_TEMPLATE); when the
    # vi_client is confined to one resource pool, any explicit request must
    # match it. Falls back to the cluster's root pool when nothing is
    # requested.
    # @return RbVmomi::VIM::ResourcePool
    # @raise [RuntimeError] if the requested pool conflicts with a confined
    #   client or cannot be found in the cluster
    def get_rp

        req_rp = one_item['VCENTER_RESOURCE_POOL'] ||
                 one_item['USER_TEMPLATE/VCENTER_RESOURCE_POOL']

        # Get ref for req_rp (nil when the name is not in the cluster's list)
        rp_list    = cluster.get_resource_pool_list
        req_rp_ref = rp_list.select { |rp| rp[:name] == req_rp }.first[:ref] rescue nil

        if vi_client.rp_confined?
            # Client restricted to one pool: an explicit request must match it
            if req_rp_ref && req_rp_ref != vi_client.rp._ref
                raise "Available resource pool [#{vi_client.rp.name}] in host"\
                      " does not match requested resource pool"\
                      " [#{req_rp}]"
            end

            return vi_client.rp
        else
            if req_rp_ref
                rps = cluster.resource_pools.select{|r| r._ref == req_rp_ref }

                if rps.empty?
                    raise "No matching resource pool found (#{req_rp})."
                else
                    return rps.first
                end
            else
                # No explicit request: use the cluster's root resource pool
                return cluster['resourcePool']
            end
        end
    end
984

    
985
    # Resolve the datastore of the VM's last history record to a vCenter
    # object.
    # @return RbVmomi::VIM::Datastore or nil
    def get_ds
        ds_id     = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
        one_ds    = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, ds_id)
        ds_vc_ref = one_ds['TEMPLATE/VCENTER_DS_REF']

        return nil unless ds_vc_ref

        datacenter = cluster.get_dc
        datastore  = datacenter.datastore_folder.get(ds_vc_ref)
        datastore.item rescue nil
    end
1004

    
1005
    # StorageResouceManager reference
1006
    def get_sm
1007
        self['_connection.serviceContent.storageResourceManager']
1008
    end
1009

    
1010
    # Fetch the vCenter customization spec named by the VM's
    # USER_TEMPLATE/VCENTER_CUSTOMIZATION_SPEC attribute.
    #
    # Fixes a NameError in the original code: it referenced an undefined
    # variable `customization` (and called `.text` on it) instead of the
    # `customization_spec` string read from the template, so any VM that
    # actually requested a customization spec crashed here.
    #
    # @return [RbVmomi::VIM::CustomizationSpec, nil] nil when the template
    #   requests no customization
    # @raise [RuntimeError] when the named spec cannot be retrieved
    def get_customization
        xpath = "USER_TEMPLATE/VCENTER_CUSTOMIZATION_SPEC"
        customization_spec = one_item[xpath]

        if customization_spec.nil?
            return nil
        end

        begin
            custom_spec = vi_client.vim
                            .serviceContent
                            .customizationSpecManager
                            .GetCustomizationSpec(:name => customization_spec)

            if custom_spec && (spec = custom_spec.spec)
                return spec
            else
                raise "Error getting customization spec"
            end
        rescue
            raise "Customization spec '#{customization_spec}' not found"
        end
    end
1034

    
1035
    # Datastore where the given disk will live: the disk's own datastore
    # for persistent disks, the VM's target datastore otherwise.
    # @param disk [XMLElement] DISK element from the OpenNebula template
    # @return VCenterDriver::Datastore datastore where the disk will live under
    # @raise [RuntimeError] when a non-persistent disk has no target_ds_ref
    def get_effective_ds(disk)
        if disk["PERSISTENT"] == "YES"
            return VCenterDriver::Storage.new_from_ref(disk["VCENTER_DS_REF"],
                                                       vi_client)
        end

        ds_ref = target_ds_ref
        raise "target_ds_ref must be defined on this object." if ds_ref.nil?

        VCenterDriver::Storage.new_from_ref(ds_ref, vi_client)
    end
1049

    
1050
    # Build the name this VM will have in vCenter: the host's VM_PREFIX
    # (or VM_PREFIX_DEFAULT) with "$i" expanded to the OpenNebula VM ID,
    # followed by the OpenNebula VM name.
    #
    # Uses non-destructive gsub: the original used gsub!, which mutated the
    # string in place — when the host defined no prefix, that corrupted the
    # shared VM_PREFIX_DEFAULT constant object for all later calls.
    #
    # @return String vcenter name
    def get_vcenter_name
        vm_prefix = host['TEMPLATE/VM_PREFIX']
        vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty?

        vm_prefix.gsub("$i", one_item['ID']) + one_item['NAME']
    end
1058

    
1059
    ############################################################################
1060
    # Create and reconfigure VM related methods
1061
    ############################################################################
1062

    
1063
    # This function creates a new VM from the @one_item XML and returns the
    # VMware ref. Chooses linked vs full clone, resolves the destination
    # folder, and retries once on DuplicateName by destroying the stale VM
    # (after detaching its persistent disks).
    # @param one_item OpenNebula::VirtualMachine
    # @param vi_client VCenterDriver::VIClient
    # @return String vmware ref
    # @raise [RuntimeError] when the clone task fails
    def clone_vm(one_item, vi_client)
        @one_item = one_item
        @vi_client = vi_client

        vcenter_name = get_vcenter_name

        vc_template_ref = one_item['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
        vc_template = RbVmomi::VIM::VirtualMachine(vi_client.vim, vc_template_ref)

        ds = get_ds

        # Default disk move type (Full Clone)
        disk_move_type = :moveAllDiskBackingsAndDisallowSharing

        if ds.instance_of? RbVmomi::VIM::Datastore
            use_linked_clones = one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES']
            if use_linked_clones && use_linked_clones.downcase == "yes"
                # Check if all disks in template has delta disks
                disks = vc_template.config
                                .hardware.device.grep(RbVmomi::VIM::VirtualDisk)

                disks_no_delta = disks.select { |d| d.backing.parent == nil }

                # Can use linked clones if all disks have delta disks
                if (disks_no_delta.size == 0)
                    disk_move_type = :moveChildMostDiskBacking
                end
            end
        end

        spec_hash = spec_hash_clone(disk_move_type)

        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(spec_hash)

        # Specify vm folder in vSphere's VM and Templates view F#4823
        vcenter_vm_folder = nil
        vcenter_vm_folder = one_item["USER_TEMPLATE/VCENTER_VM_FOLDER"]
        vcenter_vm_folder_object = nil
        dc = cluster.get_dc
        if !!vcenter_vm_folder && !vcenter_vm_folder.empty?
            vcenter_vm_folder_object = dc.item.find_folder(vcenter_vm_folder)
        end
        # Fall back to the template's own folder when none was requested/found
        vcenter_vm_folder_object = vc_template.parent if vcenter_vm_folder_object.nil?

        if ds.instance_of? RbVmomi::VIM::StoragePod
            # VM is cloned using Storage Resource Manager for StoragePods
            begin
                vm = storagepod_clonevm_task(vc_template, vcenter_name,
                                             clone_spec, ds, vcenter_vm_folder_object, dc)
            rescue Exception => e
                raise "Cannot clone VM Template to StoragePod: #{e.message}"
            end
        else
            vm = nil
            begin
                vm = vc_template.CloneVM_Task(
                    :folder => vcenter_vm_folder_object,
                    :name   => vcenter_name,
                    :spec   => clone_spec).wait_for_completion
            rescue Exception => e
                if !e.message.start_with?('DuplicateName')
                    raise "Cannot clone VM Template: #{e.message}\n#{e.backtrace}"
                end

                # A VM with this name already exists: find it in the DC's
                # VM folder so it can be destroyed and the clone retried
                vm_folder = dc.vm_folder
                vm_folder.fetch!
                vm = vm_folder.items
                        .select{|k,v| v.item.name == vcenter_name}
                        .values.first.item rescue nil

                if vm
                    # Detach all persistent disks to avoid accidental destruction
                    detach_persistent_disks(vm)

                    vm.Destroy_Task.wait_for_completion
                    vm = vc_template.CloneVM_Task(
                        :folder => vcenter_vm_folder_object,
                        :name   => vcenter_name,
                        :spec   => clone_spec).wait_for_completion
                else
                    raise "Cannot clone VM Template"
                end
            end
        end
        # @item is populated
        @item = vm

        return self['_ref']
    end
1157

    
1158

    
1159
    # Clone a template into a StoragePod through Storage DRS: ask the
    # StorageResourceManager for a placement recommendation and apply it.
    # On DuplicateName, the stale VM is located, its persistent disks are
    # detached, it is destroyed, and the recommendation flow is retried once.
    # @return the cloned VM (from the applied recommendation)
    # @raise [RuntimeError] when no recommendation can be obtained or the
    #   retry fails
    # NOTE(review): if the DuplicateName path cannot find the existing VM,
    # this method falls through and implicitly returns nil — verify callers
    # handle that.
    def storagepod_clonevm_task(vc_template, vcenter_name, clone_spec, storpod, vcenter_vm_folder_object, dc)

        storage_manager = vc_template
                            ._connection.serviceContent.storageResourceManager

        storage_spec = RbVmomi::VIM.StoragePlacementSpec(
            type: 'clone',
            cloneName: vcenter_name,
            folder: vcenter_vm_folder_object,
            podSelectionSpec: RbVmomi::VIM.StorageDrsPodSelectionSpec(storagePod: storpod),
            vm: vc_template,
            cloneSpec: clone_spec
        )

        # Query a storage placement recommendation
        result = storage_manager
                    .RecommendDatastores(storageSpec: storage_spec) rescue nil

        raise "Could not get placement specification for StoragePod" if result.nil?

        if !result.respond_to?(:recommendations) || result.recommendations.size == 0
            raise "Could not get placement specification for StoragePod"
        end

        # Get recommendation key to be applied
        key = result.recommendations.first.key ||= ''
        raise "Missing Datastore recommendation for StoragePod" if key.empty?

        begin
            apply_sr = storage_manager
                            .ApplyStorageDrsRecommendation_Task(key: [key])
                            .wait_for_completion
            return apply_sr.vm
        rescue Exception => e
            if !e.message.start_with?('DuplicateName')
                raise "Cannot clone VM Template: #{e.message}\n#{e.backtrace}"
            end

            # The VM already exists, try to find the vm
            vm_folder = dc.vm_folder
            vm_folder.fetch!
            vm = vm_folder.items
                    .select{|k,v| v.item.name == vcenter_name}
                    .values.first.item rescue nil

            if vm

                begin
                    # Detach all persistent disks to avoid accidental destruction
                    detach_persistent_disks(vm)

                    # Destroy the VM with any disks still attached to it
                    vm.Destroy_Task.wait_for_completion

                    # Query a storage placement recommendation
                    result = storage_manager.RecommendDatastores(storageSpec: storage_spec) rescue nil

                    raise "Could not get placement specification for StoragePod" if result.nil?

                    if !result.respond_to?(:recommendations) || result.recommendations.size == 0
                        raise "Could not get placement specification for StoragePod"
                    end

                    # Get recommendation key to be applied
                    key = result.recommendations.first.key ||= ''
                    raise "Missing Datastore recommendation for StoragePod" if key.empty?

                    apply_sr = storage_manager
                            .ApplyStorageDrsRecommendation_Task(key: [key])
                            .wait_for_completion
                    return apply_sr.vm
                rescue Exception => e
                   raise "Failure applying recommendation while cloning VM: #{e.message}"
                end
            end
        end
    end
1236

    
1237
    # Build the parameter hash for CloneVM_Task: relocation (resource pool,
    # disk move type, target datastore), power/template flags, the
    # "opennebula.vm.running" guard flag, and an optional customization spec.
    # @param disk_move_type [Symbol] disk provisioning strategy for the clone
    # @return clone parameters spec hash
    def spec_hash_clone(disk_move_type)
        # Relocate spec: where and how the clone's disks are placed
        relocate_params = {
            :pool         => get_rp,
            :diskMoveType => disk_move_type
        }

        ds = get_ds
        relocate_params[:datastore] = ds if ds.instance_of? RbVmomi::VIM::Datastore

        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_params)

        # Running flag - prevents spurious poweroff states in the VM
        running_flag_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
            { :extraConfig => [{ :key => "opennebula.vm.running", :value => "no"}] }
        )

        clone_parameters = {
            :location => relocate_spec,
            :powerOn  => false,
            :template => false,
            :config   => running_flag_spec
        }

        cs = get_customization
        clone_parameters[:customization] = cs if cs

        clone_parameters
    end
1271

    
1272
    # Reference the template ("unmanaged") devices of a freshly cloned VM:
    # - writes an opennebula.disk.<DISK_ID> extraConfig key holding the
    #   vCenter device key for each unmanaged disk (matched by SOURCE path
    #   against the source template's disks)
    # - rewrites the MAC address of each cloned NIC to the corresponding
    #   unmanaged NIC from the OpenNebula template
    # Applies both in a single ReconfigVM_Task when there is anything to do.
    # @param template_ref [String] vCenter ref of the source template
    def reference_unmanaged_devices(template_ref)

        extraconfig   = []
        device_change = []

        # Get unmanaged disks in OpenNebula's VM template
        xpath = "TEMPLATE/DISK[OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\"]"
        unmanaged_disks = one_item.retrieve_xmlelements(xpath)

        if !unmanaged_disks.empty?

            # Get vcenter VM disks to know real path of cloned disk
            vcenter_disks = get_vcenter_disks

            # Create an array with the paths of the disks in vcenter template
            template = VCenterDriver::Template.new_from_ref(template_ref, vi_client)
            template_disks = template.get_vcenter_disks
            template_disks_vector = template_disks.map { |d| d[:path_wo_ds] }

            # Try to find index of disks in template disks
            unmanaged_disks.each do |unmanaged_disk|
                index = template_disks_vector.index(unmanaged_disk["SOURCE"])
                if index
                    reference = {}
                    reference[:key]   = "opennebula.disk.#{unmanaged_disk["DISK_ID"]}"
                    reference[:value] = "#{vcenter_disks[index][:key]}"
                    extraconfig << reference
                end
            end
        end

        # Add info for existing nics in template in vm xml
        xpath = "TEMPLATE/NIC[OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\"]"
        unmanaged_nics = one_item.retrieve_xmlelements(xpath)

        if !unmanaged_nics.empty?
            index = 0
            # NOTE: originally each_with_index with a single block parameter,
            # which silently discarded the yielded index; the position is
            # tracked with the manual counter, so plain each is equivalent.
            self["config.hardware.device"].each do |device|
                if is_nic?(device)
                    # Assign the OpenNebula-defined MAC to the cloned NIC
                    device.macAddress = unmanaged_nics[index]["MAC"]
                    device_change << { :device => device, :operation => :edit }
                    index += 1
                end
            end
        end

        # Save in extraconfig the key for unmanaged disks
        if !extraconfig.empty? || !device_change.empty?
            spec = {}
            spec[:extraConfig]  = extraconfig if !extraconfig.empty?
            spec[:deviceChange] = device_change if !device_change.empty?
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        end
    end
1330

    
1331
    def resize_unmanaged_disks
1332
        resize_hash = {}
1333
        disks = []
1334

    
1335
        unmanaged_keys = get_unmanaged_keys
1336
        vc_disks = get_vcenter_disks
1337

    
1338
        # Look for unmanaged disks with original size changed
1339
        xpath = "TEMPLATE/DISK[(OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\") and boolean(ORIGINAL_SIZE) and ORIGINAL_SIZE != SIZE]"
1340
        unmanaged_resized_disks = one_item.retrieve_xmlelements(xpath)
1341

    
1342
        return if unmanaged_resized_disks.empty?
1343

    
1344
        # Cannot resize linked cloned disks
1345
        if one_item["USER_TEMPLATE/VCENTER_LINKED_CLONES"] &&
1346
           one_item["USER_TEMPLATE/VCENTER_LINKED_CLONES"] == "YES"
1347
            raise "Linked cloned disks cannot be resized."
1348
        end
1349

    
1350
        unmanaged_resized_disks.each do |disk|
1351
            vc_disks.each do |vcenter_disk|
1352
                if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
1353
                    device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i
1354

    
1355
                    if device_key == vcenter_disk[:key].to_i
1356

    
1357
                        break if disk["SIZE"].to_i <= disk["ORIGINAL_SIZE"].to_i
1358

    
1359
                        # Edit capacity setting new size in KB
1360
                        d = vcenter_disk[:device]
1361
                        d.capacityInKB = disk["SIZE"].to_i * 1024
1362
                        disks <<   { :device => d, :operation => :edit }
1363
                        break
1364
                    end
1365
                end
1366
            end
1367
        end
1368

    
1369
        if !disks.empty?
1370
            resize_hash[:deviceChange] = disks
1371
            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
1372
        end
1373
    end
1374

    
1375
    # Create volatile disks through Storage DRS: request a placement
    # recommendation for the :add device specs, apply it, then build the
    # extraConfig entries that map each created disk to its
    # opennebula.disk.<DISK_ID> key.
    # @param device_change_spod [Array<Hash>] device specs for disks placed
    #   in a StoragePod
    # @param device_change_spod_ids [Hash] "controllerKey-unitNumber" => DISK_ID
    # @return [Array<Hash>] extraConfig references for the created disks
    # @raise [RuntimeError] when no placement recommendation can be obtained
    def create_storagedrs_disks(device_change_spod, device_change_spod_ids)

        sm = get_sm
        disk_locator = []
        extra_config = []

        device_change_spod.each do |device_spec|
            disk_locator << RbVmomi::VIM.PodDiskLocator(diskId: device_spec[:device].key)
        end

        spec = {}
        spec[:deviceChange] = device_change_spod

        # Disk locator is required for AddDisk
        vmpod_hash = {}
        vmpod_hash[:storagePod] = get_ds
        vmpod_hash[:disk] = disk_locator
        vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)

        # The storage pod selection requires initialize
        spod_hash = {}
        spod_hash[:initialVmConfig] = [ vmpod_config ]
        spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
        storage_spec = RbVmomi::VIM.StoragePlacementSpec(
            type: :reconfigure,
            podSelectionSpec: spod_select,
            vm: self['_ref'],
            configSpec: spec
        )

        # Query a storage placement recommendation
        result = sm.RecommendDatastores(storageSpec: storage_spec) rescue nil

        raise "Could not get placement specification for StoragePod" if result.nil?

        if !result.respond_to?(:recommendations) || result.recommendations.size == 0
            raise "Could not get placement specification for StoragePod"
        end

        # Get recommendation key to be applied
        key = result.recommendations.first.key ||= ''
        raise "Missing Datastore recommendation for StoragePod" if key.empty?

        # Apply recommendation
        sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion

        # Set references in opennebula.disk elements
        device_change_spod.each do |device_spec|
            unit_number    = device_spec[:device].unitNumber
            controller_key = device_spec[:device].controllerKey
            key            = get_vcenter_disk_key(unit_number, controller_key)
            disk_id        = device_change_spod_ids["#{controller_key}-#{unit_number}"]
            reference      = {}
            reference[:key]   = "opennebula.disk.#{disk_id}"
            reference[:value] = key.to_s
            extra_config << reference
        end

        extra_config
    end
1435

    
1436

    
1437
    # Build and apply the full reconfigure spec for this VM: detach disks
    # removed from the OpenNebula template (new VMs only), then set context
    # and VNC extraConfig, CPU, memory, NIC changes and newly attached
    # disks, finishing with a single ReconfigVM_Task.
    def reconfigure
        extraconfig   = []
        device_change = []

        # Unmanaged keys
        unmanaged_keys = get_unmanaged_keys

        # Get disk devices in vm
        vc_disks = get_vcenter_disks

        # Get an array with disk paths in OpenNebula's vm template
        disks_in_onevm_vector = disks_in_onevm(unmanaged_keys, vc_disks)

        # As the original template may have been modified in OpenNebula
        # but not in vcenter, we must detach disks that are in vcenter
        # but not in OpenNebula's vm template
        if is_new?
            device_change, extra_config = device_detach_disks(disks_in_onevm_vector, unmanaged_keys, vc_disks)
            if !device_change.empty?
                spec_hash = {}
                spec_hash[:deviceChange] = device_change if !device_change.empty?
                spec_hash[:extraConfig] = extra_config  if !extra_config.empty?

                # Reconfigure for disks detached from original template
                spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
                @item.ReconfigVM_Task(:spec => spec).wait_for_completion

                # Get disk devices in vm again after reconfigure
                vc_disks = get_vcenter_disks
            end
        end

        # Now reconfigure disks, nics and extraconfig for the VM
        device_change = []

        # get token and context
        extraconfig += extraconfig_context

        # vnc configuration (for config_array hash)
        extraconfig += extraconfig_vnc

        # Set CPU, memory and extraconfig
        num_cpus = one_item["TEMPLATE/VCPU"] || 1

        spec_hash = {
            :numCPUs      => num_cpus.to_i,
            :memoryMB     => one_item["TEMPLATE/MEMORY"],
            :extraConfig  => extraconfig
        }

        # device_change hash (nics)
        device_change += device_change_nics

        # Now attach disks that are in OpenNebula's template but not in vcenter
        # e.g those that has been attached in poweroff
        device_change_ds, device_change_spod, device_change_spod_ids = device_attach_disks(disks_in_onevm_vector, vc_disks)
        device_change += device_change_ds

        # Create volatile disks in StorageDRS if any
        # NOTE(review): this OVERWRITES the context/VNC extraConfig built
        # above with the StorageDRS disk references — verify whether it
        # should append (+=) instead of replace.
        if !device_change_spod.empty?
            spec_hash[:extraConfig] = create_storagedrs_disks(device_change_spod, device_change_spod_ids)
        end

        # Common reconfigure task
        spec_hash[:deviceChange] = device_change
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    end
1505

    
1506
    # Build the guestinfo.opennebula.context extraConfig entry from the VM's
    # TEMPLATE/CONTEXT section, appending the OneGate token when present.
    #
    # Uses the block form of gsub to escape single quotes: the original
    # gsub("'", "\\'") is a Ruby gotcha — in a String replacement "\\'"
    # means "the post-match substring", so quotes were replaced with the
    # remainder of the value instead of an escaped quote. The block form
    # inserts a literal backslash-quote.
    #
    # @return [Array<Hash>] one-element array with the Base64-encoded context
    def extraconfig_context
        context_text = "# Context variables generated by OpenNebula\n"
        one_item.each('TEMPLATE/CONTEXT/*') do |context_element|
            # next if !context_element.text
            context_text += context_element.name + "='" +
                            context_element.text.gsub("'") { "\\'" } + "'\n"
        end

        # OneGate token for this VM, if one was generated (best effort)
        token = File.read(File.join(VAR_LOCATION,
                        'vms',
                        one_item['ID'],
                        'token.txt')).chomp rescue nil

        context_text += "ONEGATE_TOKEN='#{token}'\n" if token

        # context_text
        [
            { :key => "guestinfo.opennebula.context",
              :value => Base64.encode64(context_text) }
        ]
    end
1528

    
1529
    # VNC-related extraConfig entries derived from TEMPLATE/GRAPHICS.
    # @return [Array<Hash>] empty when the VM defines no GRAPHICS section
    def extraconfig_vnc
        return [] unless one_item["TEMPLATE/GRAPHICS"]

        vnc_port   = one_item["TEMPLATE/GRAPHICS/PORT"]
        vnc_listen = one_item["TEMPLATE/GRAPHICS/LISTEN"] || "0.0.0.0"
        vnc_keymap = one_item["TEMPLATE/GRAPHICS/KEYMAP"]

        conf = [ {:key => "remotedisplay.vnc.enabled",:value => "TRUE"},
                {:key => "remotedisplay.vnc.port",   :value => vnc_port},
                {:key => "remotedisplay.vnc.ip",     :value => vnc_listen}]

        # Keymap is optional
        conf << {:key => "remotedisplay.vnc.keymap",
                        :value => vnc_keymap} if vnc_keymap

        conf
    end
1547

    
1548
    # Compute the :deviceChange operations that make the vCenter VM's NICs
    # match the OpenNebula template: remove NICs whose MAC is no longer in
    # the template, attach template NICs not yet present in the VM.
    # @return [Array<Hash>] deviceChange entries for ReconfigVM_Task
    def device_change_nics
        # Final list of changes to be applied in vCenter
        changes = []

        # MAC address => NIC element for every NIC in the OpenNebula xml
        template_nics = {}
        one_item.each("TEMPLATE/NIC") do |nic|
            template_nics[nic["MAC"]] = nic
        end

        # Check nics in VM
        self["config.hardware.device"].each do |device|
            next unless is_nic?(device)

            if template_nics.key?(device.macAddress)
                # Remove nic that is already in the XML to avoid duplicate
                template_nics.delete(device.macAddress)
            else
                # B4897 - It was detached in poweroff, remove it from VM
                changes << {
                    :operation => :remove,
                    :device    => device
                }
            end
        end

        # Attach new nics (template_nics now contains only the interfaces
        # not present in the VM in vCenter)
        template_nics.each_value do |nic|
            changes << calculate_add_nic_spec(nic)
        end

        changes
    end
1583

    
1584
    # Regenerate context when devices are hot plugged (reconfigure).
    # Rewrites the guestinfo.opennebula.context extraConfig entry via a
    # ReconfigVM_Task.
    #
    # The error message was corrected: the original raised
    # "Cannot create snapshot for VM" — a copy-paste from the snapshot code —
    # for what is actually a context reconfigure failure.
    #
    # @raise [RuntimeError] when the reconfigure task fails
    def regenerate_context
        spec_hash = { :extraConfig  => extraconfig_context }
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

        begin
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        rescue Exception => e
            raise "Cannot regenerate context for VM: #{e.message}\n#{e.backtrace}"
        end
    end
1595

    
1596
    # Returns a single :add action to be included in :deviceChange.
    #
    # Builds the deviceChange entry for one OpenNebula NIC, resolving the
    # adapter model, the vCenter network backing (standard or distributed
    # vSwitch) and the optional bandwidth QoS settings.
    #
    # @param nic [XMLElement] TEMPLATE/NIC element (MAC, BRIDGE,
    #   VCENTER_NET_MODEL, VCENTER_NET_REF and optional *_PEAK_BW/_AVG_BW)
    # @return [Hash] {:operation => :add, :device => <VirtualEthernetCard>}
    def calculate_add_nic_spec(nic)
        mac       = nic["MAC"]
        pg_name   = nic["BRIDGE"]
        model     = nic["VCENTER_NET_MODEL"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/MODEL")
        vnet_ref  = nic["VCENTER_NET_REF"]
        backing   = nil

        limit_in  = nic["INBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_PEAK_BW")
        limit_out = nic["OUTBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW")
        limit     = nil

        # OpenNebula values are KB/s; vCenter expects kbps, hence /1024*8
        if limit_in && limit_out
            limit = ([limit_in.to_i, limit_out.to_i].min / 1024) * 8
        end

        rsrv_in  = nic["INBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_AVG_BW")
        rsrv_out = nic["OUTBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_AVG_BW")
        rsrv     = nil

        if rsrv_in || rsrv_out
            rsrv = ([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
        end

        # Locate the vCenter network either by moref or by portgroup name
        network = self["runtime.host"].network.select do |n|
            n._ref == vnet_ref || n.name == pg_name
        end

        network = network.first

        card_num = 1 # start in one, we want the next avaliable id

        @item["config.hardware.device"].each do |dv|
            card_num += 1 if is_nic?(dv)
        end

        nic_card = case model
                        when "virtuale1000", "e1000"
                            RbVmomi::VIM::VirtualE1000
                        when "virtuale1000e", "e1000e"
                            RbVmomi::VIM::VirtualE1000e
                        when "virtualpcnet32", "pcnet32"
                            RbVmomi::VIM::VirtualPCNet32
                        when "virtualsriovethernetcard", "sriovethernetcard"
                            RbVmomi::VIM::VirtualSriovEthernetCard
                        when "virtualvmxnetm", "vmxnetm"
                            RbVmomi::VIM::VirtualVmxnetm
                        when "virtualvmxnet2", "vmnet2"
                            RbVmomi::VIM::VirtualVmxnet2
                        when "virtualvmxnet3", "vmxnet3"
                            RbVmomi::VIM::VirtualVmxnet3
                        else # If none matches, use VirtualE1000
                            RbVmomi::VIM::VirtualE1000
                   end

        if network.class == RbVmomi::VIM::Network
            # Standard vSwitch portgroup backing
            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
                        :deviceName => pg_name,
                        :network    => network)
        else
            # Distributed vSwitch portgroup backing
            port    = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
                        :switchUuid =>
                                network.config.distributedVirtualSwitch.uuid,
                        :portgroupKey => network.key)
            backing =
              RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
                 :port => port)
        end

        card_spec = {
            :key => 0,
            :deviceInfo => {
                :label => "net" + card_num.to_s,
                :summary => pg_name
            },
            :backing     => backing,
            :addressType => mac ? 'manual' : 'generated',
            :macAddress  => mac
        }

        # Bandwidth resource allocation.
        # BUGFIX: the old gate `(limit || rsrv) && (limit > 0)` raised
        # NoMethodError (`nil > 0`) when only a reservation was configured,
        # and `rsrv > limit` crashed when the reservation was unset.
        if (limit && limit > 0) || rsrv
            ra_spec = {}
            # The reservation may not exceed the limit
            rsrv = limit if limit && rsrv && rsrv > limit
            ra_spec[:limit] = limit if limit && limit > 0
            ra_spec[:reservation] = rsrv if rsrv
            ra_spec[:share] =  RbVmomi::VIM.SharesInfo({
                    :level => RbVmomi::VIM.SharesLevel("normal"),
                    :shares => 0
                })
            card_spec[:resourceAllocation] =
               RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
        end

        {
            :operation => :add,
            :device    => nic_card.new(card_spec)
        }
    end
1695

    
1696
     # Returns an array of actions to be included in :deviceChange
1697
    def calculate_add_nic_spec_autogenerate_mac(nic)
1698

    
1699
        pg_name   = nic["BRIDGE"]
1700
        model     = nic["VCENTER_NET_MODEL"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/MODEL")
1701
        vnet_ref  = nic["VCENTER_NET_REF"]
1702
        backing   = nil
1703

    
1704
        limit_in  = nic["INBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_PEAK_BW")
1705
        limit_out = nic["OUTBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW")
1706
        limit     = nil
1707

    
1708
        if limit_in && limit_out
1709
            limit=([limit_in.to_i, limit_out.to_i].min / 1024) * 8
1710
        end
1711

    
1712
        rsrv_in  = nic["INBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_AVG_BW")
1713
        rsrv_out = nic["OUTBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_AVG_BW")
1714
        rsrv     = nil
1715

    
1716
        if rsrv_in || rsrv_out
1717
            rsrv=([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
1718
        end
1719

    
1720
        network = self["runtime.host"].network.select do |n|
1721
            n._ref == vnet_ref || n.name == pg_name
1722
        end
1723

    
1724
        network = network.first
1725

    
1726
        card_num = 1 # start in one, we want the next avaliable id
1727

    
1728
        @item["config.hardware.device"].each do |dv|
1729
            card_num += 1 if is_nic?(dv)
1730
        end
1731

    
1732
        nic_card = case model
1733
                        when "virtuale1000", "e1000"
1734
                            RbVmomi::VIM::VirtualE1000
1735
                        when "virtuale1000e", "e1000e"
1736
                            RbVmomi::VIM::VirtualE1000e
1737
                        when "virtualpcnet32", "pcnet32"
1738
                            RbVmomi::VIM::VirtualPCNet32
1739
                        when "virtualsriovethernetcard", "sriovethernetcard"
1740
                            RbVmomi::VIM::VirtualSriovEthernetCard
1741
                        when "virtualvmxnetm", "vmxnetm"
1742
                            RbVmomi::VIM::VirtualVmxnetm
1743
                        when "virtualvmxnet2", "vmnet2"
1744
                            RbVmomi::VIM::VirtualVmxnet2
1745
                        when "virtualvmxnet3", "vmxnet3"
1746
                            RbVmomi::VIM::VirtualVmxnet3
1747
                        else # If none matches, use VirtualE1000
1748
                            RbVmomi::VIM::VirtualE1000
1749
                   end
1750

    
1751
        if network.class == RbVmomi::VIM::Network
1752
            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
1753
                        :deviceName => pg_name,
1754
                        :network    => network)
1755
        else
1756
            port    = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
1757
                        :switchUuid =>
1758
                                network.config.distributedVirtualSwitch.uuid,
1759
                        :portgroupKey => network.key)
1760
            backing =
1761
              RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
1762
                 :port => port)
1763
        end
1764

    
1765
        card_spec = {
1766
            :key => 0,
1767
            :deviceInfo => {
1768
                :label => "net" + card_num.to_s,
1769
                :summary => pg_name
1770
            },
1771
            :backing     => backing,
1772
            :addressType => 'generated'
1773
        }
1774

    
1775
        if (limit || rsrv) && (limit > 0)
1776
            ra_spec = {}
1777
            rsrv = limit if rsrv > limit
1778
            ra_spec[:limit] = limit if limit
1779
            ra_spec[:reservation] = rsrv if rsrv
1780
            ra_spec[:share] =  RbVmomi::VIM.SharesInfo({
1781
                    :level => RbVmomi::VIM.SharesLevel("normal"),
1782
                    :shares => 0
1783
                })
1784
            card_spec[:resourceAllocation] =
1785
               RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
1786
        end
1787

    
1788
        {
1789
            :operation => :add,
1790
            :device    => nic_card.new(card_spec)
1791
        }
1792
    end
1793

    
1794
    # Add NIC to VM (hotplug).
    # The NIC to attach is the TEMPLATE/NIC element flagged with ATTACH='YES'
    # in the OpenNebula VM document.
    # @raise [RuntimeError] if the vCenter reconfigure task fails
    def attach_nic
        # Extract nic from driver action
        nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first

        begin
            # A new NIC requires a vcenter spec
            # (calculate_add_nic_spec always returns one change, so the old
            # "if not empty" guard and the dead `nic = nil` init are gone)
            spec_hash = { :deviceChange => [calculate_add_nic_spec(nic)] }

            # Reconfigure VM
            spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        rescue StandardError => e
            raise "Cannot attach NIC to VM: #{e.message}\n#{e.backtrace}"
        end
    end
1817

    
1818
    # Detach NIC from VM (hotplug).
    # The NIC to detach is the TEMPLATE/NIC element flagged with ATTACH='YES';
    # it is located in vCenter by MAC address. Returns silently when no
    # matching device exists.
    # @raise [RuntimeError] if the vCenter reconfigure task fails
    def detach_nic
        # Extract nic from driver action
        nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first
        mac = nic["MAC"]

        # Get VM nic element if it has a device with that mac
        # (best-effort: any lookup error is treated as "not found")
        nic_device = @item["config.hardware.device"].find do |device|
            is_nic?(device) && (device.macAddress ==  mac)
        end rescue nil

        return if nic_device.nil? #Silently ignore if nic is not found

        # Remove NIC from VM in the ReconfigVM_Task
        # (explicit hash braces instead of the old implicit hash-in-array)
        spec_hash = {
            :deviceChange => [{
                :operation => :remove,
                :device    => nic_device
            }]
        }

        begin
            @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue StandardError => e
            raise "Cannot detach NIC from VM: #{e.message}\n#{e.backtrace}"
        end
    end
1845

    
1846
    # Detach all nics useful when removing pg and sw so they're not in use
    def detach_all_nics
        # Collect a :remove operation for every NIC device present in vCenter
        nic_removals =
            @item["config.hardware.device"]
            .select { |device| is_nic?(device) }
            .map    { |device| { :operation => :remove, :device => device } }

        # Remove NIC from VM in the ReconfigVM_Task
        spec_hash = { :deviceChange => nic_removals }

        begin
            @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach all NICs from VM: #{e.message}\n#{e.backtrace}"
        end
    end
1866

    
1867
    # Look up a vCenter disk hash by its device key.
    #
    # @param key [#to_i] vCenter device key to search for
    # @param vc_disks [Array<Hash>] disk hashes (each carrying a :key entry)
    # @return [Hash, nil] the first matching disk hash, or nil when not found
    def get_device_filename_and_ds_from_key(key, vc_disks)
        # `find` replaces the old `select{}.first rescue nil`, removing the
        # error-hiding inline rescue while keeping the same nil-on-miss result
        vc_disks.find { |d| d[:key].to_i == key.to_i }
    end
1871

    
1872
    # Build the list of disk paths (without the datastore prefix) that the
    # OpenNebula VM document currently declares.
    #
    # Unmanaged disks (tracked via opennebula.disk.<ID> extraConfig keys) are
    # resolved to their vCenter path; managed disks use the computed image name.
    def disks_in_onevm(unmanaged_keys, vc_disks)
        paths = []

        one_item.retrieve_xmlelements("TEMPLATE/DISK").each do |disk|
            unmanaged_ref = "opennebula.disk.#{disk["DISK_ID"]}"

            if unmanaged_keys.key?(unmanaged_ref)
                device_key = unmanaged_keys[unmanaged_ref].to_i
                disk_hash  = get_device_filename_and_ds_from_key(device_key, vc_disks)
                paths << disk_hash[:path_wo_ds] if disk_hash
            else
                paths << "#{VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)}"
            end
        end

        paths
    end
1890

    
1891
    def device_attach_disks(onevm_disks_vector, vc_disks)
1892

    
1893
        disks = one_item.retrieve_xmlelements("TEMPLATE/DISK")
1894

    
1895
        vc_disks.each do |d|
1896
            index = onevm_disks_vector.index(d[:path_wo_ds])
1897
            if index
1898
                disks.delete_at(index)
1899
                onevm_disks_vector.delete_at(index)
1900
            end
1901
        end
1902

    
1903
        return [],[],{} if disks.empty?
1904

    
1905
        attach_disk_array = []
1906
        attach_spod_array = []
1907
        attach_spod_disk_info = {}
1908

    
1909
        position = 0
1910
        disks.each do |disk|
1911
            storpod = disk["VCENTER_DS_REF"].start_with?('group-')
1912
            if storpod
1913
                spec = calculate_add_disk_spec(disk, position)
1914
                attach_spod_array << spec
1915
                unit_ctrl = "#{spec[:device].controllerKey}-#{spec[:device].unitNumber}"
1916
                attach_spod_disk_info[unit_ctrl] = disk["DISK_ID"]
1917
            else
1918
                attach_disk_array << calculate_add_disk_spec(disk, position)
1919
            end
1920

    
1921
            position += 1
1922
        end
1923

    
1924
        return attach_disk_array, attach_spod_array, attach_spod_disk_info
1925
    end
1926

    
1927
    # Compute the specs needed to detach vCenter disks that are no longer
    # declared in the OpenNebula document.
    #
    # @param onevm_disks_vector [Array<String>] disk paths declared in OpenNebula
    # @param unmanaged_keys [Hash] opennebula.disk.<ID> => device-key entries
    # @param vc_disks [Array<Hash>] disks currently attached in vCenter
    # @return [(Array, Array)] detach specs and extraConfig entries that blank
    #   the now-stale opennebula.disk.* references
    # @raise [RuntimeError] if the OpenNebula ImagePool cannot be retrieved
    def device_detach_disks(onevm_disks_vector, unmanaged_keys, vc_disks)
        detach_disk_array = []
        extra_config      = []
        ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
        if ipool.respond_to?(:message)
            raise "Could not get OpenNebula ImagePool: #{ipool.message}"
        end

        vc_disks.each do |d|
            if !onevm_disks_vector.index(d[:path_wo_ds])

                # If disk to be detached is not persistent detach and destroy it
                # (:fileOperation => :destroy also deletes the vmdk file)
                persistent = VCenterDriver::VIHelper.find_persistent_image_by_source(d[:path_wo_ds], ipool)
                if !persistent
                    detach_disk_array << {
                        :fileOperation => :destroy,
                        :operation => :remove,
                        :device    => d[:device]
                    }
                end

                # Remove reference opennebula.disk if exist
                # (setting an empty value clears the extraConfig entry)
                unmanaged_keys.each do |key, value|
                    if value.to_i == d[:key].to_i
                        reference = {}
                        reference[:key]   = key
                        reference[:value] = ""
                        extra_config << reference
                        break
                    end
                end
            end
        end

        return detach_disk_array, extra_config
    end
1963

    
1964
    # Attach DISK to VM (hotplug)
    #
    # Attaches the TEMPLATE/DISK element flagged with ATTACH='YES'. Regular
    # datastores are reconfigured directly; StoragePod ('group-*') datastores
    # go through a StorageDRS placement recommendation first and then record
    # the assigned device key as an opennebula.disk.<ID> extraConfig entry.
    # @raise [RuntimeError] if the disk is already attached, no spec could be
    #   generated, placement fails, or a reconfigure task fails
    def attach_disk
        # TODO position? and disk size for volatile?

        spec_hash = {}
        disk = nil
        device_change = []

        # Extract unmanaged_keys
        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Extract disk from driver action
        disk = one_item.retrieve_xmlelements("TEMPLATE/DISK[ATTACH='YES']").first

        # Check if we're dealing with a StoragePod SYSTEM ds
        storpod = disk["VCENTER_DS_REF"].start_with?('group-')

        # Check if disk being attached is already connected to the VM
        raise "DISK is already connected to VM" if disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        # Generate vCenter spec and reconfigure VM
        device_change << calculate_add_disk_spec(disk)
        raise "Could not generate DISK spec" if device_change.empty?

        spec_hash[:deviceChange] = device_change
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

        begin
            if storpod
                #Ask for StorageDRS recommendation to reconfigure VM (AddDisk)
                sm = get_sm

                # Disk id is -1 as I don't know what disk id is going to be set
                disk_locator = [ RbVmomi::VIM.PodDiskLocator(diskId: -1) ]

                # Disk locator is required for AddDisk
                vmpod_hash = {}
                vmpod_hash[:storagePod] = get_ds
                vmpod_hash[:disk] = disk_locator
                vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)

                # The storage pod selection requires initialize
                spod_hash = {}
                spod_hash[:initialVmConfig] = [ vmpod_config ]
                spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
                storage_spec = RbVmomi::VIM.StoragePlacementSpec(
                    type: :reconfigure,
                    podSelectionSpec: spod_select,
                    vm: self['_ref'],
                    configSpec: spec
                )

                # Query a storage placement recommendation
                # (best-effort: any API error is mapped to the raise below)
                result = sm.RecommendDatastores(storageSpec: storage_spec) rescue nil

                raise "Could not get placement specification for StoragePod" if result.nil?

                if !result.respond_to?(:recommendations) || result.recommendations.size == 0
                    raise "Could not get placement specification for StoragePod"
                end

                # Get recommendation key to be applied
                key = result.recommendations.first.key ||= ''
                raise "Missing Datastore recommendation for StoragePod" if key.empty?

                # Apply recommendation
                sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion

                # Add the key for the volatile disk to the unmanaged opennebula.disk.id variables
                unit_number    = spec_hash[:deviceChange][0][:device].unitNumber
                controller_key = spec_hash[:deviceChange][0][:device].controllerKey
                key = get_vcenter_disk_key(unit_number, controller_key)
                spec_hash = {}
                reference = {}
                reference[:key]   = "opennebula.disk.#{disk["DISK_ID"]}"
                reference[:value] = key.to_s
                spec_hash[:extraConfig] = [ reference ]
                @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
            else
                @item.ReconfigVM_Task(:spec => spec).wait_for_completion
            end
        rescue Exception => e
            raise "Cannot attach DISK to VM: #{e.message}\n#{e.backtrace}"
        end
    end
2050

    
2051
    # Detach persistent disks to avoid incidental destruction
    def detach_persistent_disks(vm)
        ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
        if ipool.respond_to?(:message)
            raise "Could not get OpenNebula ImagePool: #{ipool.message}"
        end

        # Collect a :remove spec for every disk/cdrom whose backing file maps
        # to a persistent OpenNebula image
        removals = []
        vm.config.hardware.device.each do |disk|
            next unless is_disk_or_cdrom?(disk)

            # Strip the "[datastore] " prefix to get the image source path
            source = disk.backing.fileName.sub(/^\[(.*?)\] /, "")
            next unless VCenterDriver::VIHelper.find_persistent_image_by_source(source, ipool)

            removals << { :operation => :remove, :device => disk }
        end

        return nil if removals.empty?

        spec_hash = { :deviceChange => removals }

        begin
            vm.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach all DISKs from VM: #{e.message}\n#{e.backtrace}"
        end
    end
2083

    
2084

    
2085
    # Detach DISK from VM
    #
    # Locates the vCenter device backing the given OpenNebula DISK element,
    # removes it via ReconfigVM_Task, and blanks its opennebula.disk.<ID>
    # extraConfig reference when the disk was unmanaged. A no-op (returning
    # nil, "") when the device is not attached.
    #
    # @param disk [XMLElement] the OpenNebula TEMPLATE/DISK element
    # @return [(String, String)] the datastore moref and the disk path
    #   (without datastore prefix); ds_ref is nil when nothing was detached
    # @raise [RuntimeError] if the vCenter reconfigure task fails
    def detach_disk(disk)
        spec_hash = {}
        img_path = ""
        ds_ref = nil

        # Extract unmanaged disk keys
        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Get vcenter device to be detached and remove if found
        device = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        if device
            img_path << device[:path_wo_ds]

            # Unmanaged disk: clear its extraConfig reference as well
            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                reference = {}
                reference[:key]   = "opennebula.disk.#{disk["DISK_ID"]}"
                reference[:value] = ""
                spec_hash[:extraConfig] = [ reference ]
            end

            ds_ref = device[:datastore]._ref

            # Generate vCenter spec and reconfigure VM
            spec_hash[:deviceChange] = [{
                :operation => :remove,
                :device => device[:device]
            }]

            begin
                @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
            rescue Exception => e
                raise "Cannot detach DISK from VM: #{e.message}\n#{e.backtrace}"
            end
        end

        return ds_ref, img_path
    end
2125

    
2126
    # Get vcenter device representing DISK object (hotplug)
    #
    # Matches first by the unmanaged opennebula.disk.<ID> device key, then by
    # the expected image path.
    #
    # @param disk [XMLElement] the OpenNebula TEMPLATE/DISK element
    # @param unmanaged_keys [Hash] opennebula.disk.<ID> => device-key entries
    # @param vc_disks [Array<Hash>] disks currently attached in vCenter
    # @return [Hash, nil] the vCenter disk hash, or nil when not attached
    def disk_attached_to_vm(disk, unmanaged_keys, vc_disks)
        device_found = nil
        disk_id = disk["DISK_ID"]
        unmanaged_key = "opennebula.disk.#{disk_id}"

        # PERF: the expected image name is loop-invariant; the previous code
        # recomputed it for every vCenter disk in the loop below.
        img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)

        vc_disks.each do |d|
            # Check if we are dealing with the unmanaged disks present in the template when cloned
            if unmanaged_keys.key?(unmanaged_key) && d[:key] == unmanaged_keys[unmanaged_key].to_i
                device_found = d
                break
            end

            # Alright let's see if we can find other devices only with the expected image name
            if d[:path_wo_ds] == "#{img_name}"
                device_found = d
                break
            end
        end

        return device_found
    end
2151

    
2152
    # Build the :add deviceChange entry for one OpenNebula DISK element.
    #
    # CDROM disks become VirtualCdrom devices on a free IDE controller (only
    # allowed while powered off); everything else becomes a VirtualDisk on a
    # SCSI controller. For StoragePod targets the backing fileName is left
    # empty and :fileOperation => :create is set so StorageDRS creates it.
    #
    # @param disk [XMLElement] the OpenNebula TEMPLATE/DISK element
    # @param position [Integer] index used to pick the unit number and to
    #   make the temporary negative device key unique
    # @return [Hash] a deviceChange entry ({:operation => :add, ...})
    # @raise [RuntimeError] when adding a CDROM while the VM is powered on
    def calculate_add_disk_spec(disk, position=0)
        img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'],instantiated_as_persistent?)
        type     = disk["TYPE"]
        size_kb  = disk["SIZE"].to_i * 1024

        if type == "CDROM"
            # CDROM drive will be found in the IMAGE DS
            ds_ref   = disk["VCENTER_DS_REF"]
            ds       = VCenterDriver::Storage.new_from_ref(ds_ref, @vi_client)
            ds_name  = ds['name']

            # CDROM can only be added when the VM is in poweroff state
            vmdk_backing = RbVmomi::VIM::VirtualCdromIsoBackingInfo(
                :datastore => ds.item,
                :fileName  => "[#{ds_name}] #{img_name}"
            )

            if @item["summary.runtime.powerState"] != "poweredOff"
                raise "The CDROM image can only be added as an IDE device "\
                      "when the VM is in the powered off state"
            end

            controller, unit_number = find_free_ide_controller(position)

            device = RbVmomi::VIM::VirtualCdrom(
                :backing       => vmdk_backing,
                :key           => -1,
                :controllerKey => controller.key,
                :unitNumber    => unit_number,

                :connectable => RbVmomi::VIM::VirtualDeviceConnectInfo(
                    :startConnected    => true,
                    :connected         => true,
                    :allowGuestControl => true
                )
            )

            return {
                :operation => :add,
                :device => device
            }

        else
            # TYPE is regular disk (not CDROM)

            controller, unit_number = find_free_controller(position)

            # StoragePod (StorageDRS) datastores have 'group-' morefs
            storpod = disk["VCENTER_DS_REF"].start_with?('group-')
            if storpod
                # Empty fileName: StorageDRS will pick the datastore and
                # create the file (see :fileOperation => :create below)
                vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                  :diskMode  => 'persistent',
                  :fileName  => ""
                )
            else
                ds           = get_effective_ds(disk)
                ds_name      = ds['name']
                vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                  :datastore => ds.item,
                  :diskMode  => 'persistent',
                  :fileName  => "[#{ds_name}] #{img_name}"
                )
            end

            # Negative key = placeholder; vCenter assigns the real key
            device = RbVmomi::VIM::VirtualDisk(
              :backing       => vmdk_backing,
              :capacityInKB  => size_kb,
              :controllerKey => controller.key,
              :key           => (-1 - position),
              :unitNumber    => unit_number
            )

            config = {
               :operation => :add,
               :device    => device
            }

            # For StorageDRS vCenter must create the file
            config[:fileOperation] = :create if storpod

            return config
        end
    end
2234

    
2235
    def resize_unmanaged_disk(disk, new_size)
2236

    
2237
        resize_hash = {}
2238
        disks       = []
2239
        found       = false
2240

    
2241
        unmanaged_keys = get_unmanaged_keys
2242
        vc_disks = get_vcenter_disks
2243

    
2244
        vc_disks.each do |vcenter_disk|
2245
            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
2246
                device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i
2247

    
2248
                if device_key == vcenter_disk[:key].to_i
2249

    
2250
                    if disk["SIZE"].to_i <= disk["ORIGINAL_SIZE"].to_i
2251
                        raise "Disk size cannot be shrinked."
2252
                    end
2253

    
2254
                    # Edit capacity setting new size in KB
2255
                    d = vcenter_disk[:device]
2256
                    d.capacityInKB = disk["SIZE"].to_i * 1024
2257
                    disks <<   { :device => d, :operation => :edit }
2258

    
2259
                    found = true
2260
                    break
2261
                end
2262
            end
2263
        end
2264

    
2265
        raise "Unmanaged disk could not be found to apply resize operation." if !found
2266

    
2267
        if !disks.empty?
2268
            resize_hash[:deviceChange] = disks
2269
            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
2270
        else
2271
            raise "Device was not found after attaching it to VM in poweroff."
2272
        end
2273
    end
2274

    
2275
    # Resize a managed disk to new_size (in MB, converted to KB below).
    #
    # If the disk is not yet attached in vCenter (resize requested while the
    # VM was powered off), the disk is first attached via a reconfigure and
    # then resized.
    #
    # @param disk [XMLElement] OpenNebula TEMPLATE/DISK element
    # @param new_size [#to_i] new capacity
    # @raise [RuntimeError] if the device cannot be found after attaching
    def resize_managed_disk(disk, new_size)

        resize_hash = {}

        unmanaged_keys = get_unmanaged_keys
        vc_disks       = get_vcenter_disks

        # Get vcenter device to be detached and remove if found
        device         = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        # If the disk is being attached in poweroff, reconfigure the VM
        if !device
            spec_hash     = {}
            device_change = []

            # Get an array with disk paths in OpenNebula's vm template
            disks_in_onevm_vector = disks_in_onevm(unmanaged_keys, vc_disks)

            device_change_ds, device_change_spod, device_change_spod_ids = device_attach_disks(disks_in_onevm_vector, vc_disks)
            device_change += device_change_ds

            # Create volatile disks in StorageDRS if any
            if !device_change_spod.empty?
                spec_hash[:extraConfig] = create_storagedrs_disks(device_change_spod, device_change_spod_ids)
            end

            # Common reconfigure task
            spec_hash[:deviceChange] = device_change
            spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion

            # Check again if device has now been attached
            unmanaged_keys = get_unmanaged_keys
            vc_disks       = get_vcenter_disks
            device         = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

            if !device
                raise "Device was not found after attaching it to VM in poweroff."
            end
        end

        # Resize disk now that we know that it's part of the VM
        if device
            vcenter_disk = device[:device]
            vcenter_disk.capacityInKB = new_size.to_i * 1024
            resize_hash[:deviceChange] = [{
                :operation => :edit,
                :device => vcenter_disk
            }]

            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
        end
    end
2328

    
2329
    # Whether this VM has at least one snapshot registered in vCenter.
    def has_snapshots?
        root = self['rootSnapshot']
        root && !root.empty?
    end
2332

    
2333
    # True when the VM records a CLONING_TEMPLATE_ID, i.e. it was
    # instantiated as persistent from a template.
    def instantiated_as_persistent?
        !!one_item["TEMPLATE/CLONING_TEMPLATE_ID"]
    rescue StandardError
        false #one_item may not be retrieved if deploy_id hasn't been set
    end
2340

    
2341
    def find_free_ide_controller(position=0)
2342

    
2343
        free_ide_controllers = []
2344
        ide_schema           = {}
2345

    
2346
        used_numbers      = []
2347
        available_numbers = []
2348

    
2349
        @item["config.hardware.device"].each do |dev|
2350
            if dev.is_a? RbVmomi::VIM::VirtualIDEController
2351
                if ide_schema[dev.key].nil?
2352
                    ide_schema[dev.key] = {}
2353
                end
2354

    
2355
                ide_schema[dev.key][:device] = dev
2356
            end
2357

    
2358
            next if dev.class != RbVmomi::VIM::VirtualCdrom
2359
            used_numbers << dev.unitNumber
2360
        end
2361

    
2362
        2.times do |ide_id|
2363
            available_numbers << ide_id if used_numbers.grep(ide_id).length <= 0
2364
        end
2365

    
2366
        ide_schema.keys.each do |controller|
2367
            free_ide_controllers << ide_schema[controller][:device].deviceInfo.label
2368
        end
2369

    
2370
        if free_ide_controllers.empty?
2371
            raise "There are no free IDE controllers to connect this CDROM device"
2372
        end
2373

    
2374
        available_controller_label = free_ide_controllers[0]
2375

    
2376
        controller = nil
2377

    
2378
        @item['config.hardware.device'].each do |device|
2379
            if device.deviceInfo.label == available_controller_label
2380
                controller = device
2381
                break
2382
            end
2383
        end
2384

    
2385
        new_unit_number = available_numbers.sort[position]
2386

    
2387
        return controller, new_unit_number
2388
    end
2389

    
2390
    def find_free_controller(position=0)
2391
        free_scsi_controllers = []
2392
        scsi_schema           = {}
2393

    
2394
        used_numbers      = []
2395
        available_numbers = []
2396

    
2397
        @item["config.hardware.device"].each do |dev|
2398
            if dev.is_a? RbVmomi::VIM::VirtualSCSIController
2399
                if scsi_schema[dev.key].nil?
2400
                    scsi_schema[dev.key] = {}
2401
                end
2402

    
2403
                used_numbers << dev.scsiCtlrUnitNumber
2404
                scsi_schema[dev.key][:device] = dev
2405
            end
2406

    
2407
            next if dev.class != RbVmomi::VIM::VirtualDisk
2408
            used_numbers << dev.unitNumber
2409
        end
2410

    
2411
        15.times do |scsi_id|
2412
            available_numbers << scsi_id if used_numbers.grep(scsi_id).length <= 0
2413
        end
2414

    
2415
        scsi_schema.keys.each do |controller|
2416
            free_scsi_controllers << scsi_schema[controller][:device].deviceInfo.label
2417
        end
2418

    
2419
        if free_scsi_controllers.length > 0
2420
            available_controller_label = free_scsi_controllers[0]
2421
        else
2422
            add_new_scsi(scsi_schema)
2423
            return find_free_controller
2424
        end
2425

    
2426
        controller = nil
2427

    
2428
        @item['config.hardware.device'].each do |device|
2429
            if device.deviceInfo.label == available_controller_label
2430
                controller = device
2431
                break
2432
            end
2433
        end
2434

    
2435
        new_unit_number = available_numbers.sort[position]
2436

    
2437
        return controller, new_unit_number
2438
    end
2439

    
2440
    # Adds a new LSI Logic SCSI controller to the VM through a vCenter
    # reconfigure task.
    #
    # @param scsi_schema [Hash] controller key => {:device => controller}
    #        map, as built by find_free_controller
    # @return [String, nil] the deviceInfo.label of the new controller,
    #         or nil if it cannot be located after the reconfigure
    # @raise [RuntimeError] if 4 controllers already exist
    def add_new_scsi(scsi_schema)
        controller = nil

        if scsi_schema.keys.length >= 4
            raise "Cannot add a new controller, maximum is 4."
        end

        scsi_key    = 0
        scsi_number = 0

        # Use the next key / bus number after the highest one in use
        if scsi_schema.keys.length > 0 && scsi_schema.keys.length < 4
            scsi_key    = scsi_schema.keys.sort[-1] + 1
            scsi_number = scsi_schema[scsi_schema.keys.sort[-1]][:device].busNumber + 1
        end

        controller_device = RbVmomi::VIM::VirtualLsiLogicController(
            :key       => scsi_key,
            :busNumber => scsi_number,
            :sharedBus => :noSharing
        )

        device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
            :device    => controller_device,
            :operation => :add
        )

        vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
            :deviceChange => [device_config_spec]
        )

        # Synchronous reconfigure: blocks until vCenter finishes
        @item.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion

        # Look the new controller up again to fetch the label vCenter
        # assigned to it
        @item["config.hardware.device"].each do |device|
            if device.class == RbVmomi::VIM::VirtualLsiLogicController &&
                device.key == scsi_key

                controller = device.deviceInfo.label
            end
        end

        return controller
    end
2482

    
2483
    # Create a snapshot for the VM
    #
    # @param snap_id [Integer, String] OpenNebula snapshot id, used as
    #        the vCenter snapshot name
    # @param snap_name [String] human readable name, stored in the
    #        snapshot description
    # @return [Integer, String] snap_id
    # @raise [RuntimeError] if the snapshot cannot be created
    def create_snapshot(snap_id, snap_name)
        snapshot_hash = {
            :name        => snap_id,
            :description => "OpenNebula Snapshot: #{snap_name}",
            :memory      => true,
            :quiesce     => true
        }

        vcenter_version = @vi_client.vim.serviceContent.about.apiVersion rescue nil

        if vcenter_version != "5.5"
            begin
                @item.CreateSnapshot_Task(snapshot_hash).wait_for_completion
            rescue StandardError => e
                # Rescue StandardError, not Exception: rescuing Exception
                # would also swallow signals and system-level errors
                raise "Cannot create snapshot for VM: #{e.message}\n#{e.backtrace}"
            end
        else
            # B#5045 - If vcenter is 5.5 the snapshot may take longer than
            # 15 minutes and it does not report that it has finished using
            # wait_for_completion so we use an active wait instead with a
            # timeout of 1440 minutes = 24 hours
            @item.CreateSnapshot_Task(snapshot_hash)

            snapshot_created  = false
            elapsed_minutes   = 0

            until snapshot_created || elapsed_minutes == 1440
                if !!@item['snapshot']
                    current_snapshot = @item['snapshot.currentSnapshot'] rescue nil
                    snapshot_found = find_snapshot_in_list(@item['snapshot.rootSnapshotList'], snap_id)
                    snapshot_created = !!snapshot_found && !!current_snapshot && current_snapshot._ref == snapshot_found._ref
                end

                # Don't sleep one extra minute once the snapshot is found
                break if snapshot_created

                sleep(60)
                elapsed_minutes += 1
            end
        end

        return snap_id
    end
2523

    
2524
    # Revert to a VM snapshot
    #
    # @param snap_id [Integer, String] OpenNebula snapshot id (vCenter
    #        snapshot name)
    # @return [nil] nil when the snapshot does not exist
    # @raise [RuntimeError] if the revert operation fails
    def revert_snapshot(snap_id)

        snapshot_list = self["snapshot.rootSnapshotList"]
        snapshot = find_snapshot_in_list(snapshot_list, snap_id)

        return nil if !snapshot

        begin
            revert_snapshot_hash = { :_this => snapshot }
            snapshot.RevertToSnapshot_Task(revert_snapshot_hash).wait_for_completion
        rescue StandardError => e
            # Rescue StandardError, not Exception: rescuing Exception
            # would also swallow signals and system-level errors
            raise "Cannot revert snapshot of VM: #{e.message}\n#{e.backtrace}"
        end
    end
2539

    
2540
    # Delete VM snapshot (children snapshots are kept)
    #
    # @param snap_id [Integer, String] OpenNebula snapshot id (vCenter
    #        snapshot name)
    # @return [nil] nil when the snapshot does not exist
    # @raise [RuntimeError] if the delete operation fails
    def delete_snapshot(snap_id)

        snapshot_list = self["snapshot.rootSnapshotList"]
        snapshot = find_snapshot_in_list(snapshot_list, snap_id)

        return nil if !snapshot

        begin
            delete_snapshot_hash = {
                :_this => snapshot,
                :removeChildren => false
            }
            snapshot.RemoveSnapshot_Task(delete_snapshot_hash).wait_for_completion
        rescue StandardError => e
            # Rescue StandardError, not Exception: rescuing Exception
            # would also swallow signals and system-level errors
            raise "Cannot delete snapshot of VM: #{e.message}\n#{e.backtrace}"
        end
    end
2558

    
2559
    # Recursively look up a snapshot by name in a snapshot tree list.
    #
    # @param list [Array, nil] list of snapshot-tree nodes (each responds
    #        to #name, #snapshot and #childSnapshotList)
    # @param snap_id [Integer, String] snapshot name to look for (matched
    #        against node.name as a string)
    # @return [Object, nil] the node's snapshot, or nil when not found
    def find_snapshot_in_list(list, snap_id)
        # Explicit nil guard instead of the former `end rescue nil`
        # modifier, which silently swallowed every StandardError
        return nil unless list

        list.each do |i|
            if i.name == snap_id.to_s
                return i.snapshot
            elsif !i.childSnapshotList.empty?
                snap = find_snapshot_in_list(i.childSnapshotList, snap_id)
                return snap if snap
            end
        end

        nil
    end
2571

    
2572
    ############################################################################
2573
    # actions
2574
    ############################################################################
2575

    
2576
    # Gracefully shuts the guest down, falling back to a hard power off
    # if the VM is still running after VM_SHUTDOWN_TIMEOUT seconds.
    def shutdown
        begin
            @item.ShutdownGuest

            # Poll until the VM reports poweredOff, up to the timeout
            (0..VM_SHUTDOWN_TIMEOUT).each do
                break if @item.runtime.powerState == "poweredOff"
                sleep 1
            end
        rescue
            # Ignore ShutdownGuest errors: the guest may not have
            # open-vm-tools installed
        end

        # Guest did not power off within the grace period: force it
        poweroff_hard if @item.runtime.powerState != "poweredOff"
    end
2593

    
2594
    def destroy
2595
        @item.Destroy_Task.wait_for_completion
2596
    end
2597

    
2598
    def mark_as_template
2599
        @item.MarkAsTemplate
2600
    end
2601

    
2602
    def reset
2603
        @item.ResetVM_Task.wait_for_completion
2604
    end
2605

    
2606
    def suspend
2607
        @item.SuspendVM_Task.wait_for_completion
2608
    end
2609

    
2610
    def reboot
2611
        @item.RebootGuest
2612
    end
2613

    
2614
    def poweron
2615
        ## If need in the future, you can power on VMs from datacenter
2616
        ## dc = get_dc
2617
        ## dc.power_on_vm(@item)
2618
        @item.PowerOnVM_Task.wait_for_completion
2619
    end
2620

    
2621
    def is_powered_on?
2622
        return @item.runtime.powerState == "poweredOn"
2623
    end
2624

    
2625
    def poweroff_hard
2626
        @item.PowerOffVM_Task.wait_for_completion
2627
    end
2628

    
2629
    def remove_all_snapshots
2630
        @item.RemoveAllSnapshots_Task.wait_for_completion
2631
    end
2632

    
2633
    # Records the running flag for OpenNebula in the VM's extraConfig
    # under the "opennebula.vm.running" key.
    #
    # @param state [Boolean] true writes "yes", anything falsy writes "no"
    def set_running(state)
        running_value = state ? "yes" : "no"

        spec_hash = {
            :extraConfig => [
                { :key => "opennebula.vm.running", :value => running_value }
            ]
        }

        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    end
2645

    
2646
    ############################################################################
2647
    # monitoring
2648
    ############################################################################
2649

    
2650
    # monitor function used when VMM poll action is called
    #
    # Fills @monitor with CPU/memory usage and accumulated network/disk
    # counters read from vCenter's performance manager, collects the
    # guest IP addresses into @guest_ip_addresses, and sets @state from
    # the runtime power state. Returns early unless the VM is active.
    def monitor_poll_vm
        reset_monitor

        @state = state_to_c(self["summary.runtime.powerState"])

        if @state != VM_STATE[:active]
            reset_monitor
            return
        end

        cpuMhz = self["runtime.host.summary.hardware.cpuMhz"].to_f

        # hostMemoryUsage scaled by 1024 (presumably MB -> KB; confirm
        # units against the vSphere API)
        @monitor[:used_memory] = self["summary.quickStats.hostMemoryUsage"] * 1024

        # CPU usage as a percentage relative to the host core frequency
        used_cpu = self["summary.quickStats.overallCpuUsage"].to_f / cpuMhz
        used_cpu = (used_cpu * 100).to_s
        @monitor[:used_cpu]  = sprintf('%.2f', used_cpu).to_s

        # Check for negative values
        @monitor[:used_memory] = 0 if @monitor[:used_memory].to_i < 0
        @monitor[:used_cpu]    = 0 if @monitor[:used_cpu].to_i < 0

        # Collect every IP address reported by the guest tools
        guest_ip_addresses = []
        self["guest.net"].each do |net|
            net.ipConfig.ipAddress.each do |ip|
                guest_ip_addresses << ip.ipAddress
            end if net.ipConfig && net.ipConfig.ipAddress
        end if self["guest.net"]

        @guest_ip_addresses = guest_ip_addresses.join(',')

        pm = self['_connection'].serviceInstance.content.perfManager

        provider = pm.provider_summary(@item)

        refresh_rate = provider.refreshRate

        # Only query performance counters for VMs known to OpenNebula
        if get_vm_id
            stats = {}

            if (one_item["MONITORING/LAST_MON"] && one_item["MONITORING/LAST_MON"].to_i != 0 )
                #Real time data stores max 1 hour. 1 minute has 3 samples
                interval = (Time.now.to_i - one_item["MONITORING/LAST_MON"].to_i)

                #If last poll was more than hour ago get 3 minutes,
                #else calculate how many samples since last poll
                samples =  interval > 3600 ? 9 : (interval / refresh_rate) + 1
                max_samples = samples > 0 ? samples : 1

                # Best-effort: any retrieval error yields empty stats
                stats = pm.retrieve_stats(
                    [@item],
                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
                    'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
                    'virtualDisk.read','virtualDisk.write'],
                    {interval:refresh_rate, max_samples: max_samples}
                ) rescue {}
            else
                # First poll, get at least latest 3 minutes = 9 samples
                stats = pm.retrieve_stats(
                    [@item],
                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
                    'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
                    'virtualDisk.read','virtualDisk.write'],
                    {interval:refresh_rate, max_samples: 9}
                ) rescue {}
            end

            if !stats.empty? && !stats.first[1][:metrics].empty?
                metrics = stats.first[1][:metrics]

                # Sum only positive samples for each counter
                nettx_kbpersec = 0
                if metrics['net.transmitted']
                    metrics['net.transmitted'].each { |sample|
                        nettx_kbpersec += sample if sample > 0
                    }
                end

                netrx_kbpersec = 0
                if metrics['net.bytesRx']
                    metrics['net.bytesRx'].each { |sample|
                        netrx_kbpersec += sample if sample > 0
                    }
                end

                read_kbpersec = 0
                if metrics['virtualDisk.read']
                    metrics['virtualDisk.read'].each { |sample|
                        read_kbpersec += sample if sample > 0
                    }
                end

                read_iops = 0
                if metrics['virtualDisk.numberReadAveraged']
                    metrics['virtualDisk.numberReadAveraged'].each { |sample|
                        read_iops += sample if sample > 0
                    }
                end

                write_kbpersec = 0
                if metrics['virtualDisk.write']
                    metrics['virtualDisk.write'].each { |sample|
                        write_kbpersec += sample if sample > 0
                    }
                end

                write_iops = 0
                if metrics['virtualDisk.numberWriteAveraged']
                    metrics['virtualDisk.numberWriteAveraged'].each { |sample|
                        write_iops += sample if sample > 0
                    }
                end
            else
                nettx_kbpersec = 0
                netrx_kbpersec = 0
                read_kbpersec  = 0
                read_iops      = 0
                write_kbpersec = 0
                write_iops     = 0
            end

            # Accumulate values if present
            previous_nettx = @one_item && @one_item["MONITORING/NETTX"] ? @one_item["MONITORING/NETTX"].to_i : 0
            previous_netrx = @one_item && @one_item["MONITORING/NETRX"] ? @one_item["MONITORING/NETRX"].to_i : 0
            previous_diskrdiops = @one_item && @one_item["MONITORING/DISKRDIOPS"] ? @one_item["MONITORING/DISKRDIOPS"].to_i : 0
            previous_diskwriops = @one_item && @one_item["MONITORING/DISKWRIOPS"] ? @one_item["MONITORING/DISKWRIOPS"].to_i : 0
            previous_diskrdbytes = @one_item && @one_item["MONITORING/DISKRDBYTES"] ? @one_item["MONITORING/DISKRDBYTES"].to_i : 0
            previous_diskwrbytes = @one_item && @one_item["MONITORING/DISKWRBYTES"] ? @one_item["MONITORING/DISKWRBYTES"].to_i : 0

            # sample * 1024 * refresh_rate -> bytes since last poll
            # (samples presumably KB/s averages; confirm counter units)
            @monitor[:nettx] = previous_nettx + (nettx_kbpersec * 1024 * refresh_rate).to_i
            @monitor[:netrx] = previous_netrx + (netrx_kbpersec * 1024 * refresh_rate).to_i

            @monitor[:diskrdiops]  = previous_diskrdiops + read_iops
            @monitor[:diskwriops]  = previous_diskwriops + write_iops
            @monitor[:diskrdbytes] = previous_diskrdbytes + (read_kbpersec * 1024 * refresh_rate).to_i
            @monitor[:diskwrbytes] = previous_diskwrbytes + (write_kbpersec * 1024 * refresh_rate).to_i
        end
    end
2788

    
2789
    # monitor function used when poll action is called for all vms
    #
    # Same metrics as monitor_poll_vm but fed from the pre-fetched
    # @vm_info cache and the bulk +stats+ result, avoiding per-VM
    # vCenter round trips.
    #
    # @param stats [Hash] performance manager results indexed by VM item
    def monitor(stats)

        reset_monitor

        refresh_rate = 20 #20 seconds between samples (realtime)

        @state = state_to_c(@vm_info["summary.runtime.powerState"])

        return if @state != VM_STATE[:active]

        cpuMhz =  @vm_info[:esx_host_cpu]

        @monitor[:used_memory] = @vm_info["summary.quickStats.hostMemoryUsage"].to_i * 1024

        used_cpu = @vm_info["summary.quickStats.overallCpuUsage"].to_f / cpuMhz
        used_cpu = (used_cpu * 100).to_s
        @monitor[:used_cpu]  = sprintf('%.2f', used_cpu).to_s

        # Check for negative values
        @monitor[:used_memory] = 0 if @monitor[:used_memory].to_i < 0
        @monitor[:used_cpu]    = 0 if @monitor[:used_cpu].to_i < 0

        guest_ip_addresses = []
        # Fix: guard on the same source that is iterated (@vm_info).
        # It used to check self["guest.net"], which could diverge from
        # the cached @vm_info and raise on a nil iteration.
        @vm_info["guest.net"].each do |net|
            net.ipConfig.ipAddress.each do |ip|
                guest_ip_addresses << ip.ipAddress
            end if net.ipConfig && net.ipConfig.ipAddress
        end if @vm_info["guest.net"]

        @guest_ip_addresses = guest_ip_addresses.join(',')

        if stats.key?(@item)
            metrics = stats[@item][:metrics]

            # Sum only positive samples for each counter
            nettx_kbpersec = 0
            if metrics['net.transmitted']
                metrics['net.transmitted'].each { |sample|
                    nettx_kbpersec += sample if sample > 0
                }
            end

            netrx_kbpersec = 0
            if metrics['net.bytesRx']
                metrics['net.bytesRx'].each { |sample|
                    netrx_kbpersec += sample if sample > 0
                }
            end

            read_kbpersec = 0
            if metrics['virtualDisk.read']
                metrics['virtualDisk.read'].each { |sample|
                    read_kbpersec += sample if sample > 0
                }
            end

            read_iops = 0
            if metrics['virtualDisk.numberReadAveraged']
                metrics['virtualDisk.numberReadAveraged'].each { |sample|
                    read_iops += sample if sample > 0
                }
            end

            write_kbpersec = 0
            if metrics['virtualDisk.write']
                metrics['virtualDisk.write'].each { |sample|
                    write_kbpersec += sample if sample > 0
                }
            end

            write_iops = 0
            if metrics['virtualDisk.numberWriteAveraged']
                metrics['virtualDisk.numberWriteAveraged'].each { |sample|
                    write_iops += sample if sample > 0
                }
            end
        else
            nettx_kbpersec = 0
            netrx_kbpersec = 0
            read_kbpersec  = 0
            read_iops      = 0
            write_kbpersec = 0
            write_iops     = 0
        end

        # Accumulate values if present
        previous_nettx = @one_item && @one_item["MONITORING/NETTX"] ? @one_item["MONITORING/NETTX"].to_i : 0
        previous_netrx = @one_item && @one_item["MONITORING/NETRX"] ? @one_item["MONITORING/NETRX"].to_i : 0
        previous_diskrdiops = @one_item && @one_item["MONITORING/DISKRDIOPS"] ? @one_item["MONITORING/DISKRDIOPS"].to_i : 0
        previous_diskwriops = @one_item && @one_item["MONITORING/DISKWRIOPS"] ? @one_item["MONITORING/DISKWRIOPS"].to_i : 0
        previous_diskrdbytes = @one_item && @one_item["MONITORING/DISKRDBYTES"] ? @one_item["MONITORING/DISKRDBYTES"].to_i : 0
        previous_diskwrbytes = @one_item && @one_item["MONITORING/DISKWRBYTES"] ? @one_item["MONITORING/DISKWRBYTES"].to_i : 0

        # sample * 1024 * refresh_rate -> bytes since last poll
        # (samples presumably KB/s averages; confirm counter units)
        @monitor[:nettx] = previous_nettx + (nettx_kbpersec * 1024 * refresh_rate).to_i
        @monitor[:netrx] = previous_netrx + (netrx_kbpersec * 1024 * refresh_rate).to_i

        @monitor[:diskrdiops]  = previous_diskrdiops + read_iops
        @monitor[:diskwriops]  = previous_diskwriops + write_iops
        @monitor[:diskrdbytes] = previous_diskrdbytes + (read_kbpersec * 1024 * refresh_rate).to_i
        @monitor[:diskwrbytes] = previous_diskwrbytes + (write_kbpersec * 1024 * refresh_rate).to_i
    end
2890

    
2891

    
2892

    
2893
    #  Generates a OpenNebula IM Driver valid string with the monitor info
    #
    # Reads the metrics accumulated in @monitor (see reset_monitor) and,
    # when the @vm_info cache is present, takes guest/host attributes
    # from it; otherwise they are fetched live through self[...].
    #
    # @return [String] space separated KEY=VALUE monitor attributes
    def info
        return 'STATE=d' if @state == 'd'

        guest_ip = @vm_info ? @vm_info["guest.ipAddress"] : self["guest.ipAddress"]

        used_cpu    = @monitor[:used_cpu]
        used_memory = @monitor[:used_memory]
        netrx       = @monitor[:netrx]
        nettx       = @monitor[:nettx]
        diskrdbytes = @monitor[:diskrdbytes]
        diskwrbytes = @monitor[:diskwrbytes]
        diskrdiops  = @monitor[:diskrdiops]
        diskwriops  = @monitor[:diskwriops]

        esx_host      = @vm_info ? @vm_info[:esx_host_name].to_s : self["runtime.host.name"].to_s
        guest_state   = @vm_info ? @vm_info["guest.guestState"].to_s : self["guest.guestState"].to_s
        vmware_tools  = @vm_info ? @vm_info["guest.toolsRunningStatus"].to_s : self["guest.toolsRunningStatus"].to_s
        vmtools_ver   = @vm_info ? @vm_info["guest.toolsVersion"].to_s :  self["guest.toolsVersion"].to_s
        # Fix: drop the redundant nested `vmtools_verst = ...` assignment
        # that used to sit inside the else branch of this ternary
        vmtools_verst = @vm_info ? @vm_info["guest.toolsVersionStatus2"].to_s : self["guest.toolsVersionStatus2"].to_s

        if @vm_info
            # Resolve the resource pool name from the cached rp list;
            # fall back to the vCenter default root pool name
            rp_name   = @vm_info[:rp_list].select { |item| item[:ref] == @vm_info["resourcePool"]._ref}.first[:name] rescue ""
            rp_name   = "Resources" if rp_name.empty?
        else
            rp_name   = self["resourcePool"].name
        end

        str_info = ""

        str_info = "GUEST_IP=" << guest_ip.to_s << " " if guest_ip

        if @guest_ip_addresses && !@guest_ip_addresses.empty?
            str_info << "GUEST_IP_ADDRESSES=\"" << @guest_ip_addresses.to_s << "\" "
        end

        str_info << "#{POLL_ATTRIBUTE[:state]}="  << @state               << " "
        str_info << "#{POLL_ATTRIBUTE[:cpu]}="    << used_cpu.to_s        << " "
        str_info << "#{POLL_ATTRIBUTE[:memory]}=" << used_memory.to_s     << " "
        str_info << "#{POLL_ATTRIBUTE[:netrx]}="  << netrx.to_s           << " "
        str_info << "#{POLL_ATTRIBUTE[:nettx]}="  << nettx.to_s           << " "

        str_info << "DISKRDBYTES=" << diskrdbytes.to_s << " "
        str_info << "DISKWRBYTES=" << diskwrbytes.to_s << " "
        str_info << "DISKRDIOPS="  << diskrdiops.to_s  << " "
        str_info << "DISKWRIOPS="  << diskwriops.to_s  << " "

        str_info << "VCENTER_ESX_HOST=\""                 << esx_host        << "\" "
        str_info << "VCENTER_GUEST_STATE="                << guest_state     << " "
        str_info << "VCENTER_VMWARETOOLS_RUNNING_STATUS=" << vmware_tools    << " "
        str_info << "VCENTER_VMWARETOOLS_VERSION="        << vmtools_ver     << " "
        str_info << "VCENTER_VMWARETOOLS_VERSION_STATUS=" << vmtools_verst   << " "
        str_info << "VCENTER_RP_NAME=\""                  << rp_name << "\" "
    end
2947

    
2948
    # (Re)initializes the @monitor accumulator with every metric at zero.
    def reset_monitor
        @monitor = {}
        [:used_cpu, :used_memory, :netrx, :nettx,
         :diskrdbytes, :diskwrbytes, :diskrdiops, :diskwriops].each do |metric|
            @monitor[metric] = 0
        end
    end
2960

    
2961
    # Converts the VI string state to OpenNebula state convention
    # Guest states are:
    # - poweredOff   The virtual machine is currently powered off.
    # - poweredOn    The virtual machine is currently powered on.
    # - suspended    The virtual machine is currently suspended.
    def state_to_c(state)
        case state
        when 'poweredOn'  then VM_STATE[:active]
        when 'suspended'  then VM_STATE[:paused]
        when 'poweredOff' then VM_STATE[:deleted]
        else                   VM_STATE[:unknown]
        end
    end
2978

    
2979
    # TODO check with uuid
2980
    def self.new_from_ref(ref, vi_client)
2981
        self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
2982
    end
2983

    
2984
end # class VirtualMachine
2985

    
2986
end # module VCenterDriver