one/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb @ 9e5bb8b9

module VCenterDriver
require 'digest'
class VirtualMachineFolder
    attr_accessor :item, :items

    def initialize(item)
        @item = item
        @items = {}
    end

    ########################################################################
    # Builds a hash with VirtualMachine-Ref / VirtualMachine to be used as
    # a cache
    # @return [Hash] in the form
    #   { vm_ref [Symbol] => VirtualMachine object }
    ########################################################################
    def fetch!
        VIClient.get_entities(@item, "VirtualMachine").each do |item|
            item_name = item._ref
            @items[item_name.to_sym] = VirtualMachine.new(item)
        end
    end

    def fetch_templates!
        VIClient.get_entities(@item, "VirtualMachine").each do |item|
            if item.config.template
                item_name = item._ref
                @items[item_name.to_sym] = Template.new(item)
            end
        end
    end

    ########################################################################
    # Returns a VirtualMachine. Uses the cache if available.
    # @param ref [Symbol] the vcenter ref
    # @return VirtualMachine
    ########################################################################
    def get(ref)
        if !@items[ref.to_sym]
            rbvmomi_vm = RbVmomi::VIM::VirtualMachine.new(@item._connection, ref)
            @items[ref.to_sym] = VirtualMachine.new(rbvmomi_vm)
        end

        @items[ref.to_sym]
    end
end # class VirtualMachineFolder
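
# A minimal usage sketch for the folder cache above (illustrative, not part
# of the driver; assumes a VCenterDriver::Datacenter wrapper in dc):
#
#   vm_folder = dc.vm_folder
#   vm_folder.fetch!                       # populate the ref => VM cache
#   vm = vm_folder.items.values.first      # iterate the cached wrappers
#   same_vm = vm_folder.get(vm.item._ref)  # cache hit, no extra vCenter call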

class Template

    attr_accessor :item

    include Memoize

    def initialize(item=nil, vi_client=nil)
        @item = item
        @vi_client = vi_client
        @locking = true
    end

    # Acquire the driver execution mutex: an exclusive flock on a shared
    # temporary file, so that concurrent import operations are serialized
    def lock
        if @locking
           @locking_file = File.open("/tmp/vcenter-importer-lock","w")
           @locking_file.flock(File::LOCK_EX)
        end
    end

    # Release the driver execution mutex (closing the file drops the flock)
    def unlock
        if @locking
            @locking_file.close
        end
    end
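
    # Callers pair lock/unlock around critical sections, as the import
    # methods below do. A minimal sketch of the intended pattern:
    #
    #   begin
    #       lock      # serialize with other importer processes
    #       ...       # create images/networks without races
    #   ensure
    #       unlock    # always release, even on error
    #   end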

    def get_dc
        item = @item

        while !item.instance_of? RbVmomi::VIM::Datacenter
            item = item.parent
            if item.nil?
                raise "Could not find the parent Datacenter"
            end
        end

        Datacenter.new(item)
    end

    def delete_template
        @item.Destroy_Task.wait_for_completion
    end

    def get_vcenter_instance_uuid
        @vi_client.vim.serviceContent.about.instanceUuid rescue nil
    end

    def create_template_copy(template_name)
        error = nil
        template_ref = nil

        template_name = "one-#{self['name']}" if template_name.empty?

        relocate_spec_params = {}
        relocate_spec_params[:pool] = get_rp
        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params)

        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec({
            :location => relocate_spec,
            :powerOn  => false,
            :template => false
        })

        template = nil
        begin
            template = @item.CloneVM_Task(:folder => @item.parent,
                                          :name   => template_name,
                                          :spec   => clone_spec).wait_for_completion
            template_ref = template._ref
        rescue Exception => e
            if !e.message.start_with?('DuplicateName')
                error = "Could not create the template clone. Reason: #{e.message}"
                return error, nil
            end

            dc = get_dc
            vm_folder = dc.vm_folder
            vm_folder.fetch!
            vm = vm_folder.items
                    .select{|k,v| v.item.name == template_name}
                    .values.first.item rescue nil

            if vm
                begin
                    vm.Destroy_Task.wait_for_completion
                    template = @item.CloneVM_Task(:folder => @item.parent,
                                                  :name   => template_name,
                                                  :spec   => clone_spec).wait_for_completion
                    template_ref = template._ref
                rescue Exception => e
                    error = "Could not delete the existing template, please remove it manually from vCenter. Reason: #{e.message}"
                end
            else
                error = "Could not create the template clone. Reason: #{e.message}"
            end
        end

        return error, template_ref
    end
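
    # Sketch of the two-value return contract (hypothetical caller):
    #
    #   error, template_ref = template.create_template_copy("one-myvm")
    #   raise error if error  # error is a descriptive String on failure
    #   # on success template_ref holds the moref of the new template clone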

    # Prepare a template for linked clones by creating delta disks on top of
    # every disk that does not have one yet
    def create_delta_disks

        begin
            disks = @item['config.hardware.device'].grep(RbVmomi::VIM::VirtualDisk)
            disk_without_snapshots = disks.select { |x| x.backing.parent.nil? }
        rescue
            error = "Cannot extract existing disks on template."
            use_linked_clones = false
            return error, use_linked_clones
        end

        if !disk_without_snapshots.empty?

            begin
                if self['config.template']
                    @item.MarkAsVirtualMachine(:pool => get_rp, :host => self['runtime.host'])
                end
            rescue Exception => e
                @item.MarkAsTemplate()
                error = "Cannot mark the template as a VirtualMachine. Not using linked clones. Reason: #{e.message}/#{e.backtrace}"
                use_linked_clones = false
                return error, use_linked_clones
            end

            begin
                spec = {}
                spec[:deviceChange] = []

                disk_without_snapshots.each do |disk|
                    remove_disk_spec = { :operation => :remove, :device => disk }
                    spec[:deviceChange] << remove_disk_spec

                    add_disk_spec = { :operation => :add,
                                    :fileOperation => :create,
                                    :device => disk.dup.tap { |x|
                                            x.backing = x.backing.dup
                                            x.backing.fileName = "[#{disk.backing.datastore.name}]"
                                            x.backing.parent = disk.backing
                                    }
                    }
                    spec[:deviceChange] << add_disk_spec
                end

                @item.ReconfigVM_Task(:spec => spec).wait_for_completion if !spec[:deviceChange].empty?
            rescue Exception => e
                error = "Cannot create the delta disks on top of the template. Reason: #{e.message}."
                use_linked_clones = false
                return error, use_linked_clones
            end

            begin
                @item.MarkAsTemplate()
            rescue
                error = "Cannot mark the VirtualMachine as a template. Not using linked clones."
                use_linked_clones = false
                return error, use_linked_clones
            end

            error = nil
            use_linked_clones = true
            return error, use_linked_clones
        else
            # Template already has delta disks
            error = nil
            use_linked_clones = true
            return error, use_linked_clones
        end
    end
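
    # Hypothetical invocation, following the same error/value return style as
    # create_template_copy:
    #
    #   error, use_linked_clones = template.create_delta_disks
    #   # use_linked_clones == true means every disk now has a delta disk
    #   # backed by the original file, so clones can share parent backings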

    def import_vcenter_disks(vc_uuid, dpool, ipool, sunstone=false, template_id=nil)
        disk_info = ""
        error = ""
        sunstone_disk_info = []

        begin
            lock # Lock the import operation to avoid the concurrent creation of images

            ##ccr_ref = self["runtime.host.parent._ref"]
            dc = get_dc
            dc_ref = dc.item._ref

            # Get disks and the info required
            vc_disks = get_vcenter_disks

            # Track allocated images
            allocated_images = []

            vc_disks.each do |disk|
                datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_dc(disk[:datastore]._ref,
                                                                                        dc_ref,
                                                                                        vc_uuid,
                                                                                        dpool)
                if datastore_found.nil?
                    error = "\n    ERROR: datastore #{disk[:datastore].name} has to be imported first as an image datastore!\n"

                    # Rollback: delete the allocated disk images
                    allocated_images.each do |i|
                        i.delete
                    end

                    break
                end

                image_import = VCenterDriver::Datastore.get_image_import_template(disk[:datastore].name,
                                                                                  disk[:path],
                                                                                  disk[:type],
                                                                                  disk[:prefix],
                                                                                  ipool,
                                                                                  template_id)
                # Image is already in the datastore
                if image_import[:one]
                    # This is the disk info
                    disk_tmp = ""
                    disk_tmp << "DISK=[\n"
                    disk_tmp << "IMAGE_ID=\"#{image_import[:one]["ID"]}\",\n"
                    disk_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
                    disk_tmp << "]\n"
                    if sunstone
                        sunstone_disk = {}
                        sunstone_disk[:type] = "EXISTING_DISK"
                        sunstone_disk[:image_tmpl] = disk_tmp
                        sunstone_disk_info << sunstone_disk
                    else
                        disk_info << disk_tmp
                    end

                elsif !image_import[:template].empty?

                    if sunstone
                        sunstone_disk = {}
                        sunstone_disk[:type] = "NEW_DISK"
                        sunstone_disk[:image_tmpl] = image_import[:template]
                        sunstone_disk[:ds_id] = datastore_found['ID'].to_i
                        sunstone_disk_info << sunstone_disk
                    else
                        # The image has to be created as it's not in the datastore
                        one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image)
                        allocated_images << one_i
                        rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i)

                        if OpenNebula.is_error?(rc)
                            error = "    Error creating disk from template: #{rc.message}\n"
                            break
                        end

                        # Add info for the OpenNebula template
                        one_i.info
                        disk_info << "DISK=[\n"
                        disk_info << "IMAGE_ID=\"#{one_i["ID"]}\",\n"
                        disk_info << "OPENNEBULA_MANAGED=\"NO\"\n"
                        disk_info << "]\n"
                    end
                end
            end

        rescue Exception => e
            error = "\n    There was an error trying to create an image for a disk in the vCenter template. Reason: #{e.message}\n#{e.backtrace}"
        ensure
            unlock
            if !error.empty? && allocated_images
                # Rollback: delete the allocated disk images
                allocated_images.each do |i|
                    i.delete
                end
            end
        end

        return error, sunstone_disk_info, allocated_images if sunstone

        return error, disk_info, allocated_images if !sunstone

    end
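
    # Hypothetical invocation during a template import (dpool and ipool are
    # OpenNebula datastore and image pools fetched by the caller):
    #
    #   error, disk_info, images = template.import_vcenter_disks(vc_uuid,
    #                                                            dpool, ipool)
    #   # disk_info is a DISK=[...] fragment ready to append to the VM
    #   # template; images holds the allocated OpenNebula::Image objects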

    def import_vcenter_nics(vc_uuid, npool, hpool, vcenter_instance_name,
                            template_ref, wild, sunstone=false, vm_name=nil, vm_id=nil, dc_name=nil)
        nic_info = ""
        error = ""
        sunstone_nic_info = []

        begin
            lock # Lock the import operation to avoid the concurrent creation of networks

            if !dc_name
                dc = get_dc
                dc_name = dc.item.name
                dc_ref  = dc.item._ref
            end

            ccr_ref  = self["runtime.host.parent._ref"]
            ccr_name = self["runtime.host.parent.name"]

            # Get NICs and the info required
            vc_nics = get_vcenter_nics

            # Track allocated networks for rollback
            allocated_networks = []

            # Track port groups duplicated in this VM
            duplicated_networks = []

            vc_nics.each do |nic|
                # Check if the network already exists
                network_found = VCenterDriver::Network.get_unmanaged_vnet_by_ref(nic[:net_ref],
                                                                                 template_ref,
                                                                                 vc_uuid,
                                                                                 npool)
                # Network is already in OpenNebula
                if network_found

                    # This is the existing nic info
                    nic_tmp = ""
                    nic_tmp << "NIC=[\n"
                    nic_tmp << "NETWORK_ID=\"#{network_found["ID"]}\",\n"
                    nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
                    nic_tmp << "]\n"

                    if sunstone
                        sunstone_nic = {}
                        sunstone_nic[:type] = "EXISTING_NIC"
                        sunstone_nic[:network_tmpl] = nic_tmp
                        sunstone_nic_info << sunstone_nic
                    else
                        nic_info << nic_tmp
                    end
                else
                    # The network has to be created as it's not in OpenNebula
                    one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork)

                    # We're importing unmanaged nics
                    unmanaged = true

                    # Get the OpenNebula host associated to the cluster reference
                    one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
                                                                  "TEMPLATE/VCENTER_CCR_REF",
                                                                   ccr_ref,
                                                                   vc_uuid,
                                                                   hpool)

                    # Get the CLUSTER_ID from the OpenNebula host
                    if !one_host || !one_host['CLUSTER_ID']
                        cluster_id = -1
                    else
                        cluster_id = one_host['CLUSTER_ID']
                    end

                    # We have to know if we're importing nics from a wild vm
                    # or from a template
                    if wild
                        unmanaged = "wild"
                    else
                        unmanaged = "template"
                    end

                    # Prepare the Virtual Network template
                    one_vnet = VCenterDriver::Network.to_one_template(nic[:net_name],
                                                                      nic[:net_ref],
                                                                      nic[:pg_type],
                                                                      ccr_ref,
                                                                      ccr_name,
                                                                      vc_uuid,
                                                                      vcenter_instance_name,
                                                                      dc_name,
                                                                      cluster_id,
                                                                      nil,
                                                                      unmanaged,
                                                                      template_ref,
                                                                      dc_ref,
                                                                      vm_name,
                                                                      vm_id)

                    # By default add an ethernet address range of size 255
                    ar_str = ""
                    ar_str << "AR=[\n"
                    ar_str << "TYPE=\"ETHER\",\n"
                    ar_str << "SIZE=\"255\"\n"
                    ar_str << "]\n"
                    one_vnet[:one] << ar_str

                    if sunstone
                        if !duplicated_networks.include?(nic[:net_name])
                            sunstone_nic = {}
                            sunstone_nic[:type] = "NEW_NIC"
                            sunstone_nic[:network_name] = nic[:net_name]
                            sunstone_nic[:network_tmpl] = one_vnet[:one]
                            sunstone_nic[:one_cluster_id] = cluster_id.to_i
                            sunstone_nic_info << sunstone_nic
                            duplicated_networks << nic[:net_name]
                        else
                            sunstone_nic = {}
                            sunstone_nic[:type] = "DUPLICATED_NIC"
                            sunstone_nic[:network_name] = nic[:net_name]
                            sunstone_nic_info << sunstone_nic
                        end
                    else
                        # Allocate the Virtual Network
                        allocated_networks << one_vn
                        rc = one_vn.allocate(one_vnet[:one], cluster_id.to_i)

                        if OpenNebula.is_error?(rc)
                            error = "\n    ERROR: Could not allocate virtual network due to #{rc.message}\n"
                            break
                        end

                        # Add info for the OpenNebula template
                        one_vn.info
                        nic_info << "NIC=[\n"
                        nic_info << "NETWORK_ID=\"#{one_vn["ID"]}\",\n"
                        nic_info << "OPENNEBULA_MANAGED=\"NO\"\n"
                        nic_info << "]\n"

                        # Refresh npool
                        npool.info_all
                    end
                end
            end

        rescue Exception => e
            error = "\n    There was an error trying to create a virtual network to represent a vCenter network for a VM or VM Template. Reason: #{e.message}"
        ensure
            unlock
            # Rollback: delete the allocated virtual networks
            if !error.empty? && allocated_networks
                allocated_networks.each do |n|
                    n.delete
                end
            end
        end

        return error, nic_info, allocated_networks if !sunstone

        return error, sunstone_nic_info, allocated_networks if sunstone
    end
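
    # Hypothetical invocation, parallel to import_vcenter_disks (npool and
    # hpool are OpenNebula virtual network and host pools; wild is false for
    # a template import):
    #
    #   error, nic_info, vnets = template.import_vcenter_nics(vc_uuid, npool,
    #                                                         hpool, vc_name,
    #                                                         template_ref,
    #                                                         false)
    #   # nic_info is a NIC=[...] fragment; vnets holds the allocated
    #   # OpenNebula::VirtualNetwork objects in case a rollback is needed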

    def get_vcenter_disk_key(unit_number, controller_key)

        key = nil

        @item["config.hardware.device"].each do |device|
            if is_disk_or_iso?(device)
                if device.controllerKey == controller_key &&
                   device.unitNumber == unit_number

                   key = device.key
                   break
                end
            end
        end

        return key
    end

    def get_vcenter_disks

        disks = []
        ide_controlled  = []
        sata_controlled = []
        scsi_controlled = []

        @item["config.hardware.device"].each do |device|
            disk = {}

            if device.is_a? RbVmomi::VIM::VirtualIDEController
                ide_controlled.concat(device.device)
            end

            if device.is_a? RbVmomi::VIM::VirtualSATAController
                sata_controlled.concat(device.device)
            end

            if device.is_a? RbVmomi::VIM::VirtualSCSIController
                scsi_controlled.concat(device.device)
            end

            if is_disk_or_iso?(device)
                disk[:device]    = device
                disk[:datastore] = device.backing.datastore
                disk[:path]      = device.backing.fileName
                disk[:path_wo_ds]= disk[:path].sub(/^\[(.*?)\] /, "")
                disk[:type]      = is_disk?(device) ? "OS" : "CDROM"
                disk[:key]       = device.key
                disk[:prefix]    = "hd" if ide_controlled.include?(device.key)
                disk[:prefix]    = "sd" if scsi_controlled.include?(device.key)
                disk[:prefix]    = "sd" if sata_controlled.include?(device.key)
                disks << disk
            end
        end

        return disks
    end
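
    # Shape of each entry returned by get_vcenter_disks (illustrative values):
    #
    #   { :device     => <RbVmomi::VIM::VirtualDisk>,
    #     :datastore  => <RbVmomi::VIM::Datastore>,
    #     :path       => "[datastore1] one/disk.0.vmdk",
    #     :path_wo_ds => "one/disk.0.vmdk",
    #     :type       => "OS",       # or "CDROM" for ISO backings
    #     :key        => 2000,
    #     :prefix     => "sd" }      # "hd" for IDE-attached devices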

    def get_vcenter_nics
        nics = []
        @item["config.hardware.device"].each do |device|
            nic = {}
            if is_nic?(device)
                begin
                    nic[:net_name]  = device.backing.network.name
                    nic[:net_ref]   = device.backing.network._ref
                    nic[:pg_type]   = VCenterDriver::Network.get_network_type(device)
                    nics << nic
                rescue
                    # Skip NICs whose backing network cannot be resolved
                end
            end
        end
        return nics
    end

    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom
    def is_disk_or_cdrom?(device)
        is_disk  = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
        is_cdrom = !(device.class.ancestors.index(RbVmomi::VIM::VirtualCdrom)).nil?
        is_disk || is_cdrom
    end

    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file
    def is_disk_or_iso?(device)
        is_disk  = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
        is_iso = device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo
        is_disk || is_iso
    end

    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk
    def is_disk?(device)
        !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
    end

    #  Checks if a RbVmomi::VIM::VirtualDevice is a network interface
    def is_nic?(device)
        !device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil?
    end

    # @return RbVmomi::VIM::ResourcePool, first resource pool in cluster
    def get_rp
        self['runtime.host.parent.resourcePool']
    end

    def vm_to_one(vm_name)

        str = "NAME   = \"#{vm_name}\"\n"\
              "CPU    = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
              "vCPU   = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
              "MEMORY = \"#{@vm_info["config.hardware.memoryMB"]}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "CONTEXT = [\n"\
              "    NETWORK = \"YES\",\n"\
              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
              "]\n"\
              "VCENTER_INSTANCE_ID =\"#{@vm_info[:vc_uuid]}\"\n"\
              "VCENTER_CCR_REF =\"#{@vm_info[:cluster_ref]}\"\n"

        str << "IMPORT_VM_ID =\"#{self["_ref"]}\"\n"
        str << "IMPORT_STATE =\"#{@state}\"\n"

        # Get DS information
        str << "VCENTER_DS_REF = \"#{@vm_info["datastore"].first._ref}\"\n"

        vnc_port = nil
        keymap = nil

        @vm_info["config.extraConfig"].each do |xtra|
            if xtra[:key].downcase=="remotedisplay.vnc.port"
                vnc_port = xtra[:value]
            end

            if xtra[:key].downcase=="remotedisplay.vnc.keymap"
                keymap = xtra[:value]
            end
        end

        if !@vm_info["config.extraConfig"].empty?
            str << "GRAPHICS = [\n"\
                   "  TYPE     =\"vnc\",\n"
            str << "  PORT     =\"#{vnc_port}\",\n" if vnc_port
            str << "  KEYMAP   =\"#{keymap}\",\n" if keymap
            str << "  LISTEN   =\"0.0.0.0\"\n"
            str << "]\n"
        end

        if !@vm_info["config.annotation"] || @vm_info["config.annotation"].empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
                " from Cluster #{@vm_info["cluster_name"]}\"\n"
        else
            notes = @vm_info["config.annotation"].gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        case @vm_info["guest.guestFullName"]
            when /CentOS/i
                str << "LOGO=images/logos/centos.png\n"
            when /Debian/i
                str << "LOGO=images/logos/debian.png\n"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png\n"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png\n"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png\n"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png\n"
            when /Linux/i
                str << "LOGO=images/logos/linux.png\n"
        end

        return str
    end
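
    # Illustrative fragment of the template text built above (values depend
    # on the imported VM):
    #
    #   NAME   = "my-imported-vm"
    #   CPU    = "2"
    #   vCPU   = "2"
    #   MEMORY = "2048"
    #   HYPERVISOR = "vcenter"
    #   VCENTER_INSTANCE_ID ="..."
    #   VCENTER_CCR_REF ="domain-c7"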

    def self.template_to_one(template, vc_uuid, ccr_ref, ccr_name, import_name, host_id)

        num_cpu, memory, annotation, guest_fullname = template.item.collect("config.hardware.numCPU","config.hardware.memoryMB","config.annotation","guest.guestFullName")

        str = "NAME   = \"#{import_name}\"\n"\
              "CPU    = \"#{num_cpu}\"\n"\
              "vCPU   = \"#{num_cpu}\"\n"\
              "MEMORY = \"#{memory}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "CONTEXT = [\n"\
              "    NETWORK = \"YES\",\n"\
              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
              "]\n"\
              "VCENTER_INSTANCE_ID =\"#{vc_uuid}\"\n"

        str << "VCENTER_TEMPLATE_REF =\"#{template["_ref"]}\"\n"
        str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n"

        str << "GRAPHICS = [\n"\
               "  TYPE     =\"vnc\",\n"
        str << "  LISTEN   =\"0.0.0.0\"\n"
        str << "]\n"

        if annotation.nil? || annotation.empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
                " from Cluster #{ccr_name}\"\n"
        else
            notes = annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        case guest_fullname
            when /CentOS/i
                str << "LOGO=images/logos/centos.png\n"
            when /Debian/i
                str << "LOGO=images/logos/debian.png\n"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png\n"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png\n"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png\n"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png\n"
            when /Linux/i
                str << "LOGO=images/logos/linux.png\n"
        end

        return str
    end

    
709
    def self.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name=nil, dc_name=nil, rp_cache={})
710

    
711
        begin
712
            template_ref      = template['_ref']
713
            template_name     = template["name"]
714
            template_ccr      = template['runtime.host.parent']
715
            template_ccr_ref  = template_ccr._ref
716
            template_ccr_name = template_ccr.name
717

    
718
            # Set vcenter instance name
719
            vcenter_instance_name = vi_client.vim.host if !vcenter_instance_name
720

    
721
            # Get datacenter info
722
            if !dc_name
723
                dc = get_dc
724
                dc_name = dc.item.name
725
            end
726

    
727
            #Get resource pools and generate a list
728
            if !rp_cache[template_ccr_name]
729
                tmp_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(template_ccr_ref, vi_client)
730
                rp_list = tmp_cluster.get_resource_pool_list
731
                rp = ""
732
                if !rp_list.empty?
733
                    rp_name_list = []
734
                    rp_list.each do |rp_hash|
735
                        rp_name_list << rp_hash[:name]
736
                    end
737
                    rp =  "O|list|Which resource pool you want this VM to run in? "
738
                    rp << "|#{rp_name_list.join(",")}" #List of RP
739
                    rp << "|#{rp_name_list.first}" #Default RP
740
                end
741
                rp_cache[template_ccr_name] = {}
742
                rp_cache[template_ccr_name][:rp] = rp
743
                rp_cache[template_ccr_name][:rp_list] = rp_list
744
            end
745
            rp      = rp_cache[template_ccr_name][:rp]
746
            rp_list = rp_cache[template_ccr_name][:rp_list]
747

    
748

    
749
            # Determine the location path for the template
750
            vcenter_template = VCenterDriver::VirtualMachine.new_from_ref(template_ref, vi_client)
751
            item = vcenter_template.item
752
            folders = []
753
            while !item.instance_of? RbVmomi::VIM::Datacenter
754
                item = item.parent
755
                if !item.instance_of? RbVmomi::VIM::Datacenter
756
                    folders << item.name if item.name != "vm"
757
                end
758
                raise "Could not find the templates parent location" if item.nil?
759
            end
760
            location = folders.reverse.join("/")
761
            location = "/" if location.empty?
762

    
763
            # Generate a crypto hash for the template name and take the first 12 chars
764
            sha256            = Digest::SHA256.new
765
            full_name         = "#{template_name} - #{template_ccr_name} [#{vcenter_instance_name} - #{dc_name}]_#{location}"
766
            template_hash     = sha256.hexdigest(full_name)[0..11]
767
            template_name     = template_name.tr("\u007F", "")
768
            template_ccr_name = template_ccr_name.tr("\u007F", "")
769
            import_name       = "#{template_name} - #{template_ccr_name} #{template_hash}"
770

    
771
            # Prepare the Hash that will be used by importers to display
772
            # the object being imported
773
            one_tmp = {}
774
            one_tmp[:name]                  = import_name
775
            one_tmp[:template_name]         = template_name
776
            one_tmp[:sunstone_template_name]= "#{template_name} [ Cluster: #{template_ccr_name} - Template location: #{location} ]"
777
            one_tmp[:template_hash]         = template_hash
778
            one_tmp[:template_location]     = location
779
            one_tmp[:vcenter_ccr_ref]       = template_ccr_ref
780
            one_tmp[:vcenter_ref]           = template_ref
781
            one_tmp[:vcenter_instance_uuid] = vcenter_uuid
782
            one_tmp[:cluster_name]          = template_ccr_name
783
            one_tmp[:rp]                    = rp
784
            one_tmp[:rp_list]               = rp_list
785
            one_tmp[:template]              = template
786
            one_tmp[:import_disks_and_nics] = true # By default we import disks and nics
787

    
788
            # Get the host ID of the OpenNebula host which represents the vCenter Cluster
789
            host_id = nil
790
            one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
791
                                                           "TEMPLATE/VCENTER_CCR_REF",
792
                                                           template_ccr_ref,
793
                                                           vcenter_uuid)
794
            host_id    = one_host["ID"]
795
            cluster_id = one_host["CLUSTER_ID"]
796
            raise "Could not find the host's ID associated to template being imported" if !host_id
797

    
798
            # Get the OpenNebula's template hash
799
            one_tmp[:one] = template_to_one(template, vcenter_uuid, template_ccr_ref, template_ccr_name, import_name, host_id)
800
            return one_tmp
801
        rescue
802
            return nil
803
        end
804
    end
805

    
806
    # TODO check with uuid
807
    def self.new_from_ref(ref, vi_client)
808
        self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
809
    end
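
    # Hypothetical usage: wrap an existing vCenter object by its managed
    # object reference (moref) with a connected VIClient:
    #
    #   vm = VCenterDriver::VirtualMachine.new_from_ref("vm-1234", vi_client)
    #   vm.get_vcenter_disks.each { |d| puts d[:path] }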

end

class VirtualMachine < Template
    VM_PREFIX_DEFAULT = "one-$i-"

    POLL_ATTRIBUTE    = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
    VM_STATE          = OpenNebula::VirtualMachine::Driver::VM_STATE

    VM_SHUTDOWN_TIMEOUT = 600 # 10 minutes until a hard poweroff is issued

    attr_accessor :item, :vm_id

    attr_accessor :vm_info

    include Memoize

    def initialize(item=nil, vi_client=nil)
        @item = item
        @vi_client = vi_client
        @locking = true
        @vm_info = nil
    end

    ############################################################################
    ############################################################################

    # Attributes that must be defined when the VM does not exist in vCenter
    attr_accessor :vi_client

    # These have their own getters; if they aren't set, they are resolved
    # dynamically
    attr_writer :one_item
    attr_writer :host
    attr_writer :target_ds_ref

    ############################################################################
    ############################################################################

    # The OpenNebula VM
    # @return OpenNebula::VirtualMachine or XMLElement
    def one_item
        if !@one_item
            vm_id = get_vm_id

            raise "Unable to find vm_id." if vm_id.nil?

            @one_item = VIHelper.one_item(OpenNebula::VirtualMachine, vm_id)
        end

        @one_item
    end

    # The OpenNebula host
    # @return OpenNebula::Host or XMLElement
    def host
        if @host.nil?
            if one_item.nil?
                raise "'one_item' must be previously set to be able to " <<
                      "access the OpenNebula host."
            end

            host_id = one_item["HISTORY_RECORDS/HISTORY[last()]/HID"]
            raise "No valid host_id found." if host_id.nil?

            @host = VIHelper.one_item(OpenNebula::Host, host_id)
        end

        @host
    end

    # Target Datastore VMware reference getter
    # @return String the target datastore's vCenter ref
    def target_ds_ref
        if @target_ds_ref.nil?
            if one_item.nil?
                raise "'one_item' must be previously set to be able to " <<
                      "access the target Datastore."
            end

            target_ds_id = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
            raise "No valid target_ds_id found." if target_ds_id.nil?

            target_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore,
                                                         target_ds_id)

            @target_ds_ref = target_ds['TEMPLATE/VCENTER_DS_REF']
        end

        @target_ds_ref
    end

    # Cached cluster
    # @return ClusterComputeResource
    def cluster
        if @cluster.nil?
            ccr_ref = host['TEMPLATE/VCENTER_CCR_REF']
            @cluster = ClusterComputeResource.new_from_ref(ccr_ref, vi_client)
        end

        @cluster
    end

    ############################################################################
    ############################################################################

    # @return Boolean true if the VM is not yet registered in OpenNebula
    def is_new?
        !get_vm_id
    end

    # @return String the OpenNebula VM ID whose DEPLOY_ID matches this
    # vCenter VM's ref
    def get_vm_id(vm_pool = nil)
        if defined?(@vm_id) && @vm_id
            return @vm_id
        end

        vm_ref = self['_ref']
        return nil if !vm_ref

        vc_uuid = get_vcenter_instance_uuid

        one_vm = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualMachinePool,
                                                     "DEPLOY_ID",
                                                     vm_ref,
                                                     vc_uuid,
                                                     vm_pool)
        return nil if !one_vm

        @vm_id = one_vm["ID"]
        return @vm_id
    end

    def get_vcenter_instance_uuid
        @vi_client.vim.serviceContent.about.instanceUuid
    end

    def get_unmanaged_keys
        unmanaged_keys = {}
        @item.config.extraConfig.each do |val|
             if val[:key].include?("opennebula.disk")
                 unmanaged_keys[val[:key]] = val[:value]
             end
        end
        return unmanaged_keys
    end
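
    # Example of the mapping returned by get_unmanaged_keys (illustrative):
    #
    #   { "opennebula.disk.0" => "2000",
    #     "opennebula.disk.1" => "2001" }
    #
    # Each entry links an OpenNebula DISK_ID to the vCenter device key that
    # backs it, as written by reference_unmanaged_devices below.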

    ############################################################################
    # Getters
    ############################################################################

    # @return RbVmomi::VIM::ResourcePool
    def get_rp

        req_rp = one_item['VCENTER_RESOURCE_POOL'] ||
                 one_item['USER_TEMPLATE/VCENTER_RESOURCE_POOL']

        # Get the ref for req_rp
        rp_list    = cluster.get_resource_pool_list
        req_rp_ref = rp_list.select { |rp| rp[:name] == req_rp }.first[:ref] rescue nil

        if vi_client.rp_confined?
            if req_rp_ref && req_rp_ref != vi_client.rp._ref
                raise "Available resource pool [#{vi_client.rp.name}] in host"\
                      " does not match requested resource pool"\
                      " [#{req_rp}]"
            end

            return vi_client.rp
        else
            if req_rp_ref
                rps = cluster.resource_pools.select{|r| r._ref == req_rp_ref }

                if rps.empty?
                    raise "No matching resource pool found (#{req_rp})."
                else
                    return rps.first
                end
            else
                return cluster['resourcePool']
            end
        end
    end

    # @return RbVmomi::VIM::Datastore or nil
    def get_ds
        ##req_ds = one_item['USER_TEMPLATE/VCENTER_DS_REF']
        current_ds_id  = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
        current_ds     = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, current_ds_id)
        current_ds_ref = current_ds['TEMPLATE/VCENTER_DS_REF']

        if current_ds_ref
            dc = cluster.get_dc

            ds_folder = dc.datastore_folder
            ds = ds_folder.get(current_ds_ref)
            ds_item = ds.item rescue nil

            return ds_item
        else
            return nil
        end
    end

    # StorageResourceManager reference
    def get_sm
        self['_connection.serviceContent.storageResourceManager']
    end

    # @return Customization or nil
    def get_customization
        xpath = "USER_TEMPLATE/VCENTER_CUSTOMIZATION_SPEC"
        customization_spec = one_item[xpath]

        if customization_spec.nil?
            return nil
        end

        begin
            custom_spec = vi_client.vim
                            .serviceContent
                            .customizationSpecManager
                            .GetCustomizationSpec(:name => customization_spec)

            if custom_spec && (spec = custom_spec.spec)
                return spec
            else
                raise "Error getting customization spec"
            end
        rescue
            raise "Customization spec '#{customization_spec}' not found"
        end
    end

    # @return VCenterDriver::Datastore the datastore where the disk will live
    def get_effective_ds(disk)
        if disk["PERSISTENT"] == "YES"
            ds_ref = disk["VCENTER_DS_REF"]
        else
            ds_ref = target_ds_ref

            if ds_ref.nil?
                raise "target_ds_ref must be defined on this object."
            end
        end

        VCenterDriver::Storage.new_from_ref(ds_ref, vi_client)
    end

    # @return String the name the VM will get in vCenter, built from the
    # VM_PREFIX (with $i expanded to the VM ID) and the OpenNebula VM name
    def get_vcenter_name
        vm_prefix = host['TEMPLATE/VM_PREFIX']
        vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty?
        vm_prefix = vm_prefix.gsub("$i", one_item['ID'])

        vm_prefix + one_item['NAME']
    end

    ############################################################################
    # Create and reconfigure VM related methods
    ############################################################################

    # This function creates a new VM from the @one_item XML and returns the
    # VMware ref
    # @param one_item OpenNebula::VirtualMachine
    # @param vi_client VCenterDriver::VIClient
    # @return String vmware ref
    def clone_vm(one_item, vi_client)
        @one_item = one_item
        @vi_client = vi_client

        vcenter_name = get_vcenter_name

        vc_template_ref = one_item['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
        vc_template = RbVmomi::VIM::VirtualMachine(vi_client.vim, vc_template_ref)

        ds = get_ds

        # Default disk move type (Full Clone)
        disk_move_type = :moveAllDiskBackingsAndDisallowSharing

        if ds.instance_of? RbVmomi::VIM::Datastore
            use_linked_clones = one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES']
            if use_linked_clones && use_linked_clones.downcase == "yes"
                # Check if all disks in the template have delta disks
                disks = vc_template.config
                                .hardware.device.grep(RbVmomi::VIM::VirtualDisk)

                disks_no_delta = disks.select { |d| d.backing.parent == nil }

                # Linked clones can be used if all disks have delta disks
                if (disks_no_delta.size == 0)
                    disk_move_type = :moveChildMostDiskBacking
                end
            end
        end

        spec_hash = spec_hash_clone(disk_move_type)

        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(spec_hash)

        # Specify the vm folder in vSphere's VM and Templates view F#4823
        vcenter_vm_folder = one_item["USER_TEMPLATE/VCENTER_VM_FOLDER"]
        vcenter_vm_folder_object = nil
        dc = cluster.get_dc
        if !!vcenter_vm_folder && !vcenter_vm_folder.empty?
            vcenter_vm_folder_object = dc.item.find_folder(vcenter_vm_folder)
        end
        vcenter_vm_folder_object = vc_template.parent if vcenter_vm_folder_object.nil?

        if ds.instance_of? RbVmomi::VIM::StoragePod
            # The VM is cloned using the Storage Resource Manager for StoragePods
            begin
                vm = storagepod_clonevm_task(vc_template, vcenter_name,
                                             clone_spec, ds, vcenter_vm_folder_object, dc)
            rescue Exception => e
                raise "Cannot clone VM Template to StoragePod: #{e.message}"
            end
        else
            vm = nil
            begin
                vm = vc_template.CloneVM_Task(
                    :folder => vcenter_vm_folder_object,
                    :name   => vcenter_name,
                    :spec   => clone_spec).wait_for_completion
            rescue Exception => e
                if !e.message.start_with?('DuplicateName')
                    raise "Cannot clone VM Template: #{e.message}\n#{e.backtrace}"
                end

                vm_folder = dc.vm_folder
                vm_folder.fetch!
                vm = vm_folder.items
                        .select{|k,v| v.item.name == vcenter_name}
                        .values.first.item rescue nil

                if vm
                    # Detach all persistent disks to avoid accidental destruction
                    detach_persistent_disks(vm)

                    vm.Destroy_Task.wait_for_completion
                    vm = vc_template.CloneVM_Task(
                        :folder => vcenter_vm_folder_object,
                        :name   => vcenter_name,
                        :spec   => clone_spec).wait_for_completion
                else
                    raise "Cannot clone VM Template"
                end
            end
        end
        # @item is now populated with the newly created VM
        @item = vm

        return self['_ref']
    end
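
    # Hypothetical deploy-time call (one_vm is an OpenNebula::VirtualMachine
    # already loaded with info, vi_client a connected VIClient):
    #
    #   vm = VCenterDriver::VirtualMachine.new
    #   vm_ref = vm.clone_vm(one_vm, vi_client)  # returns the vCenter moref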

    def storagepod_clonevm_task(vc_template, vcenter_name, clone_spec, storpod, vcenter_vm_folder_object, dc)

        storage_manager = vc_template
                            ._connection.serviceContent.storageResourceManager

        storage_spec = RbVmomi::VIM.StoragePlacementSpec(
            type: 'clone',
            cloneName: vcenter_name,
            folder: vcenter_vm_folder_object,
            podSelectionSpec: RbVmomi::VIM.StorageDrsPodSelectionSpec(storagePod: storpod),
            vm: vc_template,
            cloneSpec: clone_spec
        )

        # Query a storage placement recommendation
        result = storage_manager
                    .RecommendDatastores(storageSpec: storage_spec) rescue nil

        raise "Could not get placement specification for StoragePod" if result.nil?

        if !result.respond_to?(:recommendations) || result.recommendations.size == 0
            raise "Could not get placement specification for StoragePod"
        end

        # Get recommendation key to be applied
        key = result.recommendations.first.key ||= ''
        raise "Missing Datastore recommendation for StoragePod" if key.empty?

        begin
            apply_sr = storage_manager
                            .ApplyStorageDrsRecommendation_Task(key: [key])
                            .wait_for_completion
            return apply_sr.vm
        rescue Exception => e
            if !e.message.start_with?('DuplicateName')
                raise "Cannot clone VM Template: #{e.message}\n#{e.backtrace}"
            end

            # The VM already exists, try to find the vm
            vm_folder = dc.vm_folder
            vm_folder.fetch!
            vm = vm_folder.items
                    .select{|k,v| v.item.name == vcenter_name}
                    .values.first.item rescue nil

            if vm

                begin
                    # Detach all persistent disks to avoid accidental destruction
                    detach_persistent_disks(vm)

                    # Destroy the VM with any disks still attached to it
                    vm.Destroy_Task.wait_for_completion

                    # Query a storage placement recommendation
                    result = storage_manager.RecommendDatastores(storageSpec: storage_spec) rescue nil

                    raise "Could not get placement specification for StoragePod" if result.nil?

                    if !result.respond_to?(:recommendations) || result.recommendations.size == 0
                        raise "Could not get placement specification for StoragePod"
                    end

                    # Get recommendation key to be applied
                    key = result.recommendations.first.key ||= ''
                    raise "Missing Datastore recommendation for StoragePod" if key.empty?

                    apply_sr = storage_manager
                            .ApplyStorageDrsRecommendation_Task(key: [key])
                            .wait_for_completion
                    return apply_sr.vm
                rescue Exception => e
                   raise "Failure applying recommendation while cloning VM: #{e.message}"
                end
            end
        end
    end

    
1246
    # @return clone parameters spec hash
1247
    def spec_hash_clone(disk_move_type)
1248
        # Relocate spec
1249
        relocate_spec_params = {}
1250

    
1251
        relocate_spec_params[:pool] = get_rp
1252
        relocate_spec_params[:diskMoveType] = disk_move_type
1253

    
1254
        ds = get_ds
1255

    
1256
        relocate_spec_params[:datastore] = ds if ds.instance_of? RbVmomi::VIM::Datastore
1257

    
1258
        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
1259
                                                         relocate_spec_params)
1260

    
1261
        # Running flag - prevents spurious poweroff states in the VM
1262
        running_flag = [{ :key => "opennebula.vm.running", :value => "no"}]
1263

    
1264
        running_flag_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
1265
            { :extraConfig => running_flag }
1266
        )
1267

    
1268
        clone_parameters = {
1269
            :location => relocate_spec,
1270
            :powerOn  => false,
1271
            :template => false,
1272
            :config   => running_flag_spec
1273
        }
1274

    
1275
        cs = get_customization
1276
        clone_parameters[:customization] = cs if cs
1277

    
1278
        clone_parameters
1279
    end
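
    # Illustrative shape of the hash built above (a full clone without a
    # customization spec):
    #
    #   { :location => <VirtualMachineRelocateSpec: pool, diskMoveType, ds>,
    #     :powerOn  => false,
    #     :template => false,
    #     :config   => <VirtualMachineConfigSpec setting
    #                   opennebula.vm.running = "no"> }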

    def reference_unmanaged_devices(template_ref)

        extraconfig   = []
        device_change = []

        # Get unmanaged disks in OpenNebula's VM template
        xpath = "TEMPLATE/DISK[OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\"]"
        unmanaged_disks = one_item.retrieve_xmlelements(xpath)

        if !unmanaged_disks.empty?

            # Get vcenter VM disks to know the real path of the cloned disk
            vcenter_disks = get_vcenter_disks

            # Create an array with the paths of the disks in the vcenter template
            template = VCenterDriver::Template.new_from_ref(template_ref, vi_client)
            template_disks = template.get_vcenter_disks
            template_disks_vector = []
            template_disks.each do |d|
                template_disks_vector << d[:path_wo_ds]
            end

            # Try to find the index of each unmanaged disk in the template disks
            unmanaged_disks.each do |unmanaged_disk|
                index = template_disks_vector.index(unmanaged_disk["SOURCE"])
                if index
                    reference = {}
                    reference[:key]   = "opennebula.disk.#{unmanaged_disk["DISK_ID"]}"
                    reference[:value] = "#{vcenter_disks[index][:key]}"
                    extraconfig << reference
                end
            end
        end

        # Add info for existing nics in the template in the vm xml
        xpath = "TEMPLATE/NIC[OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\"]"
        unmanaged_nics = one_item.retrieve_xmlelements(xpath)

        if !unmanaged_nics.empty?
            index = 0
            self["config.hardware.device"].each do |device|
                if is_nic?(device)
                    # Set the MAC address defined in OpenNebula's NIC element
                    device.macAddress = unmanaged_nics[index]["MAC"]
                    device_change << { :device => device, :operation => :edit }
                    index += 1
                end
            end
        end

        # Save in extraconfig the key for unmanaged disks
        if !extraconfig.empty? || !device_change.empty?
            spec = {}
            spec[:extraConfig]  = extraconfig if !extraconfig.empty?
            spec[:deviceChange] = device_change if !device_change.empty?
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        end
    end
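
    # The extraconfig references built above map an OpenNebula DISK_ID to a
    # vCenter device key, e.g. (illustrative values):
    #
    #   [{ :key => "opennebula.disk.0", :value => "2001" }]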

    def resize_unmanaged_disks
        resize_hash = {}
        disks = []

        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Look for unmanaged disks with original size changed
        xpath = "TEMPLATE/DISK[(OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\") and boolean(ORIGINAL_SIZE) and ORIGINAL_SIZE != SIZE]"
        unmanaged_resized_disks = one_item.retrieve_xmlelements(xpath)

        return if unmanaged_resized_disks.empty?

        # Cannot resize linked cloned disks
        if one_item["USER_TEMPLATE/VCENTER_LINKED_CLONES"] &&
           one_item["USER_TEMPLATE/VCENTER_LINKED_CLONES"] == "YES"
            raise "Linked cloned disks cannot be resized."
        end

        unmanaged_resized_disks.each do |disk|
            vc_disks.each do |vcenter_disk|
                if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                    device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i

                    if device_key == vcenter_disk[:key].to_i

                        break if disk["SIZE"].to_i <= disk["ORIGINAL_SIZE"].to_i

                        # Edit capacity setting the new size in KB
                        d = vcenter_disk[:device]
                        d.capacityInKB = disk["SIZE"].to_i * 1024
                        disks << { :device => d, :operation => :edit }
                        break
                    end
                end
            end
        end

        if !disks.empty?
            resize_hash[:deviceChange] = disks
            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
        end
    end

    def create_storagedrs_disks(device_change_spod, device_change_spod_ids)

        sm = get_sm
        disk_locator = []
        extra_config = []

        device_change_spod.each do |device_spec|
            disk_locator << RbVmomi::VIM.PodDiskLocator(diskId: device_spec[:device].key)
        end

        spec = {}
        spec[:deviceChange] = device_change_spod

        # Disk locator is required for AddDisk
        vmpod_hash = {}
        vmpod_hash[:storagePod] = get_ds
        vmpod_hash[:disk] = disk_locator
        vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)

        # The storage pod selection requires initialize
        spod_hash = {}
        spod_hash[:initialVmConfig] = [ vmpod_config ]
        spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
        storage_spec = RbVmomi::VIM.StoragePlacementSpec(
            type: :reconfigure,
            podSelectionSpec: spod_select,
            vm: self['_ref'],
            configSpec: spec
        )

        # Query a storage placement recommendation
        result = sm.RecommendDatastores(storageSpec: storage_spec) rescue nil

        raise "Could not get placement specification for StoragePod" if result.nil?

        if !result.respond_to?(:recommendations) || result.recommendations.size == 0
            raise "Could not get placement specification for StoragePod"
        end

        # Get the recommendation key to be applied
        key = result.recommendations.first.key || ''
        raise "Missing Datastore recommendation for StoragePod" if key.empty?

        # Apply the recommendation
        sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion

        # Set references in opennebula.disk elements
        device_change_spod.each do |device_spec|
            unit_number    = device_spec[:device].unitNumber
            controller_key = device_spec[:device].controllerKey
            key            = get_vcenter_disk_key(unit_number, controller_key)
            disk_id        = device_change_spod_ids["#{controller_key}-#{unit_number}"]
            reference      = {}
            reference[:key]   = "opennebula.disk.#{disk_id}"
            reference[:value] = key.to_s
            extra_config << reference
        end

        extra_config
    end
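
    # The extra_config array returned above is consumed by #reconfigure, which
    # merges it into the VirtualMachineConfigSpec, e.g. (argument names assumed
    # for the example):
    #
    #   spec_hash[:extraConfig] = create_storagedrs_disks(spod_specs, spod_ids)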

    def reconfigure
        extraconfig   = []
        device_change = []

        # Unmanaged keys
        unmanaged_keys = get_unmanaged_keys

        # Get disk devices in the vm
        vc_disks = get_vcenter_disks

        # Get an array with disk paths in OpenNebula's vm template
        disks_in_onevm_vector = disks_in_onevm(unmanaged_keys, vc_disks)

        # As the original template may have been modified in OpenNebula
        # but not in vcenter, we must detach disks that are in vcenter
        # but not in OpenNebula's vm template
        if is_new?
            device_change, extra_config = device_detach_disks(disks_in_onevm_vector, unmanaged_keys, vc_disks)
            if !device_change.empty?
                spec_hash = {}
                spec_hash[:deviceChange] = device_change if !device_change.empty?
                spec_hash[:extraConfig] = extra_config  if !extra_config.empty?

                # Reconfigure for disks detached from the original template
                spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
                @item.ReconfigVM_Task(:spec => spec).wait_for_completion

                # Get disk devices in the vm again after the reconfigure
                vc_disks = get_vcenter_disks
            end
        end

        # Now reconfigure disks, nics and extraconfig for the VM
        device_change = []

        # Get token and context
        extraconfig += extraconfig_context

        # VNC configuration (for the config_array hash)
        extraconfig += extraconfig_vnc

        # Set CPU, memory and extraconfig
        num_cpus = one_item["TEMPLATE/VCPU"] || 1

        spec_hash = {
            :numCPUs      => num_cpus.to_i,
            :memoryMB     => one_item["TEMPLATE/MEMORY"],
            :extraConfig  => extraconfig
        }

        # device_change hash (nics)
        device_change += device_change_nics

        # Now attach disks that are in OpenNebula's template but not in vcenter,
        # e.g. those that have been attached in poweroff
        device_change_ds, device_change_spod, device_change_spod_ids = device_attach_disks(disks_in_onevm_vector, vc_disks)
        device_change += device_change_ds

        # Create volatile disks in StorageDRS if any
        if !device_change_spod.empty?
            spec_hash[:extraConfig] = create_storagedrs_disks(device_change_spod, device_change_spod_ids)
        end

        # Common reconfigure task
        spec_hash[:deviceChange] = device_change
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    end
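
    # For reference, a minimal reconfigure looks like this (illustrative
    # values; the real spec is assembled by the helpers below):
    #
    #   spec = RbVmomi::VIM.VirtualMachineConfigSpec(:numCPUs  => 2,
    #                                                :memoryMB => 2048)
    #   @item.ReconfigVM_Task(:spec => spec).wait_for_completion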

    def extraconfig_context
        context_text = "# Context variables generated by OpenNebula\n"
        one_item.each('TEMPLATE/CONTEXT/*') do |context_element|
            next if !context_element.text
            # Use the block form of gsub so "\\'" is inserted literally and not
            # interpreted as the post-match backreference
            context_text += context_element.name + "='" +
                            context_element.text.gsub("'") { "\\'" } + "'\n"
        end

        # Token
        token = File.read(File.join(VAR_LOCATION,
                        'vms',
                        one_item['ID'],
                        'token.txt')).chomp rescue nil

        context_text += "ONEGATE_TOKEN='#{token}'\n" if token

        [
            { :key => "guestinfo.opennebula.context",
              :value => Base64.encode64(context_text) }
        ]
    end
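
    # After Base64-decoding guestinfo.opennebula.context the guest sees
    # something like (illustrative values):
    #
    #   # Context variables generated by OpenNebula
    #   ETH0_IP='10.0.0.10'
    #   ONEGATE_TOKEN='...'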

    def extraconfig_vnc
        if one_item["TEMPLATE/GRAPHICS"]
            vnc_port   = one_item["TEMPLATE/GRAPHICS/PORT"]
            vnc_listen = one_item["TEMPLATE/GRAPHICS/LISTEN"] || "0.0.0.0"
            vnc_keymap = one_item["TEMPLATE/GRAPHICS/KEYMAP"]

            conf = [ {:key => "remotedisplay.vnc.enabled", :value => "TRUE"},
                     {:key => "remotedisplay.vnc.port",    :value => vnc_port},
                     {:key => "remotedisplay.vnc.ip",      :value => vnc_listen}]

            conf += [{:key => "remotedisplay.vnc.keymap",
                      :value => vnc_keymap}] if vnc_keymap

            conf
        else
            []
        end
    end
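
    # Resulting extraConfig rows for a VM with VNC on port 5905 would look
    # like (illustrative values):
    #
    #   [{:key => "remotedisplay.vnc.enabled", :value => "TRUE"},
    #    {:key => "remotedisplay.vnc.port",    :value => "5905"},
    #    {:key => "remotedisplay.vnc.ip",      :value => "0.0.0.0"}]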

    def device_change_nics
        # Final list of changes to be applied in vCenter
        device_change = []

        # Hash of interfaces from the OpenNebula xml
        nics_in_template = {}
        xpath = "TEMPLATE/NIC"
        one_item.each(xpath) { |nic|
            nics_in_template[nic["MAC"]] = nic
        }

        # Check nics in the VM
        self["config.hardware.device"].each do |dv|
            if is_nic?(dv)
                if nics_in_template.key?(dv.macAddress)
                    # Remove the nic that is already in the XML to avoid duplicates
                    nics_in_template.delete(dv.macAddress)
                else
                    # B4897 - It was detached in poweroff, remove it from the VM
                    device_change << {
                        :operation => :remove,
                        :device    => dv
                    }
                end
            end
        end

        # Attach new nics (nics_in_template now contains only the interfaces
        # not present in the VM in vCenter)
        nics_in_template.each do |key, nic|
            device_change << calculate_add_nic_spec(nic)
        end

        return device_change
    end

    # Regenerate context when devices are hot-plugged (reconfigure)
    def regenerate_context
        spec_hash = { :extraConfig  => extraconfig_context }
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

        begin
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        rescue Exception => e
            raise "Cannot regenerate context for VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Returns an array of actions to be included in :deviceChange
    def calculate_add_nic_spec(nic)

        mac       = nic["MAC"]
        pg_name   = nic["BRIDGE"]
        model     = nic["VCENTER_NET_MODEL"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/MODEL")
        vnet_ref  = nic["VCENTER_NET_REF"]
        backing   = nil

        limit_in  = nic["INBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_PEAK_BW")
        limit_out = nic["OUTBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW")
        limit     = nil

        if limit_in && limit_out
            limit = ([limit_in.to_i, limit_out.to_i].min / 1024) * 8
        end

        rsrv_in  = nic["INBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_AVG_BW")
        rsrv_out = nic["OUTBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_AVG_BW")
        rsrv     = nil

        if rsrv_in || rsrv_out
            rsrv = ([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
        end

        network = self["runtime.host"].network.select do |n|
            n._ref == vnet_ref || n.name == pg_name
        end

        network = network.first

        card_num = 1 # start at one, we want the next available id

        @item["config.hardware.device"].each do |dv|
            card_num += 1 if is_nic?(dv)
        end

        nic_card = case model
                        when "virtuale1000", "e1000"
                            RbVmomi::VIM::VirtualE1000
                        when "virtuale1000e", "e1000e"
                            RbVmomi::VIM::VirtualE1000e
                        when "virtualpcnet32", "pcnet32"
                            RbVmomi::VIM::VirtualPCNet32
                        when "virtualsriovethernetcard", "sriovethernetcard"
                            RbVmomi::VIM::VirtualSriovEthernetCard
                        when "virtualvmxnetm", "vmxnetm"
                            RbVmomi::VIM::VirtualVmxnetm
                        when "virtualvmxnet2", "vmnet2"
                            RbVmomi::VIM::VirtualVmxnet2
                        when "virtualvmxnet3", "vmxnet3"
                            RbVmomi::VIM::VirtualVmxnet3
                        else # If none matches, use VirtualE1000
                            RbVmomi::VIM::VirtualE1000
                   end

        if network.class == RbVmomi::VIM::Network
            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
                        :deviceName => pg_name,
                        :network    => network)
        else
            port    = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
                        :switchUuid =>
                                network.config.distributedVirtualSwitch.uuid,
                        :portgroupKey => network.key)
            backing =
              RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
                 :port => port)
        end

        card_spec = {
            :key => 0,
            :deviceInfo => {
                :label => "net" + card_num.to_s,
                :summary => pg_name
            },
            :backing     => backing,
            :addressType => mac ? 'manual' : 'generated',
            :macAddress  => mac
        }

        if limit && limit > 0
            ra_spec = {}
            # Guard against a nil reservation to avoid a NoMethodError
            rsrv = limit if rsrv && rsrv > limit
            ra_spec[:limit] = limit if limit
            ra_spec[:reservation] = rsrv if rsrv
            ra_spec[:share] = RbVmomi::VIM.SharesInfo({
                    :level => RbVmomi::VIM.SharesLevel("normal"),
                    :shares => 0
                })
            card_spec[:resourceAllocation] =
               RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
        end

        {
            :operation => :add,
            :device    => nic_card.new(card_spec)
        }
    end
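
    # Worked example for the limit/reservation math above, assuming the
    # OpenNebula *_PEAK_BW / *_AVG_BW attributes are expressed in KB/s and
    # the vCenter allocation in Mbit/s:
    #
    #   limit_in = limit_out = 51200      # KB/s
    #   ([51200, 51200].min / 1024) * 8   # => 400 Mbit/s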

    # Returns an array of actions to be included in :deviceChange
    def calculate_add_nic_spec_autogenerate_mac(nic)

        pg_name   = nic["BRIDGE"]
        model     = nic["VCENTER_NET_MODEL"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/MODEL")
        vnet_ref  = nic["VCENTER_NET_REF"]
        backing   = nil

        limit_in  = nic["INBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_PEAK_BW")
        limit_out = nic["OUTBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW")
        limit     = nil

        if limit_in && limit_out
            limit = ([limit_in.to_i, limit_out.to_i].min / 1024) * 8
        end

        rsrv_in  = nic["INBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_AVG_BW")
        rsrv_out = nic["OUTBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_AVG_BW")
        rsrv     = nil

        if rsrv_in || rsrv_out
            rsrv = ([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
        end

        network = self["runtime.host"].network.select do |n|
            n._ref == vnet_ref || n.name == pg_name
        end

        network = network.first

        card_num = 1 # start at one, we want the next available id

        @item["config.hardware.device"].each do |dv|
            card_num += 1 if is_nic?(dv)
        end

        nic_card = case model
                        when "virtuale1000", "e1000"
                            RbVmomi::VIM::VirtualE1000
                        when "virtuale1000e", "e1000e"
                            RbVmomi::VIM::VirtualE1000e
                        when "virtualpcnet32", "pcnet32"
                            RbVmomi::VIM::VirtualPCNet32
                        when "virtualsriovethernetcard", "sriovethernetcard"
                            RbVmomi::VIM::VirtualSriovEthernetCard
                        when "virtualvmxnetm", "vmxnetm"
                            RbVmomi::VIM::VirtualVmxnetm
                        when "virtualvmxnet2", "vmnet2"
                            RbVmomi::VIM::VirtualVmxnet2
                        when "virtualvmxnet3", "vmxnet3"
                            RbVmomi::VIM::VirtualVmxnet3
                        else # If none matches, use VirtualE1000
                            RbVmomi::VIM::VirtualE1000
                   end

        if network.class == RbVmomi::VIM::Network
            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
                        :deviceName => pg_name,
                        :network    => network)
        else
            port    = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
                        :switchUuid =>
                                network.config.distributedVirtualSwitch.uuid,
                        :portgroupKey => network.key)
            backing =
              RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
                 :port => port)
        end

        card_spec = {
            :key => 0,
            :deviceInfo => {
                :label => "net" + card_num.to_s,
                :summary => pg_name
            },
            :backing     => backing,
            :addressType => 'generated'
        }

        if limit && limit > 0
            ra_spec = {}
            # Guard against a nil reservation to avoid a NoMethodError
            rsrv = limit if rsrv && rsrv > limit
            ra_spec[:limit] = limit if limit
            ra_spec[:reservation] = rsrv if rsrv
            ra_spec[:share] = RbVmomi::VIM.SharesInfo({
                    :level => RbVmomi::VIM.SharesLevel("normal"),
                    :shares => 0
                })
            card_spec[:resourceAllocation] =
               RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
        end

        {
            :operation => :add,
            :device    => nic_card.new(card_spec)
        }
    end

    # Add NIC to VM
    def attach_nic
        spec_hash = {}
        nic = nil

        # Extract nic from driver action
        nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first

        begin
            # A new NIC requires a vcenter spec
            attach_nic_array = []
            attach_nic_array << calculate_add_nic_spec(nic)
            spec_hash[:deviceChange] = attach_nic_array if !attach_nic_array.empty?

            # Reconfigure VM
            spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        rescue Exception => e
            raise "Cannot attach NIC to VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach NIC from VM
    def detach_nic
        spec_hash = {}
        nic = nil

        # Extract nic from driver action
        nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first
        mac = nic["MAC"]

        # Get VM nic element if it has a device with that mac
        nic_device = @item["config.hardware.device"].find do |device|
            is_nic?(device) && (device.macAddress == mac)
        end rescue nil

        return if nic_device.nil? # Silently ignore if the nic is not found

        # Remove NIC from VM in the ReconfigVM_Task
        spec_hash[:deviceChange] = [{
            :operation => :remove,
            :device    => nic_device }]

        begin
            @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach NIC from VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach all nics; useful when removing port groups and switches
    # so that they are no longer in use
    def detach_all_nics
        spec_hash = {}
        device_change = []

        @item["config.hardware.device"].each do |device|
            if is_nic?(device)
                device_change << {:operation => :remove, :device => device}
            end
        end

        # Remove NICs from VM in the ReconfigVM_Task
        spec_hash[:deviceChange] = device_change

        begin
            @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach all NICs from VM: #{e.message}\n#{e.backtrace}"
        end
    end

    def get_device_filename_and_ds_from_key(key, vc_disks)
        device = vc_disks.select{ |d| d[:key].to_i == key.to_i }.first rescue nil
        return device
    end

    def disks_in_onevm(unmanaged_keys, vc_disks)
        onevm_disks_vector = []

        disks = one_item.retrieve_xmlelements("TEMPLATE/DISK")
        disks.each do |disk|
            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i
                disk_hash = get_device_filename_and_ds_from_key(device_key, vc_disks)
                onevm_disks_vector << disk_hash[:path_wo_ds] if disk_hash
                next
            end

            img_name  = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)
            onevm_disks_vector << "#{img_name}"
        end

        return onevm_disks_vector
    end

    def device_attach_disks(onevm_disks_vector, vc_disks)

        disks = one_item.retrieve_xmlelements("TEMPLATE/DISK")

        vc_disks.each do |d|
            index = onevm_disks_vector.index(d[:path_wo_ds])
            if index
                disks.delete_at(index)
                onevm_disks_vector.delete_at(index)
            end
        end

        return [],[],{} if disks.empty?

        attach_disk_array = []
        attach_spod_array = []
        attach_spod_disk_info = {}

        position = 0
        disks.each do |disk|
            storpod = disk["VCENTER_DS_REF"].start_with?('group-')
            if storpod
                spec = calculate_add_disk_spec(disk, position)
                attach_spod_array << spec
                unit_ctrl = "#{spec[:device].controllerKey}-#{spec[:device].unitNumber}"
                attach_spod_disk_info[unit_ctrl] = disk["DISK_ID"]
            else
                attach_disk_array << calculate_add_disk_spec(disk, position)
            end

            position += 1
        end

        return attach_disk_array, attach_spod_array, attach_spod_disk_info
    end

    def device_detach_disks(onevm_disks_vector, unmanaged_keys, vc_disks)
        detach_disk_array = []
        extra_config      = []
        ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
        if ipool.respond_to?(:message)
            raise "Could not get OpenNebula ImagePool: #{ipool.message}"
        end

        vc_disks.each do |d|
            if !onevm_disks_vector.index(d[:path_wo_ds])

                # If the disk to be detached is not persistent, detach and destroy it
                persistent = VCenterDriver::VIHelper.find_persistent_image_by_source(d[:path_wo_ds], ipool)
                if !persistent
                    detach_disk_array << {
                        :fileOperation => :destroy,
                        :operation => :remove,
                        :device    => d[:device]
                    }
                end

                # Remove the opennebula.disk reference if it exists
                unmanaged_keys.each do |key, value|
                    if value.to_i == d[:key].to_i
                        reference = {}
                        reference[:key]   = key
                        reference[:value] = ""
                        extra_config << reference
                        break
                    end
                end
            end
        end

        return detach_disk_array, extra_config
    end

    # Attach DISK to VM (hotplug)
    def attach_disk
        # TODO position? and disk size for volatile?

        spec_hash = {}
        disk = nil
        device_change = []

        # Extract unmanaged_keys
        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Extract disk from driver action
        disk = one_item.retrieve_xmlelements("TEMPLATE/DISK[ATTACH='YES']").first

        # Check if we're dealing with a StoragePod SYSTEM ds
        storpod = disk["VCENTER_DS_REF"].start_with?('group-')

        # Check if the disk being attached is already connected to the VM
        raise "DISK is already connected to VM" if disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        # Generate vCenter spec and reconfigure VM
        device_change << calculate_add_disk_spec(disk)
        raise "Could not generate DISK spec" if device_change.empty?

        spec_hash[:deviceChange] = device_change
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

        begin
            if storpod
                # Ask for a StorageDRS recommendation to reconfigure the VM (AddDisk)
                sm = get_sm

                # Disk id is -1 as we don't know what disk id is going to be set
                disk_locator = [ RbVmomi::VIM.PodDiskLocator(diskId: -1) ]

                # Disk locator is required for AddDisk
                vmpod_hash = {}
                vmpod_hash[:storagePod] = get_ds
                vmpod_hash[:disk] = disk_locator
                vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)

                # The storage pod selection requires initialize
                spod_hash = {}
                spod_hash[:initialVmConfig] = [ vmpod_config ]
                spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
                storage_spec = RbVmomi::VIM.StoragePlacementSpec(
                    type: :reconfigure,
                    podSelectionSpec: spod_select,
                    vm: self['_ref'],
                    configSpec: spec
                )

                # Query a storage placement recommendation
                result = sm.RecommendDatastores(storageSpec: storage_spec) rescue nil

                raise "Could not get placement specification for StoragePod" if result.nil?

                if !result.respond_to?(:recommendations) || result.recommendations.size == 0
                    raise "Could not get placement specification for StoragePod"
                end

                # Get the recommendation key to be applied
                key = result.recommendations.first.key || ''
                raise "Missing Datastore recommendation for StoragePod" if key.empty?

                # Apply the recommendation
                sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion

                # Add the key for the volatile disk to the unmanaged opennebula.disk.id variables
                unit_number    = spec_hash[:deviceChange][0][:device].unitNumber
                controller_key = spec_hash[:deviceChange][0][:device].controllerKey
                key = get_vcenter_disk_key(unit_number, controller_key)
                spec_hash = {}
                reference = {}
                reference[:key]   = "opennebula.disk.#{disk["DISK_ID"]}"
                reference[:value] = key.to_s
                spec_hash[:extraConfig] = [ reference ]
                @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
            else
                @item.ReconfigVM_Task(:spec => spec).wait_for_completion
            end
        rescue Exception => e
            raise "Cannot attach DISK to VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach persistent disks to avoid their accidental destruction
    def detach_persistent_disks(vm)
        spec_hash = {}
        spec_hash[:deviceChange] = []
        ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
        if ipool.respond_to?(:message)
            raise "Could not get OpenNebula ImagePool: #{ipool.message}"
        end

        vm.config.hardware.device.each do |disk|
            if is_disk_or_cdrom?(disk)
                # Try to find out whether the disk is persistent
                source = disk.backing.fileName.sub(/^\[(.*?)\] /, "")
                persistent = VCenterDriver::VIHelper.find_persistent_image_by_source(source, ipool)
                if persistent
                    spec_hash[:deviceChange] << {
                        :operation => :remove,
                        :device => disk
                    }
                end
            end

        end

        return nil if spec_hash[:deviceChange].empty?

        begin
            vm.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach all DISKs from VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach DISK from VM
    def detach_disk(disk)
        spec_hash = {}
        img_path = ""
        ds_ref = nil

        # Extract unmanaged disk keys
        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Get vcenter device to be detached and remove if found
        device = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        if device
            img_path << device[:path_wo_ds]

            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                reference = {}
                reference[:key]   = "opennebula.disk.#{disk["DISK_ID"]}"
                reference[:value] = ""
                spec_hash[:extraConfig] = [ reference ]
            end

            ds_ref = device[:datastore]._ref

            # Generate vCenter spec and reconfigure VM
            spec_hash[:deviceChange] = [{
                :operation => :remove,
                :device => device[:device]
            }]

            begin
                @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
            rescue Exception => e
                raise "Cannot detach DISK from VM: #{e.message}\n#{e.backtrace}"
            end
        end

        return ds_ref, img_path
    end

    # Get the vcenter device representing the DISK object (hotplug)
    def disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        img_name = ""
        device_found = nil
        disk_id = disk["DISK_ID"]

        vc_disks.each do |d|
            # Check if we are dealing with the unmanaged disks present in the template when cloned

            if unmanaged_keys.key?("opennebula.disk.#{disk_id}") && d[:key] == unmanaged_keys["opennebula.disk.#{disk_id}"].to_i
                device_found = d
                break
            end

            # Otherwise, try to find the device by the expected image name
            img_name  = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)
            if d[:path_wo_ds] == "#{img_name}"
                device_found = d
                break
            end
        end

        return device_found
    end

    def calculate_add_disk_spec(disk, position=0)
        img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)
        type     = disk["TYPE"]
        size_kb  = disk["SIZE"].to_i * 1024

        if type == "CDROM"
            # CDROM drive will be found in the IMAGE DS
            ds_ref   = disk["VCENTER_DS_REF"]
            ds       = VCenterDriver::Storage.new_from_ref(ds_ref, @vi_client)
            ds_name  = ds['name']

            # CDROM can only be added when the VM is in poweroff state
            vmdk_backing = RbVmomi::VIM::VirtualCdromIsoBackingInfo(
                :datastore => ds.item,
                :fileName  => "[#{ds_name}] #{img_name}"
            )

            if @item["summary.runtime.powerState"] != "poweredOff"
                raise "The CDROM image can only be added as an IDE device "\
                      "when the VM is in the powered off state"
            end

            controller, unit_number = find_free_ide_controller(position)

            device = RbVmomi::VIM::VirtualCdrom(
                :backing       => vmdk_backing,
                :key           => -1,
                :controllerKey => controller.key,
                :unitNumber    => unit_number,

                :connectable => RbVmomi::VIM::VirtualDeviceConnectInfo(
                    :startConnected    => true,
                    :connected         => true,
                    :allowGuestControl => true
                )
            )

            return {
                :operation => :add,
                :device => device
            }

        else
            # TYPE is a regular disk (not a CDROM)

            controller, unit_number = find_free_controller(position)

            storpod = disk["VCENTER_DS_REF"].start_with?('group-')
            if storpod
                vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                  :diskMode  => 'persistent',
                  :fileName  => ""
                )
            else
                ds           = get_effective_ds(disk)
                ds_name      = ds['name']
                vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                  :datastore => ds.item,
                  :diskMode  => 'persistent',
                  :fileName  => "[#{ds_name}] #{img_name}"
                )
            end

            device = RbVmomi::VIM::VirtualDisk(
              :backing       => vmdk_backing,
              :capacityInKB  => size_kb,
              :controllerKey => controller.key,
              :key           => (-1 - position),
              :unitNumber    => unit_number
            )

            config = {
               :operation => :add,
               :device    => device
            }

            # For StorageDRS vCenter must create the file
            config[:fileOperation] = :create if storpod

            return config
        end
    end
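
    # Note: newly added devices use negative :key values (-1, -1 - position);
    # these are temporary placeholders that vCenter replaces with real device
    # keys once the ReconfigVM_Task is applied.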

    def resize_unmanaged_disk(disk, new_size)

        resize_hash = {}
        disks       = []
        found       = false

        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        vc_disks.each do |vcenter_disk|
            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i

                if device_key == vcenter_disk[:key].to_i

                    if disk["SIZE"].to_i <= disk["ORIGINAL_SIZE"].to_i
                        raise "Disk size cannot be shrunk."
                    end

                    # Edit capacity setting the new size in KB
                    d = vcenter_disk[:device]
                    d.capacityInKB = disk["SIZE"].to_i * 1024
                    disks << { :device => d, :operation => :edit }

                    found = true
                    break
                end
            end
        end

        raise "Unmanaged disk could not be found to apply the resize operation." if !found

        if !disks.empty?
            resize_hash[:deviceChange] = disks
            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
        else
            raise "Device was not found after attaching it to VM in poweroff."
        end
    end

    def resize_managed_disk(disk, new_size)

        resize_hash = {}

        unmanaged_keys = get_unmanaged_keys
        vc_disks       = get_vcenter_disks

        # Get vcenter device to be detached and remove if found
        device         = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        # If the disk is being attached in poweroff, reconfigure the VM
        if !device
            spec_hash     = {}
            device_change = []

            # Get an array with disk paths in OpenNebula's vm template
            disks_in_onevm_vector = disks_in_onevm(unmanaged_keys, vc_disks)

            device_change_ds, device_change_spod, device_change_spod_ids = device_attach_disks(disks_in_onevm_vector, vc_disks)
            device_change += device_change_ds

            # Create volatile disks in StorageDRS if any
            if !device_change_spod.empty?
                spec_hash[:extraConfig] = create_storagedrs_disks(device_change_spod, device_change_spod_ids)
            end

            # Common reconfigure task
            spec_hash[:deviceChange] = device_change
            spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion

            # Check again if the device has now been attached
            unmanaged_keys = get_unmanaged_keys
            vc_disks       = get_vcenter_disks
            device         = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

            if !device
                raise "Device was not found after attaching it to VM in poweroff."
            end
        end

        # Resize the disk now that we know it's part of the VM
        if device
            vcenter_disk = device[:device]
            vcenter_disk.capacityInKB = new_size.to_i * 1024
            resize_hash[:deviceChange] = [{
                :operation => :edit,
                :device => vcenter_disk
            }]

            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
        end
    end

    def has_snapshots?
        self['rootSnapshot'] && !self['rootSnapshot'].empty?
    end

    def instantiated_as_persistent?
        begin
            !!one_item["TEMPLATE/CLONING_TEMPLATE_ID"]
        rescue
            return false # one_item may not be retrieved if deploy_id hasn't been set
        end
    end

    def find_free_ide_controller(position=0)

        free_ide_controllers = []
        ide_schema           = {}

        used_numbers      = []
        available_numbers = []

        @item["config.hardware.device"].each do |dev|
            if dev.is_a? RbVmomi::VIM::VirtualIDEController
                if ide_schema[dev.key].nil?
                    ide_schema[dev.key] = {}
                end

                ide_schema[dev.key][:device] = dev
            end

            next if dev.class != RbVmomi::VIM::VirtualCdrom
            used_numbers << dev.unitNumber
        end

        2.times do |ide_id|
            available_numbers << ide_id if used_numbers.grep(ide_id).length <= 0
        end

        ide_schema.keys.each do |controller|
            free_ide_controllers << ide_schema[controller][:device].deviceInfo.label
        end

        if free_ide_controllers.empty?
            raise "There are no free IDE controllers to connect this CDROM device"
        end

        available_controller_label = free_ide_controllers[0]

        controller = nil

        @item['config.hardware.device'].each do |device|
            if device.deviceInfo.label == available_controller_label
                controller = device
                break
            end
        end

        new_unit_number = available_numbers.sort[position]

        return controller, new_unit_number
    end

    def find_free_controller(position=0)
        free_scsi_controllers = []
        scsi_schema           = {}

        used_numbers      = []
        available_numbers = []

        @item["config.hardware.device"].each do |dev|
            if dev.is_a? RbVmomi::VIM::VirtualSCSIController
                if scsi_schema[dev.key].nil?
                    scsi_schema[dev.key] = {}
                end

                used_numbers << dev.scsiCtlrUnitNumber
                scsi_schema[dev.key][:device] = dev
            end

            next if dev.class != RbVmomi::VIM::VirtualDisk
            used_numbers << dev.unitNumber
        end

        15.times do |scsi_id|
            available_numbers << scsi_id if used_numbers.grep(scsi_id).length <= 0
        end

        scsi_schema.keys.each do |controller|
            free_scsi_controllers << scsi_schema[controller][:device].deviceInfo.label
        end

        if free_scsi_controllers.length > 0
            available_controller_label = free_scsi_controllers[0]
        else
            add_new_scsi(scsi_schema)
            return find_free_controller
        end

        controller = nil

        @item['config.hardware.device'].each do |device|
            if device.deviceInfo.label == available_controller_label
                controller = device
                break
            end
        end

        new_unit_number = available_numbers.sort[position]

        return controller, new_unit_number
    end

    def add_new_scsi(scsi_schema)
        controller = nil

        if scsi_schema.keys.length >= 4
            raise "Cannot add a new controller, maximum is 4."
        end

        scsi_key    = 0
        scsi_number = 0

        if scsi_schema.keys.length > 0 && scsi_schema.keys.length < 4
            scsi_key    = scsi_schema.keys.sort[-1] + 1
            scsi_number = scsi_schema[scsi_schema.keys.sort[-1]][:device].busNumber + 1
        end

        controller_device = RbVmomi::VIM::VirtualLsiLogicController(
            :key       => scsi_key,
            :busNumber => scsi_number,
            :sharedBus => :noSharing
        )

        device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
            :device    => controller_device,
            :operation => :add
        )

        vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
            :deviceChange => [device_config_spec]
        )

        @item.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion

        @item["config.hardware.device"].each do |device|
            if device.class == RbVmomi::VIM::VirtualLsiLogicController &&
                device.key == scsi_key

                controller = device.deviceInfo.label
            end
        end

        return controller
    end

    # Create a snapshot for the VM
    def create_snapshot(snap_id, snap_name)
        snapshot_hash = {
            :name        => snap_id,
            :description => "OpenNebula Snapshot: #{snap_name}",
            :memory      => true,
            :quiesce     => true
        }

        vcenter_version = @vi_client.vim.serviceContent.about.apiVersion rescue nil

        if vcenter_version != "5.5"
            begin
                @item.CreateSnapshot_Task(snapshot_hash).wait_for_completion
            rescue Exception => e
                raise "Cannot create snapshot for VM: #{e.message}\n#{e.backtrace}"
            end
        else
            # B#5045 - If vcenter is 5.5, the snapshot may take longer than
            # 15 minutes and does not report that it has finished through
            # wait_for_completion, so we use an active wait instead with a
            # timeout of 1440 minutes = 24 hours
            @item.CreateSnapshot_Task(snapshot_hash)

            snapshot_created  = false
            elapsed_minutes   = 0

            until snapshot_created || elapsed_minutes == 1440
                if !!@item['snapshot']
                    current_snapshot = @item['snapshot.currentSnapshot'] rescue nil
                    snapshot_found = find_snapshot_in_list(@item['snapshot.rootSnapshotList'], snap_id)
                    snapshot_created = !!snapshot_found && !!current_snapshot && current_snapshot._ref == snapshot_found._ref
                end
                sleep(60)
                elapsed_minutes += 1
            end
        end

        return snap_id
    end
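
    # Illustrative call (ids come from OpenNebula in the real flow); note
    # that :quiesce => true requires VMware Tools in the guest:
    #
    #   create_snapshot("0", "before-resize")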

    # Revert to a VM snapshot
    def revert_snapshot(snap_id)

        snapshot_list = self["snapshot.rootSnapshotList"]
        snapshot = find_snapshot_in_list(snapshot_list, snap_id)

        return nil if !snapshot

        begin
            revert_snapshot_hash = { :_this => snapshot }
            snapshot.RevertToSnapshot_Task(revert_snapshot_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot revert snapshot of VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Delete VM snapshot
    def delete_snapshot(snap_id)

        snapshot_list = self["snapshot.rootSnapshotList"]
        snapshot = find_snapshot_in_list(snapshot_list, snap_id)

        return nil if !snapshot

        begin
            delete_snapshot_hash = {
                :_this => snapshot,
                :removeChildren => false
            }
            snapshot.RemoveSnapshot_Task(delete_snapshot_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot delete snapshot of VM: #{e.message}\n#{e.backtrace}"
        end
    end

    def find_snapshot_in_list(list, snap_id)
        list.each do |i|
            if i.name == snap_id.to_s
                return i.snapshot
            elsif !i.childSnapshotList.empty?
                snap = find_snapshot_in_list(i.childSnapshotList, snap_id)
                return snap if snap
            end
        end rescue nil

        nil
    end

    ############################################################################
    # actions
    ############################################################################

    def shutdown
        begin
            @item.ShutdownGuest
            # Check if the VM has been powered off
            (0..VM_SHUTDOWN_TIMEOUT).each do
                break if @item.runtime.powerState == "poweredOff"
                sleep 1
            end
        rescue
            # Ignore ShutdownGuest exceptions, the VM may not have VMware Tools
        end

        # If the VM hasn't been powered off, do it now
        if @item.runtime.powerState != "poweredOff"
            poweroff_hard
        end
    end

    def destroy
        @item.Destroy_Task.wait_for_completion
    end

    def mark_as_template
        @item.MarkAsTemplate
    end

    def reset
        @item.ResetVM_Task.wait_for_completion
    end

    def suspend
        @item.SuspendVM_Task.wait_for_completion
    end

    def reboot
        @item.RebootGuest
    end

    def poweron
        ## If needed in the future, VMs can be powered on from the datacenter
        ## dc = get_dc
        ## dc.power_on_vm(@item)
        @item.PowerOnVM_Task.wait_for_completion
    end

    def is_powered_on?
        return @item.runtime.powerState == "poweredOn"
    end

    def poweroff_hard
        @item.PowerOffVM_Task.wait_for_completion
    end

    def remove_all_snapshots
        @item.RemoveAllSnapshots_Task.wait_for_completion
    end

    def set_running(state)
        value = state ? "yes" : "no"

        config_array = [
            { :key => "opennebula.vm.running", :value => value }
        ]
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(
            { :extraConfig => config_array }
        )

        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    end
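
    # Illustrative call: set_running(true) produces the extraConfig row
    # { :key => "opennebula.vm.running", :value => "yes" }.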

    ############################################################################
    # monitoring
    ############################################################################

    # monitor function used when the VMM poll action is called
    def monitor_poll_vm
        reset_monitor

        @state = state_to_c(self["summary.runtime.powerState"])

        if @state != VM_STATE[:active]
            reset_monitor
            return
        end

        cpuMhz = self["runtime.host.summary.hardware.cpuMhz"].to_f

        @monitor[:used_memory] = self["summary.quickStats.hostMemoryUsage"] * 1024

        used_cpu = self["summary.quickStats.overallCpuUsage"].to_f / cpuMhz
        used_cpu = used_cpu * 100
        @monitor[:used_cpu] = sprintf('%.2f', used_cpu)

        # Check for negative values
        @monitor[:used_memory] = 0 if @monitor[:used_memory].to_i < 0
        @monitor[:used_cpu]    = 0 if @monitor[:used_cpu].to_i < 0

        guest_ip_addresses = []
        self["guest.net"].each do |net|
            net.ipConfig.ipAddress.each do |ip|
                guest_ip_addresses << ip.ipAddress
            end if net.ipConfig && net.ipConfig.ipAddress
        end if self["guest.net"]

        @guest_ip_addresses = guest_ip_addresses.join(',')
2690

    
2691
        pm = self['_connection'].serviceInstance.content.perfManager
2692

    
2693
        provider = pm.provider_summary(@item)
2694

    
2695
        refresh_rate = provider.refreshRate
2696

    
2697
        if get_vm_id
2698
            stats = {}
2699

    
2700
            if (one_item["MONITORING/LAST_MON"] && one_item["MONITORING/LAST_MON"].to_i != 0 )
2701
                #Real time data stores max 1 hour. 1 minute has 3 samples
2702
                interval = (Time.now.to_i - one_item["MONITORING/LAST_MON"].to_i)
2703

    
2704
                #If last poll was more than hour ago get 3 minutes,
2705
                #else calculate how many samples since last poll
2706
                samples =  interval > 3600 ? 9 : (interval / refresh_rate) + 1
2707
                max_samples = samples > 0 ? samples : 1

                stats = pm.retrieve_stats(
                    [@item],
                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
                    'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
                    'virtualDisk.read','virtualDisk.write'],
                    {interval:refresh_rate, max_samples: max_samples}
                ) rescue {}
            else
                # First poll: get at least the latest 3 minutes = 9 samples
                stats = pm.retrieve_stats(
                    [@item],
                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
                    'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
                    'virtualDisk.read','virtualDisk.write'],
                    {interval:refresh_rate, max_samples: 9}
                ) rescue {}
            end

            if !stats.empty? && !stats.first[1][:metrics].empty?
                metrics = stats.first[1][:metrics]

                nettx_kbpersec = 0
                if metrics['net.transmitted']
                    metrics['net.transmitted'].each { |sample|
                        nettx_kbpersec += sample if sample > 0
                    }
                end

                netrx_kbpersec = 0
                if metrics['net.bytesRx']
                    metrics['net.bytesRx'].each { |sample|
                        netrx_kbpersec += sample if sample > 0
                    }
                end

                read_kbpersec = 0
                if metrics['virtualDisk.read']
                    metrics['virtualDisk.read'].each { |sample|
                        read_kbpersec += sample if sample > 0
                    }
                end

                read_iops = 0
                if metrics['virtualDisk.numberReadAveraged']
                    metrics['virtualDisk.numberReadAveraged'].each { |sample|
                        read_iops += sample if sample > 0
                    }
                end

                write_kbpersec = 0
                if metrics['virtualDisk.write']
                    metrics['virtualDisk.write'].each { |sample|
                        write_kbpersec += sample if sample > 0
                    }
                end

                write_iops = 0
                if metrics['virtualDisk.numberWriteAveraged']
                    metrics['virtualDisk.numberWriteAveraged'].each { |sample|
                        write_iops += sample if sample > 0
                    }
                end
            else
                nettx_kbpersec = 0
                netrx_kbpersec = 0
                read_kbpersec  = 0
                read_iops      = 0
                write_kbpersec = 0
                write_iops     = 0
            end

            # Accumulate values if present
            previous_nettx = @one_item && @one_item["MONITORING/NETTX"] ? @one_item["MONITORING/NETTX"].to_i : 0
            previous_netrx = @one_item && @one_item["MONITORING/NETRX"] ? @one_item["MONITORING/NETRX"].to_i : 0
            previous_diskrdiops = @one_item && @one_item["MONITORING/DISKRDIOPS"] ? @one_item["MONITORING/DISKRDIOPS"].to_i : 0
            previous_diskwriops = @one_item && @one_item["MONITORING/DISKWRIOPS"] ? @one_item["MONITORING/DISKWRIOPS"].to_i : 0
            previous_diskrdbytes = @one_item && @one_item["MONITORING/DISKRDBYTES"] ? @one_item["MONITORING/DISKRDBYTES"].to_i : 0
            previous_diskwrbytes = @one_item && @one_item["MONITORING/DISKWRBYTES"] ? @one_item["MONITORING/DISKWRBYTES"].to_i : 0

            # KB/s summed over samples * 1024 * sample period (s) => bytes
            @monitor[:nettx] = previous_nettx + (nettx_kbpersec * 1024 * refresh_rate).to_i
            @monitor[:netrx] = previous_netrx + (netrx_kbpersec * 1024 * refresh_rate).to_i

            @monitor[:diskrdiops]  = previous_diskrdiops + read_iops
            @monitor[:diskwriops]  = previous_diskwriops + write_iops
            @monitor[:diskrdbytes] = previous_diskrdbytes + (read_kbpersec * 1024 * refresh_rate).to_i
            @monitor[:diskwrbytes] = previous_diskwrbytes + (write_kbpersec * 1024 * refresh_rate).to_i
        end
    end
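
    # Illustrative poll flow (a sketch, not code from this driver's call
    # sites): instantiate the wrapper from a vCenter ref, gather counters
    # for this single VM, then render the IM string:
    #
    #   vm = VCenterDriver::VirtualMachine.new_from_ref(ref, vi_client)
    #   vm.monitor_poll_vm
    #   info_str = vm.info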

    # monitor function used when the poll action is called for all VMs
    def monitor(stats)

        reset_monitor

        refresh_rate = 20 # 20 seconds between samples (realtime)

        @state = state_to_c(@vm_info["summary.runtime.powerState"])

        return if @state != VM_STATE[:active]

        cpuMhz = @vm_info[:esx_host_cpu]

        @monitor[:used_memory] = @vm_info["summary.quickStats.hostMemoryUsage"].to_i * 1024

        used_cpu = @vm_info["summary.quickStats.overallCpuUsage"].to_f / cpuMhz
        used_cpu = used_cpu * 100
        @monitor[:used_cpu] = sprintf('%.2f', used_cpu)

        # Clamp negative values to zero
        @monitor[:used_memory] = 0 if @monitor[:used_memory].to_i < 0
        @monitor[:used_cpu]    = 0 if @monitor[:used_cpu].to_i < 0

        guest_ip_addresses = []
        @vm_info["guest.net"].each do |net|
            net.ipConfig.ipAddress.each do |ip|
                guest_ip_addresses << ip.ipAddress
            end if net.ipConfig && net.ipConfig.ipAddress
        end if @vm_info["guest.net"]

        @guest_ip_addresses = guest_ip_addresses.join(',')

        if stats.key?(@item)
            metrics = stats[@item][:metrics]

            nettx_kbpersec = 0
            if metrics['net.transmitted']
                metrics['net.transmitted'].each { |sample|
                    nettx_kbpersec += sample if sample > 0
                }
            end

            netrx_kbpersec = 0
            if metrics['net.bytesRx']
                metrics['net.bytesRx'].each { |sample|
                    netrx_kbpersec += sample if sample > 0
                }
            end

            read_kbpersec = 0
            if metrics['virtualDisk.read']
                metrics['virtualDisk.read'].each { |sample|
                    read_kbpersec += sample if sample > 0
                }
            end

            read_iops = 0
            if metrics['virtualDisk.numberReadAveraged']
                metrics['virtualDisk.numberReadAveraged'].each { |sample|
                    read_iops += sample if sample > 0
                }
            end

            write_kbpersec = 0
            if metrics['virtualDisk.write']
                metrics['virtualDisk.write'].each { |sample|
                    write_kbpersec += sample if sample > 0
                }
            end

            write_iops = 0
            if metrics['virtualDisk.numberWriteAveraged']
                metrics['virtualDisk.numberWriteAveraged'].each { |sample|
                    write_iops += sample if sample > 0
                }
            end
        else
            nettx_kbpersec = 0
            netrx_kbpersec = 0
            read_kbpersec  = 0
            read_iops      = 0
            write_kbpersec = 0
            write_iops     = 0
        end

        # Accumulate values if present
        previous_nettx = @one_item && @one_item["MONITORING/NETTX"] ? @one_item["MONITORING/NETTX"].to_i : 0
        previous_netrx = @one_item && @one_item["MONITORING/NETRX"] ? @one_item["MONITORING/NETRX"].to_i : 0
        previous_diskrdiops = @one_item && @one_item["MONITORING/DISKRDIOPS"] ? @one_item["MONITORING/DISKRDIOPS"].to_i : 0
        previous_diskwriops = @one_item && @one_item["MONITORING/DISKWRIOPS"] ? @one_item["MONITORING/DISKWRIOPS"].to_i : 0
        previous_diskrdbytes = @one_item && @one_item["MONITORING/DISKRDBYTES"] ? @one_item["MONITORING/DISKRDBYTES"].to_i : 0
        previous_diskwrbytes = @one_item && @one_item["MONITORING/DISKWRBYTES"] ? @one_item["MONITORING/DISKWRBYTES"].to_i : 0

        # KB/s summed over samples * 1024 * sample period (s) => bytes
        @monitor[:nettx] = previous_nettx + (nettx_kbpersec * 1024 * refresh_rate).to_i
        @monitor[:netrx] = previous_netrx + (netrx_kbpersec * 1024 * refresh_rate).to_i

        @monitor[:diskrdiops]  = previous_diskrdiops + read_iops
        @monitor[:diskwriops]  = previous_diskwriops + write_iops
        @monitor[:diskrdbytes] = previous_diskrdbytes + (read_kbpersec * 1024 * refresh_rate).to_i
        @monitor[:diskwrbytes] = previous_diskwrbytes + (write_kbpersec * 1024 * refresh_rate).to_i
    end
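
    # Illustrative batch flow (a sketch with placeholder names): the poll
    # for all VMs runs a single PerfManager query covering every VM and
    # hands each wrapper the shared stats hash, keyed by the RbVmomi VM
    # object:
    #
    #   stats = pm.retrieve_stats(vm_items, counter_names,
    #                             {interval: 20, max_samples: 9}) rescue {}
    #   vms.each { |vm| vm.monitor(stats) }
    #
    # `pm`, `vm_items`, `counter_names` and `vms` stand in for objects the
    # caller builds elsewhere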

    # Generates a valid OpenNebula IM Driver string with the monitor info
    def info
        return 'STATE=d' if @state == 'd'

        guest_ip = @vm_info ? @vm_info["guest.ipAddress"] : self["guest.ipAddress"]

        used_cpu    = @monitor[:used_cpu]
        used_memory = @monitor[:used_memory]
        netrx       = @monitor[:netrx]
        nettx       = @monitor[:nettx]
        diskrdbytes = @monitor[:diskrdbytes]
        diskwrbytes = @monitor[:diskwrbytes]
        diskrdiops  = @monitor[:diskrdiops]
        diskwriops  = @monitor[:diskwriops]

        esx_host      = @vm_info ? @vm_info[:esx_host_name].to_s : self["runtime.host.name"].to_s
        guest_state   = @vm_info ? @vm_info["guest.guestState"].to_s : self["guest.guestState"].to_s
        vmware_tools  = @vm_info ? @vm_info["guest.toolsRunningStatus"].to_s : self["guest.toolsRunningStatus"].to_s
        vmtools_ver   = @vm_info ? @vm_info["guest.toolsVersion"].to_s : self["guest.toolsVersion"].to_s
        vmtools_verst = @vm_info ? @vm_info["guest.toolsVersionStatus2"].to_s : self["guest.toolsVersionStatus2"].to_s

        if @vm_info
            rp_name = @vm_info[:rp_list].select { |item| item[:ref] == @vm_info["resourcePool"]._ref }.first[:name] rescue ""
            rp_name = "Resources" if rp_name.empty?
        else
            rp_name = self["resourcePool"].name
        end

        str_info = ""

        str_info << "GUEST_IP=" << guest_ip.to_s << " " if guest_ip

        if @guest_ip_addresses && !@guest_ip_addresses.empty?
            str_info << "GUEST_IP_ADDRESSES=\"" << @guest_ip_addresses.to_s << "\" "
        end

        str_info << "#{POLL_ATTRIBUTE[:state]}="  << @state           << " "
        str_info << "#{POLL_ATTRIBUTE[:cpu]}="    << used_cpu.to_s    << " "
        str_info << "#{POLL_ATTRIBUTE[:memory]}=" << used_memory.to_s << " "
        str_info << "#{POLL_ATTRIBUTE[:netrx]}="  << netrx.to_s       << " "
        str_info << "#{POLL_ATTRIBUTE[:nettx]}="  << nettx.to_s       << " "

        str_info << "DISKRDBYTES=" << diskrdbytes.to_s << " "
        str_info << "DISKWRBYTES=" << diskwrbytes.to_s << " "
        str_info << "DISKRDIOPS="  << diskrdiops.to_s  << " "
        str_info << "DISKWRIOPS="  << diskwriops.to_s  << " "

        str_info << "VCENTER_ESX_HOST=\""                 << esx_host      << "\" "
        str_info << "VCENTER_GUEST_STATE="                << guest_state   << " "
        str_info << "VCENTER_VMWARETOOLS_RUNNING_STATUS=" << vmware_tools  << " "
        str_info << "VCENTER_VMWARETOOLS_VERSION="        << vmtools_ver   << " "
        str_info << "VCENTER_VMWARETOOLS_VERSION_STATUS=" << vmtools_verst << " "
        str_info << "VCENTER_RP_NAME=\""                  << rp_name       << "\" "
    end
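
    # The string returned by info is a single line of KEY=VALUE pairs; an
    # illustrative (made-up) example of a result:
    #
    #   GUEST_IP=10.0.0.5 STATE=a CPU=0.50 MEMORY=1048576 NETRX=2048
    #   NETTX=1024 DISKRDBYTES=0 DISKWRBYTES=0 DISKRDIOPS=0 DISKWRIOPS=0
    #   VCENTER_ESX_HOST="esx1.example.com" VCENTER_GUEST_STATE=running
    #   VCENTER_RP_NAME="Resources"
    #
    # the STATE/CPU/MEMORY/NETRX/NETTX key names come from POLL_ATTRIBUTE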

    def reset_monitor
        @monitor = {
            :used_cpu    => 0,
            :used_memory => 0,
            :netrx       => 0,
            :nettx       => 0,
            :diskrdbytes => 0,
            :diskwrbytes => 0,
            :diskrdiops  => 0,
            :diskwriops  => 0
        }
    end

    # Converts the VI string state to OpenNebula state convention
    # Guest states are:
    # - poweredOff   The virtual machine is currently powered off.
    # - poweredOn    The virtual machine is currently powered on.
    # - suspended    The virtual machine is currently suspended.
    def state_to_c(state)
        case state
            when 'poweredOn'
                VM_STATE[:active]
            when 'suspended'
                VM_STATE[:paused]
            when 'poweredOff'
                VM_STATE[:deleted]
            else
                VM_STATE[:unknown]
        end
    end
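
    # Illustrative mappings:
    #   state_to_c('poweredOn')  #=> VM_STATE[:active]
    #   state_to_c('suspended')  #=> VM_STATE[:paused]
    #   state_to_c('standby')    #=> VM_STATE[:unknown]  (any unmapped state)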

    # TODO check with uuid
    def self.new_from_ref(ref, vi_client)
        self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
    end

end # class VirtualMachine

end # module VCenterDriver