one / src / vmm_mad / remotes / lib / vcenter_driver / virtual_machine.rb @ e3b9f6f7

module VCenterDriver
require 'digest'
class VirtualMachineFolder
    attr_accessor :item, :items

    def initialize(item)
        @item = item
        @items = {}
    end

    ########################################################################
    # Builds a hash with VirtualMachine-Ref / VirtualMachine to be used as
    # a cache
    # @return [Hash] in the form
    #   { vm_ref [Symbol] => VirtualMachine object }
    ########################################################################
    def fetch!
        VIClient.get_entities(@item, "VirtualMachine").each do |item|
            item_name = item._ref
            @items[item_name.to_sym] = VirtualMachine.new(item)
        end
    end

    def fetch_templates!
        VIClient.get_entities(@item, "VirtualMachine").each do |item|
            if item.config.template
                item_name = item._ref
                @items[item_name.to_sym] = Template.new(item)
            end
        end
    end

    ########################################################################
    # Returns a VirtualMachine. Uses the cache if available.
    # @param ref [Symbol] the vcenter ref
    # @return VirtualMachine
    ########################################################################
    def get(ref)
        if !@items[ref.to_sym]
            rbvmomi_vm = RbVmomi::VIM::VirtualMachine.new(@item._connection, ref)
            @items[ref.to_sym] = VirtualMachine.new(rbvmomi_vm)
        end

        @items[ref.to_sym]
    end
end # class VirtualMachineFolder
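
# Illustrative usage sketch (not part of the driver): builds the VM cache for
# a datacenter's VM folder. `dc` is a hypothetical VCenterDriver::Datacenter
# obtained elsewhere through a connected VIClient.
#
#   vm_folder = VCenterDriver::VirtualMachineFolder.new(dc.item.vmFolder)
#   vm_folder.fetch_templates!
#   vm_folder.items.each do |ref, template|
#       puts "#{ref} => #{template.item.name}"
#   end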

class Template

    attr_accessor :item

    include Memoize

    def initialize(item=nil, vi_client=nil)
        @item = item
        @vi_client = vi_client
        @locking = true
    end

    # Locking function. Similar to flock
    def lock
        if @locking
            @locking_file = File.open("/tmp/vcenter-importer-lock","w")
            @locking_file.flock(File::LOCK_EX)
        end
    end

    # Unlock driver execution mutex
    def unlock
        if @locking
            @locking_file.close
        end
    end
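
    # Illustrative sketch (hypothetical caller): import operations are
    # serialized through the lock file above, so callers wrap critical
    # sections in lock/unlock, e.g.:
    #
    #   template.lock
    #   begin
    #       # ... create images/networks safely ...
    #   ensure
    #       template.unlock
    #   end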

    def get_dc
        item = @item

        while !item.instance_of? RbVmomi::VIM::Datacenter
            item = item.parent
            if item.nil?
                raise "Could not find the parent Datacenter"
            end
        end

        Datacenter.new(item)
    end

    def delete_template
        @item.Destroy_Task.wait_for_completion
    end

    def get_vcenter_instance_uuid
        @vi_client.vim.serviceContent.about.instanceUuid rescue nil
    end

    def create_template_copy(template_name)
        error = nil
        template_ref = nil

        template_name = "one-#{self['name']}" if template_name.empty?

        relocate_spec_params = {}
        relocate_spec_params[:pool] = get_rp
        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params)

        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec({
            :location => relocate_spec,
            :powerOn  => false,
            :template => false
        })

        template = nil
        begin
            template = @item.CloneVM_Task(:folder => @item.parent,
                                          :name   => template_name,
                                          :spec   => clone_spec).wait_for_completion
            template_ref = template._ref
        rescue Exception => e
            if !e.message.start_with?('DuplicateName')
                error = "Could not create the template clone. Reason: #{e.message}"
                return error, nil
            end

            dc = get_dc
            vm_folder = dc.vm_folder
            vm_folder.fetch!
            vm = vm_folder.items
                    .select{|k,v| v.item.name == template_name}
                    .values.first.item rescue nil

            if vm
                begin
                    vm.Destroy_Task.wait_for_completion
                    template = @item.CloneVM_Task(:folder => @item.parent,
                                                  :name   => template_name,
                                                  :spec   => clone_spec).wait_for_completion
                    template_ref = template._ref
                rescue Exception => e
                    # Rebind e so the reported reason is the actual delete/clone
                    # failure, not the earlier DuplicateName error
                    error = "Could not delete the existing template, please remove it manually from vCenter. Reason: #{e.message}"
                end
            else
                error = "Could not create the template clone. Reason: #{e.message}"
            end
        end

        return error, template_ref
    end
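
    # Illustrative sketch (hypothetical caller): cloning a template copy and
    # reacting to the (error, ref) pair the method returns.
    #
    #   error, copy_ref = template.create_template_copy("one-myvm")
    #   if error
    #       STDERR.puts error
    #   else
    #       copy = VCenterDriver::Template.new_from_ref(copy_ref, vi_client)
    #   end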

    # Linked Clone over existing template
    def create_delta_disks

        begin
            disks = @item['config.hardware.device'].grep(RbVmomi::VIM::VirtualDisk)
            disk_without_snapshots = disks.select { |x| x.backing.parent.nil? }
        rescue
            error = "Cannot extract existing disks on template."
            use_linked_clones = false
            return error, use_linked_clones
        end

        if !disk_without_snapshots.empty?

            begin
                if self['config.template']
                    @item.MarkAsVirtualMachine(:pool => get_rp, :host => self['runtime.host'])
                end
            rescue Exception => e
                @item.MarkAsTemplate()
                error = "Cannot mark the template as a VirtualMachine. Not using linked clones. Reason: #{e.message}/#{e.backtrace}"
                use_linked_clones = false
                return error, use_linked_clones
            end

            begin
                spec = {}
                spec[:deviceChange] = []

                disk_without_snapshots.each do |disk|
                    remove_disk_spec = { :operation => :remove, :device => disk }
                    spec[:deviceChange] << remove_disk_spec

                    add_disk_spec = { :operation => :add,
                                      :fileOperation => :create,
                                      :device => disk.dup.tap { |x|
                                          x.backing = x.backing.dup
                                          x.backing.fileName = "[#{disk.backing.datastore.name}]"
                                          x.backing.parent = disk.backing
                                      }
                    }
                    spec[:deviceChange] << add_disk_spec
                end

                @item.ReconfigVM_Task(:spec => spec).wait_for_completion if !spec[:deviceChange].empty?
            rescue Exception => e
                error = "Cannot create the delta disks on top of the template. Reason: #{e.message}."
                use_linked_clones = false
                return error, use_linked_clones
            end

            begin
                @item.MarkAsTemplate()
            rescue
                error = "Cannot mark the VirtualMachine as a template. Not using linked clones."
                use_linked_clones = false
                return error, use_linked_clones
            end

            error = nil
            use_linked_clones = true
            return error, use_linked_clones
        else
            # Template already has delta disks
            error = nil
            use_linked_clones = true
            return error, use_linked_clones
        end
    end
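
    # Illustrative sketch (hypothetical caller): preparing a template for
    # linked clones and falling back to full clones on error.
    #
    #   error, use_linked_clones = template.create_delta_disks
    #   if error
    #       STDERR.puts "#{error} Falling back to full clones."
    #   end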

    def import_vcenter_disks(vc_uuid, dpool, ipool, sunstone=false, template_id=nil)
        disk_info = ""
        error = ""
        sunstone_disk_info = []

        begin
            lock # Lock the import operation to avoid concurrent creation of images

            ##ccr_ref = self["runtime.host.parent._ref"]
            dc = get_dc
            dc_ref = dc.item._ref

            # Get the disks and the info required
            vc_disks = get_vcenter_disks

            # Track allocated images
            allocated_images = []

            vc_disks.each do |disk|
                datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_dc(disk[:datastore]._ref,
                                                                                        dc_ref,
                                                                                        vc_uuid,
                                                                                        dpool)
                if datastore_found.nil?
                    error = "\n    ERROR: datastore #{disk[:datastore].name} has to be imported first as an image datastore!\n"

                    # Rollback: delete already allocated disk images
                    allocated_images.each do |i|
                        i.delete
                    end

                    break
                end

                image_import = VCenterDriver::Datastore.get_image_import_template(disk[:datastore].name,
                                                                                  disk[:path],
                                                                                  disk[:type],
                                                                                  disk[:prefix],
                                                                                  ipool,
                                                                                  template_id)
                # Image is already in the datastore
                if image_import[:one]
                    # This is the disk info
                    disk_tmp = ""
                    disk_tmp << "DISK=[\n"
                    disk_tmp << "IMAGE_ID=\"#{image_import[:one]["ID"]}\",\n"
                    disk_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
                    disk_tmp << "]\n"
                    if sunstone
                        sunstone_disk = {}
                        sunstone_disk[:type] = "EXISTING_DISK"
                        sunstone_disk[:image_tmpl] = disk_tmp
                        sunstone_disk_info << sunstone_disk
                    else
                        disk_info << disk_tmp
                    end

                elsif !image_import[:template].empty?

                    if sunstone
                        sunstone_disk = {}
                        sunstone_disk[:type] = "NEW_DISK"
                        sunstone_disk[:image_tmpl] = image_import[:template]
                        sunstone_disk[:ds_id] = datastore_found['ID'].to_i
                        sunstone_disk_info << sunstone_disk
                    else
                        # Create the image, as it's not in the datastore
                        one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image)
                        allocated_images << one_i
                        rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i)

                        if OpenNebula.is_error?(rc)
                            error = "    Error creating disk from template: #{rc.message}\n"
                            break
                        end

                        # Add info for the OpenNebula template
                        one_i.info
                        disk_info << "DISK=[\n"
                        disk_info << "IMAGE_ID=\"#{one_i["ID"]}\",\n"
                        disk_info << "OPENNEBULA_MANAGED=\"NO\"\n"
                        disk_info << "]\n"
                    end
                end
            end

        rescue Exception => e
            error = "\n    There was an error trying to create an image for a disk in the vCenter template. Reason: #{e.message}\n#{e.backtrace}"
        ensure
            unlock
            if !error.empty? && allocated_images
                # Rollback: delete the allocated disk images
                allocated_images.each do |i|
                    i.delete
                end
            end
        end

        return error, sunstone_disk_info, allocated_images if sunstone

        return error, disk_info, allocated_images if !sunstone

    end
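
    # Illustrative sketch (hypothetical caller): importing a template's disks
    # as OpenNebula images. `dpool`, `ipool` and `one_template` are assumed to
    # be built elsewhere (datastore pool, image pool, template string).
    #
    #   vc_uuid = template.get_vcenter_instance_uuid
    #   error, disk_info, images = template.import_vcenter_disks(vc_uuid,
    #                                                            dpool,
    #                                                            ipool)
    #   raise error if !error.empty?
    #   one_template << disk_info  # append the DISK=[...] sections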

    def import_vcenter_nics(vc_uuid, npool, hpool, vcenter_instance_name,
                            template_ref, wild, sunstone=false, vm_name=nil, vm_id=nil, dc_name=nil)
        nic_info = ""
        error = ""
        sunstone_nic_info = []

        begin
            lock # Lock the import operation to avoid concurrent creation of networks

            if !dc_name
                dc = get_dc
                dc_name = dc.item.name
                dc_ref  = dc.item._ref
            end

            ccr_ref  = self["runtime.host.parent._ref"]
            ccr_name = self["runtime.host.parent.name"]

            # Get the NICs and the info required
            vc_nics = get_vcenter_nics

            # Track allocated networks for rollback
            allocated_networks = []

            # Track port groups duplicated in this VM
            duplicated_networks = []

            vc_nics.each do |nic|
                # Check if the network already exists
                network_found = VCenterDriver::Network.get_unmanaged_vnet_by_ref(nic[:net_ref],
                                                                                 template_ref,
                                                                                 vc_uuid,
                                                                                 npool)
                # Network is already in OpenNebula
                if network_found

                    # This is the existing nic info
                    nic_tmp = ""
                    nic_tmp << "NIC=[\n"
                    nic_tmp << "NETWORK_ID=\"#{network_found["ID"]}\",\n"
                    nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
                    nic_tmp << "]\n"

                    if sunstone
                        sunstone_nic = {}
                        sunstone_nic[:type] = "EXISTING_NIC"
                        sunstone_nic[:network_tmpl] = nic_tmp
                        sunstone_nic_info << sunstone_nic
                    else
                        nic_info << nic_tmp
                    end
                else
                    # The network has to be created, as it's not in OpenNebula
                    one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork)

                    # We're importing unmanaged nics
                    unmanaged = true

                    # Get the OpenNebula host associated with the cluster reference
                    one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
                                                                   "TEMPLATE/VCENTER_CCR_REF",
                                                                   ccr_ref,
                                                                   vc_uuid,
                                                                   hpool)

                    # Get the CLUSTER_ID from the OpenNebula host
                    if !one_host || !one_host['CLUSTER_ID']
                        cluster_id = -1
                    else
                        cluster_id = one_host['CLUSTER_ID']
                    end

                    # We have to know if we're importing nics from a wild vm
                    # or from a template
                    if wild
                        unmanaged = "wild"
                    else
                        unmanaged = "template"
                    end

                    # Prepare the Virtual Network template
                    one_vnet = VCenterDriver::Network.to_one_template(nic[:net_name],
                                                                      nic[:net_ref],
                                                                      nic[:pg_type],
                                                                      ccr_ref,
                                                                      ccr_name,
                                                                      vc_uuid,
                                                                      vcenter_instance_name,
                                                                      dc_name,
                                                                      cluster_id,
                                                                      nil,
                                                                      unmanaged,
                                                                      template_ref,
                                                                      dc_ref,
                                                                      vm_name,
                                                                      vm_id)

                    # By default add an ethernet address range of size 255
                    ar_str = ""
                    ar_str << "AR=[\n"
                    ar_str << "TYPE=\"ETHER\",\n"
                    ar_str << "SIZE=\"255\"\n"
                    ar_str << "]\n"
                    one_vnet[:one] << ar_str

                    if sunstone
                        if !duplicated_networks.include?(nic[:net_name])
                            sunstone_nic = {}
                            sunstone_nic[:type] = "NEW_NIC"
                            sunstone_nic[:network_name] = nic[:net_name]
                            sunstone_nic[:network_tmpl] = one_vnet[:one]
                            sunstone_nic[:one_cluster_id] = cluster_id.to_i
                            sunstone_nic_info << sunstone_nic
                            duplicated_networks << nic[:net_name]
                        else
                            sunstone_nic = {}
                            sunstone_nic[:type] = "DUPLICATED_NIC"
                            sunstone_nic[:network_name] = nic[:net_name]
                            sunstone_nic_info << sunstone_nic
                        end
                    else
                        # Allocate the Virtual Network
                        allocated_networks << one_vn
                        rc = one_vn.allocate(one_vnet[:one], cluster_id.to_i)

                        if OpenNebula.is_error?(rc)
                            error = "\n    ERROR: Could not allocate virtual network due to #{rc.message}\n"
                            break
                        end

                        # Add info for the OpenNebula template
                        one_vn.info
                        nic_info << "NIC=[\n"
                        nic_info << "NETWORK_ID=\"#{one_vn["ID"]}\",\n"
                        nic_info << "OPENNEBULA_MANAGED=\"NO\"\n"
                        nic_info << "]\n"

                        # Refresh npool
                        npool.info_all
                    end
                end
            end

        rescue Exception => e
            error = "\n    There was an error trying to create a virtual network to represent a vCenter network for a VM or VM Template. Reason: #{e.message}"
        ensure
            unlock
            # Rollback: delete the allocated virtual networks
            if !error.empty? && allocated_networks
                allocated_networks.each do |n|
                    n.delete
                end
            end
        end

        return error, nic_info, allocated_networks if !sunstone

        return error, sunstone_nic_info, allocated_networks if sunstone
    end
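
    # Illustrative sketch (hypothetical caller): importing the template's
    # port groups as OpenNebula virtual networks. `npool`, `hpool` and
    # `one_template` are assumed to exist; `wild` is false for templates.
    #
    #   error, nic_info, networks = template.import_vcenter_nics(vc_uuid,
    #                                                            npool,
    #                                                            hpool,
    #                                                            "my-vcenter",
    #                                                            template_ref,
    #                                                            false)
    #   raise error if !error.empty?
    #   one_template << nic_info  # append the NIC=[...] sections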

    def get_vcenter_disk_key(unit_number, controller_key)

        key = nil

        @item["config.hardware.device"].each do |device|
            if is_disk_or_iso?(device)
                if device.controllerKey == controller_key &&
                   device.unitNumber == unit_number

                    key = device.key
                    break
                end
            end
        end

        return key
    end

    def get_vcenter_disks

        disks = []
        ide_controlled  = []
        sata_controlled = []
        scsi_controlled = []

        @item["config.hardware.device"].each do |device|
            disk = {}

            if device.is_a? RbVmomi::VIM::VirtualIDEController
                ide_controlled.concat(device.device)
            end

            if device.is_a? RbVmomi::VIM::VirtualSATAController
                sata_controlled.concat(device.device)
            end

            if device.is_a? RbVmomi::VIM::VirtualSCSIController
                scsi_controlled.concat(device.device)
            end

            if is_disk_or_iso?(device)
                disk[:device]     = device
                disk[:datastore]  = device.backing.datastore
                disk[:path]       = device.backing.fileName
                disk[:path_wo_ds] = disk[:path].sub(/^\[(.*?)\] /, "")
                disk[:type]       = is_disk?(device) ? "OS" : "CDROM"
                disk[:key]        = device.key
                disk[:prefix]     = "hd" if ide_controlled.include?(device.key)
                disk[:prefix]     = "sd" if scsi_controlled.include?(device.key)
                disk[:prefix]     = "sd" if sata_controlled.include?(device.key)
                disks << disk
            end
        end

        return disks
    end
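
    # For reference, each hash returned by get_vcenter_disks has this shape
    # (values are illustrative):
    #
    #   {
    #       :device     => <RbVmomi::VIM::VirtualDisk>,
    #       :datastore  => <RbVmomi::VIM::Datastore>,
    #       :path       => "[datastore1] one/disk.vmdk",
    #       :path_wo_ds => "one/disk.vmdk",
    #       :type       => "OS",      # or "CDROM"
    #       :key        => 2000,
    #       :prefix     => "sd"       # "hd" for IDE-controlled devices
    #   }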

    def get_vcenter_nics
        nics = []
        @item["config.hardware.device"].each do |device|
            nic = {}
            if is_nic?(device)
                begin
                    nic[:net_name]  = device.backing.network.name
                    nic[:net_ref]   = device.backing.network._ref
                    nic[:pg_type]   = VCenterDriver::Network.get_network_type(device)
                    nics << nic
                rescue
                    # Skip NICs whose backing network cannot be resolved
                end
            end
        end
        return nics
    end

    # Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom
    def is_disk_or_cdrom?(device)
        is_disk  = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
        is_cdrom = !(device.class.ancestors.index(RbVmomi::VIM::VirtualCdrom)).nil?
        is_disk || is_cdrom
    end

    # Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file
    def is_disk_or_iso?(device)
        is_disk = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
        is_iso  = device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo
        is_disk || is_iso
    end

    # Checks if a RbVmomi::VIM::VirtualDevice is a disk
    def is_disk?(device)
        !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
    end

    # Checks if a RbVmomi::VIM::VirtualDevice is a network interface
    def is_nic?(device)
        !device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil?
    end

    # @return RbVmomi::VIM::ResourcePool, first resource pool in cluster
    def get_rp
        self['runtime.host.parent.resourcePool']
    end

    def vm_to_one(vm_name)

        str = "NAME   = \"#{vm_name}\"\n"\
              "CPU    = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
              "vCPU   = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
              "MEMORY = \"#{@vm_info["config.hardware.memoryMB"]}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "CONTEXT = [\n"\
              "    NETWORK = \"YES\",\n"\
              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
              "]\n"\
              "VCENTER_INSTANCE_ID =\"#{@vm_info[:vc_uuid]}\"\n"\
              "VCENTER_CCR_REF =\"#{@vm_info[:cluster_ref]}\"\n"

        str << "IMPORT_VM_ID =\"#{self["_ref"]}\"\n"
        str << "IMPORT_STATE =\"#{@state}\"\n"

        # Get DS information
        ds_folder = get_dc.datastore_folder
        ds_name   = self["config.datastoreUrl"][0].name
        ds_folder.fetch!
        ds_vcenter_ref = ds_folder.items.select{|_,d| d["name"] == ds_name}.values[0]["_ref"]
        str << "VCENTER_DS_REF = \"#{ds_vcenter_ref}\"\n"

        vnc_port = nil
        keymap = nil

        @vm_info["config.extraConfig"].each do |xtra|
            if xtra[:key].downcase == "remotedisplay.vnc.port"
                vnc_port = xtra[:value]
            end

            if xtra[:key].downcase == "remotedisplay.vnc.keymap"
                keymap = xtra[:value]
            end
        end

        if !@vm_info["config.extraConfig"].empty?
            str << "GRAPHICS = [\n"\
                   "  TYPE     =\"vnc\",\n"
            str << "  PORT     =\"#{vnc_port}\",\n" if vnc_port
            str << "  KEYMAP   =\"#{keymap}\",\n" if keymap
            str << "  LISTEN   =\"0.0.0.0\"\n"
            str << "]\n"
        end

        if !@vm_info["config.annotation"] || @vm_info["config.annotation"].empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
                " from Cluster #{@vm_info["cluster_name"]}\"\n"
        else
            notes = @vm_info["config.annotation"].gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        case @vm_info["guest.guestFullName"]
            when /CentOS/i
                str << "LOGO=images/logos/centos.png\n"
            when /Debian/i
                str << "LOGO=images/logos/debian.png\n"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png\n"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png\n"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png\n"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png\n"
            when /Linux/i
                str << "LOGO=images/logos/linux.png\n"
        end

        return str
    end
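
    # For reference, vm_to_one produces an OpenNebula template string along
    # these lines (values are illustrative):
    #
    #   NAME   = "myvm"
    #   CPU    = "2"
    #   vCPU   = "2"
    #   MEMORY = "2048"
    #   HYPERVISOR = "vcenter"
    #   CONTEXT = [ ... ]
    #   VCENTER_INSTANCE_ID ="..."
    #   VCENTER_CCR_REF ="domain-c7"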

    def self.template_to_one(template, vc_uuid, ccr_ref, ccr_name, import_name, host_id)

        num_cpu, memory, annotation, guest_fullname =
            template.item.collect("config.hardware.numCPU",
                                  "config.hardware.memoryMB",
                                  "config.annotation",
                                  "guest.guestFullName")

        str = "NAME   = \"#{import_name}\"\n"\
              "CPU    = \"#{num_cpu}\"\n"\
              "vCPU   = \"#{num_cpu}\"\n"\
              "MEMORY = \"#{memory}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "CONTEXT = [\n"\
              "    NETWORK = \"YES\",\n"\
              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
              "]\n"\
              "VCENTER_INSTANCE_ID =\"#{vc_uuid}\"\n"

        str << "VCENTER_TEMPLATE_REF =\"#{template["_ref"]}\"\n"
        str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n"

        str << "GRAPHICS = [\n"\
               "  TYPE     =\"vnc\",\n"
        str << "  LISTEN   =\"0.0.0.0\"\n"
        str << "]\n"

        if annotation.nil? || annotation.empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
                " from Cluster #{ccr_name}\"\n"
        else
            notes = annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        case guest_fullname
            when /CentOS/i
                str << "LOGO=images/logos/centos.png\n"
            when /Debian/i
                str << "LOGO=images/logos/debian.png\n"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png\n"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png\n"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png\n"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png\n"
            when /Linux/i
                str << "LOGO=images/logos/linux.png\n"
        end

        return str
    end

    def self.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name=nil, dc_name=nil, rp_cache={})

        begin
            template_ref      = template['_ref']
            template_name     = template["name"]
            template_ccr      = template['runtime.host.parent']
            template_ccr_ref  = template_ccr._ref
            template_ccr_name = template_ccr.name

            # Set vcenter instance name
            vcenter_instance_name = vi_client.vim.host if !vcenter_instance_name

            # Get datacenter info
            if !dc_name
                dc = template.get_dc
                dc_name = dc.item.name
            end

            # Get resource pools and generate a list
            if !rp_cache[template_ccr_name]
                tmp_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(template_ccr_ref, vi_client)
                rp_list = tmp_cluster.get_resource_pool_list
                rp = ""
                if !rp_list.empty?
                    rp_name_list = []
                    rp_list.each do |rp_hash|
                        rp_name_list << rp_hash[:name]
                    end
                    rp =  "O|list|Which resource pool do you want this VM to run in? "
                    rp << "|#{rp_name_list.join(",")}" # List of RPs
                    rp << "|#{rp_name_list.first}" # Default RP
                end
                rp_cache[template_ccr_name] = {}
                rp_cache[template_ccr_name][:rp] = rp
                rp_cache[template_ccr_name][:rp_list] = rp_list
            end
            rp      = rp_cache[template_ccr_name][:rp]
            rp_list = rp_cache[template_ccr_name][:rp_list]

            # Determine the location path for the template
            vcenter_template = VCenterDriver::VirtualMachine.new_from_ref(template_ref, vi_client)
            item = vcenter_template.item
            folders = []
            while !item.instance_of? RbVmomi::VIM::Datacenter
                item = item.parent
                raise "Could not find the template's parent location" if item.nil?
                if !item.instance_of? RbVmomi::VIM::Datacenter
                    folders << item.name if item.name != "vm"
                end
            end
            location = folders.reverse.join("/")
            location = "/" if location.empty?

            # Generate a crypto hash for the template name and take the first 12 chars
            sha256            = Digest::SHA256.new
            full_name         = "#{template_name} - #{template_ccr_name} [#{vcenter_instance_name} - #{dc_name}]_#{location}"
            template_hash     = sha256.hexdigest(full_name)[0..11]
            template_name     = template_name.tr("\u007F", "")
            template_ccr_name = template_ccr_name.tr("\u007F", "")
            import_name       = "#{template_name} - #{template_ccr_name} #{template_hash}"

            # Prepare the Hash that will be used by importers to display
            # the object being imported
            one_tmp = {}
            one_tmp[:name]                  = import_name
            one_tmp[:template_name]         = template_name
            one_tmp[:sunstone_template_name]= "#{template_name} [ Cluster: #{template_ccr_name} - Template location: #{location} ]"
            one_tmp[:template_hash]         = template_hash
            one_tmp[:template_location]     = location
            one_tmp[:vcenter_ccr_ref]       = template_ccr_ref
            one_tmp[:vcenter_ref]           = template_ref
            one_tmp[:vcenter_instance_uuid] = vcenter_uuid
            one_tmp[:cluster_name]          = template_ccr_name
            one_tmp[:rp]                    = rp
            one_tmp[:rp_list]               = rp_list
            one_tmp[:template]              = template
            one_tmp[:import_disks_and_nics] = true # By default we import disks and nics

            # Get the host ID of the OpenNebula host which represents the vCenter Cluster
            one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
                                                           "TEMPLATE/VCENTER_CCR_REF",
                                                           template_ccr_ref,
                                                           vcenter_uuid)
            host_id    = one_host["ID"]
            cluster_id = one_host["CLUSTER_ID"]
            raise "Could not find the ID of the host associated with the template being imported" if !host_id

            # Get the OpenNebula template hash
            one_tmp[:one] = template_to_one(template, vcenter_uuid, template_ccr_ref, template_ccr_name, import_name, host_id)
            return one_tmp
        rescue
            return nil
        end
    end

    # TODO check with uuid
    def self.new_from_ref(ref, vi_client)
        self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
    end
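
    # Illustrative sketch (hypothetical importer): wrapping a vCenter template
    # by its managed object reference, then building its import hash. The
    # "vm-1234" ref is a made-up example value.
    #
    #   template = VCenterDriver::Template.new_from_ref("vm-1234", vi_client)
    #   vc_uuid  = template.get_vcenter_instance_uuid
    #   one_tmp  = VCenterDriver::Template.get_xml_template(template,
    #                                                       vc_uuid,
    #                                                       vi_client)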

end # class Template

class VirtualMachine < Template
    VM_PREFIX_DEFAULT = "one-$i-"

    POLL_ATTRIBUTE    = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
    VM_STATE          = OpenNebula::VirtualMachine::Driver::VM_STATE

    VM_SHUTDOWN_TIMEOUT = 600 # 10 minutes until a hard poweroff is issued

    attr_accessor :item

    attr_accessor :vm_info

    include Memoize

    def initialize(item=nil, vi_client=nil)
        @item = item
        @vi_client = vi_client
        @locking = true
        @vm_info = nil
    end

    ############################################################################
    ############################################################################

    # Attributes that must be defined when the VM does not exist in vCenter
    attr_accessor :vi_client

    # these have their own getter (if they aren't set, we can set them
    # dynamically)
    attr_writer :one_item
    attr_writer :host
    attr_writer :target_ds_ref

    ############################################################################
    ############################################################################

    # The OpenNebula VM
    # @return OpenNebula::VirtualMachine or XMLElement
    def one_item
        if !@one_item
            vm_id = get_vm_id

            raise "Unable to find vm_id." if vm_id.nil?

            @one_item = VIHelper.one_item(OpenNebula::VirtualMachine, vm_id)
        end

        @one_item
    end

    # The OpenNebula host
    # @return OpenNebula::Host or XMLElement
    def host
        if @host.nil?
            if one_item.nil?
                raise "'one_item' must be previously set to be able to " <<
                      "access the OpenNebula host."
            end

            host_id = one_item["HISTORY_RECORDS/HISTORY[last()]/HID"]
            raise "No valid host_id found." if host_id.nil?

            @host = VIHelper.one_item(OpenNebula::Host, host_id)
        end

        @host
    end

    # Target Datastore VMware reference getter
    # @return String the target datastore's VCENTER_DS_REF
    def target_ds_ref
        if @target_ds_ref.nil?
            if one_item.nil?
                raise "'one_item' must be previously set to be able to " <<
                      "access the target Datastore."
            end

            target_ds_id = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
            raise "No valid target_ds_id found." if target_ds_id.nil?

            target_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore,
                                                         target_ds_id)

            @target_ds_ref = target_ds['TEMPLATE/VCENTER_DS_REF']
        end

        @target_ds_ref
    end

    # Cached cluster
    # @return ClusterComputeResource
    def cluster
        if @cluster.nil?
            ccr_ref = host['TEMPLATE/VCENTER_CCR_REF']
            @cluster = ClusterComputeResource.new_from_ref(ccr_ref, vi_client)
        end

        @cluster
    end

    ############################################################################
    ############################################################################

    # @return Boolean true if this vCenter VM is not yet imported in OpenNebula
    def is_new?
        !get_vm_id
    end

    # @return String the ID of the OpenNebula VM whose DEPLOY_ID matches this
    #         vCenter VM's ref, or nil if there is none
    def get_vm_id
        vm_ref = self['_ref']
        return nil if !vm_ref

        vc_uuid = get_vcenter_instance_uuid

        one_vm = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualMachinePool,
                                                     "DEPLOY_ID",
                                                     vm_ref,
                                                     vc_uuid)
        return nil if !one_vm

        return one_vm["ID"]
    end

    def get_vcenter_instance_uuid
        @vi_client.vim.serviceContent.about.instanceUuid
    end

    def get_unmanaged_keys
        unmanaged_keys = {}
        @item.config.extraConfig.each do |val|
            if val[:key].include?("opennebula.disk")
                unmanaged_keys[val[:key]] = val[:value]
            end
        end
        return unmanaged_keys
    end
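
    # For reference, get_unmanaged_keys returns a hash mapping the
    # opennebula.disk.* extraConfig keys to vCenter device keys, e.g.
    # (values are illustrative):
    #
    #   { "opennebula.disk.0" => "2000", "opennebula.disk.1" => "2001" }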

    ############################################################################
    # Getters
    ############################################################################

    # @return RbVmomi::VIM::ResourcePool
    def get_rp

        req_rp = one_item['VCENTER_RESOURCE_POOL'] ||
                 one_item['USER_TEMPLATE/VCENTER_RESOURCE_POOL']

        # Get ref for req_rp
        rp_list    = cluster.get_resource_pool_list
        req_rp_ref = rp_list.select { |rp| rp[:name] == req_rp }.first[:ref] rescue nil

        if vi_client.rp_confined?
            if req_rp_ref && req_rp_ref != vi_client.rp._ref
                raise "Available resource pool [#{vi_client.rp.name}] in host"\
                      " does not match requested resource pool"\
                      " [#{req_rp}]"
            end

            return vi_client.rp
        else
            if req_rp_ref
                rps = cluster.resource_pools.select{|r| r._ref == req_rp_ref }

                if rps.empty?
                    raise "No matching resource pool found (#{req_rp})."
                else
                    return rps.first
                end
            else
                return cluster['resourcePool']
            end
        end
    end

    # @return RbVmomi::VIM::Datastore or nil
    def get_ds
        ##req_ds = one_item['USER_TEMPLATE/VCENTER_DS_REF']
        current_ds_id  = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
        current_ds     = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, current_ds_id)
        current_ds_ref = current_ds['TEMPLATE/VCENTER_DS_REF']

        if current_ds_ref
            dc = cluster.get_dc

            ds_folder = dc.datastore_folder
            ds = ds_folder.get(current_ds_ref)
            ds_item = ds.item rescue nil

            return ds_item
        else
            return nil
        end
    end

    # StorageResourceManager reference
    def get_sm
        self['_connection.serviceContent.storageResourceManager']
    end

    # @return Customization or nil
    def get_customization
        xpath = "USER_TEMPLATE/VCENTER_CUSTOMIZATION_SPEC"
        customization_spec = one_item[xpath]

        if customization_spec.nil?
            return nil
        end

        begin
            custom_spec = vi_client.vim
                            .serviceContent
                            .customizationSpecManager
                            .GetCustomizationSpec(:name => customization_spec)

            if custom_spec && (spec = custom_spec.spec)
                return spec
            else
                raise "Error getting customization spec"
            end
        rescue
            raise "Customization spec '#{customization_spec}' not found"
        end
    end

    # @return VCenterDriver::Datastore datastore where the disk will live
    def get_effective_ds(disk)
        if disk["PERSISTENT"] == "YES"
            ds_ref = disk["VCENTER_DS_REF"]
        else
            ds_ref = target_ds_ref

            if ds_ref.nil?
                raise "target_ds_ref must be defined on this object."
            end
        end

        VCenterDriver::Storage.new_from_ref(ds_ref, vi_client)
    end

    # @return String vcenter name
    def get_vcenter_name
        vm_prefix = host['TEMPLATE/VM_PREFIX']
        vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty?
        # Use a non-destructive gsub so the VM_PREFIX_DEFAULT constant is
        # never mutated in place
        vm_prefix = vm_prefix.gsub("$i", one_item['ID'])

        vm_prefix + one_item['NAME']
    end
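
    # Illustrative example: with a VM_PREFIX of "one-$i-", an OpenNebula VM ID
    # of 42 and NAME "web", get_vcenter_name returns "one-42-web".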

    ############################################################################
    # Create and reconfigure VM related methods
    ############################################################################

    # This function creates a new VM from the @one_item XML and returns the
    # VMware ref
    # @param one_item OpenNebula::VirtualMachine
    # @param vi_client VCenterDriver::VIClient
    # @return String vmware ref
    def clone_vm(one_item, vi_client)
        @one_item = one_item
        @vi_client = vi_client

        vcenter_name = get_vcenter_name

        vc_template_ref = one_item['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
        vc_template = RbVmomi::VIM::VirtualMachine(vi_client.vim, vc_template_ref)

        ds = get_ds

        # Default disk move type (Full Clone)
        disk_move_type = :moveAllDiskBackingsAndDisallowSharing

        if ds.instance_of? RbVmomi::VIM::Datastore
            use_linked_clones = one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES']
            if use_linked_clones && use_linked_clones.downcase == "yes"
                # Check if all disks in the template have delta disks
                disks = vc_template.config
                                .hardware.device.grep(RbVmomi::VIM::VirtualDisk)

                disks_no_delta = disks.select { |d| d.backing.parent == nil }

                # Can use linked clones if all disks have delta disks
                if (disks_no_delta.size == 0)
                    disk_move_type = :moveChildMostDiskBacking
                end
            end
        end

        spec_hash = spec_hash_clone(disk_move_type)

        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(spec_hash)

        # Specify the VM folder in vSphere's VM and Templates view F#4823
        vcenter_vm_folder = one_item["USER_TEMPLATE/VCENTER_VM_FOLDER"]
        vcenter_vm_folder_object = nil
        dc = cluster.get_dc
        if vcenter_vm_folder && !vcenter_vm_folder.empty?
            vcenter_vm_folder_object = dc.item.find_folder(vcenter_vm_folder)
        end
        vcenter_vm_folder_object = vc_template.parent if vcenter_vm_folder_object.nil?

        if ds.instance_of? RbVmomi::VIM::StoragePod
            # VM is cloned using the Storage Resource Manager for StoragePods
            begin
                vm = storagepod_clonevm_task(vc_template, vcenter_name,
                                             clone_spec, ds, vcenter_vm_folder_object, dc)
            rescue Exception => e
                raise "Cannot clone VM Template to StoragePod: #{e.message}"
            end
        else
            vm = nil
            begin
                vm = vc_template.CloneVM_Task(
                    :folder => vcenter_vm_folder_object,
                    :name   => vcenter_name,
                    :spec   => clone_spec).wait_for_completion
            rescue Exception => e
                if !e.message.start_with?('DuplicateName')
                    raise "Cannot clone VM Template: #{e.message}\n#{e.backtrace}"
                end

                vm_folder = dc.vm_folder
                vm_folder.fetch!
                vm = vm_folder.items
                        .select{|k,v| v.item.name == vcenter_name}
                        .values.first.item rescue nil

                if vm
                    # Detach all persistent disks to avoid accidental destruction
                    detach_persistent_disks(vm)

                    vm.Destroy_Task.wait_for_completion
                    vm = vc_template.CloneVM_Task(
                        :folder => vcenter_vm_folder_object,
                        :name   => vcenter_name,
                        :spec   => clone_spec).wait_for_completion
                else
                    raise "Cannot clone VM Template"
                end
            end
        end
        # @item is populated
        @item = vm

        return self['_ref']
    end
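
    # Illustrative sketch (hypothetical deploy flow; `vm_id`, `one_client` and
    # `vi_client` are assumed to exist):
    #
    #   one_vm = OpenNebula::VirtualMachine.new_with_id(vm_id, one_client)
    #   one_vm.info
    #   vm = VCenterDriver::VirtualMachine.new
    #   deploy_id = vm.clone_vm(one_vm, vi_client)  # returns the vCenter ref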

    def storagepod_clonevm_task(vc_template, vcenter_name, clone_spec, storpod, vcenter_vm_folder_object, dc)

        storage_manager = vc_template
                            ._connection.serviceContent.storageResourceManager

        storage_spec = RbVmomi::VIM.StoragePlacementSpec(
            type: 'clone',
            cloneName: vcenter_name,
            folder: vcenter_vm_folder_object,
            podSelectionSpec: RbVmomi::VIM.StorageDrsPodSelectionSpec(storagePod: storpod),
            vm: vc_template,
            cloneSpec: clone_spec
        )

        # Query a storage placement recommendation
        result = storage_manager
                    .RecommendDatastores(storageSpec: storage_spec) rescue nil

        raise "Could not get placement specification for StoragePod" if result.nil?

        if !result.respond_to?(:recommendations) || result.recommendations.size == 0
            raise "Could not get placement specification for StoragePod"
        end

        # Get the recommendation key to be applied
        key = result.recommendations.first.key || ''
        raise "Missing Datastore recommendation for StoragePod" if key.empty?

        begin
            apply_sr = storage_manager
                            .ApplyStorageDrsRecommendation_Task(key: [key])
                            .wait_for_completion
            return apply_sr.vm
        rescue Exception => e
            if !e.message.start_with?('DuplicateName')
                raise "Cannot clone VM Template: #{e.message}\n#{e.backtrace}"
            end

            # The VM already exists, try to find it
            vm_folder = dc.vm_folder
            vm_folder.fetch!
            vm = vm_folder.items
                    .select{|k,v| v.item.name == vcenter_name}
                    .values.first.item rescue nil

            if vm

                begin
                    # Detach all persistent disks to avoid accidental destruction
                    detach_persistent_disks(vm)

                    # Destroy the VM with any disks still attached to it
                    vm.Destroy_Task.wait_for_completion

                    # Query a storage placement recommendation
                    result = storage_manager.RecommendDatastores(storageSpec: storage_spec) rescue nil

                    raise "Could not get placement specification for StoragePod" if result.nil?

                    if !result.respond_to?(:recommendations) || result.recommendations.size == 0
                        raise "Could not get placement specification for StoragePod"
                    end

                    # Get the recommendation key to be applied
                    key = result.recommendations.first.key || ''
                    raise "Missing Datastore recommendation for StoragePod" if key.empty?

                    apply_sr = storage_manager
                            .ApplyStorageDrsRecommendation_Task(key: [key])
                            .wait_for_completion
                    return apply_sr.vm
                rescue Exception => e
                    raise "Failure applying recommendation while cloning VM: #{e.message}"
                end
            end
        end
    end

    # @return clone parameters spec hash
    def spec_hash_clone(disk_move_type)
        # Relocate spec
        relocate_spec_params = {}

        relocate_spec_params[:pool] = get_rp
        relocate_spec_params[:diskMoveType] = disk_move_type

        ds = get_ds

        relocate_spec_params[:datastore] = ds if ds.instance_of? RbVmomi::VIM::Datastore

        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
                                                         relocate_spec_params)

        # Running flag - prevents spurious poweroff states in the VM
        running_flag = [{ :key => "opennebula.vm.running", :value => "no"}]

        running_flag_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
            { :extraConfig => running_flag }
        )

        clone_parameters = {
            :location => relocate_spec,
            :powerOn  => false,
            :template => false,
            :config   => running_flag_spec
        }

        cs = get_customization
        clone_parameters[:customization] = cs if cs

        clone_parameters
    end

    def reference_unmanaged_devices(template_ref)

        extraconfig   = []
        device_change = []

        # Get unmanaged disks in OpenNebula's VM template
        xpath = "TEMPLATE/DISK[OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\"]"
        unmanaged_disks = one_item.retrieve_xmlelements(xpath)

        if !unmanaged_disks.empty?

            # Get vcenter VM disks to know the real path of each cloned disk
            vcenter_disks = get_vcenter_disks

            # Create an array with the paths of the disks in the vcenter template
            template = VCenterDriver::Template.new_from_ref(template_ref, vi_client)
            template_disks = template.get_vcenter_disks
            template_disks_vector = []
            template_disks.each do |d|
                template_disks_vector << d[:path_wo_ds]
            end

            # Try to find the index of each unmanaged disk in the template disks
            unmanaged_disks.each do |unmanaged_disk|
                index = template_disks_vector.index(unmanaged_disk["SOURCE"])
                if index
                    reference = {}
                    reference[:key]   = "opennebula.disk.#{unmanaged_disk["DISK_ID"]}"
                    reference[:value] = "#{vcenter_disks[index][:key]}"
                    extraconfig << reference
                end
            end
        end

        # Add info for existing nics in template in vm xml
        xpath = "TEMPLATE/NIC[OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\"]"
        unmanaged_nics = one_item.retrieve_xmlelements(xpath)

        if !unmanaged_nics.empty?
            index = 0
            self["config.hardware.device"].each do |device|
                if is_nic?(device)
                    # Set the MAC address defined for the unmanaged NIC
                    device.macAddress = unmanaged_nics[index]["MAC"]
                    device_change << { :device => device, :operation => :edit }
                    index += 1
                end
            end
        end

        # Save in extraconfig the key for unmanaged disks
        if !extraconfig.empty? || !device_change.empty?
            spec = {}
            spec[:extraConfig]  = extraconfig if !extraconfig.empty?
            spec[:deviceChange] = device_change if !device_change.empty?
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        end
    end
1337

    
1338
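    # For reference, the extraconfig entries built above become VM-level
    # advanced settings that pair OpenNebula disk ids with vCenter device
    # keys, e.g. (values are illustrative):
    #
    #   { :key => "opennebula.disk.0", :value => "2000" }
    #   { :key => "opennebula.disk.1", :value => "2001" }
    #
    # get_unmanaged_keys reads these pairs back later so that detach and
    # resize operations can match OpenNebula disks to vCenter devices.
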
    def resize_unmanaged_disks
        resize_hash = {}
        disks = []

        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Look for unmanaged disks whose original size has changed
        xpath = "TEMPLATE/DISK[(OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\") and boolean(ORIGINAL_SIZE) and ORIGINAL_SIZE != SIZE]"
        unmanaged_resized_disks = one_item.retrieve_xmlelements(xpath)

        return if unmanaged_resized_disks.empty?

        # Linked clone disks cannot be resized
        if one_item["USER_TEMPLATE/VCENTER_LINKED_CLONES"] == "YES"
            raise "Linked clone disks cannot be resized."
        end

        unmanaged_resized_disks.each do |disk|
            vc_disks.each do |vcenter_disk|
                if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                    device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i

                    if device_key == vcenter_disk[:key].to_i

                        break if disk["SIZE"].to_i <= disk["ORIGINAL_SIZE"].to_i

                        # Edit capacity setting the new size in KB
                        d = vcenter_disk[:device]
                        d.capacityInKB = disk["SIZE"].to_i * 1024
                        disks << { :device => d, :operation => :edit }
                        break
                    end
                end
            end
        end

        if !disks.empty?
            resize_hash[:deviceChange] = disks
            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
        end
    end

    def create_storagedrs_disks(device_change_spod, device_change_spod_ids)

        sm = get_sm
        disk_locator = []
        extra_config = []

        device_change_spod.each do |device_spec|
            disk_locator << RbVmomi::VIM.PodDiskLocator(diskId: device_spec[:device].key)
        end

        spec = {}
        spec[:deviceChange] = device_change_spod

        # Disk locator is required for AddDisk
        vmpod_hash = {}
        vmpod_hash[:storagePod] = get_ds
        vmpod_hash[:disk] = disk_locator
        vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)

        # The storage pod selection requires an initial VM config
        spod_hash = {}
        spod_hash[:initialVmConfig] = [ vmpod_config ]
        spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
        storage_spec = RbVmomi::VIM.StoragePlacementSpec(
            type: :reconfigure,
            podSelectionSpec: spod_select,
            vm: self['_ref'],
            configSpec: spec
        )

        # Query a storage placement recommendation
        result = sm.RecommendDatastores(storageSpec: storage_spec) rescue nil

        raise "Could not get placement specification for StoragePod" if result.nil?

        if !result.respond_to?(:recommendations) || result.recommendations.size == 0
            raise "Could not get placement specification for StoragePod"
        end

        # Get the recommendation key to be applied
        key = result.recommendations.first.key || ''
        raise "Missing Datastore recommendation for StoragePod" if key.empty?

        # Apply the recommendation
        sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion

        # Set references in opennebula.disk elements
        device_change_spod.each do |device_spec|
            unit_number    = device_spec[:device].unitNumber
            controller_key = device_spec[:device].controllerKey
            key            = get_vcenter_disk_key(unit_number, controller_key)
            disk_id        = device_change_spod_ids["#{controller_key}-#{unit_number}"]
            reference      = {}
            reference[:key]   = "opennebula.disk.#{disk_id}"
            reference[:value] = key.to_s
            extra_config << reference
        end

        extra_config
    end

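    # The flow above follows what the vSphere API requires for AddDisk on a
    # storage pod: build a StoragePlacementSpec, ask the
    # StorageResourceManager for recommendations, then apply one by key.
    # Condensed sketch (sm and storage_spec as built above):
    #
    #   result = sm.RecommendDatastores(storageSpec: storage_spec)
    #   key    = result.recommendations.first.key
    #   sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion
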
    def reconfigure
        extraconfig   = []
        device_change = []

        # Unmanaged keys
        unmanaged_keys = get_unmanaged_keys

        # Get disk devices in the vm
        vc_disks = get_vcenter_disks

        # Get an array with disk paths in OpenNebula's vm template
        disks_in_onevm_vector = disks_in_onevm(unmanaged_keys, vc_disks)

        # As the original template may have been modified in OpenNebula
        # but not in vcenter, we must detach disks that are in vcenter
        # but not in OpenNebula's vm template
        if is_new?
            device_change, extra_config = device_detach_disks(disks_in_onevm_vector, unmanaged_keys, vc_disks)
            if !device_change.empty?
                spec_hash = {}
                spec_hash[:deviceChange] = device_change if !device_change.empty?
                spec_hash[:extraConfig]  = extra_config  if !extra_config.empty?

                # Reconfigure for disks detached from the original template
                spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
                @item.ReconfigVM_Task(:spec => spec).wait_for_completion

                # Get disk devices in the vm again after the reconfigure
                vc_disks = get_vcenter_disks
            end
        end

        # Now reconfigure disks, nics and extraconfig for the VM
        device_change = []

        # Get token and context
        extraconfig += extraconfig_context

        # VNC configuration (for the config_array hash)
        extraconfig += extraconfig_vnc

        # Set CPU, memory and extraconfig
        num_cpus = one_item["TEMPLATE/VCPU"] || 1

        spec_hash = {
            :numCPUs      => num_cpus.to_i,
            :memoryMB     => one_item["TEMPLATE/MEMORY"],
            :extraConfig  => extraconfig
        }

        # device_change hash (nics)
        device_change += device_change_nics

        # Now attach disks that are in OpenNebula's template but not in
        # vcenter, e.g. those that have been attached in poweroff
        device_change_ds, device_change_spod, device_change_spod_ids = device_attach_disks(disks_in_onevm_vector, vc_disks)
        device_change += device_change_ds

        # Create volatile disks in StorageDRS if any
        if !device_change_spod.empty?
            spec_hash[:extraConfig] = create_storagedrs_disks(device_change_spod, device_change_spod_ids)
        end

        # Common reconfigure task
        spec_hash[:deviceChange] = device_change
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    end

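    # For reference, the spec assembled by reconfigure is shaped roughly like
    # this before it is handed to ReconfigVM_Task (values are illustrative):
    #
    #   {
    #     :numCPUs      => 2,
    #     :memoryMB     => "2048",
    #     :extraConfig  => [{ :key => "guestinfo.opennebula.context", ... }],
    #     :deviceChange => [{ :operation => :add, :device => ... }]
    #   }
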
    def extraconfig_context
        context_text = "# Context variables generated by OpenNebula\n"
        one_item.each('TEMPLATE/CONTEXT/*') do |context_element|
            # The block form of gsub is used so "\\'" is inserted literally
            # instead of being treated as the post-match backreference
            context_text += context_element.name + "='" +
                            (context_element.text || '').gsub("'") { "\\'" } + "'\n"
        end

        # token
        token = File.read(File.join(VAR_LOCATION,
                        'vms',
                        one_item['ID'],
                        'token.txt')).chomp rescue nil

        context_text += "ONEGATE_TOKEN='#{token}'\n" if token

        [
            { :key => "guestinfo.opennebula.context",
              :value => Base64.encode64(context_text) }
        ]
    end

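    # Inside the guest, the context published above can be read back with
    # open-vm-tools and decoded (assuming vmtoolsd is installed; shown only
    # as an illustration):
    #
    #   vmtoolsd --cmd "info-get guestinfo.opennebula.context" | base64 -d
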
    def extraconfig_vnc
        return [] if !one_item["TEMPLATE/GRAPHICS"]

        vnc_port   = one_item["TEMPLATE/GRAPHICS/PORT"]
        vnc_listen = one_item["TEMPLATE/GRAPHICS/LISTEN"] || "0.0.0.0"
        vnc_keymap = one_item["TEMPLATE/GRAPHICS/KEYMAP"]

        conf = [ {:key => "remotedisplay.vnc.enabled", :value => "TRUE"},
                 {:key => "remotedisplay.vnc.port",    :value => vnc_port},
                 {:key => "remotedisplay.vnc.ip",      :value => vnc_listen} ]

        conf += [{:key => "remotedisplay.vnc.keymap",
                  :value => vnc_keymap}] if vnc_keymap

        conf
    end

    def device_change_nics
        # Final list of changes to be applied in vCenter
        device_change = []

        # Hash of interfaces from the OpenNebula xml
        nics_in_template = {}
        xpath = "TEMPLATE/NIC"
        one_item.each(xpath) { |nic|
            nics_in_template[nic["MAC"]] = nic
        }

        # Check nics in the VM
        self["config.hardware.device"].each do |dv|
            if is_nic?(dv)
                if nics_in_template.key?(dv.macAddress)
                    # Remove the nic that is already in the XML to avoid a duplicate
                    nics_in_template.delete(dv.macAddress)
                else
                    # B4897 - It was detached in poweroff, remove it from the VM
                    device_change << {
                        :operation => :remove,
                        :device    => dv
                    }
                end
            end
        end

        # Attach new nics (nics_in_template now contains only the interfaces
        # not present in the VM in vCenter)
        nics_in_template.each do |_mac, nic|
            device_change << calculate_add_nic_spec(nic)
        end

        return device_change
    end

    # Regenerate context when devices are hot plugged (reconfigure)
    def regenerate_context
        spec_hash = { :extraConfig => extraconfig_context }
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

        begin
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        rescue Exception => e
            raise "Cannot regenerate context for VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Returns an array of actions to be included in :deviceChange
    def calculate_add_nic_spec(nic)

        mac       = nic["MAC"]
        pg_name   = nic["BRIDGE"]
        model     = nic["VCENTER_NET_MODEL"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/MODEL")
        vnet_ref  = nic["VCENTER_NET_REF"]
        backing   = nil

        limit_in  = nic["INBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_PEAK_BW")
        limit_out = nic["OUTBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW")
        limit     = nil

        if limit_in && limit_out
            limit = ([limit_in.to_i, limit_out.to_i].min / 1024) * 8
        end

        rsrv_in  = nic["INBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_AVG_BW")
        rsrv_out = nic["OUTBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_AVG_BW")
        rsrv     = nil

        if rsrv_in || rsrv_out
            rsrv = ([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
        end

        network = self["runtime.host"].network.select do |n|
            n._ref == vnet_ref || n.name == pg_name
        end

        network = network.first

        card_num = 1 # start at one, we want the next available id

        @item["config.hardware.device"].each do |dv|
            card_num += 1 if is_nic?(dv)
        end

        nic_card = case model
                        when "virtuale1000", "e1000"
                            RbVmomi::VIM::VirtualE1000
                        when "virtuale1000e", "e1000e"
                            RbVmomi::VIM::VirtualE1000e
                        when "virtualpcnet32", "pcnet32"
                            RbVmomi::VIM::VirtualPCNet32
                        when "virtualsriovethernetcard", "sriovethernetcard"
                            RbVmomi::VIM::VirtualSriovEthernetCard
                        when "virtualvmxnetm", "vmxnetm"
                            RbVmomi::VIM::VirtualVmxnetm
                        when "virtualvmxnet2", "vmxnet2", "vmnet2"
                            RbVmomi::VIM::VirtualVmxnet2
                        when "virtualvmxnet3", "vmxnet3"
                            RbVmomi::VIM::VirtualVmxnet3
                        else # If none matches, default to VirtualE1000
                            RbVmomi::VIM::VirtualE1000
                   end

        if network.class == RbVmomi::VIM::Network
            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
                        :deviceName => pg_name,
                        :network    => network)
        else
            port    = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
                        :switchUuid =>
                                network.config.distributedVirtualSwitch.uuid,
                        :portgroupKey => network.key)
            backing =
              RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
                 :port => port)
        end

        card_spec = {
            :key => 0,
            :deviceInfo => {
                :label => "net" + card_num.to_s,
                :summary => pg_name
            },
            :backing     => backing,
            :addressType => mac ? 'manual' : 'generated',
            :macAddress  => mac
        }

        # Apply a resource allocation only when a positive limit and/or a
        # reservation is defined; the nil guards avoid comparing against
        # undefined values when only one kind of bandwidth is set
        if (limit && limit > 0) || rsrv
            ra_spec = {}
            rsrv = limit if limit && rsrv && rsrv > limit
            ra_spec[:limit] = limit if limit && limit > 0
            ra_spec[:reservation] = rsrv if rsrv
            ra_spec[:share] = RbVmomi::VIM.SharesInfo({
                    :level => RbVmomi::VIM.SharesLevel("normal"),
                    :shares => 0
                })
            card_spec[:resourceAllocation] =
               RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
        end

        {
            :operation => :add,
            :device    => nic_card.new(card_spec)
        }
    end

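    # Usage sketch (nic is a hypothetical OpenNebula NIC element): the
    # returned hash is a single :deviceChange entry, so attaching one NIC
    # reduces to the following, mirroring what attach_nic does:
    #
    #   spec = RbVmomi::VIM.VirtualMachineConfigSpec(
    #       :deviceChange => [calculate_add_nic_spec(nic)]
    #   )
    #   @item.ReconfigVM_Task(:spec => spec).wait_for_completion
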
    # Returns an array of actions to be included in :deviceChange
    def calculate_add_nic_spec_autogenerate_mac(nic)

        pg_name   = nic["BRIDGE"]
        model     = nic["VCENTER_NET_MODEL"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/MODEL")
        vnet_ref  = nic["VCENTER_NET_REF"]
        backing   = nil

        limit_in  = nic["INBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_PEAK_BW")
        limit_out = nic["OUTBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW")
        limit     = nil

        if limit_in && limit_out
            limit = ([limit_in.to_i, limit_out.to_i].min / 1024) * 8
        end

        rsrv_in  = nic["INBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_AVG_BW")
        rsrv_out = nic["OUTBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_AVG_BW")
        rsrv     = nil

        if rsrv_in || rsrv_out
            rsrv = ([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
        end

        network = self["runtime.host"].network.select do |n|
            n._ref == vnet_ref || n.name == pg_name
        end

        network = network.first

        card_num = 1 # start at one, we want the next available id

        @item["config.hardware.device"].each do |dv|
            card_num += 1 if is_nic?(dv)
        end

        nic_card = case model
                        when "virtuale1000", "e1000"
                            RbVmomi::VIM::VirtualE1000
                        when "virtuale1000e", "e1000e"
                            RbVmomi::VIM::VirtualE1000e
                        when "virtualpcnet32", "pcnet32"
                            RbVmomi::VIM::VirtualPCNet32
                        when "virtualsriovethernetcard", "sriovethernetcard"
                            RbVmomi::VIM::VirtualSriovEthernetCard
                        when "virtualvmxnetm", "vmxnetm"
                            RbVmomi::VIM::VirtualVmxnetm
                        when "virtualvmxnet2", "vmxnet2", "vmnet2"
                            RbVmomi::VIM::VirtualVmxnet2
                        when "virtualvmxnet3", "vmxnet3"
                            RbVmomi::VIM::VirtualVmxnet3
                        else # If none matches, default to VirtualE1000
                            RbVmomi::VIM::VirtualE1000
                   end

        if network.class == RbVmomi::VIM::Network
            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
                        :deviceName => pg_name,
                        :network    => network)
        else
            port    = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
                        :switchUuid =>
                                network.config.distributedVirtualSwitch.uuid,
                        :portgroupKey => network.key)
            backing =
              RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
                 :port => port)
        end

        card_spec = {
            :key => 0,
            :deviceInfo => {
                :label => "net" + card_num.to_s,
                :summary => pg_name
            },
            :backing     => backing,
            :addressType => 'generated'
        }

        # Apply a resource allocation only when a positive limit and/or a
        # reservation is defined; the nil guards avoid comparing against
        # undefined values when only one kind of bandwidth is set
        if (limit && limit > 0) || rsrv
            ra_spec = {}
            rsrv = limit if limit && rsrv && rsrv > limit
            ra_spec[:limit] = limit if limit && limit > 0
            ra_spec[:reservation] = rsrv if rsrv
            ra_spec[:share] = RbVmomi::VIM.SharesInfo({
                    :level => RbVmomi::VIM.SharesLevel("normal"),
                    :shares => 0
                })
            card_spec[:resourceAllocation] =
               RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
        end

        {
            :operation => :add,
            :device    => nic_card.new(card_spec)
        }
    end

    # Add NIC to VM
    def attach_nic
        spec_hash = {}

        # Extract the nic from the driver action
        nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first

        begin
            # A new NIC requires a vcenter spec
            attach_nic_array = []
            attach_nic_array << calculate_add_nic_spec(nic)
            spec_hash[:deviceChange] = attach_nic_array if !attach_nic_array.empty?

            # Reconfigure VM
            spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        rescue Exception => e
            raise "Cannot attach NIC to VM: #{e.message}\n#{e.backtrace}"
        end
    end

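    # attach_nic expects the driver action XML to flag the new interface,
    # e.g. (element values are illustrative):
    #
    #   <TEMPLATE>
    #     <NIC>
    #       <ATTACH>YES</ATTACH>
    #       <MAC>02:00:c0:a8:00:01</MAC>
    #       <BRIDGE>one-pg</BRIDGE>
    #     </NIC>
    #   </TEMPLATE>
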
    # Detach NIC from VM
    def detach_nic
        spec_hash = {}

        # Extract the nic from the driver action
        nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first
        mac = nic["MAC"]

        # Get the VM nic element if it has a device with that mac
        nic_device = @item["config.hardware.device"].find do |device|
            is_nic?(device) && (device.macAddress == mac)
        end rescue nil

        return if nic_device.nil? # Silently ignore if the nic is not found

        # Remove the NIC from the VM in the ReconfigVM_Task
        spec_hash[:deviceChange] = [{
                :operation => :remove,
                :device    => nic_device }]

        begin
            @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach NIC from VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach all NICs; useful when removing port groups and switches
    # so that they are no longer in use
    def detach_all_nics
        spec_hash = {}
        device_change = []

        @item["config.hardware.device"].each do |device|
            if is_nic?(device)
                device_change << {:operation => :remove, :device => device}
            end
        end

        # Remove the NICs from the VM in a single ReconfigVM_Task
        spec_hash[:deviceChange] = device_change

        begin
            @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach all NICs from VM: #{e.message}\n#{e.backtrace}"
        end
    end

    def get_device_filename_and_ds_from_key(key, vc_disks)
        vc_disks.find { |d| d[:key].to_i == key.to_i } rescue nil
    end

    def disks_in_onevm(unmanaged_keys, vc_disks)
        onevm_disks_vector = []

        disks = one_item.retrieve_xmlelements("TEMPLATE/DISK")
        disks.each do |disk|
            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i
                disk_hash = get_device_filename_and_ds_from_key(device_key, vc_disks)
                onevm_disks_vector << disk_hash[:path_wo_ds] if disk_hash
                next
            end

            img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)
            onevm_disks_vector << img_name
        end

        return onevm_disks_vector
    end

    def device_attach_disks(onevm_disks_vector, vc_disks)

        disks = one_item.retrieve_xmlelements("TEMPLATE/DISK")

        vc_disks.each do |d|
            index = onevm_disks_vector.index(d[:path_wo_ds])
            if index
                disks.delete_at(index)
                onevm_disks_vector.delete_at(index)
            end
        end

        return [], [], {} if disks.empty?

        attach_disk_array = []
        attach_spod_array = []
        attach_spod_disk_info = {}

        position = 0
        disks.each do |disk|
            storpod = disk["VCENTER_DS_REF"].start_with?('group-')
            if storpod
                spec = calculate_add_disk_spec(disk, position)
                attach_spod_array << spec
                unit_ctrl = "#{spec[:device].controllerKey}-#{spec[:device].unitNumber}"
                attach_spod_disk_info[unit_ctrl] = disk["DISK_ID"]
            else
                attach_disk_array << calculate_add_disk_spec(disk, position)
            end

            position += 1
        end

        return attach_disk_array, attach_spod_array, attach_spod_disk_info
    end

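    # device_attach_disks returns a triple: specs for disks on plain
    # datastores, specs for disks on storage pods (StorageDRS), and a map
    # from "controllerKey-unitNumber" to the OpenNebula DISK_ID, e.g.
    # (values are illustrative):
    #
    #   [ [{ :operation => :add, :device => ... }],  # plain datastores
    #     [{ :operation => :add, :device => ... }],  # storage pods
    #     { "1000-1" => "2" } ]                      # spod DISK_ID lookup
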
    def device_detach_disks(onevm_disks_vector, unmanaged_keys, vc_disks)
        detach_disk_array = []
        extra_config      = []
        ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
        if ipool.respond_to?(:message)
            raise "Could not get OpenNebula ImagePool: #{ipool.message}"
        end

        vc_disks.each do |d|
            if !onevm_disks_vector.index(d[:path_wo_ds])

                # If the disk to be detached is not persistent, detach and destroy it
                persistent = VCenterDriver::VIHelper.find_persistent_image_by_source(d[:path_wo_ds], ipool)
                if !persistent
                    detach_disk_array << {
                        :fileOperation => :destroy,
                        :operation => :remove,
                        :device    => d[:device]
                    }
                end

                # Remove the opennebula.disk reference if it exists
                unmanaged_keys.each do |key, value|
                    if value.to_i == d[:key].to_i
                        reference = {}
                        reference[:key]   = key
                        reference[:value] = ""
                        extra_config << reference
                        break
                    end
                end
            end
        end

        return detach_disk_array, extra_config
    end

    # Attach DISK to VM (hotplug)
    def attach_disk
        # TODO position? and disk size for volatile?

        spec_hash = {}
        device_change = []

        # Extract unmanaged_keys
        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Extract the disk from the driver action
        disk = one_item.retrieve_xmlelements("TEMPLATE/DISK[ATTACH='YES']").first

        # Check if we're dealing with a StoragePod SYSTEM ds
        storpod = disk["VCENTER_DS_REF"].start_with?('group-')

        # Check if the disk being attached is already connected to the VM
        raise "DISK is already connected to VM" if disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        # Generate the vCenter spec and reconfigure the VM
        device_change << calculate_add_disk_spec(disk)
        raise "Could not generate DISK spec" if device_change.empty?

        spec_hash[:deviceChange] = device_change
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

        begin
            if storpod
                # Ask StorageDRS for a recommendation to reconfigure the VM (AddDisk)
                sm = get_sm

                # The disk id is -1 because the id that will be assigned is not known yet
                disk_locator = [ RbVmomi::VIM.PodDiskLocator(diskId: -1) ]

                # Disk locator is required for AddDisk
                vmpod_hash = {}
                vmpod_hash[:storagePod] = get_ds
                vmpod_hash[:disk] = disk_locator
                vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)

                # The storage pod selection requires an initial VM config
                spod_hash = {}
                spod_hash[:initialVmConfig] = [ vmpod_config ]
                spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
                storage_spec = RbVmomi::VIM.StoragePlacementSpec(
                    type: :reconfigure,
                    podSelectionSpec: spod_select,
                    vm: self['_ref'],
                    configSpec: spec
                )

                # Query a storage placement recommendation
                result = sm.RecommendDatastores(storageSpec: storage_spec) rescue nil

                raise "Could not get placement specification for StoragePod" if result.nil?

                if !result.respond_to?(:recommendations) || result.recommendations.size == 0
                    raise "Could not get placement specification for StoragePod"
                end

                # Get the recommendation key to be applied
                key = result.recommendations.first.key || ''
                raise "Missing Datastore recommendation for StoragePod" if key.empty?

                # Apply the recommendation
                sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion

                # Add the key for the volatile disk to the unmanaged
                # opennebula.disk.id variables
                unit_number    = spec_hash[:deviceChange][0][:device].unitNumber
                controller_key = spec_hash[:deviceChange][0][:device].controllerKey
                key = get_vcenter_disk_key(unit_number, controller_key)
                spec_hash = {}
                reference = {}
                reference[:key]   = "opennebula.disk.#{disk["DISK_ID"]}"
                reference[:value] = key.to_s
                spec_hash[:extraConfig] = [ reference ]
                @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
            else
                @item.ReconfigVM_Task(:spec => spec).wait_for_completion
            end
        rescue Exception => e
            raise "Cannot attach DISK to VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach persistent disks to avoid their accidental destruction
    def detach_persistent_disks(vm)
        spec_hash = {}
        spec_hash[:deviceChange] = []
        ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
        if ipool.respond_to?(:message)
            raise "Could not get OpenNebula ImagePool: #{ipool.message}"
        end

        vm.config.hardware.device.each do |disk|
            if is_disk_or_cdrom?(disk)
                # Check whether the disk is persistent
                source = disk.backing.fileName.sub(/^\[(.*?)\] /, "")
                persistent = VCenterDriver::VIHelper.find_persistent_image_by_source(source, ipool)
                if persistent
                    spec_hash[:deviceChange] << {
                        :operation => :remove,
                        :device => disk
                    }
                end
            end
        end

        return nil if spec_hash[:deviceChange].empty?

        begin
            vm.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach persistent DISKs from VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach DISK from VM
    def detach_disk(disk)
        spec_hash = {}
        img_path = ""
        ds_ref = nil

        # Extract unmanaged disk keys
        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Get the vcenter device to be detached and remove it if found
        device = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        if device
            img_path << device[:path_wo_ds]

            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                reference = {}
                reference[:key]   = "opennebula.disk.#{disk["DISK_ID"]}"
                reference[:value] = ""
                spec_hash[:extraConfig] = [ reference ]
            end

            ds_ref = device[:datastore]._ref

            # Generate the vCenter spec and reconfigure the VM
            spec_hash[:deviceChange] = [{
                :operation => :remove,
                :device => device[:device]
            }]

            begin
                @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
            rescue Exception => e
                raise "Cannot detach DISK from VM: #{e.message}\n#{e.backtrace}"
            end
        end

        return ds_ref, img_path
    end

    # Get the vcenter device representing the DISK object (hotplug)
    def disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        img_name = ""
        device_found = nil
        disk_id = disk["DISK_ID"]

        vc_disks.each do |d|
            # Check if we are dealing with one of the unmanaged disks present
            # in the template when it was cloned
            if unmanaged_keys.key?("opennebula.disk.#{disk_id}") && d[:key] == unmanaged_keys["opennebula.disk.#{disk_id}"].to_i
                device_found = d
                break
            end

            # Otherwise, try to match the device by the expected image name
            img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)
            if d[:path_wo_ds] == img_name
                device_found = d
                break
            end
        end

        return device_found
    end

    def calculate_add_disk_spec(disk, position=0)
        img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)
        type     = disk["TYPE"]
        size_kb  = disk["SIZE"].to_i * 1024

        if type == "CDROM"
            # The CDROM image will be found in the IMAGE datastore
            ds_ref   = disk["VCENTER_DS_REF"]
            ds       = VCenterDriver::Storage.new_from_ref(ds_ref, @vi_client)
            ds_name  = ds['name']

            # A CDROM can only be added when the VM is in the poweroff state
            vmdk_backing = RbVmomi::VIM::VirtualCdromIsoBackingInfo(
                :datastore => ds.item,
                :fileName  => "[#{ds_name}] #{img_name}"
            )

            if @item["summary.runtime.powerState"] != "poweredOff"
                raise "The CDROM image can only be added as an IDE device "\
                      "when the VM is in the powered off state"
            end

            controller, unit_number = find_free_ide_controller(position)

            device = RbVmomi::VIM::VirtualCdrom(
                :backing       => vmdk_backing,
                :key           => -1,
                :controllerKey => controller.key,
                :unitNumber    => unit_number,
                :connectable => RbVmomi::VIM::VirtualDeviceConnectInfo(
                    :startConnected    => true,
                    :connected         => true,
                    :allowGuestControl => true
                )
            )

            return {
                :operation => :add,
                :device => device
            }
        else
            # TYPE is a regular disk (not a CDROM)

            controller, unit_number = find_free_controller(position)

            storpod = disk["VCENTER_DS_REF"].start_with?('group-')
            if storpod
                vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                  :diskMode  => 'persistent',
                  :fileName  => ""
                )
            else
                ds           = get_effective_ds(disk)
                ds_name      = ds['name']
                vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                  :datastore => ds.item,
                  :diskMode  => 'persistent',
                  :fileName  => "[#{ds_name}] #{img_name}"
                )
            end

            device = RbVmomi::VIM::VirtualDisk(
              :backing       => vmdk_backing,
              :capacityInKB  => size_kb,
              :controllerKey => controller.key,
              :key           => (-1 - position),
              :unitNumber    => unit_number
            )

            config = {
               :operation => :add,
               :device    => device
            }

            # For StorageDRS vCenter must create the file
            config[:fileOperation] = :create if storpod

            return config
        end
    end

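    # Minimal sketch of consuming the spec above (disk is a hypothetical
    # OpenNebula DISK element; this mirrors the non-StoragePod path of
    # attach_disk):
    #
    #   spec = RbVmomi::VIM.VirtualMachineConfigSpec(
    #       :deviceChange => [calculate_add_disk_spec(disk)]
    #   )
    #   @item.ReconfigVM_Task(:spec => spec).wait_for_completion
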
    def resize_unmanaged_disk(disk, new_size)

        resize_hash = {}
        disks       = []
        found       = false

        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        vc_disks.each do |vcenter_disk|
            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i

                if device_key == vcenter_disk[:key].to_i

                    if disk["SIZE"].to_i <= disk["ORIGINAL_SIZE"].to_i
                        raise "Disk size cannot be shrunk."
                    end

                    # Edit capacity setting the new size in KB
                    d = vcenter_disk[:device]
                    d.capacityInKB = disk["SIZE"].to_i * 1024
                    disks << { :device => d, :operation => :edit }

                    found = true
                    break
                end
            end
        end

        raise "Unmanaged disk could not be found to apply the resize operation." if !found

        if !disks.empty?
            resize_hash[:deviceChange] = disks
            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
        else
            raise "Device was not found after attaching it to the VM in poweroff."
        end
    end

    def resize_managed_disk(disk, new_size)

        resize_hash = {}

        unmanaged_keys = get_unmanaged_keys
        vc_disks       = get_vcenter_disks

        # Get the vcenter device to be resized if it is already attached
        device         = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        # If the disk is being attached in poweroff, reconfigure the VM
        if !device
            spec_hash     = {}
            device_change = []

            # Get an array with disk paths in OpenNebula's vm template
            disks_in_onevm_vector = disks_in_onevm(unmanaged_keys, vc_disks)

            device_change_ds, device_change_spod, device_change_spod_ids = device_attach_disks(disks_in_onevm_vector, vc_disks)
            device_change += device_change_ds

            # Create volatile disks in StorageDRS if any
            if !device_change_spod.empty?
                spec_hash[:extraConfig] = create_storagedrs_disks(device_change_spod, device_change_spod_ids)
            end

            # Common reconfigure task
            spec_hash[:deviceChange] = device_change
            spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion

            # Check again if the device has now been attached
            unmanaged_keys = get_unmanaged_keys
            vc_disks       = get_vcenter_disks
            device         = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

            if !device
                raise "Device was not found after attaching it to the VM in poweroff."
            end
        end

        # Resize the disk now that we know it is part of the VM
        if device
            vcenter_disk = device[:device]
            vcenter_disk.capacityInKB = new_size.to_i * 1024
            resize_hash[:deviceChange] = [{
                :operation => :edit,
                :device => vcenter_disk
            }]

            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
        end
    end

    def has_snapshots?
        self['rootSnapshot'] && !self['rootSnapshot'].empty?
    end

    def instantiated_as_persistent?
        begin
            !!one_item["TEMPLATE/CLONING_TEMPLATE_ID"]
        rescue
            return false # one_item may not be retrieved if deploy_id hasn't been set
        end
    end

    def find_free_ide_controller(position=0)

        free_ide_controllers = []
        ide_schema           = {}

        used_numbers      = []
        available_numbers = []

        @item["config.hardware.device"].each do |dev|
            if dev.is_a? RbVmomi::VIM::VirtualIDEController
                if ide_schema[dev.key].nil?
                    ide_schema[dev.key] = {}
                end

                ide_schema[dev.key][:device] = dev
            end

            next if dev.class != RbVmomi::VIM::VirtualCdrom
            used_numbers << dev.unitNumber
        end

        2.times do |ide_id|
            available_numbers << ide_id if !used_numbers.include?(ide_id)
        end

        ide_schema.keys.each do |controller|
            free_ide_controllers << ide_schema[controller][:device].deviceInfo.label
        end

        if free_ide_controllers.empty?
            raise "There are no free IDE controllers to connect this CDROM device"
        end

        available_controller_label = free_ide_controllers[0]

        controller = nil

        @item['config.hardware.device'].each do |device|
            if device.deviceInfo.label == available_controller_label
                controller = device
                break
            end
        end

        new_unit_number = available_numbers.sort[position]

        return controller, new_unit_number
    end

    def find_free_controller(position=0)
        free_scsi_controllers = []
        scsi_schema           = {}

        used_numbers      = []
        available_numbers = []

        @item["config.hardware.device"].each do |dev|
            if dev.is_a? RbVmomi::VIM::VirtualSCSIController
                if scsi_schema[dev.key].nil?
                    scsi_schema[dev.key] = {}
                end

                used_numbers << dev.scsiCtlrUnitNumber
                scsi_schema[dev.key][:device] = dev
            end

            next if dev.class != RbVmomi::VIM::VirtualDisk
            used_numbers << dev.unitNumber
        end

        15.times do |scsi_id|
            available_numbers << scsi_id if !used_numbers.include?(scsi_id)
        end

        scsi_schema.keys.each do |controller|
            free_scsi_controllers << scsi_schema[controller][:device].deviceInfo.label
        end

        if free_scsi_controllers.length > 0
            available_controller_label = free_scsi_controllers[0]
        else
            add_new_scsi(scsi_schema)
            return find_free_controller
        end

        controller = nil

        @item['config.hardware.device'].each do |device|
            if device.deviceInfo.label == available_controller_label
                controller = device
                break
            end
        end

        new_unit_number = available_numbers.sort[position]

        return controller, new_unit_number
    end

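    # Note on unit numbering: a virtual SCSI adapter exposes unit numbers
    # 0-15, one of which is taken by the controller itself
    # (scsiCtlrUnitNumber, usually 7); seeding used_numbers with it above
    # keeps that slot from being handed to a disk. Typical result:
    #
    #   controller, unit_number = find_free_controller
    #   # => e.g. a VirtualLsiLogicController and unit number 0
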
    def add_new_scsi(scsi_schema)
        controller = nil

        if scsi_schema.keys.length >= 4
            raise "Cannot add a new controller, maximum is 4."
        end

        scsi_key    = 0
        scsi_number = 0

        if scsi_schema.keys.length > 0 && scsi_schema.keys.length < 4
            scsi_key    = scsi_schema.keys.sort[-1] + 1
            scsi_number = scsi_schema[scsi_schema.keys.sort[-1]][:device].busNumber + 1
        end

        controller_device = RbVmomi::VIM::VirtualLsiLogicController(
            :key       => scsi_key,
            :busNumber => scsi_number,
            :sharedBus => :noSharing
        )

        device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
            :device    => controller_device,
            :operation => :add
        )

        vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
            :deviceChange => [device_config_spec]
        )

        @item.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion

        @item["config.hardware.device"].each do |device|
            if device.class == RbVmomi::VIM::VirtualLsiLogicController &&
                device.key == scsi_key

                controller = device.deviceInfo.label
            end
        end

        return controller
    end

    # Create a snapshot for the VM
    def create_snapshot(snap_id, snap_name)
        snapshot_hash = {
            :name        => snap_id,
            :description => "OpenNebula Snapshot: #{snap_name}",
            :memory      => true,
            :quiesce     => true
        }

        vcenter_version = @vi_client.vim.serviceContent.about.apiVersion rescue nil

        if vcenter_version != "5.5"
            begin
                @item.CreateSnapshot_Task(snapshot_hash).wait_for_completion
            rescue Exception => e
                raise "Cannot create snapshot for VM: #{e.message}\n#{e.backtrace}"
            end
        else
            # B#5045 - If vcenter is 5.5 the snapshot may take longer than
            # 15 minutes and it does not report that it has finished using
            # wait_for_completion, so we use an active wait instead with a
            # timeout of 1440 minutes = 24 hours
            @item.CreateSnapshot_Task(snapshot_hash)

            snapshot_created  = false
            elapsed_minutes   = 0

            until snapshot_created || elapsed_minutes == 1440
                if !!@item['snapshot']
                    current_snapshot = @item['snapshot.currentSnapshot'] rescue nil
                    snapshot_found = find_snapshot_in_list(@item['snapshot.rootSnapshotList'], snap_id)
                    snapshot_created = !!snapshot_found && !!current_snapshot && current_snapshot._ref == snapshot_found._ref
                end
                sleep(60)
                elapsed_minutes += 1
            end
        end

        return snap_id
    end

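    # Usage sketch for the snapshot helpers in this section (snapshot ids
    # normally come from OpenNebula; values are illustrative):
    #
    #   create_snapshot("0", "before-upgrade")
    #   revert_snapshot("0")
    #   delete_snapshot("0")
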
    # Revert to a VM snapshot
    def revert_snapshot(snap_id)

        snapshot_list = self["snapshot.rootSnapshotList"]
        snapshot = find_snapshot_in_list(snapshot_list, snap_id)

        return nil if !snapshot

        begin
            revert_snapshot_hash = { :_this => snapshot }
            snapshot.RevertToSnapshot_Task(revert_snapshot_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot revert snapshot of VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Delete VM snapshot
    def delete_snapshot(snap_id)

        snapshot_list = self["snapshot.rootSnapshotList"]
        snapshot = find_snapshot_in_list(snapshot_list, snap_id)

        return nil if !snapshot

        begin
            delete_snapshot_hash = {
                :_this => snapshot,
                :removeChildren => false
            }
            snapshot.RemoveSnapshot_Task(delete_snapshot_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot delete snapshot of VM: #{e.message}\n#{e.backtrace}"
        end
    end

    def find_snapshot_in_list(list, snap_id)
        list.each do |i|
            if i.name == snap_id.to_s
                return i.snapshot
            elsif !i.childSnapshotList.empty?
                snap = find_snapshot_in_list(i.childSnapshotList, snap_id)
                return snap if snap
            end
        end rescue nil

        nil
    end

    ############################################################################
    # actions
    ############################################################################

    def shutdown
        begin
            @item.ShutdownGuest
            # Check if the VM has been powered off
            (0..VM_SHUTDOWN_TIMEOUT).each do
                break if @item.runtime.powerState == "poweredOff"
                sleep 1
            end
        rescue
            # Ignore ShutdownGuest exceptions, the VM may not have
            # open-vm-tools installed
        end

        # If the VM hasn't been powered off, do it now
        if @item.runtime.powerState != "poweredOff"
            poweroff_hard
        end
    end

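    # The guest shutdown above is best effort: ShutdownGuest is asynchronous,
    # so the loop polls powerState for up to VM_SHUTDOWN_TIMEOUT seconds and
    # poweroff_hard acts as the fallback, i.e.:
    #
    #   shutdown   # graceful first, hard poweroff if the guest ignores it
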
    def destroy
        @item.Destroy_Task.wait_for_completion
    end

    def mark_as_template
        @item.MarkAsTemplate
    end

    def reset
        @item.ResetVM_Task.wait_for_completion
    end

    def suspend
        @item.SuspendVM_Task.wait_for_completion
    end

    def reboot
        @item.RebootGuest
    end

    def poweron
        ## If needed in the future, VMs can be powered on from the datacenter:
        ## dc = get_dc
        ## dc.power_on_vm(@item)
        @item.PowerOnVM_Task.wait_for_completion
    end

    def is_powered_on?
        return @item.runtime.powerState == "poweredOn"
    end

    def poweroff_hard
        @item.PowerOffVM_Task.wait_for_completion
    end

    def remove_all_snapshots
        @item.RemoveAllSnapshots_Task.wait_for_completion
    end

    def set_running(state)
        value = state ? "yes" : "no"

        config_array = [
            { :key => "opennebula.vm.running", :value => value }
        ]
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(
            { :extraConfig => config_array }
        )

        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    end

2653
    ############################################################################
2654
    # monitoring
2655
    ############################################################################
2656

    
2657
    # monitor function used when VMM poll action is called
2658
    def monitor_poll_vm
2659
        reset_monitor
2660

    
2661
        @state = state_to_c(self["summary.runtime.powerState"])
2662

    
2663
        if @state != VM_STATE[:active]
2664
            reset_monitor
2665
            return
2666
        end
2667

    
2668
        cpuMhz = self["runtime.host.summary.hardware.cpuMhz"].to_f
2669

    
2670
        @monitor[:used_memory] = self["summary.quickStats.hostMemoryUsage"] * 1024
2671

    
2672
        used_cpu = self["summary.quickStats.overallCpuUsage"].to_f / cpuMhz
2673
        used_cpu = (used_cpu * 100).to_s
2674
        @monitor[:used_cpu]  = sprintf('%.2f', used_cpu).to_s
2675

    
2676
        # Check for negative values
2677
        @monitor[:used_memory] = 0 if @monitor[:used_memory].to_i < 0
2678
        @monitor[:used_cpu]    = 0 if @monitor[:used_cpu].to_i < 0
2679

    
2680
        guest_ip_addresses = []
2681
        self["guest.net"].each do |net|
2682
            net.ipConfig.ipAddress.each do |ip|
2683
                guest_ip_addresses << ip.ipAddress
2684
            end if net.ipConfig && net.ipConfig.ipAddress
2685
        end if self["guest.net"]
2686

    
2687
        @guest_ip_addresses = guest_ip_addresses.join(',')
2688

    
2689
        pm = self['_connection'].serviceInstance.content.perfManager
2690

    
2691
        provider = pm.provider_summary(@item)
2692

    
2693
        refresh_rate = provider.refreshRate
2694

    
2695
        if get_vm_id
2696
            stats = {}
2697

    
2698
            if (one_item["MONITORING/LAST_MON"] && one_item["MONITORING/LAST_MON"].to_i != 0 )
2699
                #Real time data stores max 1 hour. 1 minute has 3 samples
2700
                interval = (Time.now.to_i - one_item["MONITORING/LAST_MON"].to_i)
2701

    
2702
                #If last poll was more than hour ago get 3 minutes,
2703
                #else calculate how many samples since last poll
2704
                samples =  interval > 3600 ? 9 : (interval / refresh_rate) + 1
2705
                max_samples = samples > 0 ? samples : 1
2706

    
2707
                stats = pm.retrieve_stats(
                    [@item],
                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
                    'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
                    'virtualDisk.read','virtualDisk.write'],
                    {interval:refresh_rate, max_samples: max_samples}
                ) rescue {}
            else
                # First poll, get at least the latest 3 minutes = 9 samples
                stats = pm.retrieve_stats(
                    [@item],
                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
                    'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
                    'virtualDisk.read','virtualDisk.write'],
                    {interval:refresh_rate, max_samples: 9}
                ) rescue {}
            end

            if !stats.empty? && !stats.first[1][:metrics].empty?
                metrics = stats.first[1][:metrics]

                # Add up all the positive samples returned for a counter
                sum_samples = ->(counter) do
                    (metrics[counter] || []).select { |s| s > 0 }.inject(0, :+)
                end

                nettx_kbpersec = sum_samples.call('net.transmitted')
                netrx_kbpersec = sum_samples.call('net.bytesRx')
                read_kbpersec  = sum_samples.call('virtualDisk.read')
                read_iops      = sum_samples.call('virtualDisk.numberReadAveraged')
                write_kbpersec = sum_samples.call('virtualDisk.write')
                write_iops     = sum_samples.call('virtualDisk.numberWriteAveraged')
            else
                nettx_kbpersec = 0
                netrx_kbpersec = 0
                read_kbpersec  = 0
                read_iops      = 0
                write_kbpersec = 0
                write_iops     = 0
            end

            # Accumulate values if present
            previous_nettx = @one_item && @one_item["MONITORING/NETTX"] ? @one_item["MONITORING/NETTX"].to_i : 0
            previous_netrx = @one_item && @one_item["MONITORING/NETRX"] ? @one_item["MONITORING/NETRX"].to_i : 0
            previous_diskrdiops = @one_item && @one_item["MONITORING/DISKRDIOPS"] ? @one_item["MONITORING/DISKRDIOPS"].to_i : 0
            previous_diskwriops = @one_item && @one_item["MONITORING/DISKWRIOPS"] ? @one_item["MONITORING/DISKWRIOPS"].to_i : 0
            previous_diskrdbytes = @one_item && @one_item["MONITORING/DISKRDBYTES"] ? @one_item["MONITORING/DISKRDBYTES"].to_i : 0
            previous_diskwrbytes = @one_item && @one_item["MONITORING/DISKWRBYTES"] ? @one_item["MONITORING/DISKWRBYTES"].to_i : 0

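            # Unit check for the conversions below: vCenter reports the
            # net.* and virtualDisk.read/write counters in KB/s averaged
            # over each refresh_rate-second sample, so
            #   sum(KB/s samples) * 1024 * refresh_rate
            # approximates the bytes transferred over the sampled window,
            # which is then added to the previously accumulated total.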
            @monitor[:nettx] = previous_nettx + (nettx_kbpersec * 1024 * refresh_rate).to_i
            @monitor[:netrx] = previous_netrx + (netrx_kbpersec * 1024 * refresh_rate).to_i

            @monitor[:diskrdiops]  = previous_diskrdiops + read_iops
            @monitor[:diskwriops]  = previous_diskwriops + write_iops
            @monitor[:diskrdbytes] = previous_diskrdbytes + (read_kbpersec * 1024 * refresh_rate).to_i
            @monitor[:diskwrbytes] = previous_diskwrbytes + (write_kbpersec * 1024 * refresh_rate).to_i
        end
    end

    # monitor function used when the poll action is called for all VMs
    def monitor(stats)

        reset_monitor

        refresh_rate = 20 # 20 seconds between samples (real-time)

        @state = state_to_c(@vm_info["summary.runtime.powerState"])

        return if @state != VM_STATE[:active]

        cpuMhz = @vm_info[:esx_host_cpu]

        @monitor[:used_memory] = @vm_info["summary.quickStats.hostMemoryUsage"].to_i * 1024

        used_cpu = @vm_info["summary.quickStats.overallCpuUsage"].to_f / cpuMhz
        @monitor[:used_cpu] = sprintf('%.2f', used_cpu * 100)

        # Clamp negative values to zero
        @monitor[:used_memory] = 0 if @monitor[:used_memory].to_i < 0
        @monitor[:used_cpu]    = 0 if @monitor[:used_cpu].to_i < 0

        guest_ip_addresses = []
        if @vm_info["guest.net"]
            @vm_info["guest.net"].each do |net|
                next unless net.ipConfig && net.ipConfig.ipAddress
                net.ipConfig.ipAddress.each do |ip|
                    guest_ip_addresses << ip.ipAddress
                end
            end
        end

        @guest_ip_addresses = guest_ip_addresses.join(',')

        if stats.key?(@item)
            metrics = stats[@item][:metrics]

            # Add up all the positive samples returned for a counter
            sum_samples = ->(counter) do
                (metrics[counter] || []).select { |s| s > 0 }.inject(0, :+)
            end

            nettx_kbpersec = sum_samples.call('net.transmitted')
            netrx_kbpersec = sum_samples.call('net.bytesRx')
            read_kbpersec  = sum_samples.call('virtualDisk.read')
            read_iops      = sum_samples.call('virtualDisk.numberReadAveraged')
            write_kbpersec = sum_samples.call('virtualDisk.write')
            write_iops     = sum_samples.call('virtualDisk.numberWriteAveraged')
        else
            nettx_kbpersec = 0
            netrx_kbpersec = 0
            read_kbpersec  = 0
            read_iops      = 0
            write_kbpersec = 0
            write_iops     = 0
        end

        # Accumulate values if present
        previous_nettx = @one_item && @one_item["MONITORING/NETTX"] ? @one_item["MONITORING/NETTX"].to_i : 0
        previous_netrx = @one_item && @one_item["MONITORING/NETRX"] ? @one_item["MONITORING/NETRX"].to_i : 0
        previous_diskrdiops = @one_item && @one_item["MONITORING/DISKRDIOPS"] ? @one_item["MONITORING/DISKRDIOPS"].to_i : 0
        previous_diskwriops = @one_item && @one_item["MONITORING/DISKWRIOPS"] ? @one_item["MONITORING/DISKWRIOPS"].to_i : 0
        previous_diskrdbytes = @one_item && @one_item["MONITORING/DISKRDBYTES"] ? @one_item["MONITORING/DISKRDBYTES"].to_i : 0
        previous_diskwrbytes = @one_item && @one_item["MONITORING/DISKWRBYTES"] ? @one_item["MONITORING/DISKWRBYTES"].to_i : 0

        @monitor[:nettx] = previous_nettx + (nettx_kbpersec * 1024 * refresh_rate).to_i
        @monitor[:netrx] = previous_netrx + (netrx_kbpersec * 1024 * refresh_rate).to_i

        @monitor[:diskrdiops]  = previous_diskrdiops + read_iops
        @monitor[:diskwriops]  = previous_diskwriops + write_iops
        @monitor[:diskrdbytes] = previous_diskrdbytes + (read_kbpersec * 1024 * refresh_rate).to_i
        @monitor[:diskwrbytes] = previous_diskwrbytes + (write_kbpersec * 1024 * refresh_rate).to_i
    end

    # Generates an OpenNebula IM Driver valid string with the monitor info
    def info
        return 'STATE=d' if @state == 'd'

        guest_ip = @vm_info ? @vm_info["guest.ipAddress"] : self["guest.ipAddress"]

        used_cpu    = @monitor[:used_cpu]
        used_memory = @monitor[:used_memory]
        netrx       = @monitor[:netrx]
        nettx       = @monitor[:nettx]
        diskrdbytes = @monitor[:diskrdbytes]
        diskwrbytes = @monitor[:diskwrbytes]
        diskrdiops  = @monitor[:diskrdiops]
        diskwriops  = @monitor[:diskwriops]

        esx_host      = @vm_info ? @vm_info[:esx_host_name].to_s : self["runtime.host.name"].to_s
        guest_state   = @vm_info ? @vm_info["guest.guestState"].to_s : self["guest.guestState"].to_s
        vmware_tools  = @vm_info ? @vm_info["guest.toolsRunningStatus"].to_s : self["guest.toolsRunningStatus"].to_s
        vmtools_ver   = @vm_info ? @vm_info["guest.toolsVersion"].to_s : self["guest.toolsVersion"].to_s
        vmtools_verst = @vm_info ? @vm_info["guest.toolsVersionStatus2"].to_s : self["guest.toolsVersionStatus2"].to_s

        if @vm_info
            rp_name = @vm_info[:rp_list].select { |item| item[:ref] == @vm_info["resourcePool"]._ref }.first[:name] rescue ""
            rp_name = "Resources" if rp_name.empty?
        else
            rp_name = self["resourcePool"].name
        end

        str_info = ""

        str_info = "GUEST_IP=" << guest_ip.to_s << " " if guest_ip

        if @guest_ip_addresses && !@guest_ip_addresses.empty?
            str_info << "GUEST_IP_ADDRESSES=\"" << @guest_ip_addresses.to_s << "\" "
        end

        str_info << "#{POLL_ATTRIBUTE[:state]}="  << @state           << " "
        str_info << "#{POLL_ATTRIBUTE[:cpu]}="    << used_cpu.to_s    << " "
        str_info << "#{POLL_ATTRIBUTE[:memory]}=" << used_memory.to_s << " "
        str_info << "#{POLL_ATTRIBUTE[:netrx]}="  << netrx.to_s       << " "
        str_info << "#{POLL_ATTRIBUTE[:nettx]}="  << nettx.to_s       << " "

        str_info << "DISKRDBYTES=" << diskrdbytes.to_s << " "
        str_info << "DISKWRBYTES=" << diskwrbytes.to_s << " "
        str_info << "DISKRDIOPS="  << diskrdiops.to_s  << " "
        str_info << "DISKWRIOPS="  << diskwriops.to_s  << " "

        str_info << "VCENTER_ESX_HOST=\""                 << esx_host      << "\" "
        str_info << "VCENTER_GUEST_STATE="                << guest_state   << " "
        str_info << "VCENTER_VMWARETOOLS_RUNNING_STATUS=" << vmware_tools  << " "
        str_info << "VCENTER_VMWARETOOLS_VERSION="        << vmtools_ver   << " "
        str_info << "VCENTER_VMWARETOOLS_VERSION_STATUS=" << vmtools_verst << " "
        str_info << "VCENTER_RP_NAME=\""                  << rp_name       << "\" "
    end

    def reset_monitor
        @monitor = {
            :used_cpu    => 0,
            :used_memory => 0,
            :netrx       => 0,
            :nettx       => 0,
            :diskrdbytes => 0,
            :diskwrbytes => 0,
            :diskrdiops  => 0,
            :diskwriops  => 0
        }
    end

    # Converts the VI string state to the OpenNebula state convention.
    # Guest states are:
    # - poweredOff   The virtual machine is currently powered off.
    # - poweredOn    The virtual machine is currently powered on.
    # - suspended    The virtual machine is currently suspended.
    def state_to_c(state)
        case state
            when 'poweredOn'
                VM_STATE[:active]
            when 'suspended'
                VM_STATE[:paused]
            when 'poweredOff'
                VM_STATE[:deleted]
            else
                VM_STATE[:unknown]
        end
    end

    # TODO check with uuid
    def self.new_from_ref(ref, vi_client)
        self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
    end

end # class VirtualMachine

end # module VCenterDriver