module VCenterDriver
require 'digest'
class VirtualMachineFolder
    attr_accessor :item, :items

    def initialize(item)
        @item = item
        @items = {}
    end

    ########################################################################
    # Builds a hash with VirtualMachine-Ref / VirtualMachine to be used as
    # a cache
    # @return [Hash] in the form
    #   { vm_ref [Symbol] => VirtualMachine object }
    ########################################################################
    def fetch!
        VIClient.get_entities(@item, "VirtualMachine").each do |item|
            item_name = item._ref
            @items[item_name.to_sym] = VirtualMachine.new(item)
        end
    end

    def fetch_templates!
        VIClient.get_entities(@item, "VirtualMachine").each do |item|
            if item.config.template
                item_name = item._ref
                @items[item_name.to_sym] = Template.new(item)
            end
        end
    end

    ########################################################################
    # Returns a VirtualMachine. Uses the cache if available.
    # @param ref [Symbol] the vcenter ref
    # @return VirtualMachine
    ########################################################################
    def get(ref)
        if !@items[ref.to_sym]
            rbvmomi_vm = RbVmomi::VIM::VirtualMachine.new(@item._connection, ref)
            @items[ref.to_sym] = VirtualMachine.new(rbvmomi_vm)
        end

        @items[ref.to_sym]
    end
end # class VirtualMachineFolder
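
# Illustrative usage sketch (hypothetical variable names; assumes a
# connected VIClient and an RbVmomi datacenter item, dc_item):
#
#   vm_folder = VCenterDriver::VirtualMachineFolder.new(dc_item.vmFolder)
#   vm_folder.fetch_templates!       # cache only items flagged as templates
#   vm = vm_folder.get("vm-1234")    # cached lookup; fetched on a miss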

class Template

    attr_accessor :item

    include Memoize

    def initialize(item=nil, vi_client=nil)
        @item = item
        @vi_client = vi_client
        @locking = true
    end

    # Locking function. Similar to flock
    def lock
        if @locking
           @locking_file = File.open("/tmp/vcenter-importer-lock","w")
           @locking_file.flock(File::LOCK_EX)
        end
    end

    # Unlock driver execution mutex
    def unlock
        if @locking
            @locking_file.close
        end
    end

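    # Usage sketch for the lock/unlock pair (hypothetical; mirrors how the
    # import methods below guard against concurrent importers):
    #
    #   begin
    #       lock                  # takes an exclusive flock on the lock file
    #       # ... critical import work ...
    #   ensure
    #       unlock                # always release the file handle
    #   end
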
    def get_dc
        item = @item

        while !item.instance_of? RbVmomi::VIM::Datacenter
            item = item.parent
            if item.nil?
                raise "Could not find the parent Datacenter"
            end
        end

        Datacenter.new(item)
    end

    def delete_template
        @item.Destroy_Task.wait_for_completion
    end

    def get_vcenter_instance_uuid
        @vi_client.vim.serviceContent.about.instanceUuid rescue nil
    end

    def create_template_copy(template_name)
        error = nil
        template_ref = nil

        template_name = "one-#{self['name']}" if template_name.empty?

        relocate_spec_params = {}
        relocate_spec_params[:pool] = get_rp
        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params)

        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec({
            :location => relocate_spec,
            :powerOn  => false,
            :template => false
        })

        template = nil
        begin
            template = @item.CloneVM_Task(:folder => @item.parent,
                                          :name   => template_name,
                                          :spec   => clone_spec).wait_for_completion
            template_ref = template._ref
        rescue Exception => e
            if !e.message.start_with?('DuplicateName')
                error = "Could not create the template clone. Reason: #{e.message}"
                return error, nil
            end

            # A template with the same name already exists: find it, remove
            # it and retry the clone once
            dc = get_dc
            vm_folder = dc.vm_folder
            vm_folder.fetch!
            vm = vm_folder.items
                    .select{|k,v| v.item.name == template_name}
                    .values.first.item rescue nil

            if vm
                begin
                    vm.Destroy_Task.wait_for_completion
                    template = @item.CloneVM_Task(:folder => @item.parent,
                                                  :name   => template_name,
                                                  :spec   => clone_spec).wait_for_completion
                    template_ref = template._ref
                rescue Exception => e
                    error = "Could not delete the existing template, please remove it manually from vCenter. Reason: #{e.message}"
                end
            else
                error = "Could not create the template clone. Reason: #{e.message}"
            end
        end

        return error, template_ref
    end

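    # Usage sketch (hypothetical): create_template_copy returns an
    # [error, ref] pair, so callers should check the error first:
    #
    #   error, template_ref = template.create_template_copy("one-mycopy")
    #   raise error if error
    #   # template_ref now holds the vCenter ref of the new template
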
    # Linked Clone over existing template
    def create_delta_disks

        begin
            disks = @item['config.hardware.device'].grep(RbVmomi::VIM::VirtualDisk)
            disk_without_snapshots = disks.select { |x| x.backing.parent.nil? }
        rescue
            error = "Cannot extract existing disks on template."
            use_linked_clones = false
            return error, use_linked_clones
        end

        # If disk_without_snapshots is empty the template already has delta
        # disks and there is nothing to reconfigure
        if !disk_without_snapshots.empty?

            begin
                if self['config.template']
                    @item.MarkAsVirtualMachine(:pool => get_rp, :host => self['runtime.host'])
                end
            rescue Exception => e
                @item.MarkAsTemplate()
                error = "Cannot mark the template as a VirtualMachine. Not using linked clones. Reason: #{e.message}/#{e.backtrace}"
                use_linked_clones = false
                return error, use_linked_clones
            end

            begin
                spec = {}
                spec[:deviceChange] = []

                disk_without_snapshots.each do |disk|
                    remove_disk_spec = { :operation => :remove, :device => disk }
                    spec[:deviceChange] << remove_disk_spec

                    add_disk_spec = { :operation => :add,
                                    :fileOperation => :create,
                                    :device => disk.dup.tap { |x|
                                            x.backing = x.backing.dup
                                            x.backing.fileName = "[#{disk.backing.datastore.name}]"
                                            x.backing.parent = disk.backing
                                    }
                    }
                    spec[:deviceChange] << add_disk_spec
                end

                @item.ReconfigVM_Task(:spec => spec).wait_for_completion if !spec[:deviceChange].empty?
            rescue Exception => e
                error = "Cannot create the delta disks on top of the template. Reason: #{e.message}."
                use_linked_clones = false
                return error, use_linked_clones
            end

            begin
                @item.MarkAsTemplate()
            rescue
                error = "Cannot mark the VirtualMachine as a template. Not using linked clones."
                use_linked_clones = false
                return error, use_linked_clones
            end
        end

        error = nil
        use_linked_clones = true
        return error, use_linked_clones
    end

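    # Usage sketch (hypothetical): as with create_template_copy, callers get
    # an [error, flag] pair and can fall back to full clones on failure:
    #
    #   error, use_linked_clones = template.create_delta_disks
    #   STDERR.puts error if error
    #   # use_linked_clones == true means every disk now has a delta backing
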
    def import_vcenter_disks(vc_uuid, dpool, ipool, sunstone=false, template_id=nil)
        disk_info = ""
        error = ""
        sunstone_disk_info = []

        # Track allocated images; initialized outside the begin block so the
        # rollback in the ensure clause can always reference it
        allocated_images = []

        begin
            lock #Lock import operation, to avoid concurrent creation of images

            ##ccr_ref = self["runtime.host.parent._ref"]
            dc = get_dc
            dc_ref = dc.item._ref

            # Get disks and info required
            vc_disks = get_vcenter_disks

            vc_disks.each do |disk|
                datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_dc(disk[:datastore]._ref,
                                                                                        dc_ref,
                                                                                        vc_uuid,
                                                                                        dpool)
                if datastore_found.nil?
                    error = "\n    ERROR: datastore #{disk[:datastore].name}: has to be imported first as an image datastore!\n"

                    # Rollback: delete allocated disk images
                    allocated_images.each do |i|
                        i.delete
                    end

                    break
                end

                image_import = VCenterDriver::Datastore.get_image_import_template(disk[:datastore].name,
                                                                                  disk[:path],
                                                                                  disk[:type],
                                                                                  disk[:prefix],
                                                                                  ipool,
                                                                                  template_id)
                # Image is already in the datastore
                if image_import[:one]
                    # This is the disk info
                    disk_tmp = ""
                    disk_tmp << "DISK=[\n"
                    disk_tmp << "IMAGE_ID=\"#{image_import[:one]["ID"]}\",\n"
                    disk_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
                    disk_tmp << "]\n"
                    if sunstone
                        sunstone_disk = {}
                        sunstone_disk[:type] = "EXISTING_DISK"
                        sunstone_disk[:image_tmpl] = disk_tmp
                        sunstone_disk_info << sunstone_disk
                    else
                        disk_info << disk_tmp
                    end

                elsif !image_import[:template].empty?

                    if sunstone
                        sunstone_disk = {}
                        sunstone_disk[:type] = "NEW_DISK"
                        sunstone_disk[:image_tmpl] = image_import[:template]
                        sunstone_disk[:ds_id] = datastore_found['ID'].to_i
                        sunstone_disk_info << sunstone_disk
                    else
                        # Then the image is created as it's not in the datastore
                        one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image)
                        allocated_images << one_i
                        rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i)

                        if OpenNebula.is_error?(rc)
                            error = "    Error creating disk from template: #{rc.message}\n"
                            break
                        end

                        # Add info for One template
                        one_i.info
                        disk_info << "DISK=[\n"
                        disk_info << "IMAGE_ID=\"#{one_i["ID"]}\",\n"
                        disk_info << "OPENNEBULA_MANAGED=\"NO\"\n"
                        disk_info << "]\n"
                    end
                end
            end

        rescue Exception => e
            error = "\n    There was an error trying to create an image for disk in vcenter template. Reason: #{e.message}\n#{e.backtrace}"
        ensure
            unlock
            if !error.empty?
                # Rollback: delete allocated disk images
                allocated_images.each do |i|
                    i.delete
                end
            end
        end

        return error, sunstone_disk_info, allocated_images if sunstone

        return error, disk_info, allocated_images
    end

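    # Usage sketch (hypothetical pools, e.g. obtained through VIHelper):
    #
    #   error, disk_info, images = template.import_vcenter_disks(vc_uuid,
    #                                                            dpool, ipool)
    #   raise error if !error.empty?
    #   # disk_info is a string of DISK=[...] sections ready to be appended
    #   # to an OpenNebula template
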
    def import_vcenter_nics(vc_uuid, npool, hpool, vcenter_instance_name,
                            template_ref, wild, sunstone=false, vm_name=nil, vm_id=nil, dc_name=nil)
        nic_info = ""
        error = ""
        sunstone_nic_info = []

        # Track allocated networks for rollback; initialized outside the
        # begin block so the ensure clause can always reference it
        allocated_networks = []

        begin
            lock #Lock import operation, to avoid concurrent creation of networks

            if !dc_name
                dc = get_dc
                dc_name = dc.item.name
                dc_ref  = dc.item._ref
            end

            ccr_ref  = self["runtime.host.parent._ref"]
            ccr_name = self["runtime.host.parent.name"]

            # Get NICs and info required
            vc_nics = get_vcenter_nics

            # Track port groups duplicated in this VM
            duplicated_networks = []

            vc_nics.each do |nic|
                # Check if the network already exists
                network_found = VCenterDriver::Network.get_unmanaged_vnet_by_ref(nic[:net_ref],
                                                                                 template_ref,
                                                                                 vc_uuid,
                                                                                 npool)
                # Network is already in OpenNebula
                if network_found

                    # This is the existing nic info
                    nic_tmp = ""
                    nic_tmp << "NIC=[\n"
                    nic_tmp << "NETWORK_ID=\"#{network_found["ID"]}\",\n"
                    nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
                    nic_tmp << "]\n"

                    if sunstone
                        sunstone_nic = {}
                        sunstone_nic[:type] = "EXISTING_NIC"
                        sunstone_nic[:network_tmpl] = nic_tmp
                        sunstone_nic_info << sunstone_nic
                    else
                        nic_info << nic_tmp
                    end
                else
                    # Then the network has to be created as it's not in OpenNebula
                    one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork)

                    # Let's get the OpenNebula host associated to the cluster reference
                    one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
                                                                   "TEMPLATE/VCENTER_CCR_REF",
                                                                   ccr_ref,
                                                                   vc_uuid,
                                                                   hpool)

                    # Let's get the CLUSTER_ID from the OpenNebula host
                    if !one_host || !one_host['CLUSTER_ID']
                        cluster_id = -1
                    else
                        cluster_id = one_host['CLUSTER_ID']
                    end

                    # We have to know if we're importing nics from a wild vm
                    # or from a template
                    if wild
                        unmanaged = "wild"
                    else
                        unmanaged = "template"
                    end

                    # Prepare the Virtual Network template
                    one_vnet = VCenterDriver::Network.to_one_template(nic[:net_name],
                                                                      nic[:net_ref],
                                                                      nic[:pg_type],
                                                                      ccr_ref,
                                                                      ccr_name,
                                                                      vc_uuid,
                                                                      vcenter_instance_name,
                                                                      dc_name,
                                                                      cluster_id,
                                                                      nil,
                                                                      unmanaged,
                                                                      template_ref,
                                                                      dc_ref,
                                                                      vm_name,
                                                                      vm_id)

                    # By default add an ethernet range to network size 255
                    ar_str = ""
                    ar_str << "AR=[\n"
                    ar_str << "TYPE=\"ETHER\",\n"
                    ar_str << "SIZE=\"255\"\n"
                    ar_str << "]\n"
                    one_vnet[:one] << ar_str

                    if sunstone
                        if !duplicated_networks.include?(nic[:net_name])
                            sunstone_nic = {}
                            sunstone_nic[:type] = "NEW_NIC"
                            sunstone_nic[:network_name] = nic[:net_name]
                            sunstone_nic[:network_tmpl] = one_vnet[:one]
                            sunstone_nic[:one_cluster_id] = cluster_id.to_i
                            sunstone_nic_info << sunstone_nic
                            duplicated_networks << nic[:net_name]
                        else
                            sunstone_nic = {}
                            sunstone_nic[:type] = "DUPLICATED_NIC"
                            sunstone_nic[:network_name] = nic[:net_name]
                            sunstone_nic_info << sunstone_nic
                        end
                    else
                        # Allocate the Virtual Network
                        allocated_networks << one_vn
                        rc = one_vn.allocate(one_vnet[:one], cluster_id.to_i)

                        if OpenNebula.is_error?(rc)
                            error = "\n    ERROR: Could not allocate virtual network due to #{rc.message}\n"
                            break
                        end

                        # Add info for One template
                        one_vn.info
                        nic_info << "NIC=[\n"
                        nic_info << "NETWORK_ID=\"#{one_vn["ID"]}\",\n"
                        nic_info << "OPENNEBULA_MANAGED=\"NO\"\n"
                        nic_info << "]\n"

                        # Refresh npool
                        npool.info_all
                    end
                end
            end

        rescue Exception => e
            error = "\n    There was an error trying to create a virtual network for network in vcenter template. Reason: #{e.message}"
        ensure
            unlock
            # Rollback: delete allocated virtual networks
            if !error.empty?
                allocated_networks.each do |n|
                    n.delete
                end
            end
        end

        return error, sunstone_nic_info, allocated_networks if sunstone

        return error, nic_info, allocated_networks
    end

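    # Usage sketch (hypothetical; wild = false imports NICs of a template):
    #
    #   error, nic_info, vnets = template.import_vcenter_nics(vc_uuid, npool,
    #                                                         hpool, vc_name,
    #                                                         template_ref,
    #                                                         false)
    #   raise error if !error.empty?
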
    def get_vcenter_disk_key(unit_number, controller_key)

        key = nil

        @item["config.hardware.device"].each do |device|
            next if !is_disk_or_iso?(device)

            if device.controllerKey == controller_key &&
               device.unitNumber == unit_number

               key = device.key
               break
            end
        end

        return key
    end

    def get_vcenter_disks

        disks = []
        ide_controlled  = []
        sata_controlled = []
        scsi_controlled = []

        @item["config.hardware.device"].each do |device|
            disk = {}

            if device.is_a? RbVmomi::VIM::VirtualIDEController
                ide_controlled.concat(device.device)
            end

            if device.is_a? RbVmomi::VIM::VirtualSATAController
                sata_controlled.concat(device.device)
            end

            if device.is_a? RbVmomi::VIM::VirtualSCSIController
                scsi_controlled.concat(device.device)
            end

            if is_disk_or_iso?(device)
                disk[:device]     = device
                disk[:datastore]  = device.backing.datastore
                disk[:path]       = device.backing.fileName
                disk[:path_wo_ds] = disk[:path].sub(/^\[(.*?)\] /, "")
                disk[:type]       = is_disk?(device) ? "OS" : "CDROM"
                disk[:key]        = device.key
                disk[:prefix]     = "hd" if ide_controlled.include?(device.key)
                disk[:prefix]     = "sd" if scsi_controlled.include?(device.key)
                disk[:prefix]     = "sd" if sata_controlled.include?(device.key)
                disks << disk
            end
        end

        return disks
    end

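    # Each entry returned by get_vcenter_disks is a Hash of the form below
    # (values are illustrative):
    #
    #   { :device     => <RbVmomi::VIM::VirtualDisk>,
    #     :datastore  => <RbVmomi::VIM::Datastore>,
    #     :path       => "[datastore1] one/disk.0.vmdk",
    #     :path_wo_ds => "one/disk.0.vmdk",
    #     :type       => "OS",        # or "CDROM"
    #     :key        => 2000,
    #     :prefix     => "sd" }       # "hd" for IDE-controlled devices
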
    def get_vcenter_nics
        nics = []
        @item["config.hardware.device"].each do |device|
            if is_nic?(device)
                nic = {}
                nic[:net_name]  = device.backing.network.name
                nic[:net_ref]   = device.backing.network._ref
                nic[:pg_type]   = VCenterDriver::Network.get_network_type(device)
                nics << nic
            end
        end
        return nics
    end

    # Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom
    def is_disk_or_cdrom?(device)
        is_disk  = device.is_a?(RbVmomi::VIM::VirtualDisk)
        is_cdrom = device.is_a?(RbVmomi::VIM::VirtualCdrom)
        is_disk || is_cdrom
    end

    # Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file
    def is_disk_or_iso?(device)
        is_disk = device.is_a?(RbVmomi::VIM::VirtualDisk)
        is_iso  = device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo
        is_disk || is_iso
    end

    # Checks if a RbVmomi::VIM::VirtualDevice is a disk
    def is_disk?(device)
        device.is_a?(RbVmomi::VIM::VirtualDisk)
    end

    # Checks if a RbVmomi::VIM::VirtualDevice is a network interface
    def is_nic?(device)
        device.is_a?(RbVmomi::VIM::VirtualEthernetCard)
    end

    # @return RbVmomi::VIM::ResourcePool, first resource pool in cluster
    def get_rp
        self['runtime.host.parent.resourcePool']
    end

    def vm_to_one(vm_name)

        str = "NAME   = \"#{vm_name}\"\n"\
              "CPU    = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
              "vCPU   = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
              "MEMORY = \"#{@vm_info["config.hardware.memoryMB"]}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "CONTEXT = [\n"\
              "    NETWORK = \"YES\",\n"\
              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
              "]\n"\
              "VCENTER_INSTANCE_ID =\"#{@vm_info[:vc_uuid]}\"\n"\
              "VCENTER_CCR_REF =\"#{@vm_info[:cluster_ref]}\"\n"

        str << "IMPORT_VM_ID =\"#{self["_ref"]}\"\n"
        str << "IMPORT_STATE =\"#{@state}\"\n"

        vnc_port = nil
        keymap = nil

        @vm_info["config.extraConfig"].each do |xtra|
            if xtra[:key].downcase == "remotedisplay.vnc.port"
                vnc_port = xtra[:value]
            end

            if xtra[:key].downcase == "remotedisplay.vnc.keymap"
                keymap = xtra[:value]
            end
        end

        if !@vm_info["config.extraConfig"].empty?
            str << "GRAPHICS = [\n"\
                   "  TYPE     =\"vnc\",\n"
            str << "  PORT     =\"#{vnc_port}\",\n" if vnc_port
            str << "  KEYMAP   =\"#{keymap}\",\n" if keymap
            str << "  LISTEN   =\"0.0.0.0\"\n"
            str << "]\n"
        end

        if !@vm_info["config.annotation"] || @vm_info["config.annotation"].empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
                " from Cluster #{@vm_info["cluster_name"]}\"\n"
        else
            notes = @vm_info["config.annotation"].gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        case @vm_info["guest.guestFullName"]
            when /CentOS/i
                str << "LOGO=images/logos/centos.png\n"
            when /Debian/i
                str << "LOGO=images/logos/debian.png\n"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png\n"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png\n"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png\n"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png\n"
            when /Linux/i
                str << "LOGO=images/logos/linux.png\n"
        end

        return str
    end

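    # The string built by vm_to_one is an OpenNebula template fragment, e.g.
    # (illustrative values):
    #
    #   NAME   = "myvm"
    #   CPU    = "2"
    #   vCPU   = "2"
    #   MEMORY = "2048"
    #   HYPERVISOR = "vcenter"
    #   ...
    #   IMPORT_VM_ID ="vm-1234"
    #   GRAPHICS = [ TYPE ="vnc", LISTEN ="0.0.0.0" ]
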
    def self.template_to_one(template, vc_uuid, ccr_ref, ccr_name, import_name, host_id)

        num_cpu, memory, annotation, guest_fullname = template.item.collect(
            "config.hardware.numCPU",
            "config.hardware.memoryMB",
            "config.annotation",
            "guest.guestFullName")

        str = "NAME   = \"#{import_name}\"\n"\
              "CPU    = \"#{num_cpu}\"\n"\
              "vCPU   = \"#{num_cpu}\"\n"\
              "MEMORY = \"#{memory}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "CONTEXT = [\n"\
              "    NETWORK = \"YES\",\n"\
              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
              "]\n"\
              "VCENTER_INSTANCE_ID =\"#{vc_uuid}\"\n"

        str << "VCENTER_TEMPLATE_REF =\"#{template["_ref"]}\"\n"
        str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n"

        str << "GRAPHICS = [\n"\
               "  TYPE     =\"vnc\",\n"
        str << "  LISTEN   =\"0.0.0.0\"\n"
        str << "]\n"

        if annotation.nil? || annotation.empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
                " from Cluster #{ccr_name}\"\n"
        else
            notes = annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        case guest_fullname
            when /CentOS/i
                str << "LOGO=images/logos/centos.png\n"
            when /Debian/i
                str << "LOGO=images/logos/debian.png\n"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png\n"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png\n"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png\n"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png\n"
            when /Linux/i
                str << "LOGO=images/logos/linux.png\n"
        end

        return str
    end

    def self.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name=nil, dc_name=nil, rp_cache={})

        begin
            template_ref      = template['_ref']
            template_name     = template["name"]
            template_ccr      = template['runtime.host.parent']
            template_ccr_ref  = template_ccr._ref
            template_ccr_name = template_ccr.name

            # Set vcenter instance name
            vcenter_instance_name = vi_client.vim.host if !vcenter_instance_name

            # Get datacenter info (get_dc is an instance method, so it must
            # be called on the template object inside this class method)
            if !dc_name
                dc = template.get_dc
                dc_name = dc.item.name
            end

            # Get resource pools and generate a list
            if !rp_cache[template_ccr_name]
                tmp_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(template_ccr_ref, vi_client)
                rp_list = tmp_cluster.get_resource_pool_list
                rp = ""
                if !rp_list.empty?
                    rp_name_list = []
                    rp_list.each do |rp_hash|
                        rp_name_list << rp_hash[:name]
                    end
                    rp =  "O|list|Which resource pool you want this VM to run in? "
                    rp << "|#{rp_name_list.join(",")}" #List of RP
                    rp << "|#{rp_name_list.first}" #Default RP
                end
                rp_cache[template_ccr_name] = {}
                rp_cache[template_ccr_name][:rp] = rp
                rp_cache[template_ccr_name][:rp_list] = rp_list
            end
            rp      = rp_cache[template_ccr_name][:rp]
            rp_list = rp_cache[template_ccr_name][:rp_list]

            # Determine the location path for the template
            vcenter_template = VCenterDriver::VirtualMachine.new_from_ref(template_ref, vi_client)
            item = vcenter_template.item
            folders = []
            while !item.instance_of? RbVmomi::VIM::Datacenter
                item = item.parent
                if !item.instance_of? RbVmomi::VIM::Datacenter
                    folders << item.name if item.name != "vm"
                end
                raise "Could not find the template's parent location" if item.nil?
            end
            location = folders.reverse.join("/")
            location = "/" if location.empty?

            # Generate a crypto hash for the template name and take the first 12 chars
            sha256            = Digest::SHA256.new
            full_name         = "#{template_name} - #{template_ccr_name} [#{vcenter_instance_name} - #{dc_name}]_#{location}"
            template_hash     = sha256.hexdigest(full_name)[0..11]
            template_name     = template_name.tr("\u007F", "")
            template_ccr_name = template_ccr_name.tr("\u007F", "")
            import_name       = "#{template_name} - #{template_ccr_name} #{template_hash}"

            # Prepare the Hash that will be used by importers to display
            # the object being imported
            one_tmp = {}
            one_tmp[:name]                  = import_name
            one_tmp[:template_name]         = template_name
            one_tmp[:sunstone_template_name]= "#{template_name} [ Cluster: #{template_ccr_name} - Template location: #{location} ]"
            one_tmp[:template_hash]         = template_hash
            one_tmp[:template_location]     = location
            one_tmp[:vcenter_ccr_ref]       = template_ccr_ref
            one_tmp[:vcenter_ref]           = template_ref
            one_tmp[:vcenter_instance_uuid] = vcenter_uuid
            one_tmp[:cluster_name]          = template_ccr_name
            one_tmp[:rp]                    = rp
            one_tmp[:rp_list]               = rp_list
            one_tmp[:template]              = template
            one_tmp[:import_disks_and_nics] = true # By default we import disks and nics

            # Get the host ID of the OpenNebula host which represents the vCenter Cluster
            one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
                                                           "TEMPLATE/VCENTER_CCR_REF",
                                                           template_ccr_ref,
                                                           vcenter_uuid)
            host_id    = one_host["ID"]
            cluster_id = one_host["CLUSTER_ID"]
            raise "Could not find the host's ID associated to template being imported" if !host_id

            # Get the OpenNebula's template hash
            one_tmp[:one] = template_to_one(template, vcenter_uuid, template_ccr_ref, template_ccr_name, import_name, host_id)
            return one_tmp
        rescue
            return nil
        end
    end

    # TODO check with uuid
    def self.new_from_ref(ref, vi_client)
        self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
    end

end # class Template

class VirtualMachine < Template
    VM_PREFIX_DEFAULT = "one-$i-"

    POLL_ATTRIBUTE    = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
    VM_STATE          = OpenNebula::VirtualMachine::Driver::VM_STATE

    VM_SHUTDOWN_TIMEOUT = 600 #10 minutes til poweroff hard

    attr_accessor :item

    attr_accessor :vm_info

    include Memoize

    def initialize(item=nil, vi_client=nil)
        @item = item
        @vi_client = vi_client
        @locking = true
        @vm_info = nil
    end

    ############################################################################
    ############################################################################

    # Attributes that must be defined when the VM does not exist in vCenter
    attr_accessor :vi_client

    # these have their own getter (if they aren't set, we can set them
    # dynamically)
    attr_writer :one_item
    attr_writer :host
    attr_writer :target_ds_ref

    ############################################################################
    ############################################################################

    # The OpenNebula VM
    # @return OpenNebula::VirtualMachine or XMLElement
    def one_item
        if !@one_item
            vm_id = get_vm_id

            raise "Unable to find vm_id." if vm_id.nil?

            @one_item = VIHelper.one_item(OpenNebula::VirtualMachine, vm_id)
        end

        @one_item
    end

    # The OpenNebula host
    # @return OpenNebula::Host or XMLElement
    def host
        if @host.nil?
            if one_item.nil?
                raise "'one_item' must be previously set to be able to " <<
                      "access the OpenNebula host."
            end

            host_id = one_item["HISTORY_RECORDS/HISTORY[last()]/HID"]
            raise "No valid host_id found." if host_id.nil?

            @host = VIHelper.one_item(OpenNebula::Host, host_id)
        end

        @host
    end

    # Target Datastore VMware reference getter
    # @return String the vCenter ref of the target Datastore
    def target_ds_ref
        if @target_ds_ref.nil?
            if one_item.nil?
                raise "'one_item' must be previously set to be able to " <<
                      "access the target Datastore."
            end

            target_ds_id = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
            raise "No valid target_ds_id found." if target_ds_id.nil?

            target_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore,
                                                         target_ds_id)

            @target_ds_ref = target_ds['TEMPLATE/VCENTER_DS_REF']
        end

        @target_ds_ref
    end

    # Cached cluster
    # @return ClusterComputeResource
    def cluster
        if @cluster.nil?
            ccr_ref = host['TEMPLATE/VCENTER_CCR_REF']
            @cluster = ClusterComputeResource.new_from_ref(ccr_ref, vi_client)
        end

        @cluster
    end

    ############################################################################
    ############################################################################

    # @return Boolean whether the VM exists in vCenter
    def is_new?
        !get_vm_id
    end

    # @return String the ID of the OpenNebula VM that references this
    #         vCenter VM through its DEPLOY_ID, or nil if none does
    def get_vm_id
        vm_ref = self['_ref']
        return nil if !vm_ref

        vc_uuid = get_vcenter_instance_uuid

        one_vm = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualMachinePool,
                                                     "DEPLOY_ID",
                                                     vm_ref,
                                                     vc_uuid)
        return nil if !one_vm

        return one_vm["ID"]
    end

    def get_vcenter_instance_uuid
        @vi_client.vim.serviceContent.about.instanceUuid
    end

    def get_unmanaged_keys
        unmanaged_keys = {}
        @item.config.extraConfig.each do |val|
            if val[:key].include?("opennebula.disk")
                unmanaged_keys[val[:key]] = val[:value]
            end
        end
        return unmanaged_keys
    end

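    # get_unmanaged_keys returns the extraConfig entries written by
    # reference_unmanaged_devices below, e.g. (illustrative values):
    #
    #   { "opennebula.disk.0" => "2000",
    #     "opennebula.disk.1" => "2001" }
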
    ############################################################################
    # Getters
    ############################################################################

    # @return RbVmomi::VIM::ResourcePool
    def get_rp

        req_rp = one_item['VCENTER_RESOURCE_POOL'] ||
                 one_item['USER_TEMPLATE/VCENTER_RESOURCE_POOL']

        # Get ref for req_rp
        rp_list    = cluster.get_resource_pool_list
        req_rp_ref = rp_list.select { |rp| rp[:name] == req_rp }.first[:ref] rescue nil

        if vi_client.rp_confined?
            if req_rp_ref && req_rp_ref != vi_client.rp._ref
                raise "Available resource pool [#{vi_client.rp.name}] in host"\
                      " does not match requested resource pool"\
                      " [#{req_rp}]"
            end

            return vi_client.rp
        else
            if req_rp_ref
                rps = cluster.resource_pools.select{|r| r._ref == req_rp_ref }

                if rps.empty?
                    raise "No matching resource pool found (#{req_rp})."
                else
                    return rps.first
                end
            else
                return cluster['resourcePool']
            end
        end
    end

    # @return RbVmomi::VIM::Datastore or nil
    def get_ds
        ##req_ds = one_item['USER_TEMPLATE/VCENTER_DS_REF']
        current_ds_id  = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
        current_ds     = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, current_ds_id)
        current_ds_ref = current_ds['TEMPLATE/VCENTER_DS_REF']

        if current_ds_ref
            dc = cluster.get_dc

            ds_folder = dc.datastore_folder
            ds = ds_folder.get(current_ds_ref)
            ds_item = ds.item rescue nil

            return ds_item
        else
            return nil
        end
    end

    # StorageResourceManager reference
    def get_sm
        self['_connection.serviceContent.storageResourceManager']
    end

    # @return Customization or nil
    def get_customization
        xpath = "USER_TEMPLATE/VCENTER_CUSTOMIZATION_SPEC"
        customization_spec = one_item[xpath]

        if customization_spec.nil?
            return nil
        end

        begin
            custom_spec = vi_client.vim
                            .serviceContent
                            .customizationSpecManager
                            .GetCustomizationSpec(:name => customization_spec)

            if custom_spec && (spec = custom_spec.spec)
                return spec
            else
                raise "Error getting customization spec"
            end
        rescue
            raise "Customization spec '#{customization_spec}' not found"
        end
    end

    # @return VCenterDriver::Datastore datastore where the disk will live under
    def get_effective_ds(disk)
        if disk["PERSISTENT"] == "YES"
            ds_ref = disk["VCENTER_DS_REF"]
        else
            ds_ref = target_ds_ref

            if ds_ref.nil?
                raise "target_ds_ref must be defined on this object."
            end
        end

        VCenterDriver::Storage.new_from_ref(ds_ref, vi_client)
    end

    # @return String vcenter name
    def get_vcenter_name
        vm_prefix = host['TEMPLATE/VM_PREFIX']
        vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty?
        vm_prefix.gsub!("$i", one_item['ID'])

        vm_prefix + one_item['NAME']
    end

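    # Example (illustrative): with the default prefix "one-$i-" and an
    # OpenNebula VM with ID 42 and NAME "web", get_vcenter_name returns
    # "one-42-web".
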
    ############################################################################
    # Create and reconfigure VM related methods
    ############################################################################

    # This function creates a new VM from the @one_item XML and returns the
    # VMware ref
    # @param one_item OpenNebula::VirtualMachine
    # @param vi_client VCenterDriver::VIClient
    # @return String vmware ref
    def clone_vm(one_item, vi_client)
        @one_item = one_item
        @vi_client = vi_client

        vcenter_name = get_vcenter_name

        vc_template_ref = one_item['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
        vc_template = RbVmomi::VIM::VirtualMachine(vi_client.vim, vc_template_ref)

        ds = get_ds

        # Default disk move type (Full Clone)
        disk_move_type = :moveAllDiskBackingsAndDisallowSharing

        if ds.instance_of? RbVmomi::VIM::Datastore
            use_linked_clones = one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES']
            if use_linked_clones && use_linked_clones.downcase == "yes"
                # Check if all disks in the template have delta disks
                disks = vc_template.config
                                .hardware.device.grep(RbVmomi::VIM::VirtualDisk)

                disks_no_delta = disks.select { |d| d.backing.parent.nil? }

                # Can use linked clones if all disks have delta disks
                if disks_no_delta.empty?
                    disk_move_type = :moveChildMostDiskBacking
                end
            end
        end

        spec_hash = spec_hash_clone(disk_move_type)

        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(spec_hash)

        # Specify vm folder in vSphere's VM and Templates view F#4823
        vcenter_vm_folder = one_item["USER_TEMPLATE/VCENTER_VM_FOLDER"]
        vcenter_vm_folder_object = nil
        dc = cluster.get_dc
        if !!vcenter_vm_folder && !vcenter_vm_folder.empty?
            vcenter_vm_folder_object = dc.item.find_folder(vcenter_vm_folder)
        end
        vcenter_vm_folder_object = vc_template.parent if vcenter_vm_folder_object.nil?

        if ds.instance_of? RbVmomi::VIM::StoragePod
            # VM is cloned using Storage Resource Manager for StoragePods
            begin
                vm = storagepod_clonevm_task(vc_template, vcenter_name,
                                             clone_spec, ds, vcenter_vm_folder_object, dc)
            rescue Exception => e
                raise "Cannot clone VM Template to StoragePod: #{e.message}"
            end
        else
            vm = nil
            begin
                vm = vc_template.CloneVM_Task(
                    :folder => vcenter_vm_folder_object,
                    :name   => vcenter_name,
                    :spec   => clone_spec).wait_for_completion
            rescue Exception => e
                if !e.message.start_with?('DuplicateName')
                    raise "Cannot clone VM Template: #{e.message}\n#{e.backtrace}"
                end

                vm_folder = dc.vm_folder
                vm_folder.fetch!
                vm = vm_folder.items
                        .select{|k,v| v.item.name == vcenter_name}
                        .values.first.item rescue nil

                if vm
                    # Detach all persistent disks to avoid accidental destruction
                    detach_persistent_disks(vm)

                    vm.Destroy_Task.wait_for_completion
                    vm = vc_template.CloneVM_Task(
                        :folder => vcenter_vm_folder_object,
                        :name   => vcenter_name,
                        :spec   => clone_spec).wait_for_completion
                else
                    raise "Cannot clone VM Template"
                end
            end
        end
        # @item is populated
        @item = vm

        return self['_ref']
    end

    def storagepod_clonevm_task(vc_template, vcenter_name, clone_spec, storpod, vcenter_vm_folder_object, dc)

        storage_manager = vc_template
                            ._connection.serviceContent.storageResourceManager

        storage_spec = RbVmomi::VIM.StoragePlacementSpec(
            type: 'clone',
            cloneName: vcenter_name,
            folder: vcenter_vm_folder_object,
            podSelectionSpec: RbVmomi::VIM.StorageDrsPodSelectionSpec(storagePod: storpod),
            vm: vc_template,
            cloneSpec: clone_spec
        )

        # Query a storage placement recommendation
        result = storage_manager
                    .RecommendDatastores(storageSpec: storage_spec) rescue nil

        raise "Could not get placement specification for StoragePod" if result.nil?

        if !result.respond_to?(:recommendations) || result.recommendations.size == 0
            raise "Could not get placement specification for StoragePod"
        end

        # Get recommendation key to be applied
        key = result.recommendations.first.key || ''
        raise "Missing Datastore recommendation for StoragePod" if key.empty?

        begin
            apply_sr = storage_manager
                            .ApplyStorageDrsRecommendation_Task(key: [key])
                            .wait_for_completion
            return apply_sr.vm
        rescue Exception => e
            if !e.message.start_with?('DuplicateName')
                raise "Cannot clone VM Template: #{e.message}\n#{e.backtrace}"
            end

            # The VM already exists, try to find the vm
            vm_folder = dc.vm_folder
            vm_folder.fetch!
            vm = vm_folder.items
                    .select{|k,v| v.item.name == vcenter_name}
                    .values.first.item rescue nil

            if vm

                begin
                    # Detach all persistent disks to avoid accidental destruction
                    detach_persistent_disks(vm)

                    # Destroy the VM with any disks still attached to it
                    vm.Destroy_Task.wait_for_completion

                    # Query a storage placement recommendation
                    result = storage_manager.RecommendDatastores(storageSpec: storage_spec) rescue nil

                    raise "Could not get placement specification for StoragePod" if result.nil?

                    if !result.respond_to?(:recommendations) || result.recommendations.size == 0
                        raise "Could not get placement specification for StoragePod"
                    end

                    # Get recommendation key to be applied
                    key = result.recommendations.first.key || ''
                    raise "Missing Datastore recommendation for StoragePod" if key.empty?

                    apply_sr = storage_manager
                            .ApplyStorageDrsRecommendation_Task(key: [key])
                            .wait_for_completion
                    return apply_sr.vm
                rescue Exception => e
                    raise "Failure applying recommendation while cloning VM: #{e.message}"
                end
            end
        end
    end

    # @return clone parameters spec hash
    def spec_hash_clone(disk_move_type)
        # Relocate spec
        relocate_spec_params = {}

        relocate_spec_params[:pool] = get_rp
        relocate_spec_params[:diskMoveType] = disk_move_type

        ds = get_ds

        relocate_spec_params[:datastore] = ds if ds.instance_of? RbVmomi::VIM::Datastore

        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
                                                         relocate_spec_params)

        # Running flag - prevents spurious poweroff states in the VM
        running_flag = [{ :key => "opennebula.vm.running", :value => "no"}]

        running_flag_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
            { :extraConfig => running_flag }
        )

        clone_parameters = {
            :location => relocate_spec,
            :powerOn  => false,
            :template => false,
            :config   => running_flag_spec
        }

        cs = get_customization
        clone_parameters[:customization] = cs if cs

        clone_parameters
    end

    def reference_unmanaged_devices(template_ref)

        extraconfig   = []
        device_change = []

        # Get unmanaged disks in OpenNebula's VM template
        xpath = "TEMPLATE/DISK[OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\"]"
        unmanaged_disks = one_item.retrieve_xmlelements(xpath)

        if !unmanaged_disks.empty?

            # Get vcenter VM disks to know real path of cloned disk
            vcenter_disks = get_vcenter_disks

            # Create an array with the paths of the disks in vcenter template
            template = VCenterDriver::Template.new_from_ref(template_ref, vi_client)
            template_disks = template.get_vcenter_disks
            template_disks_vector = []
            template_disks.each do |d|
                template_disks_vector << d[:path_wo_ds]
            end

            # Try to find index of disks in template disks
            unmanaged_disks.each do |unmanaged_disk|
                index = template_disks_vector.index(unmanaged_disk["SOURCE"])
                if index
                    reference = {}
                    reference[:key]   = "opennebula.disk.#{unmanaged_disk["DISK_ID"]}"
                    reference[:value] = "#{vcenter_disks[index][:key]}"
                    extraconfig << reference
                end
            end
        end

        # Add info for existing nics in template in vm xml
        xpath = "TEMPLATE/NIC[OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\"]"
        unmanaged_nics = one_item.retrieve_xmlelements(xpath)

        if !unmanaged_nics.empty?
            index = 0
            self["config.hardware.device"].each do |device|
                if is_nic?(device)
                    # Set the MAC address defined in the OpenNebula NIC
                    device.macAddress = unmanaged_nics[index]["MAC"]
                    device_change << { :device => device, :operation => :edit }
                    index += 1
                end
            end
        end

        # Save in extraconfig the key for unmanaged disks
        if !extraconfig.empty? || !device_change.empty?
            spec = {}
            spec[:extraConfig]  = extraconfig if !extraconfig.empty?
            spec[:deviceChange] = device_change if !device_change.empty?
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        end
    end

    def resize_unmanaged_disks
1329
        resize_hash = {}
1330
        disks = []
1331

    
1332
        unmanaged_keys = get_unmanaged_keys
1333
        vc_disks = get_vcenter_disks
1334

    
1335
        # Look for unmanaged disks with original size changed
1336
        xpath = "TEMPLATE/DISK[(OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\") and boolean(ORIGINAL_SIZE) and ORIGINAL_SIZE != SIZE]"
1337
        unmanaged_resized_disks = one_item.retrieve_xmlelements(xpath)
1338

    
1339
        return if unmanaged_resized_disks.empty?
1340

    
1341
        # Cannot resize linked cloned disks
1342
        if one_item["USER_TEMPLATE/VCENTER_LINKED_CLONES"] &&
1343
           one_item["USER_TEMPLATE/VCENTER_LINKED_CLONES"] == "YES"
1344
            raise "Linked cloned disks cannot be resized."
1345
        end
1346

    
1347
        unmanaged_resized_disks.each do |disk|
1348
            vc_disks.each do |vcenter_disk|
1349
                if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
1350
                    device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i
1351

    
1352
                    if device_key == vcenter_disk[:key].to_i
1353

    
1354
                        break if disk["SIZE"].to_i <= disk["ORIGINAL_SIZE"].to_i
1355

    
1356
                        # Edit capacity setting new size in KB
1357
                        d = vcenter_disk[:device]
1358
                        d.capacityInKB = disk["SIZE"].to_i * 1024
1359
                        disks <<   { :device => d, :operation => :edit }
1360
                        break
1361
                    end
1362
                end
1363
            end
1364
        end
1365

    
1366
        if !disks.empty?
1367
            resize_hash[:deviceChange] = disks
1368
            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
1369
        end
1370
    end
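
    # Sizing note: OpenNebula's DISK/SIZE is expressed in MB, so a disk
    # resized to SIZE=2048 yields capacityInKB = 2048 * 1024 = 2097152.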

    def create_storagedrs_disks(device_change_spod, device_change_spod_ids)

        sm = get_sm
        disk_locator = []
        extra_config = []

        device_change_spod.each do |device_spec|
            disk_locator << RbVmomi::VIM.PodDiskLocator(diskId: device_spec[:device].key)
        end

        spec = {}
        spec[:deviceChange] = device_change_spod

        # Disk locator is required for AddDisk
        vmpod_hash = {}
        vmpod_hash[:storagePod] = get_ds
        vmpod_hash[:disk] = disk_locator
        vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)

        # The storage pod selection requires an initial VM config
        spod_hash = {}
        spod_hash[:initialVmConfig] = [ vmpod_config ]
        spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
        storage_spec = RbVmomi::VIM.StoragePlacementSpec(
            type: :reconfigure,
            podSelectionSpec: spod_select,
            vm: self['_ref'],
            configSpec: spec
        )

        # Query a storage placement recommendation
        result = sm.RecommendDatastores(storageSpec: storage_spec) rescue nil

        raise "Could not get placement specification for StoragePod" if result.nil?

        if !result.respond_to?(:recommendations) || result.recommendations.size == 0
            raise "Could not get placement specification for StoragePod"
        end

        # Get recommendation key to be applied
        key = result.recommendations.first.key ||= ''
        raise "Missing Datastore recommendation for StoragePod" if key.empty?

        # Apply recommendation
        sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion

        # Set references in opennebula.disk elements
        device_change_spod.each do |device_spec|
            unit_number    = device_spec[:device].unitNumber
            controller_key = device_spec[:device].controllerKey
            key            = get_vcenter_disk_key(unit_number, controller_key)
            disk_id        = device_change_spod_ids["#{controller_key}-#{unit_number}"]
            reference      = {}
            reference[:key]   = "opennebula.disk.#{disk_id}"
            reference[:value] = key.to_s
            extra_config << reference
        end

        extra_config
    end
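
    # Illustrative extra_config returned above (values are examples):
    #   [{ :key => "opennebula.disk.2", :value => "2001" }]
    # The caller merges these references into the VM's extraConfig, so disks
    # placed by Storage DRS can be tracked like any other unmanaged disk.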

    def reconfigure
        extraconfig   = []
        device_change = []

        # Unmanaged keys
        unmanaged_keys = get_unmanaged_keys

        # Get disk devices in vm
        vc_disks = get_vcenter_disks

        # Get an array with disk paths in OpenNebula's vm template
        disks_in_onevm_vector = disks_in_onevm(unmanaged_keys, vc_disks)

        # As the original template may have been modified in OpenNebula
        # but not in vcenter, we must detach disks that are in vcenter
        # but not in OpenNebula's vm template
        if is_new?
            device_change, extra_config = device_detach_disks(disks_in_onevm_vector, unmanaged_keys, vc_disks)
            if !device_change.empty?
                spec_hash = {}
                spec_hash[:deviceChange] = device_change if !device_change.empty?
                spec_hash[:extraConfig] = extra_config  if !extra_config.empty?

                # Reconfigure for disks detached from original template
                spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
                @item.ReconfigVM_Task(:spec => spec).wait_for_completion

                # Get disk devices in vm again after reconfigure
                vc_disks = get_vcenter_disks
            end
        end

        # Now reconfigure disks, nics and extraconfig for the VM
        device_change = []

        # Get token and context
        extraconfig += extraconfig_context

        # VNC configuration (for config_array hash)
        extraconfig += extraconfig_vnc

        # Set CPU, memory and extraconfig
        num_cpus = one_item["TEMPLATE/VCPU"] || 1

        spec_hash = {
            :numCPUs      => num_cpus.to_i,
            :memoryMB     => one_item["TEMPLATE/MEMORY"],
            :extraConfig  => extraconfig
        }

        # device_change hash (nics)
        device_change += device_change_nics

        # Now attach disks that are in OpenNebula's template but not in
        # vcenter, e.g. those that have been attached in poweroff
        device_change_ds, device_change_spod, device_change_spod_ids = device_attach_disks(disks_in_onevm_vector, vc_disks)
        device_change += device_change_ds

        # Create volatile disks in StorageDRS if any
        if !device_change_spod.empty?
            spec_hash[:extraConfig] = create_storagedrs_disks(device_change_spod, device_change_spod_ids)
        end

        # Common reconfigure task
        spec_hash[:deviceChange] = device_change
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    end
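
    # Illustrative final spec_hash assembled by reconfigure (values are
    # examples):
    #   { :numCPUs      => 2,
    #     :memoryMB     => "2048",
    #     :extraConfig  => [...context and VNC key/value pairs...],
    #     :deviceChange => [...NIC and DISK add/remove specs...] }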

    def extraconfig_context
        context_text = "# Context variables generated by OpenNebula\n"
        one_item.each('TEMPLATE/CONTEXT/*') do |context_element|
            # next if !context_element.text
            context_text += context_element.name + "='" +
                            context_element.text.gsub("'", "\\'") + "'\n"
        end

        # OneGate token
        token = File.read(File.join(VAR_LOCATION,
                        'vms',
                        one_item['ID'],
                        'token.txt')).chomp rescue nil

        context_text += "ONEGATE_TOKEN='#{token}'\n" if token

        # The context is passed to the guest Base64-encoded in guestinfo
        [
            { :key => "guestinfo.opennebula.context",
              :value => Base64.encode64(context_text) }
        ]
    end
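
    # Illustrative context_text before encoding (values are examples):
    #   # Context variables generated by OpenNebula
    #   HOSTNAME='vm-one-42'
    #   ONEGATE_TOKEN='aGVsbG8'
    # The guest side is expected to read it back from
    # guestinfo.opennebula.context and Base64-decode it.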

    def extraconfig_vnc
        if one_item["TEMPLATE/GRAPHICS"]
            vnc_port   = one_item["TEMPLATE/GRAPHICS/PORT"]
            vnc_listen = one_item["TEMPLATE/GRAPHICS/LISTEN"] || "0.0.0.0"
            vnc_keymap = one_item["TEMPLATE/GRAPHICS/KEYMAP"]

            conf = [ {:key => "remotedisplay.vnc.enabled",:value => "TRUE"},
                     {:key => "remotedisplay.vnc.port",   :value => vnc_port},
                     {:key => "remotedisplay.vnc.ip",     :value => vnc_listen}]

            conf += [{:key => "remotedisplay.vnc.keymap",
                      :value => vnc_keymap}] if vnc_keymap

            conf
        else
            []
        end
    end
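
    # Illustrative result when GRAPHICS is defined (values are examples):
    #   [{ :key => "remotedisplay.vnc.enabled", :value => "TRUE"    },
    #    { :key => "remotedisplay.vnc.port",    :value => "5905"    },
    #    { :key => "remotedisplay.vnc.ip",      :value => "0.0.0.0" }]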

    def device_change_nics
        # Final list of changes to be applied in vCenter
        device_change = []

        # Hash of interfaces from the OpenNebula xml
        nics_in_template = {}
        xpath = "TEMPLATE/NIC"
        one_item.each(xpath) { |nic|
            nics_in_template[nic["MAC"]] = nic
        }

        # Check nics in VM
        self["config.hardware.device"].each do |dv|
            if is_nic?(dv)
                if nics_in_template.key?(dv.macAddress)
                    # Remove nic that is already in the XML to avoid duplicate
                    nics_in_template.delete(dv.macAddress)
                else
                    # B4897 - It was detached in poweroff, remove it from VM
                    device_change << {
                        :operation => :remove,
                        :device    => dv
                    }
                end
            end
        end

        # Attach new nics (nics_in_template now contains only the interfaces
        # not present in the VM in vCenter)
        nics_in_template.each do |key, nic|
            device_change << calculate_add_nic_spec(nic)
        end

        return device_change
    end

    # Regenerate context when devices are hot plugged (reconfigure)
    def regenerate_context
        spec_hash = { :extraConfig => extraconfig_context }
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

        begin
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        rescue Exception => e
            raise "Cannot regenerate context for VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Returns an action (deviceChange entry) for adding a NIC
    def calculate_add_nic_spec(nic)

        mac       = nic["MAC"]
        pg_name   = nic["BRIDGE"]
        model     = nic["VCENTER_NET_MODEL"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/MODEL")
        vnet_ref  = nic["VCENTER_NET_REF"]
        backing   = nil

        limit_in  = nic["INBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_PEAK_BW")
        limit_out = nic["OUTBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW")
        limit     = nil

        if limit_in && limit_out
            limit = ([limit_in.to_i, limit_out.to_i].min / 1024) * 8
        end

        rsrv_in  = nic["INBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_AVG_BW")
        rsrv_out = nic["OUTBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_AVG_BW")
        rsrv     = nil

        if rsrv_in || rsrv_out
            rsrv = ([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
        end

        network = self["runtime.host"].network.select do |n|
            n._ref == vnet_ref || n.name == pg_name
        end

        network = network.first

        card_num = 1 # start at one, we want the next available id

        @item["config.hardware.device"].each do |dv|
            card_num += 1 if is_nic?(dv)
        end

        nic_card = case model
                   when "virtuale1000", "e1000"
                       RbVmomi::VIM::VirtualE1000
                   when "virtuale1000e", "e1000e"
                       RbVmomi::VIM::VirtualE1000e
                   when "virtualpcnet32", "pcnet32"
                       RbVmomi::VIM::VirtualPCNet32
                   when "virtualsriovethernetcard", "sriovethernetcard"
                       RbVmomi::VIM::VirtualSriovEthernetCard
                   when "virtualvmxnetm", "vmxnetm"
                       RbVmomi::VIM::VirtualVmxnetm
                   when "virtualvmxnet2", "vmnet2"
                       RbVmomi::VIM::VirtualVmxnet2
                   when "virtualvmxnet3", "vmxnet3"
                       RbVmomi::VIM::VirtualVmxnet3
                   else # If none matches, use VirtualE1000
                       RbVmomi::VIM::VirtualE1000
                   end

        if network.class == RbVmomi::VIM::Network
            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
                        :deviceName => pg_name,
                        :network    => network)
        else
            port    = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
                        :switchUuid =>
                                network.config.distributedVirtualSwitch.uuid,
                        :portgroupKey => network.key)
            backing =
              RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
                 :port => port)
        end

        card_spec = {
            :key => 0,
            :deviceInfo => {
                :label => "net" + card_num.to_s,
                :summary => pg_name
            },
            :backing     => backing,
            :addressType => mac ? 'manual' : 'generated',
            :macAddress  => mac
        }

        # Apply a bandwidth resource allocation when a limit was computed.
        # limit is checked for nil before comparing, so this cannot raise
        # NoMethodError; a nil reservation is simply left unset.
        if limit && limit > 0
            ra_spec = {}
            rsrv = limit if rsrv && rsrv > limit
            ra_spec[:limit] = limit
            ra_spec[:reservation] = rsrv if rsrv
            ra_spec[:share] = RbVmomi::VIM.SharesInfo({
                    :level => RbVmomi::VIM.SharesLevel("normal"),
                    :shares => 0
                })
            card_spec[:resourceAllocation] =
               RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
        end

        {
            :operation => :add,
            :device    => nic_card.new(card_spec)
        }
    end
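
    # Bandwidth math used above: OpenNebula expresses *_PEAK_BW/*_AVG_BW in
    # KB/s while the vCenter allocation takes Mbit/s, hence (kb / 1024) * 8.
    # e.g. INBOUND_PEAK_BW = OUTBOUND_PEAK_BW = 128000 gives
    # limit = (128000 / 1024) * 8 = 1000 Mbit/s.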

    # Returns an action (deviceChange entry) for adding a NIC; unlike
    # calculate_add_nic_spec, the MAC address is always auto-generated
    def calculate_add_nic_spec_autogenerate_mac(nic)

        pg_name   = nic["BRIDGE"]
        model     = nic["VCENTER_NET_MODEL"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/MODEL")
        vnet_ref  = nic["VCENTER_NET_REF"]
        backing   = nil

        limit_in  = nic["INBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_PEAK_BW")
        limit_out = nic["OUTBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW")
        limit     = nil

        if limit_in && limit_out
            limit = ([limit_in.to_i, limit_out.to_i].min / 1024) * 8
        end

        rsrv_in  = nic["INBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_AVG_BW")
        rsrv_out = nic["OUTBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_AVG_BW")
        rsrv     = nil

        if rsrv_in || rsrv_out
            rsrv = ([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
        end

        network = self["runtime.host"].network.select do |n|
            n._ref == vnet_ref || n.name == pg_name
        end

        network = network.first

        card_num = 1 # start at one, we want the next available id

        @item["config.hardware.device"].each do |dv|
            card_num += 1 if is_nic?(dv)
        end

        nic_card = case model
                   when "virtuale1000", "e1000"
                       RbVmomi::VIM::VirtualE1000
                   when "virtuale1000e", "e1000e"
                       RbVmomi::VIM::VirtualE1000e
                   when "virtualpcnet32", "pcnet32"
                       RbVmomi::VIM::VirtualPCNet32
                   when "virtualsriovethernetcard", "sriovethernetcard"
                       RbVmomi::VIM::VirtualSriovEthernetCard
                   when "virtualvmxnetm", "vmxnetm"
                       RbVmomi::VIM::VirtualVmxnetm
                   when "virtualvmxnet2", "vmnet2"
                       RbVmomi::VIM::VirtualVmxnet2
                   when "virtualvmxnet3", "vmxnet3"
                       RbVmomi::VIM::VirtualVmxnet3
                   else # If none matches, use VirtualE1000
                       RbVmomi::VIM::VirtualE1000
                   end

        if network.class == RbVmomi::VIM::Network
            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
                        :deviceName => pg_name,
                        :network    => network)
        else
            port    = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
                        :switchUuid =>
                                network.config.distributedVirtualSwitch.uuid,
                        :portgroupKey => network.key)
            backing =
              RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
                 :port => port)
        end

        card_spec = {
            :key => 0,
            :deviceInfo => {
                :label => "net" + card_num.to_s,
                :summary => pg_name
            },
            :backing     => backing,
            :addressType => 'generated'
        }

        # Apply a bandwidth resource allocation when a limit was computed.
        # Same nil guards as in calculate_add_nic_spec.
        if limit && limit > 0
            ra_spec = {}
            rsrv = limit if rsrv && rsrv > limit
            ra_spec[:limit] = limit
            ra_spec[:reservation] = rsrv if rsrv
            ra_spec[:share] = RbVmomi::VIM.SharesInfo({
                    :level => RbVmomi::VIM.SharesLevel("normal"),
                    :shares => 0
                })
            card_spec[:resourceAllocation] =
               RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
        end

        {
            :operation => :add,
            :device    => nic_card.new(card_spec)
        }
    end

    # Add NIC to VM
    def attach_nic
        spec_hash = {}
        nic = nil

        # Extract nic from driver action
        nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first

        begin
            # A new NIC requires a vcenter spec
            attach_nic_array = []
            attach_nic_array << calculate_add_nic_spec(nic)
            spec_hash[:deviceChange] = attach_nic_array if !attach_nic_array.empty?

            # Reconfigure VM
            spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        rescue Exception => e
            raise "Cannot attach NIC to VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach NIC from VM
    def detach_nic
        spec_hash = {}
        nic = nil

        # Extract nic from driver action
        nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first
        mac = nic["MAC"]

        # Get VM nic element if it has a device with that mac
        nic_device = @item["config.hardware.device"].find do |device|
            is_nic?(device) && (device.macAddress == mac)
        end rescue nil

        return if nic_device.nil? # Silently ignore if nic is not found

        # Remove NIC from VM in the ReconfigVM_Task
        spec_hash[:deviceChange] = [
                :operation => :remove,
                :device => nic_device ]

        begin
            @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach NIC from VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach all NICs; useful when removing port groups and switches
    # so they are no longer in use
    def detach_all_nics
        spec_hash = {}
        device_change = []

        @item["config.hardware.device"].each do |device|
            if is_nic?(device)
                device_change << {:operation => :remove, :device => device}
            end
        end

        # Remove NICs from VM in the ReconfigVM_Task
        spec_hash[:deviceChange] = device_change

        begin
            @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach all NICs from VM: #{e.message}\n#{e.backtrace}"
        end
    end

    def get_device_filename_and_ds_from_key(key, vc_disks)
        device = vc_disks.select{ |d| d[:key].to_i == key.to_i}.first rescue nil
        return device
    end

    def disks_in_onevm(unmanaged_keys, vc_disks)
        onevm_disks_vector = []

        disks = one_item.retrieve_xmlelements("TEMPLATE/DISK")
        disks.each do |disk|
            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i
                disk_hash = get_device_filename_and_ds_from_key(device_key, vc_disks)
                onevm_disks_vector << disk_hash[:path_wo_ds] if disk_hash
                next
            end

            img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)
            onevm_disks_vector << "#{img_name}"
        end

        return onevm_disks_vector
    end

    def device_attach_disks(onevm_disks_vector, vc_disks)

        disks = one_item.retrieve_xmlelements("TEMPLATE/DISK")

        vc_disks.each do |d|
            index = onevm_disks_vector.index(d[:path_wo_ds])
            if index
                disks.delete_at(index)
                onevm_disks_vector.delete_at(index)
            end
        end

        return [],[],{} if disks.empty?

        attach_disk_array = []
        attach_spod_array = []
        attach_spod_disk_info = {}

        position = 0
        disks.each do |disk|
            storpod = disk["VCENTER_DS_REF"].start_with?('group-')
            if storpod
                spec = calculate_add_disk_spec(disk, position)
                attach_spod_array << spec
                unit_ctrl = "#{spec[:device].controllerKey}-#{spec[:device].unitNumber}"
                attach_spod_disk_info[unit_ctrl] = disk["DISK_ID"]
            else
                attach_disk_array << calculate_add_disk_spec(disk, position)
            end

            position += 1
        end

        return attach_disk_array, attach_spod_array, attach_spod_disk_info
    end
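
    # The three values returned above feed the reconfigure/attach paths:
    #   - attach_disk_array: specs for disks on regular datastores
    #   - attach_spod_array: specs for disks needing Storage DRS placement
    #   - attach_spod_disk_info: "controllerKey-unitNumber" => DISK_ID map,
    #     later used to build the opennebula.disk.* references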

    def device_detach_disks(onevm_disks_vector, unmanaged_keys, vc_disks)
        detach_disk_array = []
        extra_config      = []
        ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
        if ipool.respond_to?(:message)
            raise "Could not get OpenNebula ImagePool: #{ipool.message}"
        end

        vc_disks.each do |d|
            if !onevm_disks_vector.index(d[:path_wo_ds])

                # If the disk to be detached is not persistent, detach and
                # destroy it
                persistent = VCenterDriver::VIHelper.find_persistent_image_by_source(d[:path_wo_ds], ipool)
                if !persistent
                    detach_disk_array << {
                        :fileOperation => :destroy,
                        :operation => :remove,
                        :device    => d[:device]
                    }
                end

                # Remove the opennebula.disk reference if it exists
                unmanaged_keys.each do |key, value|
                    if value.to_i == d[:key].to_i
                        reference = {}
                        reference[:key]   = key
                        reference[:value] = ""
                        extra_config << reference
                        break
                    end
                end
            end
        end

        return detach_disk_array, extra_config
    end

    # Attach DISK to VM (hotplug)
    def attach_disk
        # TODO position? and disk size for volatile?

        spec_hash = {}
        disk = nil
        device_change = []

        # Extract unmanaged_keys
        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Extract disk from driver action
        disk = one_item.retrieve_xmlelements("TEMPLATE/DISK[ATTACH='YES']").first

        # Check if we're dealing with a StoragePod SYSTEM ds
        storpod = disk["VCENTER_DS_REF"].start_with?('group-')

        # Check if disk being attached is already connected to the VM
        raise "DISK is already connected to VM" if disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        # Generate vCenter spec and reconfigure VM
        device_change << calculate_add_disk_spec(disk)
        raise "Could not generate DISK spec" if device_change.empty?

        spec_hash[:deviceChange] = device_change
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

        begin
            if storpod
                # Ask StorageDRS for a recommendation to reconfigure the VM
                # (AddDisk)
                sm = get_sm

                # Disk id is -1 as we don't know what disk id is going to be set
                disk_locator = [ RbVmomi::VIM.PodDiskLocator(diskId: -1) ]

                # Disk locator is required for AddDisk
                vmpod_hash = {}
                vmpod_hash[:storagePod] = get_ds
                vmpod_hash[:disk] = disk_locator
                vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)

                # The storage pod selection requires an initial VM config
                spod_hash = {}
                spod_hash[:initialVmConfig] = [ vmpod_config ]
                spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
                storage_spec = RbVmomi::VIM.StoragePlacementSpec(
                    type: :reconfigure,
                    podSelectionSpec: spod_select,
                    vm: self['_ref'],
                    configSpec: spec
                )

                # Query a storage placement recommendation
                result = sm.RecommendDatastores(storageSpec: storage_spec) rescue nil

                raise "Could not get placement specification for StoragePod" if result.nil?

                if !result.respond_to?(:recommendations) || result.recommendations.size == 0
                    raise "Could not get placement specification for StoragePod"
                end

                # Get recommendation key to be applied
                key = result.recommendations.first.key ||= ''
                raise "Missing Datastore recommendation for StoragePod" if key.empty?

                # Apply recommendation
                sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion

                # Add the key for the volatile disk to the unmanaged
                # opennebula.disk.id variables
                unit_number    = spec_hash[:deviceChange][0][:device].unitNumber
                controller_key = spec_hash[:deviceChange][0][:device].controllerKey
                key = get_vcenter_disk_key(unit_number, controller_key)
                spec_hash = {}
                reference = {}
                reference[:key]   = "opennebula.disk.#{disk["DISK_ID"]}"
                reference[:value] = key.to_s
                spec_hash[:extraConfig] = [ reference ]
                @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
            else
                @item.ReconfigVM_Task(:spec => spec).wait_for_completion
            end
        rescue Exception => e
            raise "Cannot attach DISK to VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach persistent disks to avoid accidental destruction
    def detach_persistent_disks(vm)
        spec_hash = {}
        spec_hash[:deviceChange] = []
        ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
        if ipool.respond_to?(:message)
            raise "Could not get OpenNebula ImagePool: #{ipool.message}"
        end

        vm.config.hardware.device.each do |disk|
            if is_disk_or_cdrom?(disk)
                # Check whether the disk is persistent
                source = disk.backing.fileName.sub(/^\[(.*?)\] /, "")
                persistent = VCenterDriver::VIHelper.find_persistent_image_by_source(source, ipool)
                if persistent
                    spec_hash[:deviceChange] << {
                        :operation => :remove,
                        :device => disk
                    }
                end
            end
        end

        return nil if spec_hash[:deviceChange].empty?

        begin
            vm.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach all DISKs from VM: #{e.message}\n#{e.backtrace}"
        end
    end
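
    # The fileName.sub above strips the datastore prefix from a vCenter
    # file name so it can be matched against Image SOURCE attributes, e.g.
    # "[datastore1] one/one-5-disk-0.vmdk" => "one/one-5-disk-0.vmdk"
    # (example path).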

    # Detach DISK from VM
    def detach_disk(disk)
        spec_hash = {}
        img_path = ""
        ds_ref = nil

        # Extract unmanaged disk keys
        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Get vcenter device to be detached and remove if found
        device = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        if device
            img_path << device[:path_wo_ds]

            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                reference = {}
                reference[:key]   = "opennebula.disk.#{disk["DISK_ID"]}"
                reference[:value] = ""
                spec_hash[:extraConfig] = [ reference ]
            end

            ds_ref = device[:datastore]._ref

            # Generate vCenter spec and reconfigure VM
            spec_hash[:deviceChange] = [{
                :operation => :remove,
                :device => device[:device]
            }]

            begin
                @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
            rescue Exception => e
                raise "Cannot detach DISK from VM: #{e.message}\n#{e.backtrace}"
            end
        end

        return ds_ref, img_path
    end

    # Get vcenter device representing DISK object (hotplug)
    def disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        img_name = ""
        device_found = nil
        disk_id = disk["DISK_ID"]

        vc_disks.each do |d|
            # Check if we are dealing with an unmanaged disk that was present
            # in the template when it was cloned
            if unmanaged_keys.key?("opennebula.disk.#{disk_id}") && d[:key] == unmanaged_keys["opennebula.disk.#{disk_id}"].to_i
                device_found = d
                break
            end

            # Otherwise, try to match the device by the expected image name
            img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)
            if d[:path_wo_ds] == "#{img_name}"
                device_found = d
                break
            end
        end

        return device_found
    end

    def calculate_add_disk_spec(disk, position=0)
        img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)
        type     = disk["TYPE"]
        size_kb  = disk["SIZE"].to_i * 1024

        if type == "CDROM"
            # CDROM drive will be found in the IMAGE DS
            ds_ref   = disk["VCENTER_DS_REF"]
            ds       = VCenterDriver::Storage.new_from_ref(ds_ref, @vi_client)
            ds_name  = ds['name']

            # A CDROM can only be added when the VM is in poweroff state
            vmdk_backing = RbVmomi::VIM::VirtualCdromIsoBackingInfo(
                :datastore => ds.item,
                :fileName  => "[#{ds_name}] #{img_name}"
            )

            if @item["summary.runtime.powerState"] != "poweredOff"
                raise "The CDROM image can only be added as an IDE device "\
                      "when the VM is in the powered off state"
            end

            controller, unit_number = find_free_ide_controller(position)

            device = RbVmomi::VIM::VirtualCdrom(
                :backing       => vmdk_backing,
                :key           => -1,
                :controllerKey => controller.key,
                :unitNumber    => unit_number,

                :connectable => RbVmomi::VIM::VirtualDeviceConnectInfo(
                    :startConnected    => true,
                    :connected         => true,
                    :allowGuestControl => true
                )
            )

            return {
                :operation => :add,
                :device => device
            }

        else
            # TYPE is regular disk (not CDROM)

            controller, unit_number = find_free_controller(position)

            storpod = disk["VCENTER_DS_REF"].start_with?('group-')
            if storpod
                vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                  :diskMode  => 'persistent',
                  :fileName  => ""
                )
            else
                ds           = get_effective_ds(disk)
                ds_name      = ds['name']
                vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                  :datastore => ds.item,
                  :diskMode  => 'persistent',
                  :fileName  => "[#{ds_name}] #{img_name}"
                )
            end

            device = RbVmomi::VIM::VirtualDisk(
              :backing       => vmdk_backing,
              :capacityInKB  => size_kb,
              :controllerKey => controller.key,
              :key           => (-1 - position),
              :unitNumber    => unit_number
            )

            config = {
               :operation => :add,
               :device    => device
            }

            # For StorageDRS vCenter must create the file
            config[:fileOperation] = :create if storpod

            return config
        end
    end
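
    # Illustrative deviceChange entry returned above for a regular 1 GB disk
    # (values are examples):
    #   { :operation => :add,
    #     :device    => RbVmomi::VIM::VirtualDisk(
    #                     :capacityInKB  => 1048576,
    #                     :key           => -1,
    #                     :unitNumber    => 1, ...) }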

    def resize_unmanaged_disk(disk, new_size)

        resize_hash = {}
        disks       = []
        found       = false

        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        vc_disks.each do |vcenter_disk|
            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i

                if device_key == vcenter_disk[:key].to_i

                    if disk["SIZE"].to_i <= disk["ORIGINAL_SIZE"].to_i
                        raise "Disk size cannot be shrunk."
                    end

                    # Edit capacity setting new size in KB
                    d = vcenter_disk[:device]
                    d.capacityInKB = disk["SIZE"].to_i * 1024
                    disks << { :device => d, :operation => :edit }

                    found = true
                    break
                end
            end
        end

        raise "Unmanaged disk could not be found to apply resize operation." if !found

        if !disks.empty?
            resize_hash[:deviceChange] = disks
            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
        else
            raise "Device was not found after attaching it to VM in poweroff."
        end
    end

    def resize_managed_disk(disk, new_size)

        resize_hash = {}

        unmanaged_keys = get_unmanaged_keys
        vc_disks       = get_vcenter_disks

        # Get vcenter device to be resized, if it is already attached
        device         = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        # If the disk is being attached in poweroff, reconfigure the VM
        if !device
            spec_hash     = {}
            device_change = []

            # Get an array with disk paths in OpenNebula's vm template
            disks_in_onevm_vector = disks_in_onevm(unmanaged_keys, vc_disks)

            device_change_ds, device_change_spod, device_change_spod_ids = device_attach_disks(disks_in_onevm_vector, vc_disks)
            device_change += device_change_ds

            # Create volatile disks in StorageDRS if any
            if !device_change_spod.empty?
                spec_hash[:extraConfig] = create_storagedrs_disks(device_change_spod, device_change_spod_ids)
            end

            # Common reconfigure task
            spec_hash[:deviceChange] = device_change
            spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion

            # Check again if device has now been attached
            unmanaged_keys = get_unmanaged_keys
            vc_disks       = get_vcenter_disks
            device         = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

            if !device
                raise "Device was not found after attaching it to VM in poweroff."
            end
        end

        # Resize disk now that we know that it's part of the VM
        if device
            vcenter_disk = device[:device]
            vcenter_disk.capacityInKB = new_size.to_i * 1024
            resize_hash[:deviceChange] = [{
                :operation => :edit,
                :device => vcenter_disk
            }]

            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
        end
    end

    def has_snapshots?
        self['rootSnapshot'] && !self['rootSnapshot'].empty?
    end

    def instantiated_as_persistent?
        begin
            !!one_item["TEMPLATE/CLONING_TEMPLATE_ID"]
        rescue
            return false # one_item may not be retrieved if deploy_id hasn't been set
        end
    end

    def find_free_ide_controller(position=0)

        free_ide_controllers = []
        ide_schema           = {}

        used_numbers      = []
        available_numbers = []

        @item["config.hardware.device"].each do |dev|
            if dev.is_a? RbVmomi::VIM::VirtualIDEController
                if ide_schema[dev.key].nil?
                    ide_schema[dev.key] = {}
                end

                ide_schema[dev.key][:device] = dev
            end

            next if dev.class != RbVmomi::VIM::VirtualCdrom
            used_numbers << dev.unitNumber
        end

        2.times do |ide_id|
            available_numbers << ide_id if used_numbers.grep(ide_id).length <= 0
        end

        ide_schema.keys.each do |controller|
            free_ide_controllers << ide_schema[controller][:device].deviceInfo.label
        end

        if free_ide_controllers.empty?
            raise "There are no free IDE controllers to connect this CDROM device"
        end

        available_controller_label = free_ide_controllers[0]

        controller = nil

        @item['config.hardware.device'].each do |device|
            if device.deviceInfo.label == available_controller_label
                controller = device
                break
            end
        end

        new_unit_number = available_numbers.sort[position]

        return controller, new_unit_number
    end

    def find_free_controller(position=0)
        free_scsi_controllers = []
        scsi_schema           = {}

        used_numbers      = []
        available_numbers = []

        @item["config.hardware.device"].each do |dev|
            if dev.is_a? RbVmomi::VIM::VirtualSCSIController
                if scsi_schema[dev.key].nil?
                    scsi_schema[dev.key] = {}
                end

                used_numbers << dev.scsiCtlrUnitNumber
                scsi_schema[dev.key][:device] = dev
            end

            next if dev.class != RbVmomi::VIM::VirtualDisk
            used_numbers << dev.unitNumber
        end

        15.times do |scsi_id|
            available_numbers << scsi_id if used_numbers.grep(scsi_id).length <= 0
        end

        scsi_schema.keys.each do |controller|
            free_scsi_controllers << scsi_schema[controller][:device].deviceInfo.label
        end

        if free_scsi_controllers.length > 0
            available_controller_label = free_scsi_controllers[0]
        else
            add_new_scsi(scsi_schema)
            return find_free_controller
        end

        controller = nil

        @item['config.hardware.device'].each do |device|
            if device.deviceInfo.label == available_controller_label
                controller = device
                break
            end
        end

        new_unit_number = available_numbers.sort[position]

        return controller, new_unit_number
    end
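
    # Note: used_numbers includes each controller's scsiCtlrUnitNumber
    # (unit 7 on LSI Logic controllers), so a disk is never assigned the
    # unit number the controller itself occupies.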

    def add_new_scsi(scsi_schema)
        controller = nil

        if scsi_schema.keys.length >= 4
            raise "Cannot add a new controller, maximum is 4."
        end

        scsi_key    = 0
        scsi_number = 0

        if scsi_schema.keys.length > 0 && scsi_schema.keys.length < 4
            scsi_key    = scsi_schema.keys.sort[-1] + 1
            scsi_number = scsi_schema[scsi_schema.keys.sort[-1]][:device].busNumber + 1
        end

        controller_device = RbVmomi::VIM::VirtualLsiLogicController(
            :key       => scsi_key,
            :busNumber => scsi_number,
            :sharedBus => :noSharing
        )

        device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
            :device    => controller_device,
            :operation => :add
        )

        vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
            :deviceChange => [device_config_spec]
        )

        @item.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion

        @item["config.hardware.device"].each do |device|
            if device.class == RbVmomi::VIM::VirtualLsiLogicController &&
                device.key == scsi_key

                controller = device.deviceInfo.label
            end
        end

        return controller
    end

    # Create a snapshot for the VM
    def create_snapshot(snap_id, snap_name)
        snapshot_hash = {
            :name        => snap_id,
            :description => "OpenNebula Snapshot: #{snap_name}",
            :memory      => true,
            :quiesce     => true
        }

        vcenter_version = @vi_client.vim.serviceContent.about.apiVersion rescue nil

        if vcenter_version != "5.5"
            begin
                @item.CreateSnapshot_Task(snapshot_hash).wait_for_completion
            rescue Exception => e
                raise "Cannot create snapshot for VM: #{e.message}\n#{e.backtrace}"
            end
        else
            # B#5045 - If vcenter is 5.5 the snapshot may take longer than
            # 15 minutes and it does not report that it has finished using
            # wait_for_completion, so we use an active wait instead with a
            # timeout of 1440 minutes = 24 hours
            @item.CreateSnapshot_Task(snapshot_hash)

            snapshot_created  = false
            elapsed_minutes   = 0

            until snapshot_created || elapsed_minutes == 1440
                if !!@item['snapshot']
                    current_snapshot = @item['snapshot.currentSnapshot'] rescue nil
                    snapshot_found = find_snapshot_in_list(@item['snapshot.rootSnapshotList'], snap_id)
                    snapshot_created = !!snapshot_found && !!current_snapshot && current_snapshot._ref == snapshot_found._ref
                end
                sleep(60)
                elapsed_minutes += 1
            end
        end

        return snap_id
    end
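
    # Snapshots are created with the OpenNebula snap_id as their vCenter
    # name, which is what find_snapshot_in_list() matches on below when
    # reverting or deleting.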

    # Revert to a VM snapshot
    def revert_snapshot(snap_id)

        snapshot_list = self["snapshot.rootSnapshotList"]
        snapshot = find_snapshot_in_list(snapshot_list, snap_id)

        return nil if !snapshot

        begin
            revert_snapshot_hash = { :_this => snapshot }
            snapshot.RevertToSnapshot_Task(revert_snapshot_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot revert snapshot of VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Delete VM snapshot
    def delete_snapshot(snap_id)

        snapshot_list = self["snapshot.rootSnapshotList"]
        snapshot = find_snapshot_in_list(snapshot_list, snap_id)

        return nil if !snapshot

        begin
            delete_snapshot_hash = {
                :_this => snapshot,
                :removeChildren => false
            }
            snapshot.RemoveSnapshot_Task(delete_snapshot_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot delete snapshot of VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Recursively search a snapshot tree for a snapshot named snap_id
    def find_snapshot_in_list(list, snap_id)
        list.each do |i|
            if i.name == snap_id.to_s
                return i.snapshot
            elsif !i.childSnapshotList.empty?
                snap = find_snapshot_in_list(i.childSnapshotList, snap_id)
                return snap if snap
            end
        end rescue nil

        nil
    end

    ############################################################################
    # actions
    ############################################################################

    def shutdown
        begin
            @item.ShutdownGuest
            # Check if VM has been powered off
            (0..VM_SHUTDOWN_TIMEOUT).each do
                break if @item.runtime.powerState == "poweredOff"
                sleep 1
            end
        rescue
            # Ignore ShutdownGuest exceptions, the VM may not have
            # open-vm-tools installed
        end

        # If VM hasn't been powered off, do it now
        if @item.runtime.powerState != "poweredOff"
            poweroff_hard
        end
    end

    def destroy
        @item.Destroy_Task.wait_for_completion
    end

    def mark_as_template
        @item.MarkAsTemplate
    end

    def reset
        @item.ResetVM_Task.wait_for_completion
    end

    def suspend
        @item.SuspendVM_Task.wait_for_completion
    end

    def reboot
        @item.RebootGuest
    end

    def poweron
        ## If needed in the future, VMs can be powered on from the datacenter
        ## dc = get_dc
        ## dc.power_on_vm(@item)
        @item.PowerOnVM_Task.wait_for_completion
    end

    def is_powered_on?
        return @item.runtime.powerState == "poweredOn"
    end

    def poweroff_hard
        @item.PowerOffVM_Task.wait_for_completion
    end

    def remove_all_snapshots
        @item.RemoveAllSnapshots_Task.wait_for_completion
    end

    def set_running(state)
        value = state ? "yes" : "no"

        config_array = [
            { :key => "opennebula.vm.running", :value => value }
        ]
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(
            { :extraConfig => config_array }
        )

        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    end
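
    # set_running updates the same opennebula.vm.running flag that is seeded
    # to "no" at clone time, so monitoring can tell an intentional poweroff
    # from a VM that simply has not started yet.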
2642

    
2643
    ############################################################################
    # monitoring
    ############################################################################

    # monitor function used when VMM poll action is called
    def monitor_poll_vm
        reset_monitor

        @state = state_to_c(self["summary.runtime.powerState"])

        if @state != VM_STATE[:active]
            reset_monitor
            return
        end

        cpuMhz = self["runtime.host.summary.hardware.cpuMhz"].to_f

        @monitor[:used_memory] = self["summary.quickStats.hostMemoryUsage"] * 1024

        used_cpu = self["summary.quickStats.overallCpuUsage"].to_f / cpuMhz
        used_cpu = used_cpu * 100
        @monitor[:used_cpu] = sprintf('%.2f', used_cpu)

        # Check for negative values
        @monitor[:used_memory] = 0 if @monitor[:used_memory].to_i < 0
        @monitor[:used_cpu]    = 0 if @monitor[:used_cpu].to_i < 0

        guest_ip_addresses = []
        self["guest.net"].each do |net|
            net.ipConfig.ipAddress.each do |ip|
                guest_ip_addresses << ip.ipAddress
            end if net.ipConfig && net.ipConfig.ipAddress
        end if self["guest.net"]

        @guest_ip_addresses = guest_ip_addresses.join(',')

        pm = self['_connection'].serviceInstance.content.perfManager

        provider = pm.provider_summary(@item)

        refresh_rate = provider.refreshRate

        if get_vm_id
            stats = {}

            if one_item["MONITORING/LAST_MON"] && one_item["MONITORING/LAST_MON"].to_i != 0
                # Real-time data is kept for at most 1 hour; 1 minute holds 3 samples
                interval = Time.now.to_i - one_item["MONITORING/LAST_MON"].to_i

                # If the last poll was more than an hour ago get the latest 3 minutes,
                # else calculate how many samples have passed since the last poll
                samples = interval > 3600 ? 9 : (interval / refresh_rate) + 1
                max_samples = samples > 0 ? samples : 1

                stats = pm.retrieve_stats(
                    [@item],
                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
                    'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
                    'virtualDisk.read','virtualDisk.write'],
                    {interval:refresh_rate, max_samples: max_samples}
                ) rescue {}
            else
                # First poll, get at least the latest 3 minutes = 9 samples
                stats = pm.retrieve_stats(
                    [@item],
                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
                    'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
                    'virtualDisk.read','virtualDisk.write'],
                    {interval:refresh_rate, max_samples: 9}
                ) rescue {}
            end

            if !stats.empty? && !stats.first[1][:metrics].empty?
                metrics = stats.first[1][:metrics]

                nettx_kbpersec = 0
                if metrics['net.transmitted']
                    metrics['net.transmitted'].each { |sample|
                        nettx_kbpersec += sample if sample > 0
                    }
                end

                netrx_kbpersec = 0
                if metrics['net.bytesRx']
                    metrics['net.bytesRx'].each { |sample|
                        netrx_kbpersec += sample if sample > 0
                    }
                end

                read_kbpersec = 0
                if metrics['virtualDisk.read']
                    metrics['virtualDisk.read'].each { |sample|
                        read_kbpersec += sample if sample > 0
                    }
                end

                read_iops = 0
                if metrics['virtualDisk.numberReadAveraged']
                    metrics['virtualDisk.numberReadAveraged'].each { |sample|
                        read_iops += sample if sample > 0
                    }
                end

                write_kbpersec = 0
                if metrics['virtualDisk.write']
                    metrics['virtualDisk.write'].each { |sample|
                        write_kbpersec += sample if sample > 0
                    }
                end

                write_iops = 0
                if metrics['virtualDisk.numberWriteAveraged']
                    metrics['virtualDisk.numberWriteAveraged'].each { |sample|
                        write_iops += sample if sample > 0
                    }
                end
            else
                nettx_kbpersec = 0
                netrx_kbpersec = 0
                read_kbpersec  = 0
                read_iops      = 0
                write_kbpersec = 0
                write_iops     = 0
            end

            # Accumulate values if present
            previous_nettx = @one_item && @one_item["MONITORING/NETTX"] ? @one_item["MONITORING/NETTX"].to_i : 0
            previous_netrx = @one_item && @one_item["MONITORING/NETRX"] ? @one_item["MONITORING/NETRX"].to_i : 0
            previous_diskrdiops = @one_item && @one_item["MONITORING/DISKRDIOPS"] ? @one_item["MONITORING/DISKRDIOPS"].to_i : 0
            previous_diskwriops = @one_item && @one_item["MONITORING/DISKWRIOPS"] ? @one_item["MONITORING/DISKWRIOPS"].to_i : 0
            previous_diskrdbytes = @one_item && @one_item["MONITORING/DISKRDBYTES"] ? @one_item["MONITORING/DISKRDBYTES"].to_i : 0
            previous_diskwrbytes = @one_item && @one_item["MONITORING/DISKWRBYTES"] ? @one_item["MONITORING/DISKWRBYTES"].to_i : 0

            @monitor[:nettx] = previous_nettx + (nettx_kbpersec * 1024 * refresh_rate).to_i
            @monitor[:netrx] = previous_netrx + (netrx_kbpersec * 1024 * refresh_rate).to_i

            @monitor[:diskrdiops]  = previous_diskrdiops + read_iops
            @monitor[:diskwriops]  = previous_diskwriops + write_iops
            @monitor[:diskrdbytes] = previous_diskrdbytes + (read_kbpersec * 1024 * refresh_rate).to_i
            @monitor[:diskwrbytes] = previous_diskwrbytes + (write_kbpersec * 1024 * refresh_rate).to_i
        end
    end

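    # Worked example of the sampling arithmetic above (assumed figures):
    # with the typical real-time refreshRate of 20 seconds and a last poll
    # 120 seconds ago,
    #
    #   samples     = (120 / 20) + 1  #=> 7
    #   max_samples = 7
    #
    # so retrieve_stats requests the 7 most recent 20-second samples; any
    # gap above 3600 seconds falls back to 9 samples (the latest 3 minutes).
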
    # monitor function used when poll action is called for all vms
    def monitor(stats)
        reset_monitor

        refresh_rate = 20 # 20 seconds between samples (real-time)

        @state = state_to_c(@vm_info["summary.runtime.powerState"])

        return if @state != VM_STATE[:active]

        cpuMhz = @vm_info[:esx_host_cpu]

        @monitor[:used_memory] = @vm_info["summary.quickStats.hostMemoryUsage"].to_i * 1024

        used_cpu = @vm_info["summary.quickStats.overallCpuUsage"].to_f / cpuMhz
        used_cpu = used_cpu * 100
        @monitor[:used_cpu] = sprintf('%.2f', used_cpu)

        # Check for negative values
        @monitor[:used_memory] = 0 if @monitor[:used_memory].to_i < 0
        @monitor[:used_cpu]    = 0 if @monitor[:used_cpu].to_i < 0

        guest_ip_addresses = []
        @vm_info["guest.net"].each do |net|
            net.ipConfig.ipAddress.each do |ip|
                guest_ip_addresses << ip.ipAddress
            end if net.ipConfig && net.ipConfig.ipAddress
        end if @vm_info["guest.net"]

        @guest_ip_addresses = guest_ip_addresses.join(',')

        if stats.key?(@item)
            metrics = stats[@item][:metrics]

            nettx_kbpersec = 0
            if metrics['net.transmitted']
                metrics['net.transmitted'].each { |sample|
                    nettx_kbpersec += sample if sample > 0
                }
            end

            netrx_kbpersec = 0
            if metrics['net.bytesRx']
                metrics['net.bytesRx'].each { |sample|
                    netrx_kbpersec += sample if sample > 0
                }
            end

            read_kbpersec = 0
            if metrics['virtualDisk.read']
                metrics['virtualDisk.read'].each { |sample|
                    read_kbpersec += sample if sample > 0
                }
            end

            read_iops = 0
            if metrics['virtualDisk.numberReadAveraged']
                metrics['virtualDisk.numberReadAveraged'].each { |sample|
                    read_iops += sample if sample > 0
                }
            end

            write_kbpersec = 0
            if metrics['virtualDisk.write']
                metrics['virtualDisk.write'].each { |sample|
                    write_kbpersec += sample if sample > 0
                }
            end

            write_iops = 0
            if metrics['virtualDisk.numberWriteAveraged']
                metrics['virtualDisk.numberWriteAveraged'].each { |sample|
                    write_iops += sample if sample > 0
                }
            end
        else
            nettx_kbpersec = 0
            netrx_kbpersec = 0
            read_kbpersec  = 0
            read_iops      = 0
            write_kbpersec = 0
            write_iops     = 0
        end

        # Accumulate values if present
        previous_nettx = @one_item && @one_item["MONITORING/NETTX"] ? @one_item["MONITORING/NETTX"].to_i : 0
        previous_netrx = @one_item && @one_item["MONITORING/NETRX"] ? @one_item["MONITORING/NETRX"].to_i : 0
        previous_diskrdiops = @one_item && @one_item["MONITORING/DISKRDIOPS"] ? @one_item["MONITORING/DISKRDIOPS"].to_i : 0
        previous_diskwriops = @one_item && @one_item["MONITORING/DISKWRIOPS"] ? @one_item["MONITORING/DISKWRIOPS"].to_i : 0
        previous_diskrdbytes = @one_item && @one_item["MONITORING/DISKRDBYTES"] ? @one_item["MONITORING/DISKRDBYTES"].to_i : 0
        previous_diskwrbytes = @one_item && @one_item["MONITORING/DISKWRBYTES"] ? @one_item["MONITORING/DISKWRBYTES"].to_i : 0

        @monitor[:nettx] = previous_nettx + (nettx_kbpersec * 1024 * refresh_rate).to_i
        @monitor[:netrx] = previous_netrx + (netrx_kbpersec * 1024 * refresh_rate).to_i

        @monitor[:diskrdiops]  = previous_diskrdiops + read_iops
        @monitor[:diskwriops]  = previous_diskwriops + write_iops
        @monitor[:diskrdbytes] = previous_diskrdbytes + (read_kbpersec * 1024 * refresh_rate).to_i
        @monitor[:diskwrbytes] = previous_diskwrbytes + (write_kbpersec * 1024 * refresh_rate).to_i
    end

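    # Usage sketch (illustrative): the caller of the all-VMs poll is expected
    # to fetch the performance counters once and hand each VM its slice of
    # the result. Assuming `vms` holds instances of this class sharing one
    # vi_client connection:
    #
    #   pm    = vi_client.vim.serviceContent.perfManager
    #   stats = pm.retrieve_stats(
    #       vms.map(&:item),
    #       ['net.transmitted', 'net.bytesRx', 'net.bytesTx', 'net.received',
    #        'virtualDisk.numberReadAveraged', 'virtualDisk.numberWriteAveraged',
    #        'virtualDisk.read', 'virtualDisk.write'],
    #       { interval: 20, max_samples: 9 }
    #   ) rescue {}
    #   vms.each { |vm| vm.monitor(stats) }
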
    # Generates an OpenNebula IM Driver valid string with the monitor info
    def info
        return 'STATE=d' if @state == 'd'

        guest_ip = @vm_info ? @vm_info["guest.ipAddress"] : self["guest.ipAddress"]

        used_cpu    = @monitor[:used_cpu]
        used_memory = @monitor[:used_memory]
        netrx       = @monitor[:netrx]
        nettx       = @monitor[:nettx]
        diskrdbytes = @monitor[:diskrdbytes]
        diskwrbytes = @monitor[:diskwrbytes]
        diskrdiops  = @monitor[:diskrdiops]
        diskwriops  = @monitor[:diskwriops]

        esx_host      = @vm_info ? @vm_info[:esx_host_name].to_s : self["runtime.host.name"].to_s
        guest_state   = @vm_info ? @vm_info["guest.guestState"].to_s : self["guest.guestState"].to_s
        vmware_tools  = @vm_info ? @vm_info["guest.toolsRunningStatus"].to_s : self["guest.toolsRunningStatus"].to_s
        vmtools_ver   = @vm_info ? @vm_info["guest.toolsVersion"].to_s : self["guest.toolsVersion"].to_s
        vmtools_verst = @vm_info ? @vm_info["guest.toolsVersionStatus2"].to_s : self["guest.toolsVersionStatus2"].to_s

        if @vm_info
            rp_name = @vm_info[:rp_list].select { |item| item[:ref] == @vm_info["resourcePool"]._ref }.first[:name] rescue ""
            rp_name = "Resources" if rp_name.empty?
        else
            rp_name = self["resourcePool"].name
        end

        str_info = ""

        str_info << "GUEST_IP=" << guest_ip.to_s << " " if guest_ip

        if @guest_ip_addresses && !@guest_ip_addresses.empty?
            str_info << "GUEST_IP_ADDRESSES=\"" << @guest_ip_addresses.to_s << "\" "
        end

        str_info << "#{POLL_ATTRIBUTE[:state]}="  << @state            << " "
        str_info << "#{POLL_ATTRIBUTE[:cpu]}="    << used_cpu.to_s     << " "
        str_info << "#{POLL_ATTRIBUTE[:memory]}=" << used_memory.to_s  << " "
        str_info << "#{POLL_ATTRIBUTE[:netrx]}="  << netrx.to_s        << " "
        str_info << "#{POLL_ATTRIBUTE[:nettx]}="  << nettx.to_s        << " "

        str_info << "DISKRDBYTES=" << diskrdbytes.to_s << " "
        str_info << "DISKWRBYTES=" << diskwrbytes.to_s << " "
        str_info << "DISKRDIOPS="  << diskrdiops.to_s  << " "
        str_info << "DISKWRIOPS="  << diskwriops.to_s  << " "

        str_info << "VCENTER_ESX_HOST=\""                 << esx_host      << "\" "
        str_info << "VCENTER_GUEST_STATE="                << guest_state   << " "
        str_info << "VCENTER_VMWARETOOLS_RUNNING_STATUS=" << vmware_tools  << " "
        str_info << "VCENTER_VMWARETOOLS_VERSION="        << vmtools_ver   << " "
        str_info << "VCENTER_VMWARETOOLS_VERSION_STATUS=" << vmtools_verst << " "
        str_info << "VCENTER_RP_NAME=\""                  << rp_name       << "\" "
    end

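    # The return value is a single line of KEY=VALUE pairs consumed by the
    # monitoring driver. An illustrative result (made-up figures; the STATE,
    # CPU, MEMORY, NETRX and NETTX key names come from POLL_ATTRIBUTE):
    #
    #   GUEST_IP=10.0.0.5 STATE=a CPU=12.50 MEMORY=1048576 NETRX=2048
    #   NETTX=1024 DISKRDBYTES=0 DISKWRBYTES=0 DISKRDIOPS=0 DISKWRIOPS=0
    #   VCENTER_ESX_HOST="esx1.example.local" VCENTER_GUEST_STATE=running ...
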
    def reset_monitor
        @monitor = {
            :used_cpu    => 0,
            :used_memory => 0,
            :netrx       => 0,
            :nettx       => 0,
            :diskrdbytes => 0,
            :diskwrbytes => 0,
            :diskrdiops  => 0,
            :diskwriops  => 0
        }
    end

    # Converts the VI string state to the OpenNebula state convention
    # vCenter power states are:
    # - poweredOff   The virtual machine is currently powered off.
    # - poweredOn    The virtual machine is currently powered on.
    # - suspended    The virtual machine is currently suspended.
    def state_to_c(state)
        case state
            when 'poweredOn'
                VM_STATE[:active]
            when 'suspended'
                VM_STATE[:paused]
            when 'poweredOff'
                VM_STATE[:deleted]
            else
                VM_STATE[:unknown]
        end
    end

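    # For example:
    #
    #   state_to_c('poweredOn')   #=> VM_STATE[:active]
    #   state_to_c('suspended')   #=> VM_STATE[:paused]
    #   state_to_c('poweredOff')  #=> VM_STATE[:deleted]
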
    # TODO check with uuid
    def self.new_from_ref(ref, vi_client)
        self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
    end

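    # Usage sketch (illustrative): rebuilding a wrapper from a stored vCenter
    # managed object reference, e.g. the DEPLOY_ID kept by OpenNebula, which
    # is assumed here to hold a moref such as "vm-1234":
    #
    #   ref = one_vm['DEPLOY_ID']
    #   vm  = VCenterDriver::VirtualMachine.new_from_ref(ref, vi_client)
    #   vm.monitor_poll_vm
    #   puts vm.info
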
end # class VirtualMachine

end # module VCenterDriver