one / src / vmm_mad / remotes / lib / vcenter_driver / virtual_machine.rb @ d923ef36
module VCenterDriver
require 'digest'
class VirtualMachineFolder
    attr_accessor :item, :items

    def initialize(item)
        @item = item
        @items = {}
    end

    ########################################################################
    # Builds a hash with VirtualMachine-Ref / VirtualMachine to be used as
    # a cache
    # @return [Hash] in the form
    #   { vm_ref [Symbol] => VirtualMachine object }
    ########################################################################
    def fetch!
        VIClient.get_entities(@item, "VirtualMachine").each do |item|
            item_name = item._ref
            @items[item_name.to_sym] = VirtualMachine.new(item)
        end
    end

    def fetch_templates!
        VIClient.get_entities(@item, "VirtualMachine").each do |item|
            if item.config.template
                item_name = item._ref
                @items[item_name.to_sym] = Template.new(item)
            end
        end
    end

    ########################################################################
    # Returns a VirtualMachine. Uses the cache if available.
    # @param ref [Symbol] the vcenter ref
    # @return VirtualMachine
    ########################################################################
    def get(ref)
        if !@items[ref.to_sym]
            rbvmomi_vm = RbVmomi::VIM::VirtualMachine.new(@item._connection, ref)
            @items[ref.to_sym] = VirtualMachine.new(rbvmomi_vm)
        end

        @items[ref.to_sym]
    end
end # class VirtualMachineFolder
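
# Illustrative usage of VirtualMachineFolder (a sketch, assuming a connected
# vi_client and a fetched Datacenter wrapper `dc`; not part of the driver):
#
#   vm_folder = VirtualMachineFolder.new(dc.item.vmFolder)
#   vm_folder.fetch_templates!           # cache every template under the folder
#   vm_folder.items.each do |ref, tmpl|
#       puts "#{ref} -> #{tmpl.item.name}"
#   end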

class Template

    attr_accessor :item

    include Memoize

    def initialize(item=nil, vi_client=nil)
        @item = item
        @vi_client = vi_client
        @locking = true
    end

    # Locking function. Similar to flock
    def lock
        if @locking
            @locking_file = File.open("/tmp/vcenter-importer-lock","w")
            @locking_file.flock(File::LOCK_EX)
        end
    end

    # Unlock driver execution mutex
    def unlock
        if @locking
            @locking_file.close
        end
    end
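
    # The lock above serializes whole import operations across driver
    # processes. The typical pattern used later in this class is
    # (illustrative):
    #
    #   lock
    #   begin
    #       # ... import images or networks ...
    #   ensure
    #       unlock
    #   end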

    def get_dc
        item = @item

        while !item.instance_of? RbVmomi::VIM::Datacenter
            item = item.parent
            if item.nil?
                raise "Could not find the parent Datacenter"
            end
        end

        Datacenter.new(item)
    end

    def delete_template
        @item.Destroy_Task.wait_for_completion
    end

    def get_vcenter_instance_uuid
        @vi_client.vim.serviceContent.about.instanceUuid rescue nil
    end

    def create_template_copy(template_name)
        error = nil
        template_ref = nil

        template_name = "one-#{self['name']}" if template_name.empty?

        relocate_spec_params = {}
        relocate_spec_params[:pool] = get_rp
        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params)

        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec({
            :location => relocate_spec,
            :powerOn  => false,
            :template => false
        })

        template = nil
        begin
            template = @item.CloneVM_Task(:folder => @item.parent,
                                          :name   => template_name,
                                          :spec   => clone_spec).wait_for_completion
            template_ref = template._ref
        rescue Exception => e
            if !e.message.start_with?('DuplicateName')
                error = "Could not create the template clone. Reason: #{e.message}"
                return error, nil
            end

            dc = get_dc
            vm_folder = dc.vm_folder
            vm_folder.fetch!
            vm = vm_folder.items
                    .select{|k,v| v.item.name == template_name}
                    .values.first.item rescue nil

            if vm
                begin
                    vm.Destroy_Task.wait_for_completion
                    template = @item.CloneVM_Task(:folder => @item.parent,
                                                  :name   => template_name,
                                                  :spec   => clone_spec).wait_for_completion
                    template_ref = template._ref
                rescue Exception => e
                    error = "Could not delete the existing template, please remove it manually from vCenter. Reason: #{e.message}"
                end
            else
                error = "Could not create the template clone. Reason: #{e.message}"
            end
        end

        return error, template_ref
    end
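
    # Illustrative call (a sketch; "my-template" is a made-up name):
    #
    #   error, template_ref = template.create_template_copy("my-template")
    #   raise error if error
    #   # template_ref now holds the vCenter ref of the cloned template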

    # Linked Clone over existing template
    def create_delta_disks

        begin
            disks = @item['config.hardware.device'].grep(RbVmomi::VIM::VirtualDisk)
            disk_without_snapshots = disks.select { |x| x.backing.parent.nil? }
        rescue
            error = "Cannot extract existing disks on template."
            use_linked_clones = false
            return error, use_linked_clones
        end

        if !disk_without_snapshots.empty?

            begin
                if self['config.template']
                    @item.MarkAsVirtualMachine(:pool => get_rp, :host => self['runtime.host'])
                end
            rescue Exception => e
                @item.MarkAsTemplate()
                error = "Cannot mark the template as a VirtualMachine. Not using linked clones. Reason: #{e.message}/#{e.backtrace}"
                use_linked_clones = false
                return error, use_linked_clones
            end

            begin
                spec = {}
                spec[:deviceChange] = []

                disk_without_snapshots.each do |disk|
                    remove_disk_spec = { :operation => :remove, :device => disk }
                    spec[:deviceChange] << remove_disk_spec

                    add_disk_spec = { :operation     => :add,
                                      :fileOperation => :create,
                                      :device        => disk.dup.tap { |x|
                                          x.backing = x.backing.dup
                                          x.backing.fileName = "[#{disk.backing.datastore.name}]"
                                          x.backing.parent = disk.backing
                                      }
                    }
                    spec[:deviceChange] << add_disk_spec
                end

                @item.ReconfigVM_Task(:spec => spec).wait_for_completion if !spec[:deviceChange].empty?
            rescue Exception => e
                error = "Cannot create the delta disks on top of the template. Reason: #{e.message}."
                use_linked_clones = false
                return error, use_linked_clones
            end

            begin
                @item.MarkAsTemplate()
            rescue
                error = "Cannot mark the VirtualMachine as a template. Not using linked clones."
                use_linked_clones = false
                return error, use_linked_clones
            end

            error = nil
            use_linked_clones = true
            return error, use_linked_clones
        else
            # Template already has delta disks
            error = nil
            use_linked_clones = true
            return error, use_linked_clones
        end
    end
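
    # Illustrative linked-clones preparation flow (a sketch):
    #
    #   error, use_linked_clones = copy.create_delta_disks
    #   # use_linked_clones == true means every disk now has a delta
    #   # (snapshot) parent, so clones can share the base disk backing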

    def import_vcenter_disks(vc_uuid, dpool, ipool, sunstone=false, template_id=nil)
        disk_info = ""
        error = ""
        sunstone_disk_info = []

        # Track allocated images. Initialized outside the begin block so the
        # rollback in the ensure clause works even if the import fails early.
        allocated_images = []

        begin
            lock # Lock import operation, to avoid concurrent creation of images

            dc = get_dc
            dc_ref = dc.item._ref

            # Get disks and the info required
            vc_disks = get_vcenter_disks

            vc_disks.each do |disk|
                datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_dc(disk[:datastore]._ref,
                                                                                        dc_ref,
                                                                                        vc_uuid,
                                                                                        dpool)
                if datastore_found.nil?
                    error = "\n    ERROR: datastore #{disk[:datastore].name}: has to be imported first as an image datastore!\n"

                    # Rollback: delete the disk images allocated so far
                    allocated_images.each do |i|
                        i.delete
                    end

                    break
                end

                image_import = VCenterDriver::Datastore.get_image_import_template(disk[:datastore].name,
                                                                                  disk[:path],
                                                                                  disk[:type],
                                                                                  disk[:prefix],
                                                                                  ipool,
                                                                                  template_id)
                # Image is already in the datastore
                if image_import[:one]
                    # This is the disk info
                    disk_tmp = ""
                    disk_tmp << "DISK=[\n"
                    disk_tmp << "IMAGE_ID=\"#{image_import[:one]["ID"]}\",\n"
                    disk_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
                    disk_tmp << "]\n"
                    if sunstone
                        sunstone_disk = {}
                        sunstone_disk[:type] = "EXISTING_DISK"
                        sunstone_disk[:image_tmpl] = disk_tmp
                        sunstone_disk_info << sunstone_disk
                    else
                        disk_info << disk_tmp
                    end

                elsif !image_import[:template].empty?

                    if sunstone
                        sunstone_disk = {}
                        sunstone_disk[:type] = "NEW_DISK"
                        sunstone_disk[:image_tmpl] = image_import[:template]
                        sunstone_disk[:ds_id] = datastore_found['ID'].to_i
                        sunstone_disk_info << sunstone_disk
                    else
                        # The image is created as it's not in the datastore
                        one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image)
                        allocated_images << one_i
                        rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i)

                        if OpenNebula.is_error?(rc)
                            error = "    Error creating disk from template: #{rc.message}\n"
                            break
                        end

                        # Add info for the OpenNebula template
                        one_i.info
                        disk_info << "DISK=[\n"
                        disk_info << "IMAGE_ID=\"#{one_i["ID"]}\",\n"
                        disk_info << "OPENNEBULA_MANAGED=\"NO\"\n"
                        disk_info << "]\n"
                    end
                end
            end

        rescue Exception => e
            error = "\n    There was an error trying to create an image for a disk in the vCenter template. Reason: #{e.message}\n#{e.backtrace}"
        ensure
            unlock
            if !error.empty?
                # Rollback: delete the allocated disk images
                allocated_images.each do |i|
                    i.delete
                end
            end
        end

        return error, sunstone_disk_info, allocated_images if sunstone

        return error, disk_info, allocated_images
    end
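
    # Illustrative use of the return values (a sketch; CLI mode, so
    # sunstone == false):
    #
    #   error, disk_info, images = tmpl.import_vcenter_disks(vc_uuid,
    #                                                         dpool, ipool)
    #   raise error if !error.empty?
    #   one_template << disk_info   # append the generated DISK=[...] sections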

    def import_vcenter_nics(vc_uuid, npool, hpool, vcenter_instance_name,
                            template_ref, wild, sunstone=false, vm_name=nil, vm_id=nil, dc_name=nil)
        nic_info = ""
        error = ""
        sunstone_nic_info = []

        # Track allocated networks for rollback. Initialized outside the
        # begin block so the ensure clause works even on early failure.
        allocated_networks = []

        begin
            lock # Lock import operation, to avoid concurrent creation of networks

            # dc_ref is needed below even when dc_name is given by the caller
            dc      = get_dc
            dc_ref  = dc.item._ref
            dc_name = dc.item.name if !dc_name

            ccr_ref  = self["runtime.host.parent._ref"]
            ccr_name = self["runtime.host.parent.name"]

            # Get NICs and the info required
            vc_nics = get_vcenter_nics

            # Track port groups duplicated in this VM
            duplicated_networks = []

            vc_nics.each do |nic|
                # Check if the network already exists
                network_found = VCenterDriver::Network.get_unmanaged_vnet_by_ref(nic[:net_ref],
                                                                                 template_ref,
                                                                                 vc_uuid,
                                                                                 npool)
                # Network is already in OpenNebula
                if network_found

                    # This is the existing nic info
                    nic_tmp = ""
                    nic_tmp << "NIC=[\n"
                    nic_tmp << "NETWORK_ID=\"#{network_found["ID"]}\",\n"
                    nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
                    nic_tmp << "]\n"

                    if sunstone
                        sunstone_nic = {}
                        sunstone_nic[:type] = "EXISTING_NIC"
                        sunstone_nic[:network_tmpl] = nic_tmp
                        sunstone_nic_info << sunstone_nic
                    else
                        nic_info << nic_tmp
                    end
                else
                    # The network has to be created as it's not in OpenNebula
                    one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork)

                    # Let's get the OpenNebula host associated to the cluster reference
                    one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
                                                                   "TEMPLATE/VCENTER_CCR_REF",
                                                                   ccr_ref,
                                                                   vc_uuid,
                                                                   hpool)

                    # Let's get the CLUSTER_ID from the OpenNebula host
                    if !one_host || !one_host['CLUSTER_ID']
                        cluster_id = -1
                    else
                        cluster_id = one_host['CLUSTER_ID']
                    end

                    # We have to know if we're importing nics from a wild vm
                    # or from a template
                    if wild
                        unmanaged = "wild"
                    else
                        unmanaged = "template"
                    end

                    # Prepare the Virtual Network template
                    one_vnet = VCenterDriver::Network.to_one_template(nic[:net_name],
                                                                      nic[:net_ref],
                                                                      nic[:pg_type],
                                                                      ccr_ref,
                                                                      ccr_name,
                                                                      vc_uuid,
                                                                      vcenter_instance_name,
                                                                      dc_name,
                                                                      cluster_id,
                                                                      nil,
                                                                      unmanaged,
                                                                      template_ref,
                                                                      dc_ref,
                                                                      vm_name,
                                                                      vm_id)

                    # By default add an ethernet address range of size 255
                    ar_str = ""
                    ar_str << "AR=[\n"
                    ar_str << "TYPE=\"ETHER\",\n"
                    ar_str << "SIZE=\"255\"\n"
                    ar_str << "]\n"
                    one_vnet[:one] << ar_str

                    if sunstone
                        if !duplicated_networks.include?(nic[:net_name])
                            sunstone_nic = {}
                            sunstone_nic[:type] = "NEW_NIC"
                            sunstone_nic[:network_name] = nic[:net_name]
                            sunstone_nic[:network_tmpl] = one_vnet[:one]
                            sunstone_nic[:one_cluster_id] = cluster_id.to_i
                            sunstone_nic_info << sunstone_nic
                            duplicated_networks << nic[:net_name]
                        else
                            sunstone_nic = {}
                            sunstone_nic[:type] = "DUPLICATED_NIC"
                            sunstone_nic[:network_name] = nic[:net_name]
                            sunstone_nic_info << sunstone_nic
                        end
                    else
                        # Allocate the Virtual Network
                        allocated_networks << one_vn
                        rc = one_vn.allocate(one_vnet[:one], cluster_id.to_i)

                        if OpenNebula.is_error?(rc)
                            error = "\n    ERROR: Could not allocate virtual network due to #{rc.message}\n"
                            break
                        end

                        # Add info for the OpenNebula template
                        one_vn.info
                        nic_info << "NIC=[\n"
                        nic_info << "NETWORK_ID=\"#{one_vn["ID"]}\",\n"
                        nic_info << "OPENNEBULA_MANAGED=\"NO\"\n"
                        nic_info << "]\n"

                        # Refresh npool
                        npool.info_all
                    end
                end
            end

        rescue Exception => e
            error = "\n    There was an error trying to create a virtual network for a network in the vCenter template. Reason: #{e.message}"
        ensure
            unlock
            # Rollback: delete the allocated virtual networks
            if !error.empty?
                allocated_networks.each do |n|
                    n.delete
                end
            end
        end

        return error, sunstone_nic_info, allocated_networks if sunstone

        return error, nic_info, allocated_networks
    end

    def get_vcenter_disk_key(unit_number, controller_key)

        key = nil

        @item["config.hardware.device"].each do |device|
            if is_disk_or_iso?(device)
                if device.controllerKey == controller_key &&
                   device.unitNumber == unit_number

                    key = device.key
                    break
                end
            end
        end

        return key
    end

    def get_vcenter_disks

        disks = []
        ide_controlled  = []
        sata_controlled = []
        scsi_controlled = []

        @item["config.hardware.device"].each do |device|
            disk = {}

            if device.is_a? RbVmomi::VIM::VirtualIDEController
                ide_controlled.concat(device.device)
            end

            if device.is_a? RbVmomi::VIM::VirtualSATAController
                sata_controlled.concat(device.device)
            end

            if device.is_a? RbVmomi::VIM::VirtualSCSIController
                scsi_controlled.concat(device.device)
            end

            if is_disk_or_iso?(device)
                disk[:device]    = device
                disk[:datastore] = device.backing.datastore
                disk[:path]      = device.backing.fileName
                disk[:path_wo_ds]= disk[:path].sub(/^\[(.*?)\] /, "")
                disk[:type]      = is_disk?(device) ? "OS" : "CDROM"
                disk[:key]       = device.key
                disk[:prefix]    = "hd" if ide_controlled.include?(device.key)
                disk[:prefix]    = "sd" if scsi_controlled.include?(device.key)
                disk[:prefix]    = "sd" if sata_controlled.include?(device.key)
                disks << disk
            end
        end

        return disks
    end
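
    # Each element returned by get_vcenter_disks is a plain hash, e.g.
    # (values illustrative):
    #
    #   { :device     => <RbVmomi::VIM::VirtualDisk>,
    #     :datastore  => <RbVmomi::VIM::Datastore>,
    #     :path       => "[datastore1] one/disk.vmdk",
    #     :path_wo_ds => "one/disk.vmdk",
    #     :type       => "OS",          # or "CDROM"
    #     :key        => 2000,
    #     :prefix     => "sd" }         # "hd" for IDE-attached devices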

    def get_vcenter_nics
        nics = []
        @item["config.hardware.device"].each do |device|
            nic = {}
            if is_nic?(device)
                nic[:net_name]  = device.backing.network.name
                nic[:net_ref]   = device.backing.network._ref
                nic[:pg_type]   = VCenterDriver::Network.get_network_type(device)
                nics << nic
            end
        end
        return nics
    end

    # Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom
    def is_disk_or_cdrom?(device)
        is_disk  = !device.class.ancestors.index(RbVmomi::VIM::VirtualDisk).nil?
        is_cdrom = !device.class.ancestors.index(RbVmomi::VIM::VirtualCdrom).nil?
        is_disk || is_cdrom
    end

    # Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file
    def is_disk_or_iso?(device)
        is_disk = !device.class.ancestors.index(RbVmomi::VIM::VirtualDisk).nil?
        is_iso  = device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo
        is_disk || is_iso
    end

    # Checks if a RbVmomi::VIM::VirtualDevice is a disk
    def is_disk?(device)
        !device.class.ancestors.index(RbVmomi::VIM::VirtualDisk).nil?
    end

    # Checks if a RbVmomi::VIM::VirtualDevice is a network interface
    def is_nic?(device)
        !device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil?
    end

    # @return RbVmomi::VIM::ResourcePool, first resource pool in cluster
    def get_rp
        self['runtime.host.parent.resourcePool']
    end

    def vm_to_one(vm_name)

        str = "NAME   = \"#{vm_name}\"\n"\
              "CPU    = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
              "vCPU   = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
              "MEMORY = \"#{@vm_info["config.hardware.memoryMB"]}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "CONTEXT = [\n"\
              "    NETWORK = \"YES\",\n"\
              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
              "]\n"\
              "VCENTER_INSTANCE_ID =\"#{@vm_info[:vc_uuid]}\"\n"\
              "VCENTER_CCR_REF =\"#{@vm_info[:cluster_ref]}\"\n"

        str << "IMPORT_VM_ID =\"#{self["_ref"]}\"\n"
        str << "IMPORT_STATE =\"#{@state}\"\n"

        vnc_port = nil
        keymap = nil

        @vm_info["config.extraConfig"].each do |xtra|
            if xtra[:key].downcase == "remotedisplay.vnc.port"
                vnc_port = xtra[:value]
            end

            if xtra[:key].downcase == "remotedisplay.vnc.keymap"
                keymap = xtra[:value]
            end
        end

        if !@vm_info["config.extraConfig"].empty?
            str << "GRAPHICS = [\n"\
                   "  TYPE     =\"vnc\",\n"
            str << "  PORT     =\"#{vnc_port}\",\n" if vnc_port
            str << "  KEYMAP   =\"#{keymap}\",\n" if keymap
            str << "  LISTEN   =\"0.0.0.0\"\n"
            str << "]\n"
        end

        if !@vm_info["config.annotation"] || @vm_info["config.annotation"].empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
                " from Cluster #{@vm_info["cluster_name"]}\"\n"
        else
            notes = @vm_info["config.annotation"].gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        case @vm_info["guest.guestFullName"]
            when /CentOS/i
                str << "LOGO=images/logos/centos.png\n"
            when /Debian/i
                str << "LOGO=images/logos/debian.png\n"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png\n"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png\n"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png\n"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png\n"
            when /Linux/i
                str << "LOGO=images/logos/linux.png\n"
        end

        return str
    end
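
    # vm_to_one renders an OpenNebula template string; the output starts
    # like this (values illustrative):
    #
    #   NAME   = "my-wild-vm"
    #   CPU    = "2"
    #   vCPU   = "2"
    #   MEMORY = "2048"
    #   HYPERVISOR = "vcenter"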

    def self.template_to_one(template, vc_uuid, ccr_ref, ccr_name, import_name, host_id)

        num_cpu, memory, annotation, guest_fullname = template.item.collect("config.hardware.numCPU",
                                                                            "config.hardware.memoryMB",
                                                                            "config.annotation",
                                                                            "guest.guestFullName")

        str = "NAME   = \"#{import_name}\"\n"\
              "CPU    = \"#{num_cpu}\"\n"\
              "vCPU   = \"#{num_cpu}\"\n"\
              "MEMORY = \"#{memory}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "CONTEXT = [\n"\
              "    NETWORK = \"YES\",\n"\
              "    SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
              "]\n"\
              "VCENTER_INSTANCE_ID =\"#{vc_uuid}\"\n"

        str << "VCENTER_TEMPLATE_REF =\"#{template["_ref"]}\"\n"
        str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n"

        str << "GRAPHICS = [\n"\
               "  TYPE     =\"vnc\",\n"
        str << "  LISTEN   =\"0.0.0.0\"\n"
        str << "]\n"

        if annotation.nil? || annotation.empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
                " from Cluster #{ccr_name}\"\n"
        else
            notes = annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        case guest_fullname
            when /CentOS/i
                str << "LOGO=images/logos/centos.png\n"
            when /Debian/i
                str << "LOGO=images/logos/debian.png\n"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png\n"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png\n"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png\n"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png\n"
            when /Linux/i
                str << "LOGO=images/logos/linux.png\n"
        end

        return str
    end

    def self.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name=nil, dc_name=nil, rp_cache={})

        begin
            template_ref      = template['_ref']
            template_name     = template["name"]
            template_ccr      = template['runtime.host.parent']
            template_ccr_ref  = template_ccr._ref
            template_ccr_name = template_ccr.name

            # Set vcenter instance name
            vcenter_instance_name = vi_client.vim.host if !vcenter_instance_name

            # Get datacenter info (get_dc is an instance method, so ask the
            # template object for it)
            if !dc_name
                dc = template.get_dc
                dc_name = dc.item.name
            end

            # Get resource pools and generate a list
            if !rp_cache[template_ccr_name]
                tmp_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(template_ccr_ref, vi_client)
                rp_list = tmp_cluster.get_resource_pool_list
                rp = ""
                if !rp_list.empty?
                    rp_name_list = []
                    rp_list.each do |rp_hash|
                        rp_name_list << rp_hash[:name]
                    end
                    rp =  "O|list|Which resource pool do you want this VM to run in? "
                    rp << "|#{rp_name_list.join(",")}" # List of RPs
                    rp << "|#{rp_name_list.first}" # Default RP
                end
                rp_cache[template_ccr_name] = {}
                rp_cache[template_ccr_name][:rp] = rp
                rp_cache[template_ccr_name][:rp_list] = rp_list
            end
            rp      = rp_cache[template_ccr_name][:rp]
            rp_list = rp_cache[template_ccr_name][:rp_list]

            # Determine the location path for the template
            vcenter_template = VCenterDriver::VirtualMachine.new_from_ref(template_ref, vi_client)
            item = vcenter_template.item
            folders = []
            while !item.instance_of? RbVmomi::VIM::Datacenter
                item = item.parent
                if !item.instance_of? RbVmomi::VIM::Datacenter
                    folders << item.name if item.name != "vm"
                end
                raise "Could not find the template's parent location" if item.nil?
            end
            location = folders.reverse.join("/")
            location = "/" if location.empty?

            # Generate a crypto hash for the template name and take the first 12 chars
            sha256            = Digest::SHA256.new
            full_name         = "#{template_name} - #{template_ccr_name} [#{vcenter_instance_name} - #{dc_name}]_#{location}"
            template_hash     = sha256.hexdigest(full_name)[0..11]
            template_name     = template_name.tr("\u007F", "")
            template_ccr_name = template_ccr_name.tr("\u007F", "")
            import_name       = "#{template_name} - #{template_ccr_name} #{template_hash}"

            # Prepare the Hash that will be used by importers to display
            # the object being imported
            one_tmp = {}
            one_tmp[:name]                  = import_name
            one_tmp[:template_name]         = template_name
            one_tmp[:sunstone_template_name]= "#{template_name} [ Cluster: #{template_ccr_name} - Template location: #{location} ]"
            one_tmp[:template_hash]         = template_hash
            one_tmp[:template_location]     = location
            one_tmp[:vcenter_ccr_ref]       = template_ccr_ref
            one_tmp[:vcenter_ref]           = template_ref
            one_tmp[:vcenter_instance_uuid] = vcenter_uuid
            one_tmp[:cluster_name]          = template_ccr_name
            one_tmp[:rp]                    = rp
            one_tmp[:rp_list]               = rp_list
            one_tmp[:template]              = template
            one_tmp[:import_disks_and_nics] = true # By default we import disks and nics

            # Get the host ID of the OpenNebula host which represents the vCenter Cluster
            host_id = nil
            one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
                                                           "TEMPLATE/VCENTER_CCR_REF",
                                                           template_ccr_ref,
                                                           vcenter_uuid)
            host_id    = one_host["ID"]
            cluster_id = one_host["CLUSTER_ID"]
            raise "Could not find the ID of the host associated to the template being imported" if !host_id

            # Get the OpenNebula template hash
            one_tmp[:one] = template_to_one(template, vcenter_uuid, template_ccr_ref, template_ccr_name, import_name, host_id)
            return one_tmp
        rescue
            return nil
        end
    end
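
    # Illustrative use by an importer (a sketch; allocate_one_template is a
    # hypothetical helper, not part of this file):
    #
    #   one_tmp = VCenterDriver::Template.get_xml_template(tmpl, vc_uuid,
    #                                                      vi_client)
    #   next if one_tmp.nil?     # skip templates that failed to resolve
    #   rc = allocate_one_template(one_tmp[:one])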

    # TODO check with uuid
    def self.new_from_ref(ref, vi_client)
        self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
    end

end # class Template

class VirtualMachine < Template
    VM_PREFIX_DEFAULT = "one-$i-"

    POLL_ATTRIBUTE    = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
    VM_STATE          = OpenNebula::VirtualMachine::Driver::VM_STATE

    VM_SHUTDOWN_TIMEOUT = 600 # 10 minutes until hard poweroff

    attr_accessor :item

    attr_accessor :vm_info

    include Memoize

    def initialize(item=nil, vi_client=nil)
        @item = item
        @vi_client = vi_client
        @locking = true
        @vm_info = nil
    end

    ############################################################################
    ############################################################################

    # Attributes that must be defined when the VM does not exist in vCenter
    attr_accessor :vi_client

    # These have their own getter (if they aren't set, we can set them
    # dynamically)
    attr_writer :one_item
    attr_writer :host
    attr_writer :target_ds_ref

    ############################################################################
    ############################################################################

    # The OpenNebula VM
    # @return OpenNebula::VirtualMachine or XMLElement
    def one_item
        if !@one_item
            vm_id = get_vm_id

            raise "Unable to find vm_id." if vm_id.nil?

            @one_item = VIHelper.one_item(OpenNebula::VirtualMachine, vm_id)
        end

        @one_item
    end

    # The OpenNebula host
    # @return OpenNebula::Host or XMLElement
    def host
        if @host.nil?
            if one_item.nil?
                raise "'one_item' must be previously set to be able to " <<
                      "access the OpenNebula host."
            end

            host_id = one_item["HISTORY_RECORDS/HISTORY[last()]/HID"]
            raise "No valid host_id found." if host_id.nil?

            @host = VIHelper.one_item(OpenNebula::Host, host_id)
        end

        @host
    end

    # Target Datastore VMware reference getter
    # @return String the VCENTER_DS_REF of the target datastore
    def target_ds_ref
        if @target_ds_ref.nil?
            if one_item.nil?
                raise "'one_item' must be previously set to be able to " <<
                      "access the target Datastore."
            end

            target_ds_id = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
            raise "No valid target_ds_id found." if target_ds_id.nil?

            target_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore,
                                                         target_ds_id)

            @target_ds_ref = target_ds['TEMPLATE/VCENTER_DS_REF']
        end

        @target_ds_ref
    end

    # Cached cluster
    # @return ClusterComputeResource
    def cluster
        if @cluster.nil?
            ccr_ref = host['TEMPLATE/VCENTER_CCR_REF']
            @cluster = ClusterComputeResource.new_from_ref(ccr_ref, vi_client)
        end

        @cluster
    end

    ############################################################################
    ############################################################################

    # @return Boolean true if no OpenNebula VM references this vCenter VM yet
    def is_new?
        !get_vm_id
    end

    # @return String the ID of the OpenNebula VM whose DEPLOY_ID matches
    # this vCenter ref, or nil if none is found
    def get_vm_id
        vm_ref = self['_ref']
        return nil if !vm_ref

        vc_uuid = get_vcenter_instance_uuid

        one_vm = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualMachinePool,
                                                     "DEPLOY_ID",
                                                     vm_ref,
                                                     vc_uuid)
        return nil if !one_vm

        return one_vm["ID"]
    end

    def get_vcenter_instance_uuid
        @vi_client.vim.serviceContent.about.instanceUuid
    end

    def get_unmanaged_keys
        unmanaged_keys = {}
        @item.config.extraConfig.each do |val|
            if val[:key].include?("opennebula.disk")
                unmanaged_keys[val[:key]] = val[:value]
            end
        end
        return unmanaged_keys
    end
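
    # get_unmanaged_keys returns a hash mapping OpenNebula disk ids to the
    # vCenter device keys stored in extraConfig, e.g. (illustrative):
    #
    #   { "opennebula.disk.0" => "2000", "opennebula.disk.1" => "2001" }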

    ############################################################################
    # Getters
    ############################################################################

    # @return RbVmomi::VIM::ResourcePool
    def get_rp

        req_rp = one_item['VCENTER_RESOURCE_POOL'] ||
                 one_item['USER_TEMPLATE/VCENTER_RESOURCE_POOL']

        # Get the ref for req_rp
        rp_list    = cluster.get_resource_pool_list
        req_rp_ref = rp_list.select { |rp| rp[:name] == req_rp }.first[:ref] rescue nil

        if vi_client.rp_confined?
            if req_rp_ref && req_rp_ref != vi_client.rp._ref
                raise "Available resource pool [#{vi_client.rp.name}] in host"\
                      " does not match requested resource pool"\
                      " [#{req_rp}]"
            end

            return vi_client.rp
        else
            if req_rp_ref
                rps = cluster.resource_pools.select{|r| r._ref == req_rp_ref }

                if rps.empty?
                    raise "No matching resource pool found (#{req_rp})."
                else
                    return rps.first
                end
            else
                return cluster['resourcePool']
            end
        end
    end

    # @return RbVmomi::VIM::Datastore or nil
    def get_ds
        current_ds_id  = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
        current_ds     = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, current_ds_id)
        current_ds_ref = current_ds['TEMPLATE/VCENTER_DS_REF']

        if current_ds_ref
            dc = cluster.get_dc

            ds_folder = dc.datastore_folder
            ds = ds_folder.get(current_ds_ref)
            ds_item = ds.item rescue nil

            return ds_item
        else
            return nil
        end
    end

    # StorageResourceManager reference
    def get_sm
        self['_connection.serviceContent.storageResourceManager']
    end

    # @return the customization spec, or nil if none is requested
    def get_customization
        xpath = "USER_TEMPLATE/VCENTER_CUSTOMIZATION_SPEC"
        customization_spec = one_item[xpath]

        if customization_spec.nil?
            return nil
        end

        begin
            custom_spec = vi_client.vim
                            .serviceContent
                            .customizationSpecManager
                            .GetCustomizationSpec(:name => customization_spec)

            if custom_spec && (spec = custom_spec.spec)
                return spec
            else
                raise "Error getting customization spec"
            end
        rescue
            raise "Customization spec '#{customization_spec}' not found"
        end
    end

    # @return VCenterDriver::Storage datastore where the disk will live under
    def get_effective_ds(disk)
        if disk["PERSISTENT"] == "YES"
            ds_ref = disk["VCENTER_DS_REF"]
        else
            ds_ref = target_ds_ref

            if ds_ref.nil?
                raise "target_ds_ref must be defined on this object."
            end
        end

        VCenterDriver::Storage.new_from_ref(ds_ref, vi_client)
    end

    # @return String the name of the VM in vCenter
    def get_vcenter_name
        vm_prefix = host['TEMPLATE/VM_PREFIX']
        vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty?
        vm_prefix.gsub!("$i", one_item['ID'])

        vm_prefix + one_item['NAME']
    end

    ############################################################################
    # Create and reconfigure VM related methods
    ############################################################################

    # This function creates a new VM from the @one_item XML and returns the
    # VMware ref
    # @param one_item OpenNebula::VirtualMachine
    # @param vi_client VCenterDriver::VIClient
    # @return String vmware ref
    def clone_vm(one_item, vi_client)
        @one_item = one_item
        @vi_client = vi_client

        vcenter_name = get_vcenter_name

        vc_template_ref = one_item['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
        vc_template = RbVmomi::VIM::VirtualMachine(vi_client.vim, vc_template_ref)

        ds = get_ds

        # Default disk move type (Full Clone)
        disk_move_type = :moveAllDiskBackingsAndDisallowSharing

        if ds.instance_of? RbVmomi::VIM::Datastore
            use_linked_clones = one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES']
            if use_linked_clones && use_linked_clones.downcase == "yes"
                # Check if all disks in the template have delta disks
                disks = vc_template.config
                                .hardware.device.grep(RbVmomi::VIM::VirtualDisk)

                disks_no_delta = disks.select { |d| d.backing.parent.nil? }

                # Linked clones can be used only if all disks have delta disks
                if disks_no_delta.size == 0
                    disk_move_type = :moveChildMostDiskBacking
                end
            end
        end

        spec_hash = spec_hash_clone(disk_move_type)

        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(spec_hash)

        # Specify the vm folder in vSphere's VM and Templates view F#4823
        vcenter_vm_folder = one_item["USER_TEMPLATE/VCENTER_VM_FOLDER"]
        vcenter_vm_folder_object = nil
        dc = cluster.get_dc
        if vcenter_vm_folder && !vcenter_vm_folder.empty?
            vcenter_vm_folder_object = dc.item.find_folder(vcenter_vm_folder)
        end
        vcenter_vm_folder_object = vc_template.parent if vcenter_vm_folder_object.nil?

        if ds.instance_of? RbVmomi::VIM::StoragePod
            # VM is cloned using the Storage Resource Manager for StoragePods
            begin
                vm = storagepod_clonevm_task(vc_template, vcenter_name,
                                             clone_spec, ds, vcenter_vm_folder_object, dc)
            rescue Exception => e
                raise "Cannot clone VM Template to StoragePod: #{e.message}"
            end
        else
            vm = nil
            begin
                vm = vc_template.CloneVM_Task(
                    :folder => vcenter_vm_folder_object,
                    :name   => vcenter_name,
                    :spec   => clone_spec).wait_for_completion
            rescue Exception => e
                if !e.message.start_with?('DuplicateName')
                    raise "Cannot clone VM Template: #{e.message}\n#{e.backtrace}"
                end

                vm_folder = dc.vm_folder
                vm_folder.fetch!
                vm = vm_folder.items
                        .select{|k,v| v.item.name == vcenter_name}
                        .values.first.item rescue nil

                if vm
                    # Detach all persistent disks to avoid accidental destruction
                    detach_persistent_disks(vm)

                    vm.Destroy_Task.wait_for_completion
                    vm = vc_template.CloneVM_Task(
                        :folder => vcenter_vm_folder_object,
                        :name   => vcenter_name,
                        :spec   => clone_spec).wait_for_completion
                else
                    raise "Cannot clone VM Template"
                end
            end
        end

        # @item is populated
        @item = vm

        return self['_ref']
    end
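
    # Illustrative deployment call (a sketch of how the VMM action scripts
    # might drive this method; drv_action is the OpenNebula VM XML object):
    #
    #   vm = VCenterDriver::VirtualMachine.new
    #   deploy_id = vm.clone_vm(drv_action, vi_client)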

    def storagepod_clonevm_task(vc_template, vcenter_name, clone_spec, storpod, vcenter_vm_folder_object, dc)

        storage_manager = vc_template
                            ._connection.serviceContent.storageResourceManager

        storage_spec = RbVmomi::VIM.StoragePlacementSpec(
            type: 'clone',
            cloneName: vcenter_name,
            folder: vcenter_vm_folder_object,
            podSelectionSpec: RbVmomi::VIM.StorageDrsPodSelectionSpec(storagePod: storpod),
            vm: vc_template,
            cloneSpec: clone_spec
        )

        # Query a storage placement recommendation
        result = storage_manager
                    .RecommendDatastores(storageSpec: storage_spec) rescue nil

        raise "Could not get placement specification for StoragePod" if result.nil?

        if !result.respond_to?(:recommendations) || result.recommendations.size == 0
            raise "Could not get placement specification for StoragePod"
        end

        # Get the recommendation key to be applied
        key = result.recommendations.first.key || ''
        raise "Missing Datastore recommendation for StoragePod" if key.empty?

        begin
            apply_sr = storage_manager
                            .ApplyStorageDrsRecommendation_Task(key: [key])
                            .wait_for_completion
            return apply_sr.vm
        rescue Exception => e
            if !e.message.start_with?('DuplicateName')
                raise "Cannot clone VM Template: #{e.message}\n#{e.backtrace}"
            end

            # The VM already exists, try to find it
            vm_folder = dc.vm_folder
            vm_folder.fetch!
            vm = vm_folder.items
                    .select{|k,v| v.item.name == vcenter_name}
                    .values.first.item rescue nil

            if vm

                begin
                    # Detach all persistent disks to avoid accidental destruction
                    detach_persistent_disks(vm)

                    # Destroy the VM with any disks still attached to it
                    vm.Destroy_Task.wait_for_completion

                    # Query a storage placement recommendation
                    result = storage_manager.RecommendDatastores(storageSpec: storage_spec) rescue nil

                    raise "Could not get placement specification for StoragePod" if result.nil?

                    if !result.respond_to?(:recommendations) || result.recommendations.size == 0
                        raise "Could not get placement specification for StoragePod"
                    end

                    # Get the recommendation key to be applied
                    key = result.recommendations.first.key || ''
                    raise "Missing Datastore recommendation for StoragePod" if key.empty?

                    apply_sr = storage_manager
                            .ApplyStorageDrsRecommendation_Task(key: [key])
                            .wait_for_completion
                    return apply_sr.vm
                rescue Exception => e
                    raise "Failure applying recommendation while cloning VM: #{e.message}"
                end
            end
        end
    end

    # @return Hash with the parameters for the clone spec
    def spec_hash_clone(disk_move_type)
        # Relocate spec
        relocate_spec_params = {}

        relocate_spec_params[:pool] = get_rp
        relocate_spec_params[:diskMoveType] = disk_move_type

        ds = get_ds

        relocate_spec_params[:datastore] = ds if ds.instance_of? RbVmomi::VIM::Datastore

        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
                                                         relocate_spec_params)

        # Running flag - prevents spurious poweroff states in the VM
        running_flag = [{ :key => "opennebula.vm.running", :value => "no"}]

        running_flag_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
            { :extraConfig => running_flag }
        )

        clone_parameters = {
            :location => relocate_spec,
            :powerOn  => false,
            :template => false,
            :config   => running_flag_spec
        }

        cs = get_customization
        clone_parameters[:customization] = cs if cs

        clone_parameters
    end

    def reference_unmanaged_devices(template_ref)

        extraconfig   = []
        device_change = []

        # Get unmanaged disks in OpenNebula's VM template
        xpath = "TEMPLATE/DISK[OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\"]"
        unmanaged_disks = one_item.retrieve_xmlelements(xpath)

        if !unmanaged_disks.empty?

            # Get vcenter VM disks to know the real path of the cloned disk
            vcenter_disks = get_vcenter_disks

            # Create an array with the paths of the disks in the vcenter template
            template = VCenterDriver::Template.new_from_ref(template_ref, vi_client)
            template_disks = template.get_vcenter_disks
            template_disks_vector = []
            template_disks.each do |d|
                template_disks_vector << d[:path_wo_ds]
            end

            # Try to find the index of each disk in the template disks
            unmanaged_disks.each do |unmanaged_disk|
                index = template_disks_vector.index(unmanaged_disk["SOURCE"])
                if index
                    reference = {}
                    reference[:key]   = "opennebula.disk.#{unmanaged_disk["DISK_ID"]}"
                    reference[:value] = "#{vcenter_disks[index][:key]}"
                    extraconfig << reference
                end
            end
        end

        # Add info for existing nics in the template to the vm xml
        xpath = "TEMPLATE/NIC[OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\"]"
        unmanaged_nics = one_item.retrieve_xmlelements(xpath)

        if !unmanaged_nics.empty?
            index = 0
            self["config.hardware.device"].each do |device|
                if is_nic?(device)
                    # Set the MAC address defined in the OpenNebula NIC
                    device.macAddress = unmanaged_nics[index]["MAC"]
                    device_change << { :device => device, :operation => :edit }
                    index += 1
                end
            end
        end

        # Save the keys for unmanaged disks in extraconfig
        if !extraconfig.empty? || !device_change.empty?
            spec = {}
            spec[:extraConfig]  = extraconfig if !extraconfig.empty?
            spec[:deviceChange] = device_change if !device_change.empty?
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        end
    end

    def resize_unmanaged_disks
        resize_hash = {}
        disks = []

        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Look for unmanaged disks whose original size has changed
        xpath = "TEMPLATE/DISK[(OPENNEBULA_MANAGED=\"NO\" or OPENNEBULA_MANAGED=\"no\") and boolean(ORIGINAL_SIZE) and ORIGINAL_SIZE != SIZE]"
        unmanaged_resized_disks = one_item.retrieve_xmlelements(xpath)

        return if unmanaged_resized_disks.empty?

        unmanaged_resized_disks.each do |disk|
            vc_disks.each do |vcenter_disk|
                if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                    device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i

                    if device_key == vcenter_disk[:key].to_i

                        break if disk["SIZE"].to_i <= disk["ORIGINAL_SIZE"].to_i

                        # Edit capacity setting the new size in KB
                        d = vcenter_disk[:device]
                        d.capacityInKB = disk["SIZE"].to_i * 1024
                        disks << { :device => d, :operation => :edit }
                        break
                    end
                end
            end
        end

        if !disks.empty?
            resize_hash[:deviceChange] = disks
            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
        end
    end
1365

    
1366
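    # Places the given volatile disks through Storage DRS: asks the
    # StorageResourceManager for a placement recommendation, applies it,
    # and returns the extraConfig entries (opennebula.disk.<ID> keys)
    # referencing the created devices.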
    def create_storagedrs_disks(device_change_spod, device_change_spod_ids)

        sm = get_sm
        disk_locator = []
        extra_config = []

        device_change_spod.each do |device_spec|
            disk_locator << RbVmomi::VIM.PodDiskLocator(diskId: device_spec[:device].key)
        end

        spec = {}
        spec[:deviceChange] = device_change_spod

        # Disk locator is required for AddDisk
        vmpod_hash = {}
        vmpod_hash[:storagePod] = get_ds
        vmpod_hash[:disk] = disk_locator
        vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)

        # The storage pod selection requires initialize
        spod_hash = {}
        spod_hash[:initialVmConfig] = [ vmpod_config ]
        spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
        storage_spec = RbVmomi::VIM.StoragePlacementSpec(
            type: :reconfigure,
            podSelectionSpec: spod_select,
            vm: self['_ref'],
            configSpec: spec
        )

        # Query a storage placement recommendation
        result = sm.RecommendDatastores(storageSpec: storage_spec) rescue nil

        raise "Could not get placement specification for StoragePod" if result.nil?

        if !result.respond_to?(:recommendations) || result.recommendations.size == 0
            raise "Could not get placement specification for StoragePod"
        end

        # Get recommendation key to be applied
        key = result.recommendations.first.key || ''
        raise "Missing Datastore recommendation for StoragePod" if key.empty?

        # Apply recommendation
        sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion

        # Set references in opennebula.disk elements
        device_change_spod.each do |device_spec|
            unit_number    = device_spec[:device].unitNumber
            controller_key = device_spec[:device].controllerKey
            key            = get_vcenter_disk_key(unit_number, controller_key)
            disk_id        = device_change_spod_ids["#{controller_key}-#{unit_number}"]
            reference      = {}
            reference[:key]   = "opennebula.disk.#{disk_id}"
            reference[:value] = key.to_s
            extra_config << reference
        end

        extra_config
    end

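    # Synchronizes the vCenter VM with OpenNebula's template: detaches
    # disks removed from the template, applies CPU/memory, context and
    # VNC extraConfig, and attaches pending NICs and disks in a final
    # ReconfigVM_Task.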
    def reconfigure
        extraconfig   = []
        device_change = []

        # Unmanaged keys
        unmanaged_keys = get_unmanaged_keys

        # Get disk devices in vm
        vc_disks = get_vcenter_disks

        # Get an array with disk paths in OpenNebula's vm template
        disks_in_onevm_vector = disks_in_onevm(unmanaged_keys, vc_disks)

        # As the original template may have been modified in OpenNebula
        # but not in vcenter, we must detach disks that are in vcenter
        # but not in OpenNebula's vm template
        if is_new?
            device_change, extra_config = device_detach_disks(disks_in_onevm_vector, unmanaged_keys, vc_disks)
            if !device_change.empty?
                spec_hash = {}
                spec_hash[:deviceChange] = device_change if !device_change.empty?
                spec_hash[:extraConfig]  = extra_config  if !extra_config.empty?

                # Reconfigure for disks detached from original template
                spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
                @item.ReconfigVM_Task(:spec => spec).wait_for_completion

                # Get disk devices in vm again after reconfigure
                vc_disks = get_vcenter_disks
            end
        end

        # Now reconfigure disks, nics and extraconfig for the VM
        device_change = []

        # get token and context
        extraconfig += extraconfig_context

        # vnc configuration (for config_array hash)
        extraconfig += extraconfig_vnc

        # Set CPU, memory and extraconfig
        num_cpus = one_item["TEMPLATE/VCPU"] || 1

        spec_hash = {
            :numCPUs      => num_cpus.to_i,
            :memoryMB     => one_item["TEMPLATE/MEMORY"],
            :extraConfig  => extraconfig
        }

        # device_change hash (nics)
        device_change += device_change_nics

        # Now attach disks that are in OpenNebula's template but not in
        # vcenter, e.g. those that have been attached in poweroff
        device_change_ds, device_change_spod, device_change_spod_ids = device_attach_disks(disks_in_onevm_vector, vc_disks)
        device_change += device_change_ds

        # Create volatile disks in StorageDRS if any
        if !device_change_spod.empty?
            spec_hash[:extraConfig] = create_storagedrs_disks(device_change_spod, device_change_spod_ids)
        end

        # Common reconfigure task
        spec_hash[:deviceChange] = device_change
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    end

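    # Builds the guestinfo.opennebula.context extraConfig entry with the
    # Base64-encoded context variables (plus the OneGate token when a
    # token.txt file exists for the VM).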
    def extraconfig_context
        context_text = "# Context variables generated by OpenNebula\n"
        one_item.each('TEMPLATE/CONTEXT/*') do |context_element|
            # next if !context_element.text
            # The block form of gsub is used so that "\\'" is inserted
            # literally instead of being expanded as the post-match
            # back-reference of the replacement string
            context_text += context_element.name + "='" +
                            context_element.text.gsub("'") { "\\'" } + "'\n"
        end

        # token
        token = File.read(File.join(VAR_LOCATION,
                        'vms',
                        one_item['ID'],
                        'token.txt')).chomp rescue nil

        context_text += "ONEGATE_TOKEN='#{token}'\n" if token

        # context_text
        [
            { :key => "guestinfo.opennebula.context",
              :value => Base64.encode64(context_text) }
        ]
    end

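    # Returns the remotedisplay.vnc.* extraConfig entries when the
    # OpenNebula template defines a GRAPHICS section; an empty array
    # otherwise. For instance, PORT=5905 produces:
    #   { :key => "remotedisplay.vnc.port", :value => "5905" }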
    def extraconfig_vnc
        if one_item["TEMPLATE/GRAPHICS"]
            vnc_port   = one_item["TEMPLATE/GRAPHICS/PORT"]
            vnc_listen = one_item["TEMPLATE/GRAPHICS/LISTEN"] || "0.0.0.0"
            vnc_keymap = one_item["TEMPLATE/GRAPHICS/KEYMAP"]

            conf = [ {:key => "remotedisplay.vnc.enabled",:value => "TRUE"},
                     {:key => "remotedisplay.vnc.port",   :value => vnc_port},
                     {:key => "remotedisplay.vnc.ip",     :value => vnc_listen}]

            conf += [{:key => "remotedisplay.vnc.keymap",
                      :value => vnc_keymap}] if vnc_keymap

            conf
        else
            []
        end
    end

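    # Computes the NIC part of :deviceChange by diffing the MAC addresses
    # in OpenNebula's template against the NICs present in vCenter: NICs
    # only in vCenter are removed (detached in poweroff), NICs only in
    # the template are added via calculate_add_nic_spec.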
    def device_change_nics
        # Final list of changes to be applied in vCenter
        device_change = []

        # Hash of interfaces from the OpenNebula xml
        nics_in_template = {}
        xpath = "TEMPLATE/NIC"
        one_item.each(xpath) do |nic|
            nics_in_template[nic["MAC"]] = nic
        end

        # Check nics in VM
        self["config.hardware.device"].each do |dv|
            if is_nic?(dv)
                if nics_in_template.key?(dv.macAddress)
                    # Remove nic that is already in the XML to avoid duplicate
                    nics_in_template.delete(dv.macAddress)
                else
                    # B4897 - It was detached in poweroff, remove it from VM
                    device_change << {
                        :operation => :remove,
                        :device    => dv
                    }
                end
            end
        end

        # Attach new nics (nics_in_template now contains only the interfaces
        # not present in the VM in vCenter)
        nics_in_template.each do |key, nic|
            device_change << calculate_add_nic_spec(nic)
        end

        return device_change
    end

    # Regenerate context when devices are hot plugged (reconfigure)
    def regenerate_context
        spec_hash = { :extraConfig  => extraconfig_context }
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

        begin
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        rescue Exception => e
            raise "Cannot regenerate context for VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Returns an array of actions to be included in :deviceChange
    def calculate_add_nic_spec(nic)

        mac       = nic["MAC"]
        pg_name   = nic["BRIDGE"]
        model     = nic["VCENTER_NET_MODEL"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/MODEL")
        vnet_ref  = nic["VCENTER_NET_REF"]
        backing   = nil

        limit_in  = nic["INBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_PEAK_BW")
        limit_out = nic["OUTBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW")
        limit     = nil

        if limit_in && limit_out
            limit = ([limit_in.to_i, limit_out.to_i].min / 1024) * 8
        end

        rsrv_in  = nic["INBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_AVG_BW")
        rsrv_out = nic["OUTBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_AVG_BW")
        rsrv     = nil

        if rsrv_in || rsrv_out
            rsrv = ([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
        end

        network = self["runtime.host"].network.select do |n|
            n._ref == vnet_ref || n.name == pg_name
        end

        network = network.first

        card_num = 1 # start in one, we want the next available id

        @item["config.hardware.device"].each do |dv|
            card_num += 1 if is_nic?(dv)
        end

        nic_card = case model
                   when "virtuale1000", "e1000"
                       RbVmomi::VIM::VirtualE1000
                   when "virtuale1000e", "e1000e"
                       RbVmomi::VIM::VirtualE1000e
                   when "virtualpcnet32", "pcnet32"
                       RbVmomi::VIM::VirtualPCNet32
                   when "virtualsriovethernetcard", "sriovethernetcard"
                       RbVmomi::VIM::VirtualSriovEthernetCard
                   when "virtualvmxnetm", "vmxnetm"
                       RbVmomi::VIM::VirtualVmxnetm
                   when "virtualvmxnet2", "vmnet2"
                       RbVmomi::VIM::VirtualVmxnet2
                   when "virtualvmxnet3", "vmxnet3"
                       RbVmomi::VIM::VirtualVmxnet3
                   else # If none matches, use VirtualE1000
                       RbVmomi::VIM::VirtualE1000
                   end

        if network.class == RbVmomi::VIM::Network
            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
                        :deviceName => pg_name,
                        :network    => network)
        else
            port    = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
                        :switchUuid =>
                                network.config.distributedVirtualSwitch.uuid,
                        :portgroupKey => network.key)
            backing =
              RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
                 :port => port)
        end

        card_spec = {
            :key => 0,
            :deviceInfo => {
                :label => "net" + card_num.to_s,
                :summary => pg_name
            },
            :backing     => backing,
            :addressType => mac ? 'manual' : 'generated',
            :macAddress  => mac
        }

        # Guard against nil values: limit is only set when both peak
        # values exist, and rsrv may be missing independently
        if (limit && limit > 0) || (rsrv && rsrv > 0)
            ra_spec = {}
            rsrv = limit if limit && rsrv && rsrv > limit
            ra_spec[:limit] = limit if limit
            ra_spec[:reservation] = rsrv if rsrv
            ra_spec[:share] =  RbVmomi::VIM.SharesInfo({
                    :level => RbVmomi::VIM.SharesLevel("normal"),
                    :shares => 0
                })
            card_spec[:resourceAllocation] =
               RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
        end

        {
            :operation => :add,
            :device    => nic_card.new(card_spec)
        }
    end

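    # calculate_add_nic_spec returns a hash ready to be appended to
    # :deviceChange, e.g. (illustrative values):
    #   { :operation => :add, :device => RbVmomi::VIM::VirtualVmxnet3(...) }
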
    # Add NIC to VM
    def attach_nic
        spec_hash = {}
        nic = nil

        # Extract nic from driver action
        nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first

        begin
            # A new NIC requires a vcenter spec
            attach_nic_array = []
            attach_nic_array << calculate_add_nic_spec(nic)
            spec_hash[:deviceChange] = attach_nic_array if !attach_nic_array.empty?

            # Reconfigure VM
            spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
        rescue Exception => e
            raise "Cannot attach NIC to VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach NIC from VM
    def detach_nic
        spec_hash = {}
        nic = nil

        # Extract nic from driver action
        nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first
        mac = nic["MAC"]

        # Get VM nic element if it has a device with that mac
        nic_device = @item["config.hardware.device"].find do |device|
            is_nic?(device) && (device.macAddress == mac)
        end rescue nil

        return if nic_device.nil? # Silently ignore if nic is not found

        # Remove NIC from VM in the ReconfigVM_Task
        spec_hash[:deviceChange] = [{
                :operation => :remove,
                :device    => nic_device }]

        begin
            @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach NIC from VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach all NICs; useful when removing port groups and switches so
    # they are no longer in use
    def detach_all_nics
        spec_hash = {}
        device_change = []

        @item["config.hardware.device"].each do |device|
            if is_nic?(device)
                device_change << {:operation => :remove, :device => device}
            end
        end

        # Remove NICs from VM in the ReconfigVM_Task
        spec_hash[:deviceChange] = device_change

        begin
            @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach all NICs from VM: #{e.message}\n#{e.backtrace}"
        end
    end

    def get_device_filename_and_ds_from_key(key, vc_disks)
        device = vc_disks.select{ |d| d[:key].to_i == key.to_i}.first rescue nil
        return device
    end

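    # Returns an array with the datastore-relative paths of the disks in
    # OpenNebula's VM template, resolving unmanaged disks through their
    # opennebula.disk.<ID> device keys.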
    def disks_in_onevm(unmanaged_keys, vc_disks)
        onevm_disks_vector = []

        disks = one_item.retrieve_xmlelements("TEMPLATE/DISK")
        disks.each do |disk|
            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i
                disk_hash = get_device_filename_and_ds_from_key(device_key, vc_disks)
                onevm_disks_vector << disk_hash[:path_wo_ds] if disk_hash
                next
            end

            img_name  = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'])
            onevm_disks_vector << img_name
        end

        return onevm_disks_vector
    end

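    # Builds the specs for disks present in OpenNebula's template but not
    # yet in vCenter. Returns three values: specs for regular datastores,
    # specs for StoragePods, and a hash mapping
    # "<controllerKey>-<unitNumber>" to the OpenNebula DISK_ID.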
    def device_attach_disks(onevm_disks_vector, vc_disks)

        disks = one_item.retrieve_xmlelements("TEMPLATE/DISK")

        # Disks already present in vCenter need no spec
        vc_disks.each do |d|
            index = onevm_disks_vector.index(d[:path_wo_ds])
            if index
                disks.delete_at(index)
                onevm_disks_vector.delete_at(index)
            end
        end

        return [],[],{} if disks.empty?

        attach_disk_array = []
        attach_spod_array = []
        attach_spod_disk_info = {}

        position = 0
        disks.each do |disk|
            storpod = disk["VCENTER_DS_REF"].start_with?('group-')
            if storpod
                spec = calculate_add_disk_spec(disk, position)
                attach_spod_array << spec
                unit_ctrl = "#{spec[:device].controllerKey}-#{spec[:device].unitNumber}"
                attach_spod_disk_info[unit_ctrl] = disk["DISK_ID"]
            else
                attach_disk_array << calculate_add_disk_spec(disk, position)
            end

            position += 1
        end

        return attach_disk_array, attach_spod_array, attach_spod_disk_info
    end

    def device_detach_disks(onevm_disks_vector, unmanaged_keys, vc_disks)
        detach_disk_array = []
        extra_config      = []
        ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
        if ipool.respond_to?(:message)
            raise "Could not get OpenNebula ImagePool: #{ipool.message}"
        end

        vc_disks.each do |d|
            if !onevm_disks_vector.index(d[:path_wo_ds])

                # If the disk to be detached is not persistent, detach and destroy it
                persistent = VCenterDriver::VIHelper.find_persistent_image_by_source(d[:path_wo_ds], ipool)
                if !persistent
                    detach_disk_array << {
                        :fileOperation => :destroy,
                        :operation => :remove,
                        :device    => d[:device]
                    }
                end

                # Remove the opennebula.disk reference if it exists
                unmanaged_keys.each do |key, value|
                    if value.to_i == d[:key].to_i
                        reference = {}
                        reference[:key]   = key
                        reference[:value] = ""
                        extra_config << reference
                        break
                    end
                end
            end
        end

        return detach_disk_array, extra_config
    end

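    # For StoragePod-backed disks the attach below is driven through
    # Storage DRS: RecommendDatastores is queried with the AddDisk spec,
    # the recommendation is applied, and the resulting device key is then
    # stored as opennebula.disk.<DISK_ID> in extraConfig.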
    # Attach DISK to VM (hotplug)
    def attach_disk
        # TODO position? and disk size for volatile?

        spec_hash = {}
        disk = nil
        device_change = []

        # Extract unmanaged_keys
        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Extract disk from driver action
        disk = one_item.retrieve_xmlelements("TEMPLATE/DISK[ATTACH='YES']").first

        # Check if we're dealing with a StoragePod SYSTEM ds
        storpod = disk["VCENTER_DS_REF"].start_with?('group-')

        # Check if disk being attached is already connected to the VM
        raise "DISK is already connected to VM" if disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        # Generate vCenter spec and reconfigure VM
        device_change << calculate_add_disk_spec(disk)
        raise "Could not generate DISK spec" if device_change.empty?

        spec_hash[:deviceChange] = device_change
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

        begin
            if storpod
                # Ask for StorageDRS recommendation to reconfigure VM (AddDisk)
                sm = get_sm

                # Disk id is -1 as the id the disk will get is not known yet
                disk_locator = [ RbVmomi::VIM.PodDiskLocator(diskId: -1) ]

                # Disk locator is required for AddDisk
                vmpod_hash = {}
                vmpod_hash[:storagePod] = get_ds
                vmpod_hash[:disk] = disk_locator
                vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)

                # The storage pod selection requires initialize
                spod_hash = {}
                spod_hash[:initialVmConfig] = [ vmpod_config ]
                spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
                storage_spec = RbVmomi::VIM.StoragePlacementSpec(
                    type: :reconfigure,
                    podSelectionSpec: spod_select,
                    vm: self['_ref'],
                    configSpec: spec
                )

                # Query a storage placement recommendation
                result = sm.RecommendDatastores(storageSpec: storage_spec) rescue nil

                raise "Could not get placement specification for StoragePod" if result.nil?

                if !result.respond_to?(:recommendations) || result.recommendations.size == 0
                    raise "Could not get placement specification for StoragePod"
                end

                # Get recommendation key to be applied
                key = result.recommendations.first.key || ''
                raise "Missing Datastore recommendation for StoragePod" if key.empty?

                # Apply recommendation
                sm.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion

                # Add the key for the volatile disk to the unmanaged
                # opennebula.disk.id variables
                unit_number    = spec_hash[:deviceChange][0][:device].unitNumber
                controller_key = spec_hash[:deviceChange][0][:device].controllerKey
                key = get_vcenter_disk_key(unit_number, controller_key)
                spec_hash = {}
                reference = {}
                reference[:key]   = "opennebula.disk.#{disk["DISK_ID"]}"
                reference[:value] = key.to_s
                spec_hash[:extraConfig] = [ reference ]
                @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
            else
                @item.ReconfigVM_Task(:spec => spec).wait_for_completion
            end
        rescue Exception => e
            raise "Cannot attach DISK to VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Detach persistent disks to avoid their accidental destruction
    def detach_persistent_disks(vm)
        spec_hash = {}
        spec_hash[:deviceChange] = []
        ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
        if ipool.respond_to?(:message)
            raise "Could not get OpenNebula ImagePool: #{ipool.message}"
        end

        vm.config.hardware.device.each do |disk|
            if is_disk_or_cdrom?(disk)
                # Find out whether the disk is persistent
                source = disk.backing.fileName.sub(/^\[(.*?)\] /, "")
                persistent = VCenterDriver::VIHelper.find_persistent_image_by_source(source, ipool)
                if persistent
                    spec_hash[:deviceChange] << {
                        :operation => :remove,
                        :device => disk
                    }
                end
            end
        end

        return nil if spec_hash[:deviceChange].empty?

        begin
            vm.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot detach persistent DISKs from VM: #{e.message}\n#{e.backtrace}"
        end
    end

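    # Returns the datastore ref and the datastore-relative image path of
    # the detached disk so the caller can clean up the file afterwards.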
    # Detach DISK from VM
    def detach_disk(disk)
        spec_hash = {}
        img_path = ""
        ds_ref = nil

        # Extract unmanaged disk keys
        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        # Get vcenter device to be detached and remove if found
        device = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        if device
            img_path << device[:path_wo_ds]

            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                reference = {}
                reference[:key]   = "opennebula.disk.#{disk["DISK_ID"]}"
                reference[:value] = ""
                spec_hash[:extraConfig] = [ reference ]
            end

            ds_ref = device[:datastore]._ref

            # Generate vCenter spec and reconfigure VM
            spec_hash[:deviceChange] = [{
                :operation => :remove,
                :device => device[:device]
            }]

            begin
                @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
            rescue Exception => e
                raise "Cannot detach DISK from VM: #{e.message}\n#{e.backtrace}"
            end
        end

        return ds_ref, img_path
    end

    # Get vcenter device representing DISK object (hotplug)
    def disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        img_name = ""
        device_found = nil
        disk_id = disk["DISK_ID"]

        vc_disks.each do |d|
            # Check if we are dealing with an unmanaged disk that was
            # present in the template when it was cloned
            if unmanaged_keys.key?("opennebula.disk.#{disk_id}") && d[:key] == unmanaged_keys["opennebula.disk.#{disk_id}"].to_i
                device_found = d
                break
            end

            # Otherwise, try to match the device by its expected image name
            img_name  = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'])
            if d[:path_wo_ds] == img_name
                device_found = d
                break
            end
        end

        return device_found
    end

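    # Builds the :deviceChange entry for a disk or CDROM. Illustrative
    # shape of the returned spec for a regular disk:
    #   { :operation => :add, :device => RbVmomi::VIM::VirtualDisk(...) }
    # plus :fileOperation => :create when the target is a StoragePod.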
    def calculate_add_disk_spec(disk, position=0)
        img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'])
        type     = disk["TYPE"]
        size_kb  = disk["SIZE"].to_i * 1024

        if type == "CDROM"
            # CDROM drive will be found in the IMAGE DS
            ds_ref   = disk["VCENTER_DS_REF"]
            ds       = VCenterDriver::Storage.new_from_ref(ds_ref, @vi_client)
            ds_name  = ds['name']

            # CDROM can only be added when the VM is in poweroff state
            vmdk_backing = RbVmomi::VIM::VirtualCdromIsoBackingInfo(
                :datastore => ds.item,
                :fileName  => "[#{ds_name}] #{img_name}"
            )

            if @item["summary.runtime.powerState"] != "poweredOff"
                raise "The CDROM image can only be added as an IDE device "\
                      "when the VM is in the powered off state"
            end

            controller, unit_number = find_free_ide_controller(position)

            device = RbVmomi::VIM::VirtualCdrom(
                :backing       => vmdk_backing,
                :key           => -1,
                :controllerKey => controller.key,
                :unitNumber    => unit_number,

                :connectable => RbVmomi::VIM::VirtualDeviceConnectInfo(
                    :startConnected    => true,
                    :connected         => true,
                    :allowGuestControl => true
                )
            )

            return {
                :operation => :add,
                :device => device
            }

        else
            # TYPE is regular disk (not CDROM)

            controller, unit_number = find_free_controller(position)

            storpod = disk["VCENTER_DS_REF"].start_with?('group-')
            if storpod
                vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                  :diskMode  => 'persistent',
                  :fileName  => ""
                )
            else
                ds           = get_effective_ds(disk)
                ds_name      = ds['name']
                vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                  :datastore => ds.item,
                  :diskMode  => 'persistent',
                  :fileName  => "[#{ds_name}] #{img_name}"
                )
            end

            device = RbVmomi::VIM::VirtualDisk(
              :backing       => vmdk_backing,
              :capacityInKB  => size_kb,
              :controllerKey => controller.key,
              :key           => (-1 - position),
              :unitNumber    => unit_number
            )

            config = {
               :operation => :add,
               :device    => device
            }

            # For StorageDRS vCenter must create the file
            config[:fileOperation] = :create if storpod

            return config
        end
    end

    def resize_unmanaged_disk(disk, new_size)

        resize_hash = {}
        disks       = []
        found       = false

        unmanaged_keys = get_unmanaged_keys
        vc_disks = get_vcenter_disks

        vc_disks.each do |vcenter_disk|
            if unmanaged_keys.key?("opennebula.disk.#{disk["DISK_ID"]}")
                device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i

                if device_key == vcenter_disk[:key].to_i

                    if disk["SIZE"].to_i <= disk["ORIGINAL_SIZE"].to_i
                        raise "Disk size cannot be shrunk."
                    end

                    # Edit capacity setting new size in KB
                    d = vcenter_disk[:device]
                    d.capacityInKB = disk["SIZE"].to_i * 1024
                    disks << { :device => d, :operation => :edit }

                    found = true
                    break
                end
            end
        end

        raise "Unmanaged disk could not be found to apply resize operation." if !found

        if !disks.empty?
            resize_hash[:deviceChange] = disks
            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
        else
            raise "Device was not found after attaching it to VM in poweroff."
        end
    end

    def resize_managed_disk(disk, new_size)

        resize_hash = {}

        unmanaged_keys = get_unmanaged_keys
        vc_disks       = get_vcenter_disks

        # Get vcenter device to be resized
        device         = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

        # If the disk is being attached in poweroff, reconfigure the VM
        if !device
            spec_hash     = {}
            device_change = []

            # Get an array with disk paths in OpenNebula's vm template
            disks_in_onevm_vector = disks_in_onevm(unmanaged_keys, vc_disks)

            device_change_ds, device_change_spod, device_change_spod_ids = device_attach_disks(disks_in_onevm_vector, vc_disks)
            device_change += device_change_ds

            # Create volatile disks in StorageDRS if any
            if !device_change_spod.empty?
                spec_hash[:extraConfig] = create_storagedrs_disks(device_change_spod, device_change_spod_ids)
            end

            # Common reconfigure task
            spec_hash[:deviceChange] = device_change
            spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
            @item.ReconfigVM_Task(:spec => spec).wait_for_completion

            # Check again if device has now been attached
            unmanaged_keys = get_unmanaged_keys
            vc_disks       = get_vcenter_disks
            device         = disk_attached_to_vm(disk, unmanaged_keys, vc_disks)

            if !device
                raise "Device was not found after attaching it to VM in poweroff."
            end
        end

        # Resize disk now that we know that it's part of the VM
        if device
            vcenter_disk = device[:device]
            vcenter_disk.capacityInKB = new_size.to_i * 1024
            resize_hash[:deviceChange] = [{
                :operation => :edit,
                :device => vcenter_disk
            }]

            @item.ReconfigVM_Task(:spec => resize_hash).wait_for_completion
        end
    end

    def has_snapshots?
        self['rootSnapshot'] && !self['rootSnapshot'].empty?
    end

    def instantiated_as_persistent?
        begin
            !!one_item["TEMPLATE/CLONING_TEMPLATE_ID"]
        rescue
            return false # one_item may not be retrieved if deploy_id hasn't been set
        end
    end

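    # Picks an IDE controller with a free unit number for a CDROM device.
    # Returns the controller device and the unit number to use.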
    def find_free_ide_controller(position=0)

        free_ide_controllers = []
        ide_schema           = {}

        used_numbers      = []
        available_numbers = []

        @item["config.hardware.device"].each do |dev|
            if dev.is_a? RbVmomi::VIM::VirtualIDEController
                if ide_schema[dev.key].nil?
                    ide_schema[dev.key] = {}
                end

                ide_schema[dev.key][:device] = dev
            end

            next if dev.class != RbVmomi::VIM::VirtualCdrom
            used_numbers << dev.unitNumber
        end

        2.times do |ide_id|
            available_numbers << ide_id if !used_numbers.include?(ide_id)
        end

        ide_schema.keys.each do |controller|
            free_ide_controllers << ide_schema[controller][:device].deviceInfo.label
        end

        if free_ide_controllers.empty?
            raise "There are no free IDE controllers to connect this CDROM device"
        end

        available_controller_label = free_ide_controllers[0]

        controller = nil

        @item['config.hardware.device'].each do |device|
            if device.deviceInfo.label == available_controller_label
                controller = device
                break
            end
        end

        new_unit_number = available_numbers.sort[position]

        return controller, new_unit_number
    end

    def find_free_controller(position=0)
        free_scsi_controllers = []
        scsi_schema           = {}

        used_numbers      = []
        available_numbers = []

        @item["config.hardware.device"].each do |dev|
            if dev.is_a? RbVmomi::VIM::VirtualSCSIController
                if scsi_schema[dev.key].nil?
                    scsi_schema[dev.key] = {}
                end

                used_numbers << dev.scsiCtlrUnitNumber
                scsi_schema[dev.key][:device] = dev
            end

            next if dev.class != RbVmomi::VIM::VirtualDisk
            used_numbers << dev.unitNumber
        end

        15.times do |scsi_id|
            available_numbers << scsi_id if !used_numbers.include?(scsi_id)
        end

        scsi_schema.keys.each do |controller|
            free_scsi_controllers << scsi_schema[controller][:device].deviceInfo.label
        end

        if free_scsi_controllers.length > 0
            available_controller_label = free_scsi_controllers[0]
        else
            add_new_scsi(scsi_schema)
            return find_free_controller
        end

        controller = nil

        @item['config.hardware.device'].each do |device|
            if device.deviceInfo.label == available_controller_label
                controller = device
                break
            end
        end

        new_unit_number = available_numbers.sort[position]

        return controller, new_unit_number
    end

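    # Adds a new VirtualLsiLogicController to the VM (up to a maximum of
    # 4) and returns its device label.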
    def add_new_scsi(scsi_schema)
        controller = nil

        if scsi_schema.keys.length >= 4
            raise "Cannot add a new controller, maximum is 4."
        end

        scsi_key    = 0
        scsi_number = 0

        if scsi_schema.keys.length > 0 && scsi_schema.keys.length < 4
            scsi_key    = scsi_schema.keys.sort[-1] + 1
            scsi_number = scsi_schema[scsi_schema.keys.sort[-1]][:device].busNumber + 1
        end

        controller_device = RbVmomi::VIM::VirtualLsiLogicController(
            :key       => scsi_key,
            :busNumber => scsi_number,
            :sharedBus => :noSharing
        )

        device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
            :device    => controller_device,
            :operation => :add
        )

        vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
            :deviceChange => [device_config_spec]
        )

        @item.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion

        @item["config.hardware.device"].each do |device|
            if device.class == RbVmomi::VIM::VirtualLsiLogicController &&
                device.key == scsi_key

                controller = device.deviceInfo.label
            end
        end

        return controller
    end

    # Create a snapshot for the VM
    def create_snapshot(snap_id, snap_name)
        snapshot_hash = {
            :name        => snap_id,
            :description => "OpenNebula Snapshot: #{snap_name}",
            :memory      => true,
            :quiesce     => true
        }

        vcenter_version = @vi_client.vim.serviceContent.about.apiVersion rescue nil

        if vcenter_version != "5.5"
            begin
                @item.CreateSnapshot_Task(snapshot_hash).wait_for_completion
            rescue Exception => e
                raise "Cannot create snapshot for VM: #{e.message}\n#{e.backtrace}"
            end
        else
            # B#5045 - If vcenter is 5.5 the snapshot may take longer than
            # 15 minutes and it does not report that it has finished using
            # wait_for_completion so we use an active wait instead with a
            # timeout of 1440 minutes = 24 hours
            @item.CreateSnapshot_Task(snapshot_hash)

            snapshot_created  = false
            elapsed_minutes   = 0

            until snapshot_created || elapsed_minutes == 1440
                if !!@item['snapshot']
                    current_snapshot = @item['snapshot.currentSnapshot'] rescue nil
                    snapshot_found = find_snapshot_in_list(@item['snapshot.rootSnapshotList'], snap_id)
                    snapshot_created = !!snapshot_found && !!current_snapshot && current_snapshot._ref == snapshot_found._ref
                end
                sleep(60)
                elapsed_minutes += 1
            end
        end

        return snap_id
    end

    # Revert to a VM snapshot
    def revert_snapshot(snap_id)

        snapshot_list = self["snapshot.rootSnapshotList"]
        snapshot = find_snapshot_in_list(snapshot_list, snap_id)

        return nil if !snapshot

        begin
            revert_snapshot_hash = { :_this => snapshot }
            snapshot.RevertToSnapshot_Task(revert_snapshot_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot revert snapshot of VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Delete VM snapshot
    def delete_snapshot(snap_id)

        snapshot_list = self["snapshot.rootSnapshotList"]
        snapshot = find_snapshot_in_list(snapshot_list, snap_id)

        return nil if !snapshot

        begin
            delete_snapshot_hash = {
                :_this => snapshot,
                :removeChildren => false
            }
            snapshot.RemoveSnapshot_Task(delete_snapshot_hash).wait_for_completion
        rescue Exception => e
            raise "Cannot delete snapshot of VM: #{e.message}\n#{e.backtrace}"
        end
    end

    # Walk the snapshot tree recursively looking for a snapshot named snap_id
    def find_snapshot_in_list(list, snap_id)
        list.each do |i|
            if i.name == snap_id.to_s
                return i.snapshot
            elsif !i.childSnapshotList.empty?
                snap = find_snapshot_in_list(i.childSnapshotList, snap_id)
                return snap if snap
            end
        end rescue nil

        nil
    end

    ############################################################################
    # actions
    ############################################################################

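    # Tries a guest OS shutdown first and falls back to a hard power off
    # once VM_SHUTDOWN_TIMEOUT seconds have elapsed.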
    def shutdown
        begin
            @item.ShutdownGuest
            # Check if VM has been powered off
            (0..VM_SHUTDOWN_TIMEOUT).each do
                break if @item.runtime.powerState == "poweredOff"
                sleep 1
            end
        rescue
            # Ignore ShutdownGuest exceptions, the VM may not have
            # VMware Tools installed
        end

        # If VM hasn't been powered off, do it now
        if @item.runtime.powerState != "poweredOff"
            poweroff_hard
        end
    end

    def destroy
        @item.Destroy_Task.wait_for_completion
    end

    def mark_as_template
        @item.MarkAsTemplate
    end

    def reset
        @item.ResetVM_Task.wait_for_completion
    end

    def suspend
        @item.SuspendVM_Task.wait_for_completion
    end

    def reboot
        @item.RebootGuest
    end

    def poweron
        ## If needed in the future, VMs can be powered on from the datacenter
        ## dc = get_dc
        ## dc.power_on_vm(@item)
        @item.PowerOnVM_Task.wait_for_completion
    end

    def is_powered_on?
        return @item.runtime.powerState == "poweredOn"
    end

    def poweroff_hard
        @item.PowerOffVM_Task.wait_for_completion
    end

    def remove_all_snapshots
        @item.RemoveAllSnapshots_Task.wait_for_completion
    end

    def set_running(state)
        value = state ? "yes" : "no"

        config_array = [
            { :key => "opennebula.vm.running", :value => value }
        ]
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(
            { :extraConfig => config_array }
        )

        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    end

    ############################################################################
    # monitoring
    ############################################################################

    # monitor function used when VMM poll action is called
    def monitor_poll_vm
        reset_monitor

        @state = state_to_c(self["summary.runtime.powerState"])

        if @state != VM_STATE[:active]
            reset_monitor
            return
        end

        cpuMhz = self["runtime.host.summary.hardware.cpuMhz"].to_f

        @monitor[:used_memory] = self["summary.quickStats.hostMemoryUsage"] * 1024

        used_cpu = self["summary.quickStats.overallCpuUsage"].to_f / cpuMhz
        used_cpu = (used_cpu * 100).to_s
        @monitor[:used_cpu]  = sprintf('%.2f', used_cpu).to_s

        # Check for negative values
        @monitor[:used_memory] = 0 if @monitor[:used_memory].to_i < 0
        @monitor[:used_cpu]    = 0 if @monitor[:used_cpu].to_i < 0

        guest_ip_addresses = []
        self["guest.net"].each do |net|
            net.ipConfig.ipAddress.each do |ip|
                guest_ip_addresses << ip.ipAddress
            end if net.ipConfig && net.ipConfig.ipAddress
        end if self["guest.net"]

        @guest_ip_addresses = guest_ip_addresses.join(',')

        pm = self['_connection'].serviceInstance.content.perfManager

        provider = pm.provider_summary(@item)

        refresh_rate = provider.refreshRate

        if get_vm_id
            stats = {}

            if (one_item["MONITORING/LAST_MON"] && one_item["MONITORING/LAST_MON"].to_i != 0 )
                # Real time data stores max 1 hour. 1 minute has 3 samples
                interval = (Time.now.to_i - one_item["MONITORING/LAST_MON"].to_i)

                # If the last poll was more than an hour ago get 3 minutes,
                # else calculate how many samples since the last poll
                samples = interval > 3600 ? 9 : (interval / refresh_rate) + 1
                max_samples = samples > 0 ? samples : 1

                stats = pm.retrieve_stats(
                    [@item],
                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
                    'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
                    'virtualDisk.read','virtualDisk.write'],
                    {interval:refresh_rate, max_samples: max_samples}
                ) rescue {}
            else
                # First poll, get at least the latest 3 minutes = 9 samples
                stats = pm.retrieve_stats(
                    [@item],
                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
                    'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
                    'virtualDisk.read','virtualDisk.write'],
                    {interval:refresh_rate, max_samples: 9}
                ) rescue {}
            end

            if !stats.empty? && !stats.first[1][:metrics].empty?
                metrics = stats.first[1][:metrics]

                nettx_kbpersec = 0
                if metrics['net.transmitted']
                    metrics['net.transmitted'].each { |sample|
                        nettx_kbpersec += sample if sample > 0
                    }
                end

                netrx_kbpersec = 0
                if metrics['net.bytesRx']
                    metrics['net.bytesRx'].each { |sample|
                        netrx_kbpersec += sample if sample > 0
                    }
                end

                read_kbpersec = 0
                if metrics['virtualDisk.read']
                    metrics['virtualDisk.read'].each { |sample|
                        read_kbpersec += sample if sample > 0
                    }
                end

                read_iops = 0
                if metrics['virtualDisk.numberReadAveraged']
                    metrics['virtualDisk.numberReadAveraged'].each { |sample|
                        read_iops += sample if sample > 0
                    }
                end

                write_kbpersec = 0
                if metrics['virtualDisk.write']
                    metrics['virtualDisk.write'].each { |sample|
                        write_kbpersec += sample if sample > 0
                    }
                end

                write_iops = 0
                if metrics['virtualDisk.numberWriteAveraged']
                    metrics['virtualDisk.numberWriteAveraged'].each { |sample|
                        write_iops += sample if sample > 0
                    }
                end
            else
                nettx_kbpersec = 0
                netrx_kbpersec = 0
                read_kbpersec  = 0
                read_iops      = 0
                write_kbpersec = 0
                write_iops     = 0
            end

            # Accumulate values if present
            previous_nettx = @one_item && @one_item["MONITORING/NETTX"] ? @one_item["MONITORING/NETTX"].to_i : 0
            previous_netrx = @one_item && @one_item["MONITORING/NETRX"] ? @one_item["MONITORING/NETRX"].to_i : 0
            previous_diskrdiops = @one_item && @one_item["MONITORING/DISKRDIOPS"] ? @one_item["MONITORING/DISKRDIOPS"].to_i : 0
            previous_diskwriops = @one_item && @one_item["MONITORING/DISKWRIOPS"] ? @one_item["MONITORING/DISKWRIOPS"].to_i : 0
            previous_diskrdbytes = @one_item && @one_item["MONITORING/DISKRDBYTES"] ? @one_item["MONITORING/DISKRDBYTES"].to_i : 0
            previous_diskwrbytes = @one_item && @one_item["MONITORING/DISKWRBYTES"] ? @one_item["MONITORING/DISKWRBYTES"].to_i : 0

            @monitor[:nettx] = previous_nettx + (nettx_kbpersec * 1024 * refresh_rate).to_i
            @monitor[:netrx] = previous_netrx + (netrx_kbpersec * 1024 * refresh_rate).to_i

            @monitor[:diskrdiops]  = previous_diskrdiops + read_iops
            @monitor[:diskwriops]  = previous_diskwriops + write_iops
            @monitor[:diskrdbytes] = previous_diskrdbytes + (read_kbpersec * 1024 * refresh_rate).to_i
            @monitor[:diskwrbytes] = previous_diskwrbytes + (write_kbpersec * 1024 * refresh_rate).to_i
        end
    end

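    # Note: both monitor functions accumulate the network and disk counters
    # on top of the previous MONITORING values, e.g. NETTX grows by
    # nettx_kbpersec * 1024 * refresh_rate bytes per sampled window.
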
    # Monitor function used when the poll action is called for all VMs
    def monitor(stats)

        reset_monitor

        refresh_rate = 20 # 20 seconds between samples (realtime)

        @state = state_to_c(@vm_info["summary.runtime.powerState"])

        return if @state != VM_STATE[:active]

        cpuMhz = @vm_info[:esx_host_cpu]

        @monitor[:used_memory] = @vm_info["summary.quickStats.hostMemoryUsage"].to_i * 1024

        used_cpu = @vm_info["summary.quickStats.overallCpuUsage"].to_f / cpuMhz
        @monitor[:used_cpu] = sprintf('%.2f', used_cpu * 100)

        # Check for negative values
        @monitor[:used_memory] = 0 if @monitor[:used_memory].to_i < 0
        @monitor[:used_cpu]    = 0 if @monitor[:used_cpu].to_i < 0

        guest_ip_addresses = []
        if @vm_info["guest.net"]
            @vm_info["guest.net"].each do |net|
                next unless net.ipConfig && net.ipConfig.ipAddress
                net.ipConfig.ipAddress.each do |ip|
                    guest_ip_addresses << ip.ipAddress
                end
            end
        end

        @guest_ip_addresses = guest_ip_addresses.join(',')

        if stats.key?(@item)
            metrics = stats[@item][:metrics]

            nettx_kbpersec = 0
            if metrics['net.transmitted']
                metrics['net.transmitted'].each { |sample|
                    nettx_kbpersec += sample if sample > 0
                }
            end

            netrx_kbpersec = 0
            if metrics['net.bytesRx']
                metrics['net.bytesRx'].each { |sample|
                    netrx_kbpersec += sample if sample > 0
                }
            end

            read_kbpersec = 0
            if metrics['virtualDisk.read']
                metrics['virtualDisk.read'].each { |sample|
                    read_kbpersec += sample if sample > 0
                }
            end

            read_iops = 0
            if metrics['virtualDisk.numberReadAveraged']
                metrics['virtualDisk.numberReadAveraged'].each { |sample|
                    read_iops += sample if sample > 0
                }
            end

            write_kbpersec = 0
            if metrics['virtualDisk.write']
                metrics['virtualDisk.write'].each { |sample|
                    write_kbpersec += sample if sample > 0
                }
            end

            write_iops = 0
            if metrics['virtualDisk.numberWriteAveraged']
                metrics['virtualDisk.numberWriteAveraged'].each { |sample|
                    write_iops += sample if sample > 0
                }
            end
        else
            nettx_kbpersec = 0
            netrx_kbpersec = 0
            read_kbpersec  = 0
            read_iops      = 0
            write_kbpersec = 0
            write_iops     = 0
        end

        # Accumulate values if present
        previous_nettx = @one_item && @one_item["MONITORING/NETTX"] ? @one_item["MONITORING/NETTX"].to_i : 0
        previous_netrx = @one_item && @one_item["MONITORING/NETRX"] ? @one_item["MONITORING/NETRX"].to_i : 0
        previous_diskrdiops = @one_item && @one_item["MONITORING/DISKRDIOPS"] ? @one_item["MONITORING/DISKRDIOPS"].to_i : 0
        previous_diskwriops = @one_item && @one_item["MONITORING/DISKWRIOPS"] ? @one_item["MONITORING/DISKWRIOPS"].to_i : 0
        previous_diskrdbytes = @one_item && @one_item["MONITORING/DISKRDBYTES"] ? @one_item["MONITORING/DISKRDBYTES"].to_i : 0
        previous_diskwrbytes = @one_item && @one_item["MONITORING/DISKWRBYTES"] ? @one_item["MONITORING/DISKWRBYTES"].to_i : 0

        @monitor[:nettx] = previous_nettx + (nettx_kbpersec * 1024 * refresh_rate).to_i
        @monitor[:netrx] = previous_netrx + (netrx_kbpersec * 1024 * refresh_rate).to_i

        @monitor[:diskrdiops]  = previous_diskrdiops + read_iops
        @monitor[:diskwriops]  = previous_diskwriops + write_iops
        @monitor[:diskrdbytes] = previous_diskrdbytes + (read_kbpersec * 1024 * refresh_rate).to_i
        @monitor[:diskwrbytes] = previous_diskwrbytes + (write_kbpersec * 1024 * refresh_rate).to_i
    end

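    # A minimal sketch of the +stats+ argument #monitor expects, as
    # implied by the lookups above. The hash is keyed by the RbVmomi VM
    # object and built by the caller from vCenter PerformanceManager
    # samples; the keys and values below are illustrative only:
    #
    #   stats = {
    #       vm.item => {
    #           :metrics => {
    #               'net.transmitted'                 => [12, 30],  # KB/s
    #               'net.bytesRx'                     => [40, 22],  # KB/s
    #               'virtualDisk.read'                => [0, 18],   # KB/s
    #               'virtualDisk.numberReadAveraged'  => [3, 5],    # IOPS
    #               'virtualDisk.write'               => [25, 25],  # KB/s
    #               'virtualDisk.numberWriteAveraged' => [7, 2]     # IOPS
    #           }
    #       }
    #   }
    #
    #   vm.monitor(stats)
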
    # Generates an OpenNebula IM Driver valid string with the monitor info
    def info
        return 'STATE=d' if @state == 'd'

        guest_ip = @vm_info ? @vm_info["guest.ipAddress"] : self["guest.ipAddress"]

        used_cpu    = @monitor[:used_cpu]
        used_memory = @monitor[:used_memory]
        netrx       = @monitor[:netrx]
        nettx       = @monitor[:nettx]
        diskrdbytes = @monitor[:diskrdbytes]
        diskwrbytes = @monitor[:diskwrbytes]
        diskrdiops  = @monitor[:diskrdiops]
        diskwriops  = @monitor[:diskwriops]

        esx_host      = @vm_info ? @vm_info[:esx_host_name].to_s : self["runtime.host.name"].to_s
        guest_state   = @vm_info ? @vm_info["guest.guestState"].to_s : self["guest.guestState"].to_s
        vmware_tools  = @vm_info ? @vm_info["guest.toolsRunningStatus"].to_s : self["guest.toolsRunningStatus"].to_s
        vmtools_ver   = @vm_info ? @vm_info["guest.toolsVersion"].to_s : self["guest.toolsVersion"].to_s
        vmtools_verst = @vm_info ? @vm_info["guest.toolsVersionStatus2"].to_s : self["guest.toolsVersionStatus2"].to_s

        if @vm_info
            rp_name = @vm_info[:rp_list].select { |item| item[:ref] == @vm_info["resourcePool"]._ref }.first[:name] rescue ""
            rp_name = "Resources" if rp_name.empty?
        else
            rp_name = self["resourcePool"].name
        end

        str_info = ""

        str_info << "GUEST_IP=" << guest_ip.to_s << " " if guest_ip

        if @guest_ip_addresses && !@guest_ip_addresses.empty?
            str_info << "GUEST_IP_ADDRESSES=\"" << @guest_ip_addresses.to_s << "\" "
        end

        str_info << "#{POLL_ATTRIBUTE[:state]}="  << @state            << " "
        str_info << "#{POLL_ATTRIBUTE[:cpu]}="    << used_cpu.to_s     << " "
        str_info << "#{POLL_ATTRIBUTE[:memory]}=" << used_memory.to_s  << " "
        str_info << "#{POLL_ATTRIBUTE[:netrx]}="  << netrx.to_s        << " "
        str_info << "#{POLL_ATTRIBUTE[:nettx]}="  << nettx.to_s        << " "

        str_info << "DISKRDBYTES=" << diskrdbytes.to_s << " "
        str_info << "DISKWRBYTES=" << diskwrbytes.to_s << " "
        str_info << "DISKRDIOPS="  << diskrdiops.to_s  << " "
        str_info << "DISKWRIOPS="  << diskwriops.to_s  << " "

        str_info << "VCENTER_ESX_HOST=\""                 << esx_host      << "\" "
        str_info << "VCENTER_GUEST_STATE="                << guest_state   << " "
        str_info << "VCENTER_VMWARETOOLS_RUNNING_STATUS=" << vmware_tools  << " "
        str_info << "VCENTER_VMWARETOOLS_VERSION="        << vmtools_ver   << " "
        str_info << "VCENTER_VMWARETOOLS_VERSION_STATUS=" << vmtools_verst << " "
        str_info << "VCENTER_RP_NAME=\""                  << rp_name       << "\" "
    end

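    # For reference, #info returns one space-separated attribute string
    # in the IM driver convention. An illustrative result (all values
    # made up) could look like:
    #
    #   GUEST_IP=10.0.2.15 STATE=a CPU=3.25 MEMORY=1048576
    #   NETRX=860160 NETTX=245760 DISKRDBYTES=0 DISKWRBYTES=512000
    #   DISKRDIOPS=8 DISKWRIOPS=9 VCENTER_ESX_HOST="esx1.example.com"
    #   VCENTER_GUEST_STATE=running VCENTER_RP_NAME="Resources" ...
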
    def reset_monitor
        @monitor = {
            :used_cpu    => 0,
            :used_memory => 0,
            :netrx       => 0,
            :nettx       => 0,
            :diskrdbytes => 0,
            :diskwrbytes => 0,
            :diskrdiops  => 0,
            :diskwriops  => 0
        }
    end

    # Converts the VI string state to the OpenNebula state convention.
    # The vCenter power states are:
    # - poweredOff   The virtual machine is currently powered off.
    # - poweredOn    The virtual machine is currently powered on.
    # - suspended    The virtual machine is currently suspended.
    def state_to_c(state)
        case state
        when 'poweredOn'
            VM_STATE[:active]
        when 'suspended'
            VM_STATE[:paused]
        when 'poweredOff'
            VM_STATE[:deleted]
        else
            VM_STATE[:unknown]
        end
    end

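    # Usage sketch, in terms of the VM_STATE constants used above (per
    # the check in #info, VM_STATE[:deleted] maps to 'd'):
    #
    #   state_to_c('poweredOn')  #=> VM_STATE[:active]
    #   state_to_c('suspended')  #=> VM_STATE[:paused]
    #   state_to_c('rebooting')  #=> VM_STATE[:unknown]
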
    # TODO check with uuid
    def self.new_from_ref(ref, vi_client)
        self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
    end

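    # Construction sketch: given a vCenter managed object reference such
    # as 'vm-1234' and an authenticated VIClient (both placeholders
    # here), the factory wraps the corresponding RbVmomi object:
    #
    #   vm = VCenterDriver::VirtualMachine.new_from_ref('vm-1234', vi_client)
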
end # class VirtualMachine
end # module VCenterDriver
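
# A minimal end-to-end sketch of a poll cycle with the class above.
# host_id, the 'vm-1234' ref and stats are placeholders; the real poll
# action builds the stats hash from vCenter PerformanceManager and
# pre-fetches the vm_info properties before calling monitor:
#
#   vi_client = VCenterDriver::VIClient.new_from_host(host_id)
#   vm = VCenterDriver::VirtualMachine.new_from_ref('vm-1234', vi_client)
#
#   vm.monitor(stats)   # fills @monitor from the collected samples
#   puts vm.info        # IM driver attribute string for OpenNebula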