Statistics
| Branch: | Tag: | Revision:

one / src / vmm_mad / remotes / vcenter / vcenter_driver.rb @ 05ed37c9

History | View | Annotate | Download (53.7 KB)

1
# ---------------------------------------------------------------------------- #
2
# Copyright 2010-2015, C12G Labs S.L                                           #
3
#                                                                              #
4
# Licensed under the Apache License, Version 2.0 (the "License"); you may      #
5
# not use this file except in compliance with the License. You may obtain      #
6
# a copy of the License at                                                     #
7
#                                                                              #
8
# http://www.apache.org/licenses/LICENSE-2.0                                   #
9
#                                                                              #
10
# Unless required by applicable law or agreed to in writing, software          #
11
# distributed under the License is distributed on an "AS IS" BASIS,            #
12
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.     #
13
# See the License for the specific language governing permissions and          #
14
# limitations under the License.                                               #
15
# ---------------------------------------------------------------------------- #
16

    
17
# -------------------------------------------------------------------------#
# Set up the environment for the driver                                    #
# -------------------------------------------------------------------------#
# Resolve installation paths: self-contained (ONE_LOCATION set) vs system-wide
ONE_LOCATION = ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)

if !ONE_LOCATION
   BIN_LOCATION = "/usr/bin" if !defined?(BIN_LOCATION)
   LIB_LOCATION = "/usr/lib/one" if !defined?(LIB_LOCATION)
   ETC_LOCATION = "/etc/one/" if !defined?(ETC_LOCATION)
   VAR_LOCATION = "/var/lib/one" if !defined?(VAR_LOCATION)
else
   BIN_LOCATION = ONE_LOCATION + "/bin" if !defined?(BIN_LOCATION)
   LIB_LOCATION = ONE_LOCATION + "/lib" if !defined?(LIB_LOCATION)
   ETC_LOCATION = ONE_LOCATION  + "/etc/" if !defined?(ETC_LOCATION)
   VAR_LOCATION = ONE_LOCATION + "/var/" if !defined?(VAR_LOCATION)
end

# Force the C locale so external command output is parsed deterministically
ENV['LANG'] = 'C'

# Prefer the rbvmomi copy bundled with OpenNebula over any system gem
$: << LIB_LOCATION+'/ruby/vendors/rbvmomi/lib'
$: << LIB_LOCATION+'/ruby'

require 'rbvmomi'
require 'yaml'
require 'opennebula'
require 'base64'
require 'openssl'

module VCenterDriver
46

    
47
################################################################################
48
# This class represents a VCenter connection and an associated OpenNebula client
49
# The connection is associated to the VCenter backing a given OpenNebula host.
50
# For the VCenter driver each OpenNebula host represents a VCenter cluster
51
################################################################################
52
class VIClient
53
    attr_reader :vim, :one, :root, :cluster, :user, :pass, :host
54

    
55
    # Recursively walks a vSphere folder tree collecting every managed
    # object whose class name matches +type+ (e.g. 'Datacenter').
    # Nested Folder objects are descended into; matches accumulate in
    # +entities+.
    # @return [Array, nil] nil when given an empty Array, matches otherwise
    def get_entities(folder, type, entities=[])
        return nil if folder == []

        folder.childEntity.each { |item|
            # RbVmomi managed objects stringify as 'ClassName("moref")'
            kind = item.to_s.split('(').first

            if kind == "Folder"
                get_entities(item, type, entities)
            elsif kind == type
                entities.push(item)
            end
        }

        entities
    end
71

    
72

    
73
    ############################################################################
    # Initialize the VIClient, and creates an OpenNebula client. The parameters
    # are obtained from the associated OpenNebula host
    # @param hid [Integer] The OpenNebula host id with VCenter attributes
    # @raise [RuntimeError] if the host cannot be read, the password cannot be
    #        decrypted, or no matching Datacenter/Cluster is found
    ############################################################################
    def initialize(hid)

        initialize_one

        @one_host = ::OpenNebula::Host.new_with_id(hid, @one)
        rc = @one_host.info

        if ::OpenNebula.is_error?(rc)
            raise "Error getting host information: #{rc.message}"
        end

        password = @one_host["TEMPLATE/VCENTER_PASSWORD"]

        # When oned exposes ONE_KEY, VCENTER_PASSWORD is stored encrypted
        # (AES-256-CBC, base64); decrypt it before connecting.
        if !@token.nil?
            begin
                # OpenSSL::Cipher::Cipher is deprecated; OpenSSL::Cipher is
                # the supported constructor and behaves identically.
                cipher = OpenSSL::Cipher.new("aes-256-cbc")

                cipher.decrypt
                cipher.key = @token

                password =  cipher.update(Base64::decode64(password))
                password << cipher.final
            rescue
                raise "Error decrypting vCenter password"
            end
        end

        connection = {
            :host     => @one_host["TEMPLATE/VCENTER_HOST"],
            :user     => @one_host["TEMPLATE/VCENTER_USER"],
            :password => password
        }

        initialize_vim(connection)

        datacenters = get_entities(@root, 'Datacenter')

        # Locate the cluster whose name matches the OpenNebula host name,
        # remembering the Datacenter that contains it.
        datacenters.each {|dc|
            ccrs = get_entities(dc.hostFolder, 'ClusterComputeResource')

            next if ccrs.nil?

            @cluster = ccrs.find{ |ccr| @one_host.name == ccr.name }

            (@dc = dc; break) if @cluster
        }

        if @dc.nil? || @cluster.nil?
            raise "Cannot find DataCenter or ClusterComputeResource for host."
        end
    end
129

    
130
    ########################################################################
    # Builds a VIClient from raw VIM parameters, bypassing the OpenNebula
    # host lookup. The OpenNebula client is also initialized.
    # @param user_opts [Hash] options accepted by initialize_vim
    # @return [VIClient] the connected client
    ########################################################################
    def self.new_connection(user_opts)
        conn = allocate

        conn.initialize_one
        conn.initialize_vim(user_opts)

        conn
    end
144

    
145
    ########################################################################
    # The resource pool associated to this connection. Honors the
    # VCENTER_RESOURCE_POOL host attribute when present; otherwise the
    # cluster's root resource pool is used.
    ########################################################################
    def resource_pool
        requested = @one_host["TEMPLATE/VCENTER_RESOURCE_POOL"]

        return @cluster.resourcePool if requested.nil?

        find_resource_pool(requested)
    end
157

    
158
    ########################################################################
    # Searches the desired ResourcePool of the DataCenter for the current
    # connection. Returns a RbVmomi::VIM::ResourcePool or the default pool
    # if not found
    # @param poolName [String] the ResourcePool name, '/'-separated path
    ########################################################################
    def find_resource_pool(poolName)
        entity = @cluster

        poolName.split('/').each do |segment|
            next if segment == ''

            # The children to search depend on the kind of the current
            # node; any dead end falls back to the cluster default pool.
            children = case entity
                       when RbVmomi::VIM::Folder
                           entity.childEntity
                       when RbVmomi::VIM::ClusterComputeResource
                           entity.resourcePool.resourcePool
                       when RbVmomi::VIM::ResourcePool
                           entity.resourcePool
                       else
                           return @cluster.resourcePool
                       end

            entity = children.find { |child| child.name == segment }
            return @cluster.resourcePool if entity.nil?
        end

        # A compute resource at the end of the path stands for its root pool
        if !entity.is_a?(RbVmomi::VIM::ResourcePool) and
            entity.respond_to?(:resourcePool)
              entity = entity.resourcePool
        end

        entity
    end
194

    
195
    ########################################################################
    # Searches the associated vmFolder of the DataCenter for the current
    # connection. Returns a RbVmomi::VIM::VirtualMachine or nil if not found
    # @param uuid [String] the UUID of the VM or VM Template
    ########################################################################
    def find_vm_template(uuid)
        vms = get_entities(@dc.vmFolder, 'VirtualMachine')

        return vms.find do |v|
            begin
                v.config && v.config.uuid == uuid
            # Fully qualified fault class: a bare ManagedObjectNotFound is
            # not resolvable inside VCenterDriver and would raise NameError
            # instead of skipping VMs deleted mid-scan.
            rescue RbVmomi::VIM::ManagedObjectNotFound
                false
            end
        end
    end
211

    
212
    ########################################################################
    # Searches the associated vmFolder of the DataCenter for the current
    # connection. Returns a RbVmomi::VIM::VirtualMachine or nil if not found
    # @param vm_name [String] the name of the VM or VM Template
    ########################################################################
    def find_vm(vm_name)
        vms = get_entities(@dc.vmFolder, 'VirtualMachine')

        return vms.find do |v|
            begin
                v.name == vm_name
            # Fully qualified fault class: a bare ManagedObjectNotFound is
            # not resolvable inside VCenterDriver and would raise NameError
            # instead of skipping VMs deleted mid-scan.
            rescue RbVmomi::VIM::ManagedObjectNotFound
                false
            end
        end
    end
228

    
229
    ########################################################################
    # Builds a hash with the DataCenter / ClusterComputeResource hierarchy
    # for this VCenter. Clusters already imported as OpenNebula hosts are
    # filtered out.
    # @param one_client [OpenNebula::Client] Use this client instead of @one
    # @return [Hash] {dc_name [String] => cluster names [Array<String>]}
    ########################################################################
    def hierarchy(one_client=nil)
        host_pool = OpenNebula::HostPool.new(one_client || @one)
        host_pool.info

        result = {}

        get_entities(@root, 'Datacenter').each do |dc|
            clusters = get_entities(dc.hostFolder, 'ClusterComputeResource')

            # Keep only clusters not yet known to OpenNebula
            result[dc.name] = clusters.reject { |ccr|
                host_pool["HOST[NAME=\"#{ccr.name}\"]"]
            }.map { |ccr| ccr.name }
        end

        result
    end
255

    
256
    ########################################################################
    # Builds a hash with the Datacenter / VM Templates for this VCenter
    # @param one_client [OpenNebula::Client] Use this client instead of @one
    # @return [Hash] {dc_name [String] => template info [Array<Hash>]}
    ########################################################################
    def vm_templates(one_client=nil)
        tpool = OpenNebula::TemplatePool.new(
            (one_client||@one), OpenNebula::Pool::INFO_ALL)
        rc = tpool.info
        if OpenNebula.is_error?(rc)
            raise "Error contacting OpenNebula #{rc.message}"
        end

        vm_templates = {}

        get_entities(@root, 'Datacenter').each { |dc|
            # Only vCenter objects flagged as templates are considered
            templates = get_entities(dc.vmFolder, 'VirtualMachine').select { |v|
                v.config && (v.config.template == true)
            }

            one_tmp = []

            templates.each { |t|
                vi_tmp = VCenterVm.new(self, t)

                # Skip templates already imported into OpenNebula
                if !tpool["VMTEMPLATE/TEMPLATE/PUBLIC_CLOUD[\
                        TYPE=\"vcenter\" \
                        and VM_TEMPLATE=\"#{vi_tmp.vm.config.uuid}\"]"]
                    one_tmp << {
                        :name => vi_tmp.vm.name,
                        :uuid => vi_tmp.vm.config.uuid,
                        :host => vi_tmp.vm.runtime.host.parent.name,
                        :one  => vi_tmp.to_one
                    }
                end
            }

            vm_templates[dc.name] = one_tmp
        }

        vm_templates
    end
301

    
302
    ########################################################################
    # Builds a hash with the Datacenter / Virtual Machines for this VCenter
    # @param one_client [OpenNebula::Client] Use this client instead of @one
    # @return [Hash] {dc_name [String] => running VM info [Array<Hash>]}
    ########################################################################
    def running_vms(one_client=nil)
        running_vms = {}

        vmpool = OpenNebula::VirtualMachinePool.new(
            (one_client||@one), OpenNebula::Pool::INFO_ALL)
        rc = vmpool.info

        hostpool = OpenNebula::HostPool.new((one_client||@one))
        rc       = hostpool.info
        if OpenNebula.is_error?(rc)
            raise "Error contacting OpenNebula #{rc.message}"
        end

        datacenters = get_entities(@root, 'Datacenter')

        datacenters.each { |dc|
            vms = get_entities(dc.vmFolder, 'VirtualMachine')

            # Get rid of VM Templates and VMs not in running state
            vm_list = vms.select { |v|
                v.config &&
                v.config.template != true &&
                v.summary.runtime.powerState == "poweredOn"
            }

            one_tmp = []

            vm_list.each { |v|
                vi_tmp = VCenterVm.new(self, v)

                # Do not reimport VMs deployed by OpenNebula since the core
                # will get confused with the IDs. The regex is anchored so
                # that names merely containing 'one-<n>' (e.g. "zone-5")
                # are not skipped by mistake.
                next if vi_tmp.vm.name.match(/^one-\d+$/)

                container_hostname = vi_tmp.vm.runtime.host.parent.name

                if !vmpool["VM/USER_TEMPLATE/PUBLIC_CLOUD[\
                        TYPE=\"vcenter\" \
                        and VM_TEMPLATE=\"#{vi_tmp.vm.config.uuid}\"]"]

                    host_id = name_to_id(container_hostname,hostpool,"HOST")[1]

                    one_tmp << {
                        :name => vi_tmp.vm.name,
                        :uuid => vi_tmp.vm.config.uuid,
                        :host => container_hostname,
                        :host_id => host_id,
                        :one  => vi_tmp.vm_to_one
                    }
                end
            }

            running_vms[dc.name] = one_tmp
        }

        return running_vms
    end
372

    
373
    # Resolves an OpenNebula object name to its numeric id within +pool+.
    # @param name [String] object name to look up
    # @param pool [OpenNebula::Pool] pool to search
    # @param ename [String] entity label used in error messages
    # @return [Array] [0, id] on success, [-1, error message] otherwise
    def name_to_id(name, pool, ename)
            matches = pool.select { |element| element.name == name }

            case matches.length
            when 0
                return -1, "#{ename} named #{name} not found."
            when 1
                return 0, matches.first.id
            else
                return -1, "There are multiple #{ename}s with name #{name}."
            end
    end
388

    
389
    ########################################################################
    # Builds a hash with the Datacenter / CCR (Distributed)Networks
    # for this VCenter
    # @param one_client [OpenNebula::Client] Use this client instead of @one
    # @return [Hash] { dc_name [String] => Networks [Array] }
    ########################################################################
    def vcenter_networks(one_client=nil)
        nets_by_dc = {}

        vnpool = OpenNebula::VirtualNetworkPool.new(
            (one_client||@one), OpenNebula::Pool::INFO_ALL)
        rc     = vnpool.info
        if OpenNebula.is_error?(rc)
            raise "Error contacting OpenNebula #{rc.message}"
        end

        get_entities(@root, 'Datacenter').each do |dc|
            one_nets = []

            # Standard port groups: import any not already present in
            # OpenNebula as a vnet with a matching bridge
            get_entities(dc.networkFolder, 'Network').each do |n|
                if !vnpool["VNET[BRIDGE=\"#{n[:name]}\"]/\
                        TEMPLATE[VCENTER_TYPE=\"Port Group\"]"]
                    one_nets << {
                        :name   => n.name,
                        :bridge => n.name,
                        :type   => "Port Group",
                        :one    => "NAME   = \"#{n[:name]}\"\n" \
                                   "BRIDGE = \"#{n[:name]}\"\n" \
                                   "VCENTER_TYPE = \"Port Group\""
                    }
                end
            end

            # Distributed port groups: also derive VLAN information from
            # the default port configuration when available
            dvpgs = get_entities(dc.networkFolder,
                                    'DistributedVirtualPortgroup' )

            dvpgs.each do |n|
                if !vnpool["VNET[BRIDGE=\"#{n[:name]}\"]/\
                        TEMPLATE[VCENTER_TYPE=\"Distributed Port Group\"]"]
                    vnet_template = "NAME   = \"#{n[:name]}\"\n" \
                                    "BRIDGE = \"#{n[:name]}\"\n" \
                                    "VCENTER_TYPE = \"Distributed Port Group\""

                    default_pc = n.config.defaultPortConfig

                    # A vlanId is only exposed by VLAN-capable port configs
                    has_vlan = default_pc.methods.include?(:vlan) &&
                               default_pc.vlan.methods.include?(:vlanId)

                    vlan_str = ""

                    if has_vlan
                        vlan = default_pc.vlan.vlanId

                        if vlan != 0
                            if vlan.is_a? Array
                                # Trunk ranges render as "start..end" CSV
                                ranges = vlan.map { |v|
                                    v.start.to_s + ".." + v.end.to_s
                                }
                                vlan_str = ranges.join(",")
                            else
                                vlan_str = vlan.to_s
                            end
                        end
                    end

                    if !vlan_str.empty?
                        vnet_template << "VLAN=\"YES\"\n" \
                                         "VLAN_ID=#{vlan_str}\n"
                    end

                    one_net = {:name   => n.name,
                               :bridge => n.name,
                               :type   => "Distributed Port Group",
                               :one    => vnet_template}

                    one_net[:vlan] = vlan_str if !vlan_str.empty?

                    one_nets << one_net
                end
            end

            nets_by_dc[dc.name] = one_nets
        end

        return nets_by_dc
    end
483

    
484
    # Translates an OpenNebula host name into its numeric host id.
    # @param hostname [String] name of the OpenNebula host
    # @return [Integer] the host id
    # @raise [RuntimeError] if the pool cannot be read or no host matches
    def self.translate_hostname(hostname)
        host_pool = OpenNebula::HostPool.new(::OpenNebula::Client.new())
        rc        = host_pool.info
        raise "Could not find host #{hostname}" if OpenNebula.is_error?(rc)

        host = host_pool.select {|host_element| host_element.name==hostname }

        # Fail with a clear message instead of a NoMethodError on nil when
        # no host with that name exists in the pool
        raise "Could not find host #{hostname}" if host.empty?

        return host.first.id
    end
492

    
493
    ############################################################################
    # Initialize an OpenNebula connection with the default ONE_AUTH
    # @raise [RuntimeError] if the client or the oned configuration fails
    ############################################################################
    def initialize_one
        begin
            @one   = ::OpenNebula::Client.new()
            system = ::OpenNebula::System.new(@one)

            config = system.get_configuration()

            if ::OpenNebula.is_error?(config)
                raise "Error getting oned configuration : #{config.message}"
            end

            # ONE_KEY is the AES key used to decrypt VCENTER_PASSWORD
            @token = config["ONE_KEY"]
        # Rescue StandardError, not Exception: rescuing Exception would also
        # swallow signals and SystemExit
        rescue StandardError => e
            raise "Error initializing OpenNebula client: #{e.message}"
        end
    end
512

    
513
    ############################################################################
    # Initialize a connection with vCenter. Options
    # @param user_opts[Hash] with:
    #    :user => The vcenter user
    #    :password => Password for the user
    #    :host => vCenter hostname or IP
    #    :insecure => SSL (optional, defaults to true)
    # @raise [RuntimeError] if the vCenter connection cannot be established
    ############################################################################
    def initialize_vim(user_opts={})
        opts = {
            :insecure => true
        }.merge(user_opts)

        @user = opts[:user]
        @pass = opts[:password]
        @host = opts[:host]

        begin
            @vim  = RbVmomi::VIM.connect(opts)
            @root = @vim.root
        # Rescue StandardError, not Exception: rescuing Exception would also
        # swallow signals and SystemExit
        rescue StandardError => e
            raise "Error connecting to #{@host}: #{e.message}"
        end
    end
537
end
538

    
539
################################################################################
540
# This class is an OpenNebula hosts that abstracts a vCenter cluster. It
541
# includes the functionality needed to monitor the cluster and report the ESX
542
# hosts and VM status of the cluster.
543
################################################################################
544
class VCenterHost < ::OpenNebula::Host
545
    attr_reader :vc_client, :vc_root, :cluster, :host, :client
546

    
547
    ############################################################################
    # Initialize the VCenterHost by looking for the associated objects of the
    # VIM hierarchy
    # @param client [VIClient] to interact with the associated vCenter
    ############################################################################
    def initialize(client)
        @client        = client
        @cluster       = client.cluster
        @resource_pool = client.resource_pool
    end
558

    
559
    ########################################################################
    #  Creates an OpenNebula host representing a cluster in this VCenter
    #  @param cluster_name[String] the name of the cluster in the vcenter
    #  @param client [VIClient] to create the host
    #  @return In case of success [0, host_id] or [-1, error_msg]
    ########################################################################
    def self.to_one(cluster_name, client)
        one_host = ::OpenNebula::Host.new(::OpenNebula::Host.build_xml,
            client.one)

        # Register the cluster using the vcenter VMM/IM drivers
        rc = one_host.allocate(cluster_name, 'vcenter', 'vcenter', 'dummy',
                ::OpenNebula::ClusterPool::NONE_CLUSTER_ID)

        return -1, rc.message if ::OpenNebula.is_error?(rc)

        # Store the vCenter endpoint and credentials in the host template
        template = "VCENTER_HOST=\"#{client.host}\"\n"\
                   "VCENTER_PASSWORD=\"#{client.pass}\"\n"\
                   "VCENTER_USER=\"#{client.user}\"\n"

        rc = one_host.update(template, false)

        return 0, one_host.id unless ::OpenNebula.is_error?(rc)

        # Roll back the allocation if the credentials could not be saved
        error = rc.message
        rc    = one_host.delete

        if ::OpenNebula.is_error?(rc)
            error << ". Host #{cluster_name} could not be"\
                " deleted: #{rc.message}."
        end

        return -1, error
    end
595

    
596
    ############################################################################
    # Generate an OpenNebula monitor string for this host. Reference:
    # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/Reference
    # Guide/vim.ComputeResource.Summary.html
    #   - effectiveCpu: Effective CPU resources (in MHz) available to run
    #     VMs. This is the aggregated from all running hosts excluding hosts in
    #     maintenance mode or unresponsive are not counted.
    #   - effectiveMemory: Effective memory resources (in MB) available to run
    #     VMs. Equivalent to effectiveCpu.
    #   - numCpuCores: Number of physical CPU cores.
    #   - numEffectiveHosts: Total number of effective hosts.
    #   - numHosts:Total number of hosts.
    #   - totalCpu: Aggregated CPU resources of all hosts, in MHz.
    #   - totalMemory: Aggregated memory resources of all hosts, in bytes.
    ############################################################################
    def monitor_cluster
        summary = @cluster.summary

        # Per-core speed in MHz, derived from the aggregated totals
        mhz_core = summary.totalCpu.to_f / summary.numCpuCores.to_f
        eff_core = summary.effectiveCpu.to_f / mhz_core

        # CPU figures are expressed in "percent of one core" units
        free_cpu  = sprintf('%.2f', eff_core * 100).to_f
        total_cpu = summary.numCpuCores.to_f * 100
        used_cpu  = sprintf('%.2f', total_cpu - free_cpu).to_f

        # totalMemory comes in bytes and effectiveMemory in MB: both to KB
        total_mem = summary.totalMemory.to_i / 1024
        free_mem  = summary.effectiveMemory.to_i * 1024

        [
            # System
            "HYPERVISOR=vcenter",
            "PUBLIC_CLOUD=YES",
            "TOTALHOST=#{summary.numHosts}",
            "AVAILHOST=#{summary.numEffectiveHosts}",
            # CPU
            "CPUSPEED=#{mhz_core}",
            "TOTALCPU=#{total_cpu}",
            "USEDCPU=#{used_cpu}",
            "FREECPU=#{free_cpu}",
            # Memory
            "TOTALMEMORY=#{total_mem}",
            "FREEMEMORY=#{free_mem}",
            "USEDMEMORY=#{total_mem - free_mem}"
        ].join("\n")
    end
644

    
645
    ############################################################################
    # Generate a template with information for each ESX Host. Reference:
    # http://pubs.vmware.com/vi-sdk/visdk250/ReferenceGuide/vim.HostSystem.html
    #   - Summary: Basic information about the host, including connection state
    #     - hardware: Hardware configuration of the host. This might not be
    #       available for a disconnected host.
    #     - quickStats: Basic host statistics.
    ############################################################################
    def monitor_host_systems
        host_info = ""

        @cluster.host.each do |h|
            # Skip ESX hosts that are disconnected or unreachable
            next if h.runtime.connectionState != "connected"

            summary = h.summary
            hw      = summary.hardware
            stats   = summary.quickStats

            # CPU in "percent of one core" units
            total_cpu = hw.numCpuCores * 100
            used_cpu  = (stats.overallCpuUsage.to_f / hw.cpuMhz.to_f) * 100
            used_cpu  = sprintf('%.2f', used_cpu).to_f # Trim precission
            free_cpu  = total_cpu - used_cpu

            # memorySize comes in bytes and overallMemoryUsage in MB: to KB
            total_memory = hw.memorySize/1024
            used_memory  = stats.overallMemoryUsage*1024
            free_memory  = total_memory - used_memory

            host_info << "\nHOST=[" \
                         "STATE=on," \
                         "HOSTNAME=\"#{h.name}\"," \
                         "MODELNAME=\"#{hw.cpuModel}\"," \
                         "CPUSPEED=#{hw.cpuMhz}," \
                         "MAX_CPU=#{total_cpu}," \
                         "USED_CPU=#{used_cpu}," \
                         "FREE_CPU=#{free_cpu}," \
                         "MAX_MEM=#{total_memory}," \
                         "USED_MEM=#{used_memory}," \
                         "FREE_MEM=#{free_memory}" \
                         "]"
        end

        host_info
    end
688

    
689
    # Builds the VM monitor section for every VM in this host's resource
    # pool. VMs not deployed by OpenNebula (name not one-<id>) also carry
    # an IMPORT_TEMPLATE attribute so they can be imported later.
    def monitor_vms
        str_info = ""

        @resource_pool.vm.each do |v|
            name   = v.name
            # OpenNebula-deployed VMs are named one-<vid>; wild VMs get -1
            number = (name =~ /^one-\d*$/) ? name.split('-').last : -1

            vm = VCenterVm.new(@client, v)
            vm.monitor

            # VMs without configuration info cannot be reported
            next if !vm.vm.config

            str_info << "\nVM = ["
            str_info << "ID=#{number},"
            str_info << "DEPLOY_ID=\"#{vm.vm.config.uuid}\","
            str_info << "VM_NAME=\"#{name}\","

            if number == -1
                # Wild VM: attach its OpenNebula template, base64 encoded
                import = Base64.encode64(vm.vm_to_one).gsub("\n","")
                str_info << "IMPORT_TEMPLATE=\"#{import}\","
            end

            str_info << "POLL=\"#{vm.info}\"]"
        end

        str_info
    end
714
end
715

    
716
################################################################################
717
# This class is a high level abstraction of a VI VirtualMachine class with
718
# OpenNebula semantics.
719
################################################################################
720

    
721
class VCenterVm
722
    attr_reader :vm
723

    
724
    ############################################################################
    # Creates a new VCenterVm wrapper around a RbVmomi::VirtualMachine object.
    #    @param client [VCenterClient] client to connect to vCenter
    #    @param vm_vi  [RbVmomi::VirtualMachine] the wrapped vCenter VM
    ############################################################################
    def initialize(client, vm_vi)
        @client = client
        @vm     = vm_vi

        # Monitoring counters stay zeroed until monitor() is invoked
        @used_memory = 0
        @used_cpu    = 0

        @nettx = 0
        @netrx = 0
    end
739

    
740
    ############################################################################
    # Deploys a VM.
    #
    # On a first boot ("BOOT"/"BOOT_FAILURE") the vCenter template is cloned;
    # otherwise the already-cloned VM is looked up, its capacity reconciled
    # with the OpenNebula template, and the VM is powered on.
    #
    #  @param xml_text  XML representation of the VM
    #  @param lcm_state LCM state of the VM
    #  @param deploy_id vCenter UUID of the VM
    #  @param hostname  name of the host (equals the vCenter cluster)
    #  @return [String] vCenter UUID of the deployed VM
    ############################################################################
    def self.deploy(xml_text, lcm_state, deploy_id, hostname)
        if lcm_state == "BOOT" || lcm_state == "BOOT_FAILURE"
            return clone_vm(xml_text)
        else
            hid         = VIClient::translate_hostname(hostname)
            connection  = VIClient.new(hid)
            vm          = connection.find_vm_template(deploy_id)

            # Find out if we need to reconfigure capacity
            xml = REXML::Document.new xml_text

            # BUGFIX: REXML text() returns Strings while numCPU/memoryMB are
            # Integers, so the comparison below was always true and forced a
            # ReconfigVM_Task on every deploy. Coerce to Integer first.
            expected_cpu    = xml.root.elements["//TEMPLATE/VCPU"].text.to_i
            expected_memory = xml.root.elements["//TEMPLATE/MEMORY"].text.to_i
            current_cpu     = vm.config.hardware.numCPU
            current_memory  = vm.config.hardware.memoryMB

            if current_cpu != expected_cpu or current_memory != expected_memory
                capacity_hash = {:numCPUs  => expected_cpu,
                                 :memoryMB => expected_memory }
                spec = RbVmomi::VIM.VirtualMachineConfigSpec(capacity_hash)
                vm.ReconfigVM_Task(:spec => spec).wait_for_completion
            end

            vm.PowerOnVM_Task.wait_for_completion
            return vm.config.uuid
        end
    end
771

    
772
    ############################################################################
    # Cancels a VM
    #  @param deploy_id  vcenter identifier of the VM
    #  @param hostname   name of the host (equals the vCenter cluster)
    #  @param lcm_state  current LCM state of the VM
    #  @param keep_disks true to detach disks before destroying the VM
    ############################################################################
    def self.cancel(deploy_id, hostname, lcm_state, keep_disks)
        case lcm_state
            when "SHUTDOWN_POWEROFF", "SHUTDOWN_UNDEPLOY"
                # BUGFIX: shutdown() takes keep_disks too; calling it with
                # only three arguments raised an ArgumentError.
                shutdown(deploy_id, hostname, lcm_state, keep_disks)
            when "CANCEL", "LCM_INIT", "CLEANUP_RESUBMIT"
                hid         = VIClient::translate_hostname(hostname)
                connection  = VIClient.new(hid)
                vm          = connection.find_vm_template(deploy_id)

                begin
                    # Hard power off first; ignore errors if already off
                    if vm.summary.runtime.powerState == "poweredOn"
                        vm.PowerOffVM_Task.wait_for_completion
                    end
                rescue
                end
                # Preserve the disks before destroying the VM if requested
                detach_all_disks(vm) if keep_disks
                vm.Destroy_Task.wait_for_completion
        end
    end
796

    
797
    ############################################################################
    # Saves a VM (suspends it in vCenter terms).
    #  @param deploy_id vcenter identifier of the VM
    #  @param hostname  name of the host (equals the vCenter cluster)
    #  @param lcm_state current LCM state of the VM
    ############################################################################
    def self.save(deploy_id, hostname, lcm_state)
        case lcm_state
        when "SAVE_MIGRATE"
            # Cross-cluster migration cannot be implemented through suspend
            raise "Migration between vCenters cluster not supported"
        when "SAVE_SUSPEND", "SAVE_STOP"
            connection = VIClient.new(VIClient::translate_hostname(hostname))
            vm_ref     = connection.find_vm_template(deploy_id)

            vm_ref.SuspendVM_Task.wait_for_completion
        end
    end
814

    
815
    ############################################################################
    # Resumes (powers on) a suspended VM.
    #  @param deploy_id vcenter identifier of the VM
    #  @param hostname  name of the host (equals the vCenter cluster)
    ############################################################################
    def self.resume(deploy_id, hostname)
        connection = VIClient.new(VIClient::translate_hostname(hostname))

        target_vm = connection.find_vm_template(deploy_id)
        target_vm.PowerOnVM_Task.wait_for_completion
    end
827

    
828
    ############################################################################
    # Reboots a VM through the guest OS (requires VMware Tools running).
    #  @param deploy_id vcenter identifier of the VM
    #  @param hostname  name of the host (equals the vCenter cluster)
    ############################################################################
    def self.reboot(deploy_id, hostname)
        hid         = VIClient::translate_hostname(hostname)
        connection  = VIClient.new(hid)

        vm          = connection.find_vm_template(deploy_id)

        # BUGFIX: RebootGuest is not a *_Task method -- it returns nil, so
        # chaining wait_for_completion on its result raised NoMethodError.
        vm.RebootGuest
    end
841

    
842
    ############################################################################
    # Performs a hard reset of a VM.
    #  @param deploy_id vcenter identifier of the VM
    #  @param hostname  name of the host (equals the vCenter cluster)
    ############################################################################
    def self.reset(deploy_id, hostname)
        connection = VIClient.new(VIClient::translate_hostname(hostname))

        target_vm = connection.find_vm_template(deploy_id)
        target_vm.ResetVM_Task.wait_for_completion
    end
855

    
856
    ############################################################################
    # Shuts down a VM: asks the guest OS first (best effort), then forces a
    # power off. For a final "SHUTDOWN" the VM is destroyed as well.
    #  @param deploy_id  vcenter identifier of the VM
    #  @param hostname   name of the host (equals the vCenter cluster)
    #  @param lcm_state  current LCM state of the VM
    #  @param keep_disks true to detach disks before destroying the VM
    ############################################################################
    def self.shutdown(deploy_id, hostname, lcm_state, keep_disks)
        hid        = VIClient::translate_hostname(hostname)
        connection = VIClient.new(hid)

        vm = connection.find_vm_template(deploy_id)

        shutdown_states = ["SHUTDOWN", "SHUTDOWN_POWEROFF",
                           "SHUTDOWN_UNDEPLOY"]
        return unless shutdown_states.include?(lcm_state)

        # Best-effort guest shutdown; any error (e.g. VMware Tools missing)
        # is ignored and we fall through to the hard power off.
        begin
            vm.ShutdownGuest.wait_for_completion
        rescue
        end
        vm.PowerOffVM_Task.wait_for_completion

        if lcm_state == "SHUTDOWN"
            detach_all_disks(vm) if keep_disks
            vm.Destroy_Task.wait_for_completion
        end
    end
884

    
885
    ############################################################################
    # Creates a VM snapshot (memory included, guest filesystem quiesced).
    #  @param deploy_id     vcenter identifier of the VM
    #  @param hostname      name of the host (equals the vCenter cluster)
    #  @param snapshot_name name of the snapshot
    #  @return [String] the snapshot name
    ############################################################################
    def self.create_snapshot(deploy_id, hostname, snapshot_name)
        connection = VIClient.new(VIClient::translate_hostname(hostname))

        target_vm = connection.find_vm_template(deploy_id)

        target_vm.CreateSnapshot_Task(
            :name        => snapshot_name,
            :description => "OpenNebula Snapshot of VM #{deploy_id}",
            :memory      => true,
            :quiesce     => true).wait_for_completion

        snapshot_name
    end
908

    
909
    ############################################################################
    # Recursively searches a VM snapshot tree for a snapshot by name.
    #  @param list          list of snapshot tree nodes to search
    #  @param snapshot_name name of the snapshot
    #  @return the matching snapshot object, or nil when not found
    ############################################################################
    def self.find_snapshot(list, snapshot_name)
        list.each do |node|
            return node.snapshot if node.name == snapshot_name

            children = node.childSnapshotList
            unless children.empty?
                found = find_snapshot(children, snapshot_name)
                return found if found
            end
        end

        nil
    end
926

    
927
    ############################################################################
    # Delete VM snapshot
    #  @param deploy_id vcenter identifier of the VM
    #  @param hostname name of the host (equals the vCenter cluster)
    #  @param snapshot_name name of the snapshot
    ############################################################################
    def self.delete_snapshot(deploy_id, hostname, snapshot_name)
        hid         = VIClient::translate_hostname(hostname)
        connection  = VIClient.new(hid)

        vm          = connection.find_vm_template(deploy_id)

        # Search the whole snapshot tree starting from its roots
        list = vm.snapshot.rootSnapshotList

        snapshot = find_snapshot(list, snapshot_name)
        # Nothing to do when the snapshot no longer exists
        return nil if !snapshot

        # NOTE(review): rbvmomi normally derives :_this from the receiver of
        # the call -- confirm passing it explicitly here is intended/accepted.
        delete_snapshot_hash = {
            :_this => snapshot,
            :removeChildren => false
        }

        snapshot.RemoveSnapshot_Task(delete_snapshot_hash).wait_for_completion
    end
951

    
952
    ############################################################################
    # Revert VM snapshot
    #  @param deploy_id vcenter identifier of the VM
    #  @param hostname name of the host (equals the vCenter cluster)
    #  @param snapshot_name name of the snapshot
    ############################################################################
    def self.revert_snapshot(deploy_id, hostname, snapshot_name)
        hid         = VIClient::translate_hostname(hostname)
        connection  = VIClient.new(hid)

        vm          = connection.find_vm_template(deploy_id)

        # Search the whole snapshot tree starting from its roots
        list = vm.snapshot.rootSnapshotList

        snapshot = find_snapshot(list, snapshot_name)
        # Nothing to do when the snapshot does not exist
        return nil if !snapshot

        # NOTE(review): rbvmomi normally derives :_this from the receiver of
        # the call -- confirm passing it explicitly here is intended/accepted.
        revert_snapshot_hash = {
            :_this => snapshot
        }

        snapshot.RevertToSnapshot_Task(revert_snapshot_hash).wait_for_completion
    end
975

    
976
    ############################################################################
    # Attach NIC to a VM
    #  @param deploy_id vcenter identifier of the VM
    #  @param mac MAC address of the NIC to be attached
    #  @param bridge name of the Network in vCenter
    #  @param model model of the NIC to be attached
    #  @param host hostname of the ESX where the VM is running
    ############################################################################
    def self.attach_nic(deploy_id, mac, bridge, model, host)
        connection = VIClient.new(VIClient::translate_hostname(host))

        target_vm = connection.find_vm_template(deploy_id)

        # Build the single-device change spec and apply it to the VM
        device_change = [calculate_addnic_spec(target_vm, mac, bridge, model)]
        reconfig_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
                            {:deviceChange => device_change})

        target_vm.ReconfigVM_Task(:spec => reconfig_spec).wait_for_completion
    end
997

    
998
    ############################################################################
    # Detach NIC from a VM, identified by its MAC address.
    #  @param deploy_id vcenter identifier of the VM
    #  @param mac MAC address of the NIC to detach
    #  @param host hostname of the ESX where the VM is running
    #  @raise [RuntimeError] when no NIC with that MAC exists on the VM
    ############################################################################
    def self.detach_nic(deploy_id, mac, host)
        connection = VIClient.new(VIClient::translate_hostname(host))

        target_vm = connection.find_vm_template(deploy_id)

        # Locate the ethernet device whose MAC address matches
        nic = target_vm.config.hardware.device.find { |dev|
            is_nic?(dev) && (dev.macAddress == mac)
        }

        raise "Could not find NIC with mac address #{mac}" if nic.nil?

        remove_spec = {
            :deviceChange => [{
                :operation => :remove,
                :device    => nic
            }]
        }

        target_vm.ReconfigVM_Task(:spec => remove_spec).wait_for_completion
    end
1022

    
1023
    ########################################################################
1024
    #  Initialize the vm monitor information
1025
    ########################################################################
1026
    def monitor
1027
        @summary = @vm.summary
1028
        @state   = state_to_c(@summary.runtime.powerState)
1029

    
1030
        if @state != 'a'
1031
            @used_cpu    = 0
1032
            @used_memory = 0
1033

    
1034
            @netrx = 0
1035
            @nettx = 0
1036

    
1037
            return
1038
        end
1039

    
1040
        @used_memory = @summary.quickStats.hostMemoryUsage * 1024
1041

    
1042
        host   = @vm.runtime.host
1043
        cpuMhz = host.summary.hardware.cpuMhz.to_f
1044

    
1045
        @used_cpu   =
1046
                ((@summary.quickStats.overallCpuUsage.to_f / cpuMhz) * 100).to_s
1047
        @used_cpu   = sprintf('%.2f',@used_cpu).to_s
1048

    
1049
        # Check for negative values
1050
        @used_memory = 0 if @used_memory.to_i < 0
1051
        @used_cpu    = 0 if @used_cpu.to_i < 0
1052

    
1053
        @esx_host       = @vm.summary.runtime.host.name
1054
        @guest_ip       = @vm.guest.ipAddress
1055
        @guest_state    = @vm.guest.guestState
1056
        @vmware_tools   = @vm.guest.toolsRunningStatus
1057
        @vmtools_ver    = @vm.guest.toolsVersion
1058
        @vmtools_verst  = @vm.guest.toolsVersionStatus
1059

    
1060

    
1061
    end
1062

    
1063
    ########################################################################
    #  Generates a OpenNebula IM Driver valid string with the monitor info
    #  @return [String] space-separated KEY=VALUE pairs (trailing space
    #          included), or 'STATE=d' when the VM is powered off
    ########################################################################
    def info
      return 'STATE=d' if @state == 'd'

      pairs = []
      pairs << "GUEST_IP=#{@guest_ip}" if @guest_ip
      pairs << "STATE=#{@state}"
      pairs << "CPU=#{@used_cpu}"
      pairs << "MEMORY=#{@used_memory}"
      pairs << "NETRX=#{@netrx}"
      pairs << "NETTX=#{@nettx}"
      pairs << "ESX_HOST=#{@esx_host}"
      pairs << "GUEST_STATE=#{@guest_state}"
      pairs << "VMWARETOOLS_RUNNING_STATUS=#{@vmware_tools}"
      pairs << "VMWARETOOLS_VERSION=#{@vmtools_ver}"
      pairs << "VMWARETOOLS_VERSION_STATUS=#{@vmtools_verst}"

      # Keep the trailing space for compatibility with the original format
      pairs.join(" ") << " "
    end
1083

    
1084
    ########################################################################
    # Generates an OpenNebula Template for this VCenterVm
    #
    # Builds a template string with CPU/memory taken from the vCenter
    # config, a PUBLIC_CLOUD section referencing this template's UUID, VNC
    # graphics, SCHED_REQUIREMENTS pinning to the source cluster, a
    # description and a Sunstone logo guessed from the guest OS name.
    #
    # @return [String] an OpenNebula template definition
    ########################################################################
    def to_one
        str = "NAME   = \"#{@vm.name}\"\n"\
              "CPU    = \"#{@vm.config.hardware.numCPU}\"\n"\
              "vCPU   = \"#{@vm.config.hardware.numCPU}\"\n"\
              "MEMORY = \"#{@vm.config.hardware.memoryMB}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "PUBLIC_CLOUD = [\n"\
              "  TYPE        =\"vcenter\",\n"\
              "  VM_TEMPLATE =\"#{@vm.config.uuid}\"\n"\
              "]\n"\
              "GRAPHICS = [\n"\
              "  TYPE     =\"vnc\",\n"\
              "  LISTEN   =\"0.0.0.0\"\n"\
              "]\n"\
         "SCHED_REQUIREMENTS=\"NAME=\\\"#{@vm.runtime.host.parent.name}\\\"\"\n"

        # Use the vCenter annotation as description when present, otherwise
        # generate a default one (backslashes and quotes must be escaped)
        if @vm.config.annotation.nil? || @vm.config.annotation.empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula"\
                " from Cluster #{@vm.runtime.host.parent.name}\"\n"
        else
            notes = @vm.config.annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        # Pick a Sunstone logo from the guest OS name. First match wins, so
        # the specific /Windows XP/ branch precedes the generic /Windows/.
        case @vm.guest.guestFullName
            when /CentOS/i
                str << "LOGO=images/logos/centos.png"
            when /Debian/i
                str << "LOGO=images/logos/debian.png"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png"
            when /Linux/i
                str << "LOGO=images/logos/linux.png"
        end

        return str
    end
1132

    
1133
    ########################################################################
    # Generates an OpenNebula VirtualMachine for this VCenterVm
    #
    # Like #to_one but for importing a running (wild) VM: adds
    # IMPORT_VM_ID and reuses the VNC port/keymap already present in the
    # VM's extraConfig, when set.
    #
    # @return [String] an OpenNebula VM template definition
    ########################################################################
    def vm_to_one
        host_name = @vm.runtime.host.parent.name

        str = "NAME   = \"#{@vm.name}\"\n"\
              "CPU    = \"#{@vm.config.hardware.numCPU}\"\n"\
              "vCPU   = \"#{@vm.config.hardware.numCPU}\"\n"\
              "MEMORY = \"#{@vm.config.hardware.memoryMB}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "PUBLIC_CLOUD = [\n"\
              "  TYPE        =\"vcenter\",\n"\
              "  VM_TEMPLATE =\"#{@vm.config.uuid}\"\n"\
              "]\n"\
              "IMPORT_VM_ID    = \"#{@vm.config.uuid}\"\n"\
              "SCHED_REQUIREMENTS=\"NAME=\\\"#{host_name}\\\"\"\n"

        # Reuse the VNC settings the VM already carries in vCenter, if any
        vp     = @vm.config.extraConfig.select{|v|
                                           v[:key]=="remotedisplay.vnc.port"}
        keymap = @vm.config.extraConfig.select{|v|
                                           v[:key]=="remotedisplay.vnc.keymap"}

        if vp.size > 0
            str << "GRAPHICS = [\n"\
                   "  TYPE     =\"vnc\",\n"\
                   "  LISTEN   =\"0.0.0.0\",\n"\
                   "  PORT     =\"#{vp[0][:value]}\"\n"
            str << " ,KEYMAP   =\"#{keymap[0][:value]}\"\n" if keymap[0]
            str << "]\n"
        end

        # Use the vCenter annotation as description when present, otherwise
        # generate a default one (backslashes and quotes must be escaped)
        if @vm.config.annotation.nil? || @vm.config.annotation.empty?
            str << "DESCRIPTION = \"vCenter Virtual Machine imported by"\
                " OpenNebula from Cluster #{@vm.runtime.host.parent.name}\"\n"
        else
            notes = @vm.config.annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        return str
    end
1177

    
1178
private
1179

    
1180
    ########################################################################
    # Converts the VI string state to OpenNebula state convention
    # Guest states are:
    # - poweredOff   The virtual machine is currently powered off.
    # - poweredOn    The virtual machine is currently powered on.
    # - suspended    The virtual machine is currently suspended.
    #
    # @param state [String] vSphere runtime power state
    # @return [String] 'a' (active), 'p' (paused), 'd' (off) or '-' (other)
    ########################################################################
    def state_to_c(state)
        translation = {
            'poweredOn'  => 'a',
            'suspended'  => 'p',
            'poweredOff' => 'd'
        }
        translation.fetch(state, '-')
    end
1199

    
1200
    ########################################################################
    #  Checks if a RbVmomi::VIM::VirtualDevice is a network interface
    #  @return [Boolean] true when the device class inherits from
    #          RbVmomi::VIM::VirtualEthernetCard
    ########################################################################
    def self.is_nic?(device)
        device.class.ancestors.include?(RbVmomi::VIM::VirtualEthernetCard)
    end
1206

    
1207
    ########################################################################
    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk
    #  @return [Boolean] true when the device class inherits from
    #          RbVmomi::VIM::VirtualDisk
    ########################################################################
    def self.is_disk?(device)
        device.class.ancestors.include?(RbVmomi::VIM::VirtualDisk)
    end
1213

    
1214
    ########################################################################
    # Returns the spec to reconfig a VM and add a NIC
    #
    # @param vm     RbVmomi VirtualMachine the NIC will be added to
    # @param mac    [String, nil] MAC address; nil lets vCenter generate one
    # @param bridge [String] name of the vCenter Network/portgroup
    # @param model  [String, nil] NIC model name; defaults to VirtualE1000
    # @return [Hash] a :deviceChange entry with :operation => :add
    # @raise [RuntimeError] when the network is not found on the VM's host
    ########################################################################
    def self.calculate_addnic_spec(vm, mac, bridge, model)
        model       = model.nil? ? nil : model.downcase
        network     = vm.runtime.host.network.select{|n| n.name==bridge}
        backing     = nil

        if network.empty?
            raise "Network #{bridge} not found in host #{vm.runtime.host.name}"
        else
            network = network[0]
        end

        card_num = 1 # start in one, we want the next avaliable id

        vm.config.hardware.device.each{ |dv|
            card_num = card_num + 1 if is_nic?(dv)
        }

        # Map the requested model to an rbvmomi device class.
        # NOTE(review): "VirtualVmxnetm" does not appear in the vSphere API
        # device list (Vmxnet, Vmxnet2, Vmxnet3) -- confirm it exists in
        # rbvmomi before relying on that branch.
        nic_card = case model
                        when "virtuale1000", "e1000"
                            RbVmomi::VIM::VirtualE1000
                        when "virtuale1000e", "e1000e"
                            RbVmomi::VIM::VirtualE1000e
                        when "virtualpcnet32", "pcnet32"
                            RbVmomi::VIM::VirtualPCNet32
                        when "virtualsriovethernetcard", "sriovethernetcard"
                            RbVmomi::VIM::VirtualSriovEthernetCard
                        when "virtualvmxnetm", "vmxnetm"
                            RbVmomi::VIM::VirtualVmxnetm
                        when "virtualvmxnet2", "vmnet2"
                            RbVmomi::VIM::VirtualVmxnet2
                        when "virtualvmxnet3", "vmxnet3"
                            RbVmomi::VIM::VirtualVmxnet3
                        else # If none matches, use VirtualE1000
                            RbVmomi::VIM::VirtualE1000
                   end

        # Standard vSwitch networks take a plain network backing; any other
        # class is treated as a distributed virtual portgroup
        if network.class == RbVmomi::VIM::Network
            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
                        :deviceName => bridge,
                        :network => network)
        else
            port    = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
                        :switchUuid =>
                                network.config.distributedVirtualSwitch.uuid,
                        :portgroupKey => network.key)
            backing =
              RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
                 :port => port)
        end

        return {:operation => :add,
                :device => nic_card.new(
                            :key => 0,
                            :deviceInfo => {
                                :label => "net" + card_num.to_s,
                                :summary => bridge
                            },
                            :backing => backing,
                            :addressType => mac ? 'manual' : 'generated',
                            :macAddress  => mac
                           )
               }
    end
1280

    
1281
    ########################################################################
    #  Clone a vCenter VM Template and leaves it powered on
    #
    #  Locates the vCenter template referenced by the PUBLIC_CLOUD section
    #  (TYPE=vcenter) of the deployment XML, clones it as "one-<vmid>"
    #  (destroying a stale clone with the same name and retrying once on a
    #  DuplicateName fault), then reconfigures VNC, contextualization,
    #  NICs and capacity before powering the clone on.
    #
    #  @param xml_text XML representation of the VM deployment file
    #  @return [String] UUID of the cloned VM
    #  @raise [RuntimeError] on malformed templates or clone failures
    ########################################################################
    def self.clone_vm(xml_text)

        xml = REXML::Document.new xml_text
        pcs = xml.root.get_elements("//USER_TEMPLATE/PUBLIC_CLOUD")

        raise "Cannot find VCenter element in VM template." if pcs.nil?

        # Pick the PUBLIC_CLOUD entry whose TYPE is vcenter
        template = pcs.find { |t|
            type = t.elements["TYPE"]
            !type.nil? && type.text.downcase == "vcenter"
        }

        raise "Cannot find vCenter element in VM template." if template.nil?

        uuid = template.elements["VM_TEMPLATE"]

        raise "Cannot find VM_TEMPLATE in VCenter element." if uuid.nil?

        uuid = uuid.text
        vmid = xml.root.elements["/VM/ID"].text
        hid = xml.root.elements["//HISTORY_RECORDS/HISTORY/HID"]

        raise "Cannot find host id in deployment file history." if hid.nil?

        context = xml.root.elements["//TEMPLATE/CONTEXT"]
        # NOTE(review): hid is an REXML::Element here, not hid.text as in the
        # translate_hostname paths -- confirm VIClient.new accepts an element.
        connection  = VIClient.new(hid)
        vc_template = connection.find_vm_template(uuid)

        # Relocate into this cluster's resource pool, sharing parent disk
        # backings with the template where possible
        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
                          :diskMoveType => :moveChildMostDiskBacking,
                          :pool         => connection.resource_pool)

        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(
                      :location => relocate_spec,
                      :powerOn  => false,
                      :template => false)

        begin
            vm = vc_template.CloneVM_Task(
                   :folder => vc_template.parent,
                   :name   => "one-#{vmid}",
                   :spec   => clone_spec).wait_for_completion
        rescue Exception => e

            if !e.message.start_with?('DuplicateName')
                raise "Cannot clone VM Template: #{e.message}"
            end

            # A leftover clone with the same name exists: destroy and retry
            vm = connection.find_vm("one-#{vmid}")

            raise "Cannot clone VM Template" if vm.nil?

            vm.Destroy_Task.wait_for_completion
            vm = vc_template.CloneVM_Task(
                :folder => vc_template.parent,
                :name   => "one-#{vmid}",
                :spec   => clone_spec).wait_for_completion
        end

        vm_uuid = vm.config.uuid

        # VNC Section

        vnc_port   = xml.root.elements["/VM/TEMPLATE/GRAPHICS/PORT"]
        vnc_listen = xml.root.elements["/VM/TEMPLATE/GRAPHICS/LISTEN"]
        vnc_keymap = xml.root.elements["/VM/TEMPLATE/GRAPHICS/KEYMAP"]

        # Default to listening on all interfaces when LISTEN is absent
        if !vnc_listen
            vnc_listen = "0.0.0.0"
        else
            vnc_listen = vnc_listen.text
        end

        config_array     = []
        context_vnc_spec = {}

        if vnc_port
            config_array +=
                     [{:key=>"remotedisplay.vnc.enabled",:value=>"TRUE"},
                      {:key=>"remotedisplay.vnc.port",   :value=>vnc_port.text},
                      {:key=>"remotedisplay.vnc.ip",     :value=>vnc_listen}]
        end

        config_array += [{:key=>"remotedisplay.vnc.keymap",
                          :value=>vnc_keymap.text}] if vnc_keymap

        # Context section

        if context
            # Remove <CONTEXT> (9) and </CONTEXT>\n (11)
            # Encode context as shell-style KEY='value' lines, base64'd and
            # exposed to the guest through guestinfo.opennebula.context
            context_text = "# Context variables generated by OpenNebula\n"
            context.elements.each{|context_element|
                context_text += context_element.name + "='" +
                                context_element.text.gsub("'", "\\'") + "'\n"
            }
            context_text = Base64.encode64(context_text.chop)
            config_array +=
                     [{:key=>"guestinfo.opennebula.context",
                       :value=>context_text}]
        end

        if config_array != []
            context_vnc_spec = {:extraConfig =>config_array}
        end

        # NIC section, build the reconfig hash

        nics     = xml.root.get_elements("//TEMPLATE/NIC")
        nic_spec = {}

        if !nics.nil?
            nic_array = []
            nics.each{|nic|
               mac    = nic.elements["MAC"].text
               bridge = nic.elements["BRIDGE"].text
               model  = nic.elements["MODEL"] ? nic.elements["MODEL"].text : nil
               nic_array << calculate_addnic_spec(vm, mac, bridge, model)
            }

            nic_spec = {:deviceChange => nic_array}
        end

        # Capacity section

        cpu           = xml.root.elements["//TEMPLATE/VCPU"].text
        memory        = xml.root.elements["//TEMPLATE/MEMORY"].text
        capacity_spec = {:numCPUs  => cpu.to_i,
                         :memoryMB => memory }

        # Perform the VM reconfiguration
        spec_hash = context_vnc_spec.merge(nic_spec).merge(capacity_spec)
        spec      = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
        vm.ReconfigVM_Task(:spec => spec).wait_for_completion

        # Power on the VM
        vm.PowerOnVM_Task.wait_for_completion

        return vm_uuid
    end
1423

    
1424
    ############################################################################
    # Detach all disks from a VM (the disk backings are preserved, only the
    # devices are removed from the VM's hardware).
    #  @param vm RbVmomi VirtualMachine object
    ############################################################################
    def self.detach_all_disks(vm)
        disks = vm.config.hardware.device.select { |dev| is_disk?(dev) }

        return if disks.nil?

        # One :remove device-change entry per disk
        device_changes = disks.map { |disk|
            { :operation => :remove, :device => disk }
        }

        vm.ReconfigVM_Task(
            :spec => { :deviceChange => device_changes }).wait_for_completion
    end
1443
end
1444
end