one / src / flow / lib / models / role.rb @ 7d11d805

# -------------------------------------------------------------------------- #
# Copyright 2010-2015, C12G Labs S.L.                                        #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

require 'treetop'
require 'grammar'
require 'parse-cron'

if !(Gem.loaded_specs['treetop'].version >= Gem::Version.create('1.6.3'))
    raise "treetop gem version must be >= 1.6.3"
end

module OpenNebula
    class Role

        # Actions that can be performed on the VMs of a given Role
        SCHEDULE_ACTIONS = [
            'shutdown',
            'shutdown-hard',
            'undeploy',
            'undeploy-hard',
            'hold',
            'release',
            'stop',
            'suspend',
            'resume',
            'delete',
            'delete-recreate',
            'reboot',
            'reboot-hard',
            'poweroff',
            'poweroff-hard',
            'snapshot-create'
        ]

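        # States of a Role and their numeric codes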
        STATE = {
            'PENDING'            => 0,
            'DEPLOYING'          => 1,
            'RUNNING'            => 2,
            'UNDEPLOYING'        => 3,
            'WARNING'            => 4,
            'DONE'               => 5,
            'FAILED_UNDEPLOYING' => 6,
            'FAILED_DEPLOYING'   => 7,
            'SCALING'            => 8,
            'FAILED_SCALING'     => 9,
            'COOLDOWN'           => 10
        }

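        # State names indexed by their numeric code, the reverse of STATE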
        STATE_STR = [
            'PENDING',
            'DEPLOYING',
            'RUNNING',
            'UNDEPLOYING',
            'WARNING',
            'DONE',
            'FAILED_UNDEPLOYING',
            'FAILED_DEPLOYING',
            'SCALING',
            'FAILED_SCALING',
            'COOLDOWN'
        ]

        LOG_COMP = "ROL"

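        # Creates a new Role from its body definition
        # @param [Hash] body the role description, as stored in the service body
        # @param service the Service this Role belongs to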
        def initialize(body, service)
            @body       = body
            @service    = service

            @body['nodes'] ||= []
            @body['disposed_nodes'] ||= []
        end

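        # Returns the role name
        # @return [String] the role name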
        def name
            return @body['name']
        end

        # Returns the role state
        # @return [Integer] the role state
        def state
            return @body['state'].to_i
        end

        # Returns the role parents
        # @return [Array] the role parents
        def parents
            return @body['parents'] || []
        end

        # Returns the role cardinality
        # @return [Integer] the role cardinality
        def cardinality
            return @body['cardinality'].to_i
        end

        # Sets a new cardinality for this role
        # @param [Integer] target_cardinality the new cardinality
        def set_cardinality(target_cardinality)
            dir = target_cardinality > cardinality ? "up" : "down"
            msg = "Role #{name} scaling #{dir} from #{cardinality} to #{target_cardinality} nodes"
            Log.info LOG_COMP, msg, @service.id()
            @service.log_info(msg)

            @body['cardinality'] = target_cardinality.to_i
        end

        # Updates the cardinality with the current number of nodes
        def update_cardinality()
            @body['cardinality'] = @body['nodes'].size()
        end

        # Returns the role max cardinality
        # @return [Integer,nil] the role max cardinality, or nil if it was not defined
        def max_cardinality
            max = @body['max_vms']

            return nil if max.nil?

            return max.to_i
        end

        # Returns the role min cardinality
        # @return [Integer,nil] the role min cardinality, or nil if it was not defined
        def min_cardinality
            min = @body['min_vms']

            return nil if min.nil?

            return min.to_i
        end

        # Returns the string representation of the role state
        # @return [String] the state string
        def state_str
            return STATE_STR[state]
        end

        # Returns the nodes of the role
        # @return [Array] the nodes
        def get_nodes
            @body['nodes']
        end

        # Sets a new state
        # @param [Integer] state the new state
        # @return [true, false] true if the value was changed
        def set_state(state)
            if state < 0 || state >= STATE_STR.size
                return false
            end

            @body['state'] = state.to_s

            if state == STATE['SCALING']

                elasticity_pol = @body['elasticity_policies']

                if !elasticity_pol.nil?
                    elasticity_pol.each do |policy|
                        policy.delete('true_evals')
                    end
                end
            end

            Log.info LOG_COMP, "Role #{name} new state: #{STATE_STR[state]}", @service.id()

            return true
        end

        # Retrieves the VM information for each Node in this Role. If a Node
        # is to be disposed and it is found in DONE, it will be cleaned
        #
        # @return [nil, OpenNebula::Error] nil in case of success, Error
        #   otherwise
        def info
            success = true

            nodes = @body['nodes']
            new_nodes = []
            disposed_nodes = @body['disposed_nodes']

            nodes.each do |node|
                vm_id = node['deploy_id']
                vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)
                rc = vm.info

                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : VM #{vm_id} monitoring failed; #{rc.message}"
                    Log.error LOG_COMP, msg, @service.id()
                    @service.log_error(msg)

                    success = false
                    node['vm_info'] = nil

                    new_nodes << node
                else
                    node['vm_info'] = vm.to_hash

                    vm_state = node['vm_info']['VM']['STATE']
                    lcm_state = node['vm_info']['VM']['LCM_STATE']

                    running = (!Role.vm_failure?(vm_state, lcm_state) &&
                                vm_state == '3' && lcm_state >= '3')

                    if running && @service.ready_status_gate
                        running_status = node['vm_info']['VM']['USER_TEMPLATE']['READY'] || ""
                        running = running_status.upcase == "YES"
                    end

                    node['running'] = running

                    if (vm_state == '6')
                        # Store the VM id in the array of disposed nodes
                        disposed_nodes << vm_id
                    else
                        if (node['scale_up'] == "1" && vm_state == '3' && lcm_state == '3')
                            # If the VM was a scale-up and it reaches RUNNING,
                            # clear the flag
                            node.delete('scale_up')
                        end

                        new_nodes << node
                    end
                end
            end

            @body['nodes'] = new_nodes

            if !success
                return OpenNebula::Error.new()
            end

            return nil
        end

        # Deploys all the nodes in this role
        # @return [Array<true, nil>, Array<false, String>] true if all the VMs
        # were created, false and the error reason if there was a problem
        # creating the VMs
        def deploy(scale_up=false)
            n_nodes = cardinality() - get_nodes.size

            @body['last_vmname'] ||= 0

            if @body['vm_template_contents']
                extra_template = @body['vm_template_contents'].dup
            else
                extra_template = ""
            end

            extra_template <<
                "\nSERVICE_ID = #{@service.id()}" <<
                "\nROLE_NAME = \"#{@body['name']}\""

            n_nodes.times { |i|
                vm_name = @@vm_name_template.
                    gsub("$SERVICE_ID",    @service.id().to_s).
                    gsub("$SERVICE_NAME",  @service.name().to_s).
                    gsub("$ROLE_NAME",     name().to_s).
                    gsub("$VM_NUMBER",     @body['last_vmname'].to_s)

                @body['last_vmname'] += 1

                template_id = @body['vm_template']

                Log.debug LOG_COMP, "Role #{name} : Trying to instantiate template "\
                    "#{template_id}, with name #{vm_name}", @service.id()

                template = OpenNebula::Template.new_with_id(template_id, @service.client)

                vm_id = template.instantiate(vm_name, false, extra_template)

                if OpenNebula.is_error?(vm_id)
                    msg = "Role #{name} : Instantiate failed for template #{template_id}; #{vm_id.message}"
                    Log.error LOG_COMP, msg, @service.id()
                    @service.log_error(msg)

                    return [false, "Error trying to instantiate the VM Template" \
                        " #{template_id} in Role #{self.name}: #{vm_id.message}"]
                end

                Log.debug LOG_COMP, "Role #{name} : Instantiate success, VM ID #{vm_id}", @service.id()
                node = {
                    'deploy_id' => vm_id,
                }

                vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)
                rc = vm.info
                if OpenNebula.is_error?(rc)
                    node['vm_info'] = nil
                else
                    node['vm_info'] = vm.to_hash
                end

                if scale_up
                    node['scale_up'] = '1'
                end

                @body['nodes'] << node
            }

            return [true, nil]
        end

        # Shuts down all the nodes in this role
        #
        # @param scale_down [true, false] true to shutdown and dispose the
        #   number of VMs needed to get down to cardinality nodes
        # @return [Array<true, nil>, Array<false, String>] true if all the VMs
        # were shutdown, false and the error reason if there was a problem
        # shutting down the VMs
        def shutdown(scale_down=false)
            success = true

            nodes = get_nodes

            if scale_down
                n_nodes = nodes.size - cardinality()
            else
                n_nodes = nodes.size
            end

            shutdown_nodes(nodes[0..n_nodes-1], scale_down)

            return [success, nil]
        end

        # Deletes all the nodes in this role
        # @return [Array<true, nil>] all the VMs are deleted; errors are logged
        #   and ignored
        def delete
            get_nodes.each { |node|
                vm_id = node['deploy_id']

                Log.debug LOG_COMP, "Role #{name} : Deleting VM #{vm_id}", @service.id()

                vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)
                rc = vm.shutdown(true)

                if OpenNebula.is_error?(rc)
                    rc = vm.finalize
                end

                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : Delete failed for VM #{vm_id}; #{rc.message}"
                    Log.error LOG_COMP, msg, @service.id()
                    @service.log_error(msg)
                else
                    Log.debug LOG_COMP, "Role #{name} : Delete success for VM #{vm_id}", @service.id()
                end
            }

            return [true, nil]
        end

        # Changes the owner/group of all the nodes in this role
        #
        # @param [Integer] uid the new owner id. Set to -1 to leave the current one
        # @param [Integer] gid the new group id. Set to -1 to leave the current one
        #
        # @return [Array<true, nil>, Array<false, String>] true if all the VMs
        #   were updated, false and the error reason if there was a problem
        #   updating the VMs
        def chown(uid, gid)
            get_nodes.each { |node|
                vm_id = node['deploy_id']

                Log.debug LOG_COMP, "Role #{name} : Chown for VM #{vm_id}", @service.id()

                vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)
                rc = vm.chown(uid, gid)

                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : Chown failed for VM #{vm_id}; #{rc.message}"
                    Log.error LOG_COMP, msg, @service.id()
                    @service.log_error(msg)

                    return [false, rc.message]
                else
                    Log.debug LOG_COMP, "Role #{name} : Chown success for VM #{vm_id}", @service.id()
                end
            }

            return [true, nil]
        end

        # Schedules the given action on all the VMs that belong to the Role
        # @param [String] action one of the available actions defined in SCHEDULE_ACTIONS
        # @param [Integer] period
        # @param [Integer] vms_per_period
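        #
        # For example, batch_action('reboot', 60, 2) schedules a reboot for the
        # first two VMs right away, for the next two VMs 60 seconds later, and
        # so on. If period or vms_per_period is not given, the action is
        # scheduled for all the VMs at the same time.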
        def batch_action(action, period, vms_per_period)
            vms_id = []

            # TODO: check action is a valid string, period and vms_per_period integers

            error_msgs = []
            nodes = @body['nodes']
            now = Time.now.to_i

            do_offset = ( !period.nil? && period.to_i > 0 &&
                !vms_per_period.nil? && vms_per_period.to_i > 0 )

            time_offset = 0

            nodes.each_with_index do |node, index|
                vm_id = node['deploy_id']
                vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)
                rc = vm.info

                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : VM #{vm_id} monitoring failed; #{rc.message}"
                    error_msgs << msg
                    Log.error LOG_COMP, msg, @service.id()
                    @service.log_error(msg)
                else
                    ids = vm.retrieve_elements('USER_TEMPLATE/SCHED_ACTION/ID')

                    id = 0
                    if (!ids.nil? && !ids.empty?)
                        ids.map! {|e| e.to_i }
                        id = ids.max + 1
                    end

                    tmp_str = vm.user_template_str

                    if do_offset
                        time_offset = (index / vms_per_period.to_i).floor * period.to_i
                    end

                    tmp_str << "\nSCHED_ACTION = "<<
                        "[ID = #{id}, ACTION = #{action}, TIME = #{now + time_offset}]"

                    rc = vm.update(tmp_str)
                    if OpenNebula.is_error?(rc)
                        msg = "Role #{name} : VM #{vm_id} error scheduling action; #{rc.message}"
                        error_msgs << msg
                        Log.error LOG_COMP, msg, @service.id()
                        @service.log_error(msg)
                    else
                        vms_id << vm.id
                    end
                end
            end

            log_msg = "Action:#{action} scheduled on Role:#{self.name} VMs:#{vms_id.join(',')}"
            Log.info LOG_COMP, log_msg, @service.id()

            if error_msgs.empty?
                return [true, log_msg]
            else
                error_msgs << log_msg
                return [false, error_msgs.join("\n")]
            end
        end

        # Returns true if the VM state is failure
        # @param [Integer] vm_state VM state
        # @param [Integer] lcm_state VM LCM state
        # @return [true,false] True if the lcm state is one of *_FAILURE
        def self.vm_failure?(vm_state, lcm_state)
            vm_state_str = VirtualMachine::VM_STATE[vm_state.to_i]
            lcm_state_str = VirtualMachine::LCM_STATE[lcm_state.to_i]

            if vm_state_str == 'ACTIVE' &&
                (   lcm_state_str == 'BOOT_FAILURE' ||
                    lcm_state_str == 'BOOT_MIGRATE_FAILURE' ||
                    lcm_state_str == 'PROLOG_MIGRATE_FAILURE' ||
                    lcm_state_str == 'PROLOG_FAILURE' ||
                    lcm_state_str == 'EPILOG_FAILURE' ||
                    lcm_state_str == 'EPILOG_STOP_FAILURE' ||
                    lcm_state_str == 'EPILOG_UNDEPLOY_FAILURE' ||
                    lcm_state_str == 'PROLOG_MIGRATE_POWEROFF_FAILURE' ||
                    lcm_state_str == 'PROLOG_MIGRATE_SUSPEND_FAILURE' ||
                    lcm_state_str == 'BOOT_UNDEPLOY_FAILURE' ||
                    lcm_state_str == 'BOOT_STOPPED_FAILURE' ||
                    lcm_state_str == 'PROLOG_RESUME_FAILURE' ||
                    lcm_state_str == 'PROLOG_UNDEPLOY_FAILURE')

                return true
            end

            return false
        end

        ########################################################################
        # Scalability
        ########################################################################

        # Returns a positive, 0, or negative number of nodes to adjust,
        #   according to the elasticity and scheduled policies
        # @return [Array<Integer>] positive, 0, or negative number of nodes to
        #   adjust, plus the cooldown period duration
        def scale?()
            elasticity_pol = @body['elasticity_policies']
            scheduled_pol = @body['scheduled_policies']

            elasticity_pol ||= []
            scheduled_pol ||= []

            scheduled_pol.each do |policy|
                diff = scale_time?(policy)
                return [diff, 0] if diff != 0
            end

            elasticity_pol.each do |policy|
                diff, cooldown_duration = scale_attributes?(policy)
                if diff != 0
                    cooldown_duration = @body['cooldown'] if cooldown_duration.nil?
                    cooldown_duration = @@default_cooldown if cooldown_duration.nil?

                    return [diff, cooldown_duration]
                end
            end

            # Implicit rule that scales up to maintain the min_cardinality, with
            # no cooldown period
            if cardinality < min_cardinality.to_i
                return [min_cardinality.to_i - cardinality, 0]
            end

            return [0, 0]
        end

        # Scales up or down the number of nodes needed to match the current
        # cardinality
        #
        # @return [Array<true, nil>, Array<false, String>] true if all the VMs
        # were created/shut down, false and the error reason if there
        # was a problem
        def scale()
            n_nodes = 0

            get_nodes.each do |node|
                n_nodes += 1 if node['disposed'] != "1"
            end

            diff = cardinality - n_nodes

            if diff > 0
                return deploy(true)
            elsif diff < 0
                return shutdown(true)
            end

            return [true, nil]
        end

        # Updates the duration for the next cooldown
        # @param cooldown_duration [Integer] duration for the next cooldown
        def set_cooldown_duration(cooldown_duration)
            @body['cooldown_duration'] = cooldown_duration.to_i
        end

        # Updates the duration for the next cooldown with the default value
        def set_default_cooldown_duration()
            cooldown_duration = @body['cooldown']
            cooldown_duration = @@default_cooldown if cooldown_duration.nil?

            set_cooldown_duration(cooldown_duration)
        end

        # Sets the cooldown end time from now + the duration set in set_cooldown_duration
        # @return [true, false] true if the cooldown duration is bigger than 0
        def apply_cooldown_duration()
            cooldown_duration = @body['cooldown_duration'].to_i

            if cooldown_duration != 0
                @body['cooldown_end'] = Time.now.to_i + cooldown_duration
                @body.delete('cooldown_duration')

                return true
            end

            return false
        end

        # Returns true if the cooldown period ended
        # @return [true, false] true if the cooldown period ended
        def cooldown_over?()
            return Time.now.to_i >= @body['cooldown_end'].to_i
        end

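        # Sets the class-wide default cooldown period, used when neither the
        # role nor the triggering policy define one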
        def self.init_default_cooldown(default_cooldown)
            @@default_cooldown = default_cooldown
        end

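        # Sets the class-wide default shutdown action, used when neither the
        # role nor the service define one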
        def self.init_default_shutdown(shutdown_action)
            @@default_shutdown = shutdown_action
        end

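        # Sets the class-wide name template used to generate VM names; the
        # template may contain the $SERVICE_ID, $SERVICE_NAME, $ROLE_NAME and
        # $VM_NUMBER placeholders (see #deploy)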
        def self.init_default_vm_name_template(vm_name_template)
            @@vm_name_template = vm_name_template
        end

        # Updates the role
        # @param [Hash] template role update; only 'cardinality' and 'force' are used
        # @return [nil, OpenNebula::Error] nil in case of success, Error
        #   otherwise
        def update(template)

            force = template['force'] == true
            new_cardinality = template["cardinality"]

            if new_cardinality.nil?
                return nil
            end

            new_cardinality = new_cardinality.to_i

            if !force
                if new_cardinality < min_cardinality().to_i
                    return OpenNebula::Error.new(
                        "Minimum cardinality is #{min_cardinality()}")

                elsif !max_cardinality().nil? && new_cardinality > max_cardinality().to_i
                    return OpenNebula::Error.new(
                        "Maximum cardinality is #{max_cardinality()}")

                end
            end

            set_cardinality(new_cardinality)

            return nil
        end

        ########################################################################
        # Recover
        ########################################################################

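        # Recovers the Role from a failed deployment: cleans up the VMs in DONE
        # or in a failure state, and resumes the VMs in UNKNOWN (see #recover)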
        def recover_deployment()
            recover()
        end

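        # Recovers the Role from WARNING: cleans up the failed VMs and deploys
        # new ones until the role cardinality is reached again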
        def recover_warning()
            recover()
            deploy()
        end

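        # Recovers the Role from a failed scale operation: cleans up the failed
        # VMs and then either updates the cardinality (failed scale up) or
        # retries the pending shutdowns (failed scale down), see #retry_scale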
        def recover_scale()
            recover()
            retry_scale()
        end


        ########################################################################
        ########################################################################


        private

        # Returns a positive, 0, or negative number of nodes to adjust,
        #   according to a SCHEDULED type policy
        # @param [Hash] elasticity_pol a SCHEDULED type policy
        # @return [Integer] positive, 0, or negative number of nodes to adjust
        def scale_time?(elasticity_pol)
            now = Time.now.to_i

            last_eval = elasticity_pol['last_eval'].to_i

            elasticity_pol['last_eval'] = now

            # If this is the first time this is evaluated, ignore it.
            # We don't want to execute actions planned in the past when the
            # server starts.

            if last_eval == 0
                return 0
            end

            start_time  = elasticity_pol['start_time']
            target_vms = elasticity_pol['adjust']

            if target_vms.nil?
                # TODO error msg
                return 0
            end

            if !(start_time.nil? || start_time.empty?)
                begin
                    start_time = Time.parse(start_time).to_i
                rescue ArgumentError
                    # TODO error msg
                    return 0
                end
            else
                recurrence  = elasticity_pol['recurrence']

                if recurrence.nil? || recurrence.empty?
                    # TODO error msg
                    return 0
                end

                begin
                    cron_parser = CronParser.new(recurrence)

                    # This returns the next planned time, starting from the last
                    # step
                    start_time = cron_parser.next(Time.at(last_eval)).to_i
                rescue
                    # TODO error msg bad format
                    return 0
                end
            end

            # Only actions planned between last step and this one are triggered
            if start_time > last_eval && start_time <= now
                Log.debug LOG_COMP, "Role #{name} : scheduled scalability for "\
                    "#{Time.at(start_time)} triggered", @service.id()

                new_cardinality = calculate_new_cardinality(elasticity_pol)

                return new_cardinality - cardinality()
            end

            return 0
        end

        # Returns a positive, 0, or negative number of nodes to adjust,
        #   according to a policy based on attributes
        # @param [Hash] elasticity_pol a policy based on attributes
        # @return [Array<Integer>] positive, 0, or negative number of nodes to
        #   adjust, plus the cooldown period duration
        def scale_attributes?(elasticity_pol)

            now = Time.now.to_i

            # TODO: enforce true_up_evals type in ServiceTemplate::ROLE_SCHEMA ?

            period_duration = elasticity_pol['period'].to_i
            period_number   = elasticity_pol['period_number'].to_i
            last_eval       = elasticity_pol['last_eval'].to_i
            true_evals      = elasticity_pol['true_evals'].to_i
            expression      = elasticity_pol['expression']

            if !last_eval.nil?
                if now < (last_eval + period_duration)
                    return [0, 0]
                end
            end

            elasticity_pol['last_eval'] = now

            new_cardinality = cardinality()
            new_evals       = 0

            exp_value, exp_st = scale_rule(expression)

            if exp_value
                new_evals = true_evals + 1
                new_evals = period_number if new_evals > period_number

                if new_evals >= period_number
                    Log.debug LOG_COMP, "Role #{name} : elasticity policy #{exp_st} "\
                        "triggered", @service.id()
                    new_cardinality = calculate_new_cardinality(elasticity_pol)
                end
            end

            elasticity_pol['true_evals'] = new_evals
            elasticity_pol['expression_evaluated'] = exp_st

            return [new_cardinality - cardinality(), elasticity_pol['cooldown']]
        end

        # Returns true if the scalability rule is triggered
        # @return [Array<Boolean, String>, false] whether the rule is triggered
        #   and the evaluated expression, or false if the expression is empty
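        # @param [String] elas_expr the expression to evaluate with the
        #   elasticity grammar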
        def scale_rule(elas_expr)
            parser = ElasticityGrammarParser.new

            if elas_expr.nil? || elas_expr.empty?
                return false
            end

            treetop = parser.parse(elas_expr)
            if treetop.nil?
                return [false, "Parse error. '#{elas_expr}': #{parser.failure_reason}"]
            end

            val, st = treetop.result(self)

            return [val, st]
        end

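        # Calculates the target cardinality after applying the adjustment
        # defined in the given policy (CHANGE, PERCENTAGE_CHANGE or CARDINALITY),
        # keeping the result within the min_vms/max_vms limits
        # @param [Hash] elasticity_pol the policy to apply
        # @return [Integer] the new cardinality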
        def calculate_new_cardinality(elasticity_pol)
            type    = elasticity_pol['type']
            adjust  = elasticity_pol['adjust'].to_i

            # Min is a hard limit: if the current cardinality + adjustment does
            # not reach it, the difference is added

            max = [cardinality(), max_cardinality.to_i].max()
#            min = [cardinality(), min_cardinality.to_i].min()
            min = min_cardinality.to_i

            case type.upcase
            when 'CHANGE'
                new_cardinality = cardinality() + adjust
            when 'PERCENTAGE_CHANGE'
                min_adjust_step = elasticity_pol['min_adjust_step'].to_i

                change = cardinality() * adjust / 100.0

                sign = change > 0 ? 1 : -1
                change = change.abs

                if change < 1
                    change = 1
                else
                    change = change.to_i
                end

                change = sign * [change, min_adjust_step].max

                new_cardinality = cardinality() + change

            when 'CARDINALITY'
                new_cardinality = adjust
            else
                # TODO: error message
                return cardinality()
            end

            # The cardinality can be forced to be outside the min,max
            # range. If that is the case, the scale up/down will not
            # move further outside the range. It will move towards the
            # range with the adjustment set, instead of jumping the
            # difference
            if (adjust > 0)
                new_cardinality = max if new_cardinality > max
            elsif (adjust < 0)
                new_cardinality = min if new_cardinality < min
            end

            return new_cardinality
        end

        # For a failed scale up, the cardinality is updated to the actual value
        # For a failed scale down, the shutdown actions are retried
        def retry_scale()
            nodes_dispose = get_nodes.select { |node|
                node['disposed'] == "1"
            }

            shutdown_nodes(nodes_dispose, true)

            set_cardinality( get_nodes.size() - nodes_dispose.size() )
        end

        # Deletes VMs in DONE or FAILED, and sends a resume action to VMs in UNKNOWN
        def recover()

            nodes = @body['nodes']
            new_nodes = []
            disposed_nodes = @body['disposed_nodes']

            nodes.each do |node|
                vm_state = nil
                vm_id = node['deploy_id']

                if node['vm_info'] && node['vm_info']['VM'] && node['vm_info']['VM']['STATE']
                    vm_state = node['vm_info']['VM']['STATE']
                    lcm_state = node['vm_info']['VM']['LCM_STATE']

                    vm_state_str = VirtualMachine::VM_STATE[vm_state.to_i]
                    lcm_state_str = VirtualMachine::LCM_STATE[lcm_state.to_i]
                end

                if vm_state == '6' # DONE
                    # Store the VM id in the array of disposed nodes
                    disposed_nodes << vm_id

                elsif ( Role.vm_failure?(vm_state, lcm_state) )
                    vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)
                    rc = vm.finalize

                    if !OpenNebula.is_error?(rc)
                        # Store the VM id in the array of disposed nodes
                        disposed_nodes << vm_id

                        Log.debug LOG_COMP, "Role #{name} : Delete success for VM #{vm_id}", @service.id()
                    else
                        msg = "Role #{name} : Delete failed for VM #{vm_id}; #{rc.message}"
                        Log.error LOG_COMP, msg, @service.id()
                        @service.log_error(msg)

                        success = false

                        new_nodes << node
                    end
                elsif (vm_state == '3' && lcm_state == '16') # UNKNOWN
                    vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)
                    vm.resume

                    new_nodes << node
                else
                    new_nodes << node
                end
            end

            @body['nodes'] = new_nodes
        end


        # Shuts down all the given nodes
        # @param scale_down [true,false] True to set the 'disposed' node flag
        def shutdown_nodes(nodes, scale_down)

            action = @body['shutdown_action']

            if action.nil?
                action = @service.get_shutdown_action()
            end

            if action.nil?
                action = @@default_shutdown
            end

            nodes.each { |node|
                vm_id = node['deploy_id']

                Log.debug LOG_COMP, "Role #{name} : Shutting down VM #{vm_id}", @service.id()

                vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)

                if action == 'shutdown-hard'
                    rc = vm.shutdown(true)
                else
                    rc = vm.shutdown
                end

                if scale_down
                    node['disposed'] = '1'
                end

                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : Shutdown failed for VM #{vm_id}, will perform a Delete; #{rc.message}"
                    Log.error LOG_COMP, msg, @service.id()
                    @service.log_error(msg)

                    rc = vm.finalize

                    if OpenNebula.is_error?(rc)
                        msg = "Role #{name} : Delete failed for VM #{vm_id}; #{rc.message}"
                        Log.error LOG_COMP, msg, @service.id()
                        @service.log_error(msg)

                        success = false
                        #return [false, rc.message]
                    else
                        Log.debug LOG_COMP, "Role #{name} : Delete success for VM #{vm_id}", @service.id()
                    end
                else
                    Log.debug LOG_COMP, "Role #{name} : Shutdown success for VM #{vm_id}", @service.id()
                end
            }
        end

    end
end