Statistics
| Branch: | Tag: | Revision:

one / src / scheduler / src / sched / Scheduler.cc @ 4697f1ee

History | View | Annotate | Download (46.7 KB)

1
/* -------------------------------------------------------------------------- */
2
/* Copyright 2002-2015, OpenNebula Project, OpenNebula Systems                */
3
/*                                                                            */
4
/* Licensed under the Apache License, Version 2.0 (the "License"); you may    */
5
/* not use this file except in compliance with the License. You may obtain    */
6
/* a copy of the License at                                                   */
7
/*                                                                            */
8
/* http://www.apache.org/licenses/LICENSE-2.0                                 */
9
/*                                                                            */
10
/* Unless required by applicable law or agreed to in writing, software        */
11
/* distributed under the License is distributed on an "AS IS" BASIS,          */
12
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   */
13
/* See the License for the specific language governing permissions and        */
14
/* limitations under the License.                                             */
15
/* -------------------------------------------------------------------------- */
16

    
17
#include <stdexcept>
18
#include <stdlib.h>
19

    
20
#include <signal.h>
21
#include <unistd.h>
22
#include <fcntl.h>
23
#include <sys/types.h>
24
#include <pwd.h>
25

    
26
#include <pthread.h>
27

    
28
#include <cmath>
29

    
30
#include "Scheduler.h"
31
#include "SchedulerTemplate.h"
32
#include "RankPolicy.h"
33
#include "NebulaLog.h"
34
#include "PoolObjectAuth.h"
35
#include "NebulaUtil.h"
36

    
37
using namespace std;
38

    
39

    
40
/* -------------------------------------------------------------------------- */
41
/* -------------------------------------------------------------------------- */
42

    
43
static double profile(bool start, const string& message="")
44
{
45
    static struct timespec estart, eend;
46
    double t;
47

    
48
    if (start)
49
    {
50
        clock_gettime(CLOCK_MONOTONIC, &estart);
51

    
52
        if (!message.empty())
53
        {
54
            NebulaLog::log("SCHED", Log::DDEBUG, message);
55
        }
56

    
57
        return 0;
58
    }
59

    
60
    clock_gettime(CLOCK_MONOTONIC, &eend);
61

    
62
    t = (eend.tv_sec + (eend.tv_nsec * pow(10,-9))) -
63
        (estart.tv_sec+(estart.tv_nsec*pow(10,-9)));
64

    
65
    if (!message.empty())
66
    {
67
        ostringstream oss;
68

    
69
        oss << message << " Total time: " << one_util::float_to_str(t) << "s";
70
        NebulaLog::log("SCHED", Log::DDEBUG, oss);
71
    }
72

    
73
    return t;
74
}
75

    
76
/* -------------------------------------------------------------------------- */
77
/* -------------------------------------------------------------------------- */
78

    
79
extern "C" void * scheduler_action_loop(void *arg)
80
{
81
    Scheduler *  sched;
82

    
83
    if ( arg == 0 )
84
    {
85
        return 0;
86
    }
87

    
88
    sched = static_cast<Scheduler *>(arg);
89

    
90
    NebulaLog::log("SCHED",Log::INFO,"Scheduler loop started.");
91

    
92
    sched->am.loop(sched->timer,0);
93

    
94
    NebulaLog::log("SCHED",Log::INFO,"Scheduler loop stopped.");
95

    
96
    return 0;
97
}
98

    
99
/* -------------------------------------------------------------------------- */
100
/* -------------------------------------------------------------------------- */
101

    
102
void Scheduler::start()
103
{
104
    int rc;
105

    
106
    ifstream      file;
107
    ostringstream oss;
108

    
109
    string etc_path;
110

    
111
    int          oned_port;
112
    unsigned int live_rescheds;
113

    
114
    pthread_attr_t pattr;
115

    
116
    // -----------------------------------------------------------
117
    // Configuration File
118
    // -----------------------------------------------------------
119
    string        log_file;
120
    const char *  nl = getenv("ONE_LOCATION");
121

    
122
    if (nl == 0) //OpenNebula installed under root directory
123
    {
124
        log_file = "/var/log/one/sched.log";
125
        etc_path = "/etc/one/";
126
    }
127
    else
128
    {
129
        oss << nl << "/var/sched.log";
130

    
131
        log_file = oss.str();
132

    
133
        oss.str("");
134
        oss << nl << "/etc/";
135

    
136
        etc_path = oss.str();
137
    }
138

    
139
    SchedulerTemplate conf(etc_path);
140

    
141
    if ( conf.load_configuration() != 0 )
142
    {
143
        throw runtime_error("Error reading configuration file.");
144
    }
145

    
146
    conf.get("ONED_PORT", oned_port);
147

    
148
    oss.str("");
149
    oss << "http://localhost:" << oned_port << "/RPC2";
150
    url = oss.str();
151

    
152
    conf.get("SCHED_INTERVAL", timer);
153

    
154
    conf.get("MAX_VM", machines_limit);
155

    
156
    conf.get("MAX_DISPATCH", dispatch_limit);
157

    
158
    conf.get("MAX_HOST", host_dispatch_limit);
159

    
160
    conf.get("LIVE_RESCHEDS", live_rescheds);
161

    
162
    // -----------------------------------------------------------
163
    // Log system & Configuration File
164
    // -----------------------------------------------------------
165

    
166
    try
167
    {
168
        NebulaLog::LogType log_system = NebulaLog::UNDEFINED;
169
        Log::MessageType   clevel     = Log::ERROR;;
170

    
171
        const VectorAttribute * log = conf.get("LOG");
172

    
173
        if ( log != 0 )
174
        {
175
            string value;
176
            int    ilevel;
177

    
178
            value      = log->vector_value("SYSTEM");
179
            log_system = NebulaLog::str_to_type(value);
180

    
181
            value  = log->vector_value("DEBUG_LEVEL");
182
            ilevel = atoi(value.c_str());
183

    
184
            if (Log::ERROR <= ilevel && ilevel <= Log::DDDEBUG)
185
            {
186
                clevel = static_cast<Log::MessageType>(ilevel);
187
            }
188
        }
189

    
190
        // Start the log system
191
        if ( log_system != NebulaLog::UNDEFINED )
192
        {
193
            NebulaLog::init_log_system(log_system,
194
                           clevel,
195
                           log_file.c_str(),
196
                           ios_base::trunc,
197
                           "mm_sched");
198
        }
199
        else
200
        {
201
            throw runtime_error("Unknown LOG_SYSTEM.");
202
        }
203

    
204
        NebulaLog::log("SCHED", Log::INFO, "Init Scheduler Log system");
205
    }
206
    catch(runtime_error &)
207
    {
208
        throw;
209
    }
210

    
211
    oss.str("");
212

    
213
    oss << "Starting Scheduler Daemon" << endl;
214
    oss << "----------------------------------------\n";
215
    oss << "     Scheduler Configuration File       \n";
216
    oss << "----------------------------------------\n";
217
    oss << conf;
218
    oss << "----------------------------------------";
219

    
220
    NebulaLog::log("SCHED", Log::INFO, oss);
221

    
222
    // -----------------------------------------------------------
223
    // XML-RPC Client
224
    // -----------------------------------------------------------
225

    
226
    try
227
    {
228
        long long message_size;
229

    
230
        conf.get("MESSAGE_SIZE", message_size);
231

    
232
        client = new Client("", url, message_size);
233

    
234
        oss.str("");
235

    
236
        oss << "XML-RPC client using " << client->get_message_size()
237
            << " bytes for response buffer.\n";
238

    
239
        NebulaLog::log("SCHED", Log::INFO, oss);
240
    }
241
    catch(runtime_error &)
242
    {
243
        throw;
244
    }
245

    
246
    xmlInitParser();
247

    
248
    // -------------------------------------------------------------------------
249
    // Get oned configuration, and init zone_id
250
    // -------------------------------------------------------------------------
251

    
252
    while (1)
253
    {
254
        try
255
        {
256
            xmlrpc_c::value result;
257

    
258
            client->call(client->get_endpoint(),        // serverUrl
259
                         "one.system.config",           // methodName
260
                         "s",                           // arguments format
261
                         &result,                       // resultP
262
                         client->get_oneauth().c_str());// auth string
263

    
264
            vector<xmlrpc_c::value> values =
265
                            xmlrpc_c::value_array(result).vectorValueValue();
266

    
267
            bool   success = xmlrpc_c::value_boolean(values[0]);
268
            string message = xmlrpc_c::value_string(values[1]);
269

    
270
            if (!success ||(oned_conf.from_xml(message) != 0))
271
            {
272
                ostringstream oss;
273

    
274
                oss << "Cannot contact oned, will retry... Error: " << message;
275

    
276
                NebulaLog::log("SCHED", Log::ERROR, oss);
277
            }
278

    
279
            break;
280
        }
281
        catch (exception const& e)
282
        {
283
            ostringstream oss;
284

    
285
            oss << "Cannot contact oned, will retry... Error: " << e.what();
286

    
287
            NebulaLog::log("SCHED", Log::ERROR, oss);
288
        }
289

    
290
        sleep(2);
291
    }
292

    
293
    NebulaLog::log("SCHED", Log::INFO, "oned successfully contacted.");
294

    
295
    zone_id = 0;
296

    
297
    const VectorAttribute * fed = oned_conf.get("FEDERATION");
298

    
299
    if (fed != 0)
300
    {
301
        if (fed->vector_value("ZONE_ID", zone_id) != 0)
302
        {
303
            zone_id = 0;
304
        }
305
    }
306

    
307
    oss.str("");
308
    oss << "Configuring scheduler for Zone ID: " << zone_id;
309

    
310
    NebulaLog::log("SCHED", Log::INFO, oss);
311

    
312
    // -------------------------------------------------------------------------
313
    // Pools
314
    // -------------------------------------------------------------------------
315

    
316
    hpool  = new HostPoolXML(client);
317
    upool  = new UserPoolXML(client);
318
    clpool = new ClusterPoolXML(client);
319
    vmpool = new VirtualMachinePoolXML(client,machines_limit,(live_rescheds==1));
320

    
321
    vmapool = new VirtualMachineActionsPoolXML(client, machines_limit);
322

    
323
    dspool     = new SystemDatastorePoolXML(client);
324
    img_dspool = new ImageDatastorePoolXML(client);
325

    
326
    acls = new AclXML(client, zone_id);
327

    
328
    // -----------------------------------------------------------
329
    // Load scheduler policies
330
    // -----------------------------------------------------------
331

    
332
    register_policies(conf);
333

    
334
    // -----------------------------------------------------------
335
    // Close stds, we no longer need them
336
    // -----------------------------------------------------------
337

    
338
    int fd;
339

    
340
    fd = open("/dev/null", O_RDWR);
341

    
342
    dup2(fd,0);
343
    dup2(fd,1);
344
    dup2(fd,2);
345

    
346
    close(fd);
347

    
348
    fcntl(0,F_SETFD,0); // Keep them open across exec funcs
349
    fcntl(1,F_SETFD,0);
350
    fcntl(2,F_SETFD,0);
351

    
352
    // -----------------------------------------------------------
353
    // Block all signals before creating any  thread
354
    // -----------------------------------------------------------
355

    
356
    sigset_t    mask;
357
    int         signal;
358

    
359
    sigfillset(&mask);
360

    
361
    pthread_sigmask(SIG_BLOCK, &mask, NULL);
362

    
363
    // -----------------------------------------------------------
364
    // Create the scheduler loop
365
    // -----------------------------------------------------------
366

    
367
    NebulaLog::log("SCHED",Log::INFO,"Starting scheduler loop...");
368

    
369
    pthread_attr_init (&pattr);
370
    pthread_attr_setdetachstate (&pattr, PTHREAD_CREATE_JOINABLE);
371

    
372
    rc = pthread_create(&sched_thread,&pattr,scheduler_action_loop,(void *) this);
373

    
374
    if ( rc != 0 )
375
    {
376
        NebulaLog::log("SCHED",Log::ERROR,
377
            "Could not start scheduler loop, exiting");
378

    
379
        return;
380
    }
381

    
382
    // -----------------------------------------------------------
383
    // Wait for a SIGTERM or SIGINT signal
384
    // -----------------------------------------------------------
385

    
386
    sigemptyset(&mask);
387

    
388
    sigaddset(&mask, SIGINT);
389
    sigaddset(&mask, SIGTERM);
390

    
391
    sigwait(&mask, &signal);
392

    
393
    am.trigger(ActionListener::ACTION_FINALIZE,0); //Cancel sched loop
394

    
395
    pthread_join(sched_thread,0);
396

    
397
    xmlCleanupParser();
398

    
399
    NebulaLog::finalize_log_system();
400
}
401

    
402
/* -------------------------------------------------------------------------- */
403
/* -------------------------------------------------------------------------- */
404

    
405
int Scheduler::set_up_pools()
406
{
407
    int                             rc;
408
    ostringstream                   oss;
409
    map<int,int>::const_iterator    it;
410
    map<int, int>                   shares;
411

    
412
    //--------------------------------------------------------------------------
413
    //Cleans the cache and get the pending VMs
414
    //--------------------------------------------------------------------------
415

    
416
    rc = vmpool->set_up();
417

    
418
    if ( rc != 0 )
419
    {
420
        return rc;
421
    }
422

    
423
    //--------------------------------------------------------------------------
424
    //Cleans the cache and get the datastores
425
    //--------------------------------------------------------------------------
426

    
427
    rc = dspool->set_up();
428

    
429
    if ( rc != 0 )
430
    {
431
        return rc;
432
    }
433

    
434
    rc = img_dspool->set_up();
435

    
436
    if ( rc != 0 )
437
    {
438
        return rc;
439
    }
440

    
441
    //--------------------------------------------------------------------------
442
    //Cleans the cache and get the hosts ids
443
    //--------------------------------------------------------------------------
444

    
445
    rc = upool->set_up();
446

    
447
    if ( rc != 0 )
448
    {
449
        return rc;
450
    }
451

    
452
    //--------------------------------------------------------------------------
453
    //Cleans the cache and get the hosts ids
454
    //--------------------------------------------------------------------------
455

    
456
    rc = hpool->set_up();
457

    
458
    if ( rc != 0 )
459
    {
460
        return rc;
461
    }
462

    
463
    //--------------------------------------------------------------------------
464
    //Cleans the cache and get the cluster information
465
    //--------------------------------------------------------------------------
466

    
467
    rc = clpool->set_up();
468

    
469
    if ( rc != 0 )
470
    {
471
        return rc;
472
    }
473

    
474
    //--------------------------------------------------------------------------
475
    //Cleans the cache and get the ACLs
476
    //--------------------------------------------------------------------------
477

    
478
    rc = acls->set_up();
479

    
480
    if ( rc != 0 )
481
    {
482
        return rc;
483
    }
484

    
485
    return 0;
486
};
487

    
488
/* -------------------------------------------------------------------------- */
489
/* -------------------------------------------------------------------------- */
490
/* -------------------------------------------------------------------------- */
491
/* -------------------------------------------------------------------------- */
492

    
493
/**
494
 *  Match hosts for this VM that:
495
 *    1. Fulfills ACL
496
 *    2. Meets user/policy requirements
497
 *    3. Have enough capacity to host the VM
498
 *
499
 *  @param acl pool
500
 *  @param users the user pool
501
 *  @param vm the virtual machine
502
 *  @param vm_memory vm requirement
503
 *  @param vm_cpu vm requirement
504
 *  @param vm_pci vm requirement
505
 *  @param host to evaluate vm assgiment
506
 *  @param n_auth number of hosts authorized for the user, incremented if needed
507
 *  @param n_error number of requirement errors, incremented if needed
508
 *  @param n_fits number of hosts with capacity that fits the VM requirements
509
 *  @param n_matched number of hosts that fullfil VM sched_requirements
510
 *  @param n_cluster_matched number of hosts that fulfill VM sched_cluster_requirements
511
 *  @param error, string describing why the host is not valid
512
 *  @return true for a positive match
513
 */
514
static bool match_host(AclXML * acls, UserPoolXML * upool, VirtualMachineXML* vm,
515
    int vmem, int vcpu, vector<VectorAttribute *>& vpci, HostXML * host,
516
    int &n_auth, int& n_error, int &n_fits, int &n_matched,
517
    int &n_cluster_matched, string &error)
518
{
519
    // -------------------------------------------------------------------------
520
    // Filter current Hosts for resched VMs
521
    // -------------------------------------------------------------------------
522
    if (vm->is_resched() && vm->get_hid() == host->get_hid())
523
    {
524
        error = "VM cannot be migrated to its current Host.";
525
        return false;
526
    }
527

    
528
    // -------------------------------------------------------------------------
529
    // Check that VM can be deployed in local hosts
530
    // -------------------------------------------------------------------------
531
    if (vm->is_only_public_cloud() && !host->is_public_cloud())
532
    {
533
        error = "VM requires a Public Cloud Host, but it's local.";
534
        return false;
535
    }
536

    
537
    // -------------------------------------------------------------------------
538
    // Check if user is authorized
539
    // -------------------------------------------------------------------------
540
    if ( vm->get_uid() != 0 && vm->get_gid() != 0 )
541
    {
542
        PoolObjectAuth hperms;
543

    
544
        hperms.oid      = host->get_hid();
545
        hperms.cids     = host->get_cids();
546
        hperms.obj_type = PoolObjectSQL::HOST;
547

    
548
        UserXML * user = upool->get(vm->get_uid());
549

    
550
        if (user == 0)
551
        {
552
            error = "User does not exists.";
553
            return false;
554
        }
555

    
556
        const vector<int> vgids = user->get_gids();
557

    
558
        set<int> gids(vgids.begin(), vgids.end());
559

    
560
        if ( !acls->authorize(vm->get_uid(), gids, hperms, AuthRequest::MANAGE))
561
        {
562
            error = "Permission denied.";
563
            return false;
564
        }
565
    }
566

    
567
    n_auth++;
568

    
569
    // -------------------------------------------------------------------------
570
    // Check host clusters
571
    // -------------------------------------------------------------------------
572

    
573
    if (host->is_in_cluster(vm->get_match_clusters()) != true)
574
    {
575
        error = "Host is not in any of the filtered Clusters.";
576
        return false;
577
    }
578

    
579
    n_cluster_matched++;
580

    
581
    // -------------------------------------------------------------------------
582
    // Check host capacity
583
    // -------------------------------------------------------------------------
584
    if (host->test_capacity(vcpu, vmem, vpci, error) != true)
585
    {
586
        return false;
587
    }
588

    
589
    n_fits++;
590

    
591
    // -------------------------------------------------------------------------
592
    // Evaluate VM requirements
593
    // -------------------------------------------------------------------------
594
    if (!vm->get_requirements().empty())
595
    {
596
        char * estr;
597
        bool   matched;
598

    
599
        if ( host->eval_bool(vm->get_requirements(), matched, &estr) != 0 )
600
        {
601
            ostringstream oss;
602

    
603
            n_error++;
604

    
605
            oss << "Error in SCHED_REQUIREMENTS: '" << vm->get_requirements()
606
                << "', error: " << estr;
607

    
608
            vm->log(oss.str());
609

    
610
            error = oss.str();
611

    
612
            free(estr);
613

    
614
            return false;
615
        }
616

    
617
        if (matched == false)
618
        {
619
            error = "It does not fulfill SCHED_REQUIREMENTS.";
620
            return false;
621
        }
622
    }
623

    
624
    n_matched++;
625

    
626
    return true;
627
};
628

    
629
/* -------------------------------------------------------------------------- */
630
/* -------------------------------------------------------------------------- */
631

    
632
/**
633
 *  Match system DS's for this VM that:
634
 *    1. Meet user/policy requirements
635
 *    2. Have enough capacity to host the VM
636
 *
637
 *  @param acl pool
638
 *  @param users the user pool
639
 *  @param vm the virtual machine
640
 *  @param vdisk vm requirement
641
 *  @param ds to evaluate vm assgiment
642
 *  @param n_auth number of ds authorized for the user, incremented if needed
643
 *  @param n_error number of requirement errors, incremented if needed
644
 *  @param n_matched number of system ds that fullfil VM sched_requirements
645
 *  @param n_fits number of system ds with capacity that fits the VM requirements
646
 *  @param n_cluster_matched number of system ds that fulfill VM sched_cluster_requirements
647
 *  @param error, string describing why the host is not valid
648
 *  @return true for a positive match
649
 */
650
static bool match_system_ds(AclXML * acls, UserPoolXML * upool,
651
    VirtualMachineXML* vm, long long vdisk, DatastoreXML * ds, int& n_auth,
652
    int& n_error, int& n_fits, int &n_matched,
653
    int &n_cluster_matched, string &error)
654
{
655
    // -------------------------------------------------------------------------
656
    // Check if user is authorized
657
    // -------------------------------------------------------------------------
658
    if ( vm->get_uid() != 0 && vm->get_gid() != 0 )
659
    {
660
        PoolObjectAuth dsperms;
661

    
662
        ds->get_permissions(dsperms);
663

    
664
        UserXML * user = upool->get(vm->get_uid());
665

    
666
        if (user == 0)
667
        {
668
            error = "User does not exists.";
669
            return false;
670
        }
671

    
672
        const vector<int> vgids = user->get_gids();
673

    
674
        set<int> gids(vgids.begin(), vgids.end());
675

    
676
        if ( !acls->authorize(vm->get_uid(), gids, dsperms, AuthRequest::USE))
677
        {
678
            error = "Permission denied.";
679
            return false;
680
        }
681
    }
682

    
683
    n_auth++;
684

    
685
    // -------------------------------------------------------------------------
686
    // Check host clusters
687
    // -------------------------------------------------------------------------
688

    
689
    if (ds->is_in_cluster(vm->get_match_clusters()) != true)
690
    {
691
        error = "System DS is not in any of the filtered Clusters.";
692
        return false;
693
    }
694

    
695
    n_cluster_matched++;
696

    
697
    // -------------------------------------------------------------------------
698
    // Check datastore capacity for shared systems DS (non-shared will be
699
    // checked in a per host basis during dispatch). Resume actions do not
700
    // add to shared system DS usage, and are skipped also
701
    // -------------------------------------------------------------------------
702
    if (ds->is_shared() && ds->is_monitored() && !vm->is_resume() &&
703
        !ds->test_capacity(vdisk, error))
704
    {
705
        return false;
706
    }
707

    
708
    n_fits++;
709

    
710
    // -------------------------------------------------------------------------
711
    // Evaluate VM requirements
712
    // -------------------------------------------------------------------------
713
    if (!vm->get_ds_requirements().empty())
714
    {
715
        char * estr;
716
        bool   matched;
717

    
718
        if ( ds->eval_bool(vm->get_ds_requirements(), matched, &estr) != 0 )
719
        {
720
            ostringstream oss;
721

    
722
            n_error++;
723

    
724
            oss << "Error in SCHED_DS_REQUIREMENTS: '"
725
                << vm->get_ds_requirements() << "', error: " << error;
726

    
727
            vm->log(oss.str());
728

    
729
            free(estr);
730
        }
731

    
732
        if (matched == false)
733
        {
734
            error = "It does not fulfill SCHED_DS_REQUIREMENTS.";
735
            return false;
736
        }
737
    }
738

    
739
    n_matched++;
740

    
741
    return true;
742
}
743

    
744
/* -------------------------------------------------------------------------- */
745
/* -------------------------------------------------------------------------- */
746

    
747
/**
748
 *  Match clusters for this VM that:
749
 *    1. Meet user/automatic requirements
750
 *
751
 *  @param vm the virtual machine
752
 *  @param cluster to evaluate vm assignment
753
 *  @param n_error number of requirement errors
754
 *  @param n_matched number of clusters that fulfill VM sched_cluster_requirements
755
 *  @param error, string describing why the cluster is not valid
756
 *  @return true for a positive match
757
 */
758
static bool match_cluster(VirtualMachineXML* vm, ClusterXML * cluster,
759
        int& n_error, int &n_matched, string &error)
760
{
761
    // -------------------------------------------------------------------------
762
    // Evaluate VM requirements
763
    // -------------------------------------------------------------------------
764
    if (!vm->get_cluster_requirements().empty())
765
    {
766
        char * estr;
767
        bool   matched;
768

    
769
        if ( cluster->eval_bool(vm->get_cluster_requirements(), matched, &estr) != 0 )
770
        {
771
            ostringstream oss;
772

    
773
            n_error++;
774

    
775
            oss << "Error in SCHED_CLUSTER_REQUIREMENTS: '"
776
                << vm->get_cluster_requirements() << "', error: " << error;
777

    
778
            vm->log(oss.str());
779

    
780
            free(estr);
781
        }
782

    
783
        if (matched == false)
784
        {
785
            ostringstream oss;
786

    
787
            oss << "It does not fulfill SCHED_CLUSTER_REQUIREMENTS: "
788
                << vm->get_cluster_requirements();
789
            error = oss.str();
790

    
791
            return false;
792
        }
793
    }
794

    
795
    n_matched++;
796

    
797
    return true;
798
}
799

    
800
/* -------------------------------------------------------------------------- */
801

    
802
static void log_match(int vid, const string& msg)
803
{
804
    ostringstream oss;
805

    
806
    oss << "Match-making results for VM " << vid << ":\n\t" << msg << endl;
807

    
808
    NebulaLog::log("SCHED", Log::DEBUG, oss);
809
}
810

    
811
/* -------------------------------------------------------------------------- */
812
/* -------------------------------------------------------------------------- */
813

    
814
void Scheduler::match_schedule()
815
{
816
    VirtualMachineXML * vm;
817

    
818
    int vm_memory;
819
    int vm_cpu;
820
    long long vm_disk;
821
    vector<VectorAttribute *> vm_pci;
822

    
823
    int n_resources;
824
    int n_matched;
825
    int n_auth;
826
    int n_error;
827
    int n_fits;
828
    int n_cluster_matched;
829

    
830
    ClusterXML * cluster;
831
    HostXML * host;
832
    DatastoreXML *ds;
833

    
834
    string m_error;
835

    
836
    map<int, ObjectXML*>::const_iterator  vm_it;
837
    map<int, ObjectXML*>::const_iterator  obj_it;
838

    
839
    vector<SchedulerPolicy *>::iterator it;
840

    
841
    const map<int, ObjectXML*> pending_vms = vmpool->get_objects();
842
    const map<int, ObjectXML*> clusters    = clpool->get_objects();
843
    const map<int, ObjectXML*> hosts       = hpool->get_objects();
844
    const map<int, ObjectXML*> datastores  = dspool->get_objects();
845
    const map<int, ObjectXML*> users       = upool->get_objects();
846

    
847
    double total_cl_match_time = 0;
848
    double total_host_match_time = 0;
849
    double total_host_rank_time = 0;
850
    double total_ds_match_time = 0;
851
    double total_ds_rank_time = 0;
852

    
853
    time_t stime = time(0);
854

    
855
    for (vm_it=pending_vms.begin(); vm_it != pending_vms.end(); vm_it++)
856
    {
857
        vm = static_cast<VirtualMachineXML*>(vm_it->second);
858

    
859
        vm->get_requirements(vm_cpu, vm_memory, vm_disk, vm_pci);
860

    
861
        n_resources = 0;
862
        n_fits    = 0;
863
        n_matched = 0;
864
        n_auth    = 0;
865
        n_error   = 0;
866

    
867
        //----------------------------------------------------------------------
868
        // Test Image Datastore capacity, but not for migrations or resume
869
        //----------------------------------------------------------------------
870
        if (!vm->is_resched() && !vm->is_resume())
871
        {
872
            if (vm->test_image_datastore_capacity(img_dspool, m_error) == false)
873
            {
874
                if (vm->is_public_cloud()) //No capacity needed for public cloud
875
                {
876
                    vm->set_only_public_cloud();
877
                }
878
                else
879
                {
880
                    log_match(vm->get_oid(), "Cannot schedule VM. "+m_error);
881

    
882
                    vm->log("Cannot schedule VM. "+m_error);
883
                    vmpool->update(vm);
884

    
885
                    continue;
886
                }
887
            }
888
        }
889

    
890
        // ---------------------------------------------------------------------
891
        // Match clusters for this VM.
892
        // ---------------------------------------------------------------------
893
        profile(true);
894

    
895
        for (obj_it=clusters.begin(); obj_it != clusters.end(); obj_it++)
896
        {
897
            cluster = static_cast<ClusterXML *>(obj_it->second);
898

    
899
            if (match_cluster(vm, cluster, n_error, n_matched, m_error))
900
            {
901
                vm->add_match_cluster(cluster->get_oid());
902

    
903
                n_resources++;
904
            }
905
            else
906
            {
907
                if ( n_error > 0 )
908
                {
909
                    log_match(vm->get_oid(), "Cannot schedule VM. " + m_error);
910
                    break;
911
                }
912
                else if (NebulaLog::log_level() >= Log::DDEBUG)
913
                {
914
                    ostringstream oss;
915
                    oss << "Hosts and System DS in Cluster "
916
                        << cluster->get_oid() << " discarded for VM "
917
                        << vm->get_oid() << ". " << m_error;
918

    
919
                    NebulaLog::log("SCHED", Log::DDEBUG, oss);
920
                }
921
            }
922
        }
923

    
924
        total_cl_match_time += profile(false);
925

    
926
        // ---------------------------------------------------------------------
927
        // Log scheduling errors to VM user if any
928
        // ---------------------------------------------------------------------
929

    
930
        if (n_resources == 0) //No clusters assigned, let's see why
931
        {
932
            // TODO
933
        }
934

    
935
        // ---------------------------------------------------------------------
936
        // Match hosts for this VM.
937
        // ---------------------------------------------------------------------
938
        profile(true);
939

    
940
        for (obj_it=hosts.begin(); obj_it != hosts.end(); obj_it++)
941
        {
942
            host = static_cast<HostXML *>(obj_it->second);
943

    
944
            if (match_host(acls, upool, vm, vm_memory, vm_cpu, vm_pci, host,
945
                    n_auth, n_error, n_fits, n_matched, n_cluster_matched, m_error))
946
            {
947
                vm->add_match_host(host->get_hid());
948

    
949
                n_resources++;
950
            }
951
            else
952
            {
953
                if ( n_error > 0 )
954
                {
955
                    log_match(vm->get_oid(), "Cannot schedule VM. " + m_error);
956
                    break;
957
                }
958
                else if (NebulaLog::log_level() >= Log::DDEBUG)
959
                {
960
                    ostringstream oss;
961
                    oss << "Host " << host->get_hid() << " discarded for VM "
962
                        << vm->get_oid() << ". " << m_error;
963

    
964
                    NebulaLog::log("SCHED", Log::DDEBUG, oss);
965
                }
966
            }
967
        }
968

    
969
        total_host_match_time += profile(false);
970

    
971
        // ---------------------------------------------------------------------
972
        // Log scheduling errors to VM user if any
973
        // ---------------------------------------------------------------------
974

    
975
        if (n_resources == 0) //No hosts assigned, let's see why
976
        {
977
            if (n_error == 0) //No syntax error
978
            {
979
                if (hosts.size() == 0)
980
                {
981
                    vm->log("No hosts enabled to run VMs");
982
                }
983
                else if (n_auth == 0)
984
                {
985
                    vm->log("User is not authorized to use any host");
986
                }
987
                else if (n_cluster_matched == 0)
988
                {
989
                    ostringstream oss;
990

    
991
                    oss << "No host meets capacity and SCHED_CLUSTER_REQUIREMENTS: "
992
                        << vm->get_cluster_requirements();
993

    
994
                    vm->log(oss.str());
995
                }
996
                else if (n_fits == 0)
997
                {
998
                    ostringstream oss;
999

    
1000
                    oss << "No host with enough capacity to deploy the VM";
1001

    
1002
                    vm->log(oss.str());
1003
                }
1004
                else if (n_matched == 0)
1005
                {
1006
                    ostringstream oss;
1007

    
1008
                    oss << "No host meets capacity and SCHED_REQUIREMENTS: "
1009
                        << vm->get_requirements();
1010

    
1011
                    vm->log(oss.str());
1012
                }
1013
            }
1014

    
1015
            vmpool->update(vm);
1016

    
1017
            log_match(vm->get_oid(), "Cannot schedule VM, there is no suitable host.");
1018

    
1019
            continue;
1020
        }
1021

    
1022
        // ---------------------------------------------------------------------
1023
        // Schedule matched hosts
1024
        // ---------------------------------------------------------------------
1025
        profile(true);
1026

    
1027
        for (it=host_policies.begin() ; it != host_policies.end() ; it++)
1028
        {
1029
            (*it)->schedule(vm);
1030
        }
1031

    
1032
        vm->sort_match_hosts();
1033

    
1034
        total_host_rank_time += profile(false);
1035

    
1036
        if (vm->is_resched())//Will use same system DS for migrations
1037
        {
1038
            vm->add_match_datastore(vm->get_dsid());
1039

    
1040
            continue;
1041
        }
1042

    
1043
        // ---------------------------------------------------------------------
1044
        // Match datastores for this VM
1045
        // ---------------------------------------------------------------------
1046

    
1047
        profile(true);
1048

    
1049
        n_resources = 0;
1050
        n_auth    = 0;
1051
        n_matched = 0;
1052
        n_error   = 0;
1053
        n_fits    = 0;
1054

    
1055
        for (obj_it=datastores.begin(); obj_it != datastores.end(); obj_it++)
1056
        {
1057
            ds = static_cast<DatastoreXML *>(obj_it->second);
1058

    
1059
            if (match_system_ds(acls, upool, vm, vm_disk, ds, n_auth, n_error,
1060
                        n_fits, n_matched, n_cluster_matched, m_error))
1061
            {
1062
                vm->add_match_datastore(ds->get_oid());
1063

    
1064
                n_resources++;
1065
            }
1066
            else
1067
            {
1068
                if (n_error > 0)
1069
                {
1070
                    log_match(vm->get_oid(), "Cannot schedule VM. " + m_error);
1071
                    break;
1072
                }
1073
                else if (NebulaLog::log_level() >= Log::DDEBUG)
1074
                {
1075
                    ostringstream oss;
1076
                    oss << "System DS " << ds->get_oid() << " discarded for VM "
1077
                        << vm->get_oid() << ". " << m_error;
1078

    
1079
                    NebulaLog::log("SCHED", Log::DDEBUG, oss);
1080
                }
1081
            }
1082
        }
1083

    
1084
        total_ds_match_time += profile(false);
1085

    
1086
        // ---------------------------------------------------------------------
1087
        // Log scheduling errors to VM user if any
1088
        // ---------------------------------------------------------------------
1089

    
1090
        if (n_resources == 0)
1091
        {
1092
            if (vm->is_public_cloud())//Public clouds don't need a system DS
1093
            {
1094
                vm->set_only_public_cloud();
1095

    
1096
                continue;
1097
            }
1098
            else//No datastores assigned, let's see why
1099
            {
1100
                if (n_error == 0)//No syntax error
1101
                {
1102
                    if (datastores.size() == 0)
1103
                    {
1104
                        vm->log("No system datastores found to run VMs");
1105
                    }
1106
                    else if (n_auth == 0)
1107
                    {
1108
                        vm->log("User is not authorized to use any system datastore");
1109
                    }
1110
                    else if (n_cluster_matched == 0)
1111
                    {
1112
                        ostringstream oss;
1113

    
1114
                        oss << "No system datastore meets capacity and "
1115
                            << "SCHED_CLUSTER_REQUIREMENTS: "
1116
                            << vm->get_cluster_requirements();
1117

    
1118
                        vm->log(oss.str());
1119
                    }
1120
                    else if (n_fits == 0)
1121
                    {
1122
                        ostringstream oss;
1123
                        oss <<  "No system datastore with enough capacity for the VM";
1124

    
1125
                        vm->log(oss.str());
1126
                    }
1127
                    else if (n_matched == 0)
1128
                    {
1129
                        ostringstream oss;
1130

    
1131
                        oss << "No system datastore meets capacity "
1132
                            << "and SCHED_DS_REQUIREMENTS: "
1133
                            << vm->get_ds_requirements();
1134

    
1135
                        vm->log(oss.str());
1136
                    }
1137
                }
1138

    
1139
                vm->clear_match_hosts();
1140

    
1141
                vmpool->update(vm);
1142

    
1143
                log_match(vm->get_oid(), "Cannot schedule VM, there is no suitable "
1144
                    "system ds.");
1145

    
1146
                continue;
1147
            }
1148
        }
1149

    
1150
        // ---------------------------------------------------------------------
1151
        // Schedule matched datastores
1152
        // ---------------------------------------------------------------------
1153

    
1154
        profile(true);
1155

    
1156
        for (it=ds_policies.begin() ; it != ds_policies.end() ; it++)
1157
        {
1158
            (*it)->schedule(vm);
1159
        }
1160

    
1161
        vm->sort_match_datastores();
1162

    
1163
        total_ds_rank_time += profile(false);
1164
    }
1165

    
1166
    if (NebulaLog::log_level() >= Log::DDEBUG)
1167
    {
1168
        ostringstream oss;
1169

    
1170
        oss << "Match Making statistics:\n"
1171
            << "\tNumber of VMs:            " << pending_vms.size() << endl
1172
            << "\tTotal time:               " << one_util::float_to_str(time(0) - stime) << "s" << endl
1173
            << "\tTotal Cluster Match time: " << one_util::float_to_str(total_cl_match_time) << "s" << endl
1174
            << "\tTotal Host Match time:    " << one_util::float_to_str(total_host_match_time) << "s" << endl
1175
            << "\tTotal Host Ranking time:  " << one_util::float_to_str(total_host_rank_time) << "s" << endl
1176
            << "\tTotal DS Match time:      " << one_util::float_to_str(total_ds_match_time) << "s" << endl
1177
            << "\tTotal DS Ranking time:    " << one_util::float_to_str(total_ds_rank_time) << "s" << endl;
1178

    
1179
        NebulaLog::log("SCHED", Log::DDEBUG, oss);
1180
    }
1181

    
1182
    if (NebulaLog::log_level() >= Log::DDDEBUG)
1183
    {
1184
        ostringstream oss;
1185

    
1186
        oss << "Scheduling Results:" << endl;
1187

    
1188
        for (map<int, ObjectXML*>::const_iterator vm_it=pending_vms.begin();
1189
            vm_it != pending_vms.end(); vm_it++)
1190
        {
1191
            vm = static_cast<VirtualMachineXML*>(vm_it->second);
1192

    
1193
            oss << *vm;
1194
        }
1195

    
1196
        NebulaLog::log("SCHED", Log::DDDEBUG, oss);
1197
    }
1198
}
1199

    
1200
/* -------------------------------------------------------------------------- */
1201
/* -------------------------------------------------------------------------- */
1202

    
1203
/**
 *  Dispatch phase. Walks the pending VMs (up to dispatch_limit) and, for each
 *  one, picks the highest ranked host/system-DS pair produced by
 *  match_schedule(), re-checks capacity, and asks vmpool to deploy. Host and
 *  datastore capacity counters are updated in-memory so later VMs in the same
 *  cycle see the reduced capacity.
 */
void Scheduler::dispatch()
{
    HostXML *           host;
    DatastoreXML *      ds;
    VirtualMachineXML * vm;

    ostringstream dss;

    int cpu, mem;
    long long dsk;
    vector<VectorAttribute *> pci;

    int hid, dsid;
    set<int> cids;
    bool test_cap_result;

    unsigned int dispatched_vms = 0;

    // Per-host counter of VMs dispatched this cycle, capped by
    // host_dispatch_limit below.
    map<int, unsigned int>  host_vms;
    pair<map<int,unsigned int>::iterator, bool> rc;

    map<int, ObjectXML*>::const_iterator vm_it;

    vector<Resource *>::const_reverse_iterator i, j;

    const map<int, ObjectXML*> pending_vms = vmpool->get_objects();

    dss << "Dispatching VMs to hosts:\n" << "\tVMID\tHost\tSystem DS\n"
        << "\t-------------------------\n";

    //--------------------------------------------------------------------------
    // Dispatch each VM till we reach the dispatch limit
    //--------------------------------------------------------------------------

    // dispatch_limit <= 0 means "no global limit"
    for (vm_it = pending_vms.begin();
         vm_it != pending_vms.end() &&
            ( dispatch_limit <= 0 || dispatched_vms < dispatch_limit );
         vm_it++)
    {
        vm = static_cast<VirtualMachineXML*>(vm_it->second);

        // Matched hosts are sorted ascending by rank; iterated in reverse below
        const vector<Resource *> resources = vm->get_match_hosts();

        //----------------------------------------------------------------------
        // Test Image Datastore capacity, but not for migrations or resume
        //----------------------------------------------------------------------
        if (!resources.empty() && !vm->is_resched() && !vm->is_resume())
        {
            if (vm->test_image_datastore_capacity(img_dspool) == false)
            {
                if (vm->is_public_cloud())//No capacity needed for public cloud
                {
                    vm->set_only_public_cloud();
                }
                else
                {
                    continue;
                }
            }
        }

        vm->get_requirements(cpu, mem, dsk, pci);

        //----------------------------------------------------------------------
        // Get the highest ranked host and best System DS for it
        //----------------------------------------------------------------------
        for (i = resources.rbegin() ; i != resources.rend() ; i++)
        {
            hid  = (*i)->oid;
            host = hpool->get(hid);

            if ( host == 0 )
            {
                continue;
            }

            cids = host->get_cids();

            //------------------------------------------------------------------
            // Test host capacity
            //------------------------------------------------------------------
            if (host->test_capacity(cpu, mem, pci) != true)
            {
                continue;
            }

            //------------------------------------------------------------------
            // Check that VM can be deployed in local hosts
            //------------------------------------------------------------------
            if (vm->is_only_public_cloud() && !host->is_public_cloud())
            {
                continue;
            }

            //------------------------------------------------------------------
            // Test host dispatch limit (init counter if needed)
            //------------------------------------------------------------------
            // insert() is a no-op if hid already has a counter; rc.first points
            // to the (possibly pre-existing) entry either way.
            rc = host_vms.insert(make_pair(hid,0));

            if (rc.first->second >= host_dispatch_limit)
            {
                continue;
            }

            //------------------------------------------------------------------
            // Get the highest ranked datastore
            //------------------------------------------------------------------
            const vector<Resource *> ds_resources = vm->get_match_datastores();

            dsid = -1;

            // Skip the loop for public cloud hosts, they don't need a system DS
            if (host->is_public_cloud())
            {
                j = ds_resources.rend();
            }
            else
            {
                j = ds_resources.rbegin();
            }

            for ( ; j != ds_resources.rend() ; j++)
            {
                ds = dspool->get((*j)->oid);

                if ( ds == 0 )
                {
                    continue;
                }

                //--------------------------------------------------------------
                // Test cluster membership for datastore and selected host
                //--------------------------------------------------------------
                if (!ds->is_in_cluster(cids))
                {
                    continue;
                }

                //--------------------------------------------------------------
                // Test datastore capacity, but not for migrations
                //--------------------------------------------------------------

                if (!vm->is_resched())
                {
                    if (ds->is_shared() && ds->is_monitored())
                    {
                        // A resume action tests DS capacity only
                        // for non-shared system DS
                        if (vm->is_resume())
                        {
                            test_cap_result = true;
                        }
                        else
                        {
                            test_cap_result = ds->test_capacity(dsk);
                        }
                    }
                    else
                    {
                        // Non-shared system DS consumes the host's local storage
                        test_cap_result = host->test_ds_capacity(ds->get_oid(), dsk);
                    }

                    if (test_cap_result != true)
                    {
                        continue;
                    }
                }

                //--------------------------------------------------------------
                //Select this DS to dispatch VM
                //--------------------------------------------------------------
                dsid = (*j)->oid;

                break;
            }

            if (dsid == -1 && !host->is_public_cloud())//No system DS for this host
            {
                continue;
            }

            //------------------------------------------------------------------
            // Dispatch and update host and DS capacity, and dispatch counters
            //------------------------------------------------------------------
            if (vmpool->dispatch(vm_it->first, hid, dsid, vm->is_resched()) != 0)
            {
                continue;
            }

            dss << "\t" << vm_it->first << "\t" << hid << "\t" << dsid << "\n";

            // DS capacity is only added for new deployments, not for migrations
            // It is also omitted for VMs deployed in public cloud hosts
            // NOTE(review): 'ds' is only guaranteed to point at the selected
            // datastore when dsid != -1; the !is_public_cloud() guard is what
            // keeps this branch safe — confirm if that invariant changes.
            if (!vm->is_resched() && !host->is_public_cloud())
            {
                if (ds->is_shared() && ds->is_monitored())
                {
                    // Resumed VMs do not add to shared system DS capacity
                    if (!vm->is_resume())
                    {
                        ds->add_capacity(dsk);
                    }
                }
                else
                {
                    host->add_ds_capacity(ds->get_oid(), dsk);
                }

                vm->add_image_datastore_capacity(img_dspool);
            }

            host->add_capacity(vm->get_oid(), cpu, mem, pci);

            host_vms[hid]++;

            dispatched_vms++;

            break;
        }
    }

    // Loop left early only when the global dispatch limit was hit
    if (vm_it != pending_vms.end())
    {
        dss << endl << "MAX_DISPATCH limit of " << dispatch_limit << " reached, "
            << std::distance(vm_it, pending_vms.end()) << " VMs were not dispatched";
    }

    NebulaLog::log("SCHED", Log::DEBUG, dss);
}
1432

    
1433
/* -------------------------------------------------------------------------- */
1434
/* -------------------------------------------------------------------------- */
1435

    
1436
/**
 *  Executes the SCHED_ACTION entries of every VM returned by vmapool. An
 *  action runs when it has a TIME in the past and no DONE timestamp yet; on
 *  success DONE is stamped, on failure a MESSAGE is recorded. The (possibly
 *  updated) attribute is written back into the VM template.
 *
 *  @return 0 always
 */
int Scheduler::do_scheduled_actions()
{
    VirtualMachineXML* vm;

    const map<int, ObjectXML*>  vms = vmapool->get_objects();
    map<int, ObjectXML*>::const_iterator vm_it;

    // NOTE(review): 'attributes' is reused across VM iterations without an
    // explicit clear() here — presumably get_actions() replaces the contents;
    // verify against VirtualMachineXML::get_actions.
    vector<Attribute *> attributes;
    vector<Attribute *>::iterator it;

    VectorAttribute* vatt;

    int action_time;
    int done_time;
    int has_time;   // 0 when the TIME field parsed successfully
    int has_done;   // -1 when no DONE field is present (action still pending)
    int has_done;

    string action_st, error_msg;

    time_t the_time = time(0);
    string time_str = one_util::log_time(the_time);

    for (vm_it=vms.begin(); vm_it != vms.end(); vm_it++)
    {
        vm = static_cast<VirtualMachineXML*>(vm_it->second);

        vm->get_actions(attributes);

        // TODO: Sort actions by TIME
        for (it=attributes.begin(); it != attributes.end(); it++)
        {
            vatt = dynamic_cast<VectorAttribute*>(*it);

            // Non-vector SCHED_ACTION entries are malformed; discard them
            if (vatt == 0)
            {
                delete *it;

                continue;
            }

            has_time  = vatt->vector_value("TIME", action_time);
            has_done  = vatt->vector_value("DONE", done_time);
            action_st = vatt->vector_value("ACTION");

            // Run only pending (no DONE) actions whose time has passed
            if (has_time == 0 && has_done == -1 && action_time < the_time)
            {
                ostringstream oss;

                int rc = VirtualMachineXML::parse_action_name(action_st);

                oss << "Executing action '" << action_st << "' for VM "
                    << vm->get_oid() << " : ";

                if ( rc != 0 )
                {
                    error_msg = "This action is not supported.";
                }
                else
                {
                    rc = vmapool->action(vm->get_oid(), action_st, error_msg);
                }

                if (rc == 0)
                {
                    // Success: clear any stale failure message, stamp DONE
                    vatt->remove("MESSAGE");
                    vatt->replace("DONE", static_cast<int>(the_time));

                    oss << "Success.";
                }
                else
                {
                    ostringstream oss_aux;

                    oss_aux << time_str << " : " << error_msg;

                    // Failure: record the message; DONE stays unset so the
                    // action is retried next cycle
                    vatt->replace("MESSAGE", oss_aux.str());

                    oss << "Failure. " << error_msg;
                }

                NebulaLog::log("VM", Log::INFO, oss);
            }

            // Hand the attribute back to the VM template (ownership transfer
            // — presumably; confirm against VirtualMachineXML::set_attribute)
            vm->set_attribute(vatt);
        }

        vmpool->update(vm);
    }

    return 0;
}
1527

    
1528
/* -------------------------------------------------------------------------- */
1529
/* -------------------------------------------------------------------------- */
1530

    
1531
void Scheduler::do_action(const string &name, void *args)
1532
{
1533
    int rc;
1534

    
1535
    if (name == ACTION_TIMER)
1536
    {
1537
        profile(true);
1538
        rc = vmapool->set_up();
1539
        profile(false,"Getting scheduled actions information.");
1540

    
1541
        if ( rc == 0 )
1542
        {
1543
            profile(true);
1544
            do_scheduled_actions();
1545
            profile(false,"Executing scheduled actions.");
1546
        }
1547

    
1548
        profile(true);
1549
        rc = set_up_pools();
1550
        profile(false,"Getting VM and Host information.");
1551

    
1552
        if ( rc != 0 )
1553
        {
1554
            return;
1555
        }
1556

    
1557
        match_schedule();
1558

    
1559
        profile(true);
1560
        dispatch();
1561
        profile(false,"Dispatching VMs to hosts.");
1562
    }
1563
    else if (name == ACTION_FINALIZE)
1564
    {
1565
        NebulaLog::log("SCHED",Log::INFO,"Stopping the scheduler...");
1566
    }
1567
}