Statistics
| Branch: | Tag: | Revision:

one / src / scheduler / src / sched / Scheduler.cc @ 1f571426

History | View | Annotate | Download (46 KB)

1
/* -------------------------------------------------------------------------- */
2
/* Copyright 2002-2017, OpenNebula Project, OpenNebula Systems                */
3
/*                                                                            */
4
/* Licensed under the Apache License, Version 2.0 (the "License"); you may    */
5
/* not use this file except in compliance with the License. You may obtain    */
6
/* a copy of the License at                                                   */
7
/*                                                                            */
8
/* http://www.apache.org/licenses/LICENSE-2.0                                 */
9
/*                                                                            */
10
/* Unless required by applicable law or agreed to in writing, software        */
11
/* distributed under the License is distributed on an "AS IS" BASIS,          */
12
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   */
13
/* See the License for the specific language governing permissions and        */
14
/* limitations under the License.                                             */
15
/* -------------------------------------------------------------------------- */
16

    
17
#include <stdexcept>
18
#include <stdlib.h>
19

    
20
#include <signal.h>
21
#include <unistd.h>
22
#include <fcntl.h>
23
#include <sys/types.h>
24
#include <pwd.h>
25

    
26
#include <pthread.h>
27

    
28
#include <cmath>
29
#include <iomanip>
30

    
31
#include "Scheduler.h"
32
#include "SchedulerTemplate.h"
33
#include "RankPolicy.h"
34
#include "NebulaLog.h"
35
#include "PoolObjectAuth.h"
36
#include "NebulaUtil.h"
37

    
38
using namespace std;
39

    
40

    
41
/* -------------------------------------------------------------------------- */
42
/* -------------------------------------------------------------------------- */
43

    
44
static double profile(bool start, const string& message="")
45
{
46
    static struct timespec estart, eend;
47
    double t;
48

    
49
    if (start)
50
    {
51
        clock_gettime(CLOCK_MONOTONIC, &estart);
52

    
53
        if (!message.empty())
54
        {
55
            NebulaLog::log("SCHED", Log::DDEBUG, message);
56
        }
57

    
58
        return 0;
59
    }
60

    
61
    clock_gettime(CLOCK_MONOTONIC, &eend);
62

    
63
    t = (eend.tv_sec + (eend.tv_nsec * pow(10,-9))) -
64
        (estart.tv_sec+(estart.tv_nsec*pow(10,-9)));
65

    
66
    if (!message.empty())
67
    {
68
        ostringstream oss;
69

    
70
        oss << message << " Total time: " << one_util::float_to_str(t) << "s";
71
        NebulaLog::log("SCHED", Log::DDEBUG, oss);
72
    }
73

    
74
    return t;
75
}
76

    
77
/* -------------------------------------------------------------------------- */
78
/* -------------------------------------------------------------------------- */
79

    
80
extern "C" void * scheduler_action_loop(void *arg)
81
{
82
    Scheduler *  sched;
83

    
84
    if ( arg == 0 )
85
    {
86
        return 0;
87
    }
88

    
89
    sched = static_cast<Scheduler *>(arg);
90

    
91
    NebulaLog::log("SCHED",Log::INFO,"Scheduler loop started.");
92

    
93
    sched->am.loop(sched->timer);
94

    
95
    NebulaLog::log("SCHED",Log::INFO,"Scheduler loop stopped.");
96

    
97
    return 0;
98
}
99

    
100
/* -------------------------------------------------------------------------- */
101
/* -------------------------------------------------------------------------- */
102

    
103
/**
 *  Daemon bootstrap. Order matters throughout this function:
 *    1. Load sched.conf (path depends on ONE_LOCATION)
 *    2. Initialize the log system
 *    3. Initialize the XML-RPC client and libxml2
 *    4. Contact oned (retrying forever) to obtain its configuration / zone id
 *    5. Allocate the pool caches and register scheduling policies
 *    6. Redirect stdin/stdout/stderr to /dev/null
 *    7. Block all signals, spawn the scheduler loop thread, then sigwait
 *       for SIGINT/SIGTERM and shut down cleanly.
 *  Throws runtime_error on configuration or log-system failures.
 */
void Scheduler::start()
{
    int rc;

    // NOTE(review): 'file' is declared but never used in this function.
    ifstream      file;
    ostringstream oss;

    string etc_path;

    unsigned int live_rescheds;

    pthread_attr_t pattr;

    // -----------------------------------------------------------
    // Configuration File
    // -----------------------------------------------------------
    string        log_file;
    const char *  nl = getenv("ONE_LOCATION");

    if (nl == 0) //OpenNebula installed under root directory
    {
        log_file = "/var/log/one/sched.log";
        etc_path = "/etc/one/";
    }
    else
    {
        // Self-contained install: paths are relative to $ONE_LOCATION
        oss << nl << "/var/sched.log";

        log_file = oss.str();

        oss.str("");
        oss << nl << "/etc/";

        etc_path = oss.str();
    }

    SchedulerTemplate conf(etc_path);

    if ( conf.load_configuration() != 0 )
    {
        throw runtime_error("Error reading configuration file.");
    }

    // Scheduler tunables from sched.conf
    conf.get("ONE_XMLRPC", one_xmlrpc);

    conf.get("SCHED_INTERVAL", timer);

    conf.get("MAX_VM", machines_limit);

    conf.get("MAX_DISPATCH", dispatch_limit);

    conf.get("MAX_HOST", host_dispatch_limit);

    conf.get("LIVE_RESCHEDS", live_rescheds);

    // -----------------------------------------------------------
    // Log system & Configuration File
    // -----------------------------------------------------------

    try
    {
        NebulaLog::LogType log_system = NebulaLog::UNDEFINED;
        Log::MessageType   clevel     = Log::ERROR;; // NOTE(review): stray ';'

        const VectorAttribute * log = conf.get("LOG");

        if ( log != 0 )
        {
            string value;
            int    ilevel;

            value      = log->vector_value("SYSTEM");
            log_system = NebulaLog::str_to_type(value);

            value  = log->vector_value("DEBUG_LEVEL");
            ilevel = atoi(value.c_str());

            // Only accept debug levels within the known range; otherwise
            // keep the ERROR default set above.
            if (Log::ERROR <= ilevel && ilevel <= Log::DDDEBUG)
            {
                clevel = static_cast<Log::MessageType>(ilevel);
            }
        }

        // Start the log system
        if ( log_system != NebulaLog::UNDEFINED )
        {
            NebulaLog::init_log_system(log_system,
                           clevel,
                           log_file.c_str(),
                           ios_base::trunc,
                           "mm_sched");
        }
        else
        {
            throw runtime_error("Unknown LOG_SYSTEM.");
        }

        NebulaLog::log("SCHED", Log::INFO, "Init Scheduler Log system");
    }
    catch(runtime_error &)
    {
        throw;
    }

    // Dump the effective configuration to the log for diagnostics
    oss.str("");

    oss << "Starting Scheduler Daemon" << endl;
    oss << "----------------------------------------\n";
    oss << "     Scheduler Configuration File       \n";
    oss << "----------------------------------------\n";
    oss << conf;
    oss << "----------------------------------------";

    NebulaLog::log("SCHED", Log::INFO, oss);

    // -----------------------------------------------------------
    // XML-RPC Client
    // -----------------------------------------------------------

    try
    {
        long long    message_size;
        unsigned int timeout;

        conf.get("MESSAGE_SIZE", message_size);

        conf.get("TIMEOUT", timeout);

        Client::initialize("", one_xmlrpc, message_size, timeout);

        oss.str("");

        oss << "XML-RPC client using " << (Client::client())->get_message_size()
            << " bytes for response buffer.\n";

        NebulaLog::log("SCHED", Log::INFO, oss);
    }
    catch(runtime_error &)
    {
        throw;
    }

    xmlInitParser();

    // -------------------------------------------------------------------------
    // Get oned configuration, and init zone_id
    // -------------------------------------------------------------------------

    // Retry forever: the scheduler is useless without oned, so block startup
    // until the one.system.config call succeeds.
    while (1)
    {
        try
        {
            xmlrpc_c::value result;
            vector<xmlrpc_c::value> values;

            Client * client = Client::client();

            client->call("one.system.config", "", &result);

            values = xmlrpc_c::value_array(result).vectorValueValue();

            bool   success = xmlrpc_c::value_boolean(values[0]);
            string message = xmlrpc_c::value_string(values[1]);

            if (!success ||(oned_conf.from_xml(message) != 0))
            {
                // NOTE(review): this branch logs the failure but still falls
                // through to the 'break' below, so a parse failure does NOT
                // retry despite the message text — confirm this is intended.
                ostringstream oss;

                oss << "Cannot contact oned, will retry... Error: " << message;

                NebulaLog::log("SCHED", Log::ERROR, oss);
            }

            break;
        }
        catch (exception const& e)
        {
            ostringstream oss;

            oss << "Cannot contact oned, will retry... Error: " << e.what();

            NebulaLog::log("SCHED", Log::ERROR, oss);
        }

        sleep(2);
    }

    NebulaLog::log("SCHED", Log::INFO, "oned successfully contacted.");

    zone_id = 0;

    // Federation: use the zone id reported by oned, defaulting to 0
    const VectorAttribute * fed = oned_conf.get("FEDERATION");

    if (fed != 0)
    {
        if (fed->vector_value("ZONE_ID", zone_id) != 0)
        {
            zone_id = 0;
        }
    }

    oss.str("");
    oss << "Configuring scheduler for Zone ID: " << zone_id;

    NebulaLog::log("SCHED", Log::INFO, oss);

    // -------------------------------------------------------------------------
    // Pools
    // -------------------------------------------------------------------------
    Client * client = Client::client();

    acls  = new AclXML(client, zone_id);
    upool = new UserPoolXML(client);

    hpool  = new HostPoolXML(client);
    clpool = new ClusterPoolXML(client);

    dspool     = new SystemDatastorePoolXML(client);
    img_dspool = new ImageDatastorePoolXML(client);

    vm_roles_pool = new VirtualMachineRolePoolXML(client, machines_limit);
    vmpool = new VirtualMachinePoolXML(client, machines_limit, live_rescheds==1);

    vmgpool = new VMGroupPoolXML(client);

    vmapool = new VirtualMachineActionsPoolXML(client, machines_limit);

    // -----------------------------------------------------------
    // Load scheduler policies
    // -----------------------------------------------------------

    register_policies(conf);

    // -----------------------------------------------------------
    // Close stds, we no longer need them
    // -----------------------------------------------------------

    int fd;

    // NOTE(review): the open() return value is not checked; if it fails the
    // dup2 calls below receive -1 — confirm /dev/null is always available.
    fd = open("/dev/null", O_RDWR);

    dup2(fd,0);
    dup2(fd,1);
    dup2(fd,2);

    close(fd);

    fcntl(0,F_SETFD,0); // Keep them open across exec funcs
    fcntl(1,F_SETFD,0);
    fcntl(2,F_SETFD,0);

    // -----------------------------------------------------------
    // Block all signals before creating any  thread
    // -----------------------------------------------------------

    sigset_t    mask;
    int         signal;

    sigfillset(&mask);

    // The loop thread inherits this mask, so only this thread (via sigwait
    // below) will ever receive the termination signals.
    pthread_sigmask(SIG_BLOCK, &mask, NULL);

    // -----------------------------------------------------------
    // Create the scheduler loop
    // -----------------------------------------------------------

    NebulaLog::log("SCHED",Log::INFO,"Starting scheduler loop...");

    pthread_attr_init (&pattr);
    pthread_attr_setdetachstate (&pattr, PTHREAD_CREATE_JOINABLE);

    rc = pthread_create(&sched_thread,&pattr,scheduler_action_loop,(void *) this);

    if ( rc != 0 )
    {
        NebulaLog::log("SCHED",Log::ERROR,
            "Could not start scheduler loop, exiting");

        return;
    }

    // -----------------------------------------------------------
    // Wait for a SIGTERM or SIGINT signal
    // -----------------------------------------------------------

    sigemptyset(&mask);

    sigaddset(&mask, SIGINT);
    sigaddset(&mask, SIGTERM);

    sigwait(&mask, &signal);

    // Orderly shutdown: stop the loop, join the thread, tear down libxml2
    // and the log system.
    am.finalize();

    pthread_join(sched_thread,0);

    xmlCleanupParser();

    NebulaLog::finalize_log_system();
}
403

    
404
/* -------------------------------------------------------------------------- */
405
/* -------------------------------------------------------------------------- */
406

    
407
int Scheduler::set_up_pools()
408
{
409
    int                             rc;
410
    ostringstream                   oss;
411
    map<int,int>::const_iterator    it;
412
    map<int, int>                   shares;
413

    
414
    //--------------------------------------------------------------------------
415
    //Cleans the cache and get the pools
416
    //--------------------------------------------------------------------------
417

    
418
    rc = vmpool->set_up();
419

    
420
    if ( rc != 0 )
421
    {
422
        return rc;
423
    }
424

    
425
    rc = dspool->set_up();
426

    
427
    if ( rc != 0 )
428
    {
429
        return rc;
430
    }
431

    
432
    rc = img_dspool->set_up();
433

    
434
    if ( rc != 0 )
435
    {
436
        return rc;
437
    }
438

    
439
    rc = upool->set_up();
440

    
441
    if ( rc != 0 )
442
    {
443
        return rc;
444
    }
445

    
446
    rc = hpool->set_up();
447

    
448
    if ( rc != 0 )
449
    {
450
        return rc;
451
    }
452

    
453
    rc = clpool->set_up();
454

    
455
    if ( rc != 0 )
456
    {
457
        return rc;
458
    }
459

    
460
    hpool->merge_clusters(clpool);
461

    
462
    rc = acls->set_up();
463

    
464
    if ( rc != 0 )
465
    {
466
        return rc;
467
    }
468

    
469
    rc = vmgpool->set_up();
470

    
471
    if ( rc != 0 )
472
    {
473
        return rc;
474
    }
475

    
476
    rc = vm_roles_pool->set_up();
477

    
478
    if ( rc != 0 )
479
    {
480
        return rc;
481
    }
482

    
483
    return 0;
484
};
485

    
486
/* -------------------------------------------------------------------------- */
487
/* -------------------------------------------------------------------------- */
488
/* -------------------------------------------------------------------------- */
489
/* -------------------------------------------------------------------------- */
490

    
491
/**
492
 *  Match hosts for this VM that:
493
 *    1. Fulfills ACL
494
 *    2. Meets user/policy requirements
495
 *    3. Have enough capacity to host the VM
496
 *
497
 *  @param acl pool
498
 *  @param users the user pool
499
 *  @param vm the virtual machine
500
 *  @param vm_memory vm requirement
501
 *  @param vm_cpu vm requirement
502
 *  @param vm_pci vm requirement
503
 *  @param host to evaluate vm assgiment
504
 *  @param n_auth number of hosts authorized for the user, incremented if needed
505
 *  @param n_error number of requirement errors, incremented if needed
506
 *  @param n_fits number of hosts with capacity that fits the VM requirements
507
 *  @param n_matched number of hosts that fullfil VM sched_requirements
508
 *  @param error, string describing why the host is not valid
509
 *  @return true for a positive match
510
 */
511
static bool match_host(AclXML * acls, UserPoolXML * upool, VirtualMachineXML* vm,
512
    int vmem, int vcpu, vector<VectorAttribute *>& vpci, HostXML * host,
513
    int &n_auth, int& n_error, int &n_fits, int &n_matched, string &error)
514
{
515
    // -------------------------------------------------------------------------
516
    // Filter current Hosts for resched VMs
517
    // -------------------------------------------------------------------------
518
    if (vm->is_resched() && vm->get_hid() == host->get_hid())
519
    {
520
        error = "VM cannot be migrated to its current Host.";
521
        return false;
522
    }
523

    
524
    // -------------------------------------------------------------------------
525
    // Check that VM can be deployed in local hosts
526
    // -------------------------------------------------------------------------
527
    if (vm->is_only_public_cloud() && !host->is_public_cloud())
528
    {
529
        error = "VM requires a Public Cloud Host, but it's local.";
530
        return false;
531
    }
532

    
533
    // -------------------------------------------------------------------------
534
    // Check if user is authorized
535
    // -------------------------------------------------------------------------
536
    if ( vm->get_uid() != 0 && vm->get_gid() != 0 )
537
    {
538
        PoolObjectAuth hperms;
539

    
540
        host->get_permissions(hperms);
541

    
542
        UserXML * user = upool->get(vm->get_uid());
543

    
544
        if (user == 0)
545
        {
546
            error = "User does not exists.";
547
            return false;
548
        }
549

    
550
        const vector<int> vgids = user->get_gids();
551

    
552
        set<int> gids(vgids.begin(), vgids.end());
553

    
554
        if ( !acls->authorize(vm->get_uid(), gids, hperms, AuthRequest::MANAGE))
555
        {
556
            error = "Permission denied.";
557
            return false;
558
        }
559
    }
560

    
561
    n_auth++;
562

    
563
    // -------------------------------------------------------------------------
564
    // Check host capacity
565
    // -------------------------------------------------------------------------
566
    if (host->test_capacity(vcpu, vmem, vpci, error) != true)
567
    {
568
        return false;
569
    }
570

    
571
    n_fits++;
572

    
573
    // -------------------------------------------------------------------------
574
    // Evaluate VM requirements
575
    // -------------------------------------------------------------------------
576
    if (!vm->get_requirements().empty())
577
    {
578
        char * estr;
579
        bool   matched;
580

    
581
        if ( host->eval_bool(vm->get_requirements(), matched, &estr) != 0 )
582
        {
583
            ostringstream oss;
584

    
585
            n_error++;
586

    
587
            oss << "Error in SCHED_REQUIREMENTS: '" << vm->get_requirements()
588
                << "', error: " << estr;
589

    
590
            vm->log(oss.str());
591

    
592
            error = oss.str();
593

    
594
            free(estr);
595

    
596
            return false;
597
        }
598

    
599
        if (matched == false)
600
        {
601
            error = "It does not fulfill SCHED_REQUIREMENTS: " +
602
                vm->get_requirements();
603
            return false;
604
        }
605
    }
606

    
607
    n_matched++;
608

    
609
    return true;
610
};
611

    
612
/* -------------------------------------------------------------------------- */
613
/* -------------------------------------------------------------------------- */
614

    
615
/**
616
 *  Match system DS's for this VM that:
617
 *    1. Meet user/policy requirements
618
 *    2. Have enough capacity to host the VM
619
 *
620
 *  @param acl pool
621
 *  @param users the user pool
622
 *  @param vm the virtual machine
623
 *  @param vdisk vm requirement
624
 *  @param ds to evaluate vm assgiment
625
 *  @param n_auth number of ds authorized for the user, incremented if needed
626
 *  @param n_error number of requirement errors, incremented if needed
627
 *  @param n_matched number of system ds that fullfil VM sched_requirements
628
 *  @param n_fits number of system ds with capacity that fits the VM requirements
629
 *  @param error, string describing why the host is not valid
630
 *  @return true for a positive match
631
 */
632
static bool match_system_ds(AclXML * acls, UserPoolXML * upool,
633
    VirtualMachineXML* vm, long long vdisk, DatastoreXML * ds, int& n_auth,
634
    int& n_error, int& n_fits, int &n_matched, string &error)
635
{
636
    // -------------------------------------------------------------------------
637
    // Check if user is authorized
638
    // -------------------------------------------------------------------------
639
    if ( vm->get_uid() != 0 && vm->get_gid() != 0 )
640
    {
641
        PoolObjectAuth dsperms;
642

    
643
        ds->get_permissions(dsperms);
644

    
645
        UserXML * user = upool->get(vm->get_uid());
646

    
647
        if (user == 0)
648
        {
649
            error = "User does not exists.";
650
            return false;
651
        }
652

    
653
        const vector<int> vgids = user->get_gids();
654

    
655
        set<int> gids(vgids.begin(), vgids.end());
656

    
657
        if ( !acls->authorize(vm->get_uid(), gids, dsperms, AuthRequest::USE))
658
        {
659
            error = "Permission denied.";
660
            return false;
661
        }
662
    }
663

    
664
    n_auth++;
665

    
666
    // -------------------------------------------------------------------------
667
    // Check datastore capacity for shared systems DS (non-shared will be
668
    // checked in a per host basis during dispatch). Resume actions do not
669
    // add to shared system DS usage, and are skipped also
670
    // -------------------------------------------------------------------------
671
    if (ds->is_shared() && ds->is_monitored() && !vm->is_resume() &&
672
        !ds->test_capacity(vdisk, error))
673
    {
674
        return false;
675
    }
676

    
677
    n_fits++;
678

    
679
    // -------------------------------------------------------------------------
680
    // Evaluate VM requirements
681
    // -------------------------------------------------------------------------
682
    if (!vm->get_ds_requirements().empty())
683
    {
684
        char * estr;
685
        bool   matched;
686

    
687
        if ( ds->eval_bool(vm->get_ds_requirements(), matched, &estr) != 0 )
688
        {
689
            ostringstream oss;
690

    
691
            n_error++;
692

    
693
            oss << "Error in SCHED_DS_REQUIREMENTS: '"
694
                << vm->get_ds_requirements() << "', error: " << estr;
695

    
696
            vm->log(oss.str());
697

    
698
            error = oss.str();
699

    
700
            free(estr);
701

    
702
            return false;
703
        }
704

    
705
        if (matched == false)
706
        {
707
            error = "It does not fulfill SCHED_DS_REQUIREMENTS.";
708
            return false;
709
        }
710
    }
711

    
712
    n_matched++;
713

    
714
    return true;
715
}
716

    
717
/* -------------------------------------------------------------------------- */
718

    
719
static void log_match(int vid, const string& msg)
720
{
721
    ostringstream oss;
722

    
723
    oss << "Match-making results for VM " << vid << ":\n\t" << msg << endl;
724

    
725
    NebulaLog::log("SCHED", Log::DEBUG, oss);
726
}
727

    
728
/* -------------------------------------------------------------------------- */
729
/* -------------------------------------------------------------------------- */
730

    
731
void Scheduler::match_schedule()
732
{
733
    VirtualMachineXML * vm;
734

    
735
    int vm_memory;
736
    int vm_cpu;
737
    long long vm_disk;
738
    vector<VectorAttribute *> vm_pci;
739

    
740
    int n_resources;
741
    int n_matched;
742
    int n_auth;
743
    int n_error;
744
    int n_fits;
745

    
746
    HostXML * host;
747
    DatastoreXML *ds;
748

    
749
    string m_error;
750

    
751
    map<int, ObjectXML*>::const_iterator  vm_it;
752
    map<int, ObjectXML*>::const_iterator  obj_it;
753

    
754
    vector<SchedulerPolicy *>::iterator it;
755

    
756
    const map<int, ObjectXML*> pending_vms = vmpool->get_objects();
757
    const map<int, ObjectXML*> hosts       = hpool->get_objects();
758
    const map<int, ObjectXML*> datastores  = dspool->get_objects();
759
    const map<int, ObjectXML*> users       = upool->get_objects();
760

    
761
    double total_cl_match_time = 0;
762
    double total_host_match_time = 0;
763
    double total_host_rank_time = 0;
764
    double total_ds_match_time = 0;
765
    double total_ds_rank_time = 0;
766

    
767
    time_t stime = time(0);
768

    
769
    for (vm_it=pending_vms.begin(); vm_it != pending_vms.end(); vm_it++)
770
    {
771
        vm = static_cast<VirtualMachineXML*>(vm_it->second);
772

    
773
        vm->get_requirements(vm_cpu, vm_memory, vm_disk, vm_pci);
774

    
775
        n_resources = 0;
776
        n_fits    = 0;
777
        n_matched = 0;
778
        n_auth    = 0;
779
        n_error   = 0;
780

    
781
        //----------------------------------------------------------------------
782
        // Test Image Datastore capacity, but not for migrations or resume
783
        //----------------------------------------------------------------------
784
        if (!vm->is_resched() && !vm->is_resume())
785
        {
786
            if (vm->test_image_datastore_capacity(img_dspool, m_error) == false)
787
            {
788
                if (vm->is_public_cloud()) //No capacity needed for public cloud
789
                {
790
                    vm->set_only_public_cloud();
791
                }
792
                else
793
                {
794
                    log_match(vm->get_oid(), "Cannot schedule VM. "+ m_error);
795

    
796
                    vm->log("Cannot schedule VM. "+ m_error);
797
                    vmpool->update(vm);
798

    
799
                    continue;
800
                }
801
            }
802
        }
803

    
804
        // ---------------------------------------------------------------------
805
        // Match hosts for this VM.
806
        // ---------------------------------------------------------------------
807
        profile(true);
808

    
809
        for (obj_it=hosts.begin(); obj_it != hosts.end(); obj_it++)
810
        {
811
            host = static_cast<HostXML *>(obj_it->second);
812

    
813
            if (match_host(acls, upool, vm, vm_memory, vm_cpu, vm_pci, host,
814
                    n_auth, n_error, n_fits, n_matched, m_error))
815
            {
816
                vm->add_match_host(host->get_hid());
817

    
818
                n_resources++;
819
            }
820
            else
821
            {
822
                if ( n_error > 0 )
823
                {
824
                    log_match(vm->get_oid(), "Cannot schedule VM. " + m_error);
825
                    break;
826
                }
827
                else if (NebulaLog::log_level() >= Log::DDEBUG)
828
                {
829
                    ostringstream oss;
830
                    oss << "Host " << host->get_hid() << " discarded for VM "
831
                        << vm->get_oid() << ". " << m_error;
832

    
833
                    NebulaLog::log("SCHED", Log::DDEBUG, oss);
834
                }
835
            }
836
        }
837

    
838
        total_host_match_time += profile(false);
839

    
840
        // ---------------------------------------------------------------------
841
        // Log scheduling errors to VM user if any
842
        // ---------------------------------------------------------------------
843

    
844
        if (n_resources == 0) //No hosts assigned, let's see why
845
        {
846
            if (n_error == 0) //No syntax error
847
            {
848
                if (hosts.size() == 0)
849
                {
850
                    vm->log("No hosts enabled to run VMs");
851
                }
852
                else if (n_auth == 0)
853
                {
854
                    vm->log("User is not authorized to use any host");
855
                }
856
                else if (n_fits == 0)
857
                {
858
                    ostringstream oss;
859

    
860
                    oss << "No host with enough capacity to deploy the VM";
861

    
862
                    vm->log(oss.str());
863
                }
864
                else if (n_matched == 0)
865
                {
866
                    ostringstream oss;
867

    
868
                    oss << "No host meets capacity and SCHED_REQUIREMENTS: "
869
                        << vm->get_requirements();
870

    
871
                    vm->log(oss.str());
872
                }
873
            }
874

    
875
            vmpool->update(vm);
876

    
877
            log_match(vm->get_oid(),
878
                    "Cannot schedule VM, there is no suitable host.");
879

    
880
            continue;
881
        }
882

    
883
        // ---------------------------------------------------------------------
884
        // Schedule matched hosts
885
        // ---------------------------------------------------------------------
886
        profile(true);
887

    
888
        for (it=host_policies.begin() ; it != host_policies.end() ; it++)
889
        {
890
            (*it)->schedule(vm);
891
        }
892

    
893
        vm->sort_match_hosts();
894

    
895
        total_host_rank_time += profile(false);
896

    
897
        if (vm->is_resched())//Will use same system DS for migrations
898
        {
899
            vm->add_match_datastore(vm->get_dsid());
900

    
901
            continue;
902
        }
903

    
904
        // ---------------------------------------------------------------------
905
        // Match datastores for this VM
906
        // ---------------------------------------------------------------------
907

    
908
        profile(true);
909

    
910
        n_resources = 0;
911
        n_auth    = 0;
912
        n_matched = 0;
913
        n_error   = 0;
914
        n_fits    = 0;
915

    
916
        for (obj_it=datastores.begin(); obj_it != datastores.end(); obj_it++)
917
        {
918
            ds = static_cast<DatastoreXML *>(obj_it->second);
919

    
920
            if (match_system_ds(acls, upool, vm, vm_disk, ds, n_auth, n_error,
921
                        n_fits, n_matched, m_error))
922
            {
923
                vm->add_match_datastore(ds->get_oid());
924

    
925
                n_resources++;
926
            }
927
            else
928
            {
929
                if (n_error > 0)
930
                {
931
                    log_match(vm->get_oid(), "Cannot schedule VM. " + m_error);
932
                    break;
933
                }
934
                else if (NebulaLog::log_level() >= Log::DDEBUG)
935
                {
936
                    ostringstream oss;
937
                    oss << "System DS " << ds->get_oid() << " discarded for VM "
938
                        << vm->get_oid() << ". " << m_error;
939

    
940
                    NebulaLog::log("SCHED", Log::DDEBUG, oss);
941
                }
942
            }
943
        }
944

    
945
        total_ds_match_time += profile(false);
946

    
947
        // ---------------------------------------------------------------------
948
        // Log scheduling errors to VM user if any
949
        // ---------------------------------------------------------------------
950

    
951
        if (n_resources == 0)
952
        {
953
            if (vm->is_public_cloud())//Public clouds don't need a system DS
954
            {
955
                vm->set_only_public_cloud();
956

    
957
                continue;
958
            }
959
            else//No datastores assigned, let's see why
960
            {
961
                if (n_error == 0)//No syntax error
962
                {
963
                    if (datastores.size() == 0)
964
                    {
965
                        vm->log("No system datastores found to run VMs");
966
                    }
967
                    else if (n_auth == 0)
968
                    {
969
                        vm->log("User is not authorized to use any system datastore");
970
                    }
971
                    else if (n_fits == 0)
972
                    {
973
                        ostringstream oss;
974
                        oss <<  "No system datastore with enough capacity for the VM";
975

    
976
                        vm->log(oss.str());
977
                    }
978
                    else if (n_matched == 0)
979
                    {
980
                        ostringstream oss;
981

    
982
                        oss << "No system datastore meets capacity "
983
                            << "and SCHED_DS_REQUIREMENTS: "
984
                            << vm->get_ds_requirements();
985

    
986
                        vm->log(oss.str());
987
                    }
988
                }
989

    
990
                vm->clear_match_hosts();
991

    
992
                vmpool->update(vm);
993

    
994
                log_match(vm->get_oid(), "Cannot schedule VM, there is no suitable "
995
                    "system ds.");
996

    
997
                continue;
998
            }
999
        }
1000

    
1001
        // ---------------------------------------------------------------------
1002
        // Schedule matched datastores
1003
        // ---------------------------------------------------------------------
1004

    
1005
        profile(true);
1006

    
1007
        for (it=ds_policies.begin() ; it != ds_policies.end() ; it++)
1008
        {
1009
            (*it)->schedule(vm);
1010
        }
1011

    
1012
        vm->sort_match_datastores();
1013

    
1014
        total_ds_rank_time += profile(false);
1015
    }
1016

    
1017
    if (NebulaLog::log_level() >= Log::DDEBUG)
1018
    {
1019
        ostringstream oss;
1020

    
1021
        oss << "Match Making statistics:\n"
1022
            << "\tNumber of VMs:            "
1023
            << pending_vms.size() << endl
1024
            << "\tTotal time:               "
1025
            << one_util::float_to_str(time(0) - stime) << "s" << endl
1026
            << "\tTotal Cluster Match time: "
1027
            << one_util::float_to_str(total_cl_match_time) << "s" << endl
1028
            << "\tTotal Host Match time:    "
1029
            << one_util::float_to_str(total_host_match_time) << "s" << endl
1030
            << "\tTotal Host Ranking time:  "
1031
            << one_util::float_to_str(total_host_rank_time) << "s" << endl
1032
            << "\tTotal DS Match time:      "
1033
            << one_util::float_to_str(total_ds_match_time) << "s" << endl
1034
            << "\tTotal DS Ranking time:    "
1035
            << one_util::float_to_str(total_ds_rank_time) << "s" << endl;
1036

    
1037
        NebulaLog::log("SCHED", Log::DDEBUG, oss);
1038
    }
1039

    
1040
    if (NebulaLog::log_level() >= Log::DDDEBUG)
1041
    {
1042
        ostringstream oss;
1043

    
1044
        oss << "Scheduling Results:" << endl;
1045

    
1046
        for (map<int, ObjectXML*>::const_iterator vm_it=pending_vms.begin();
1047
            vm_it != pending_vms.end(); vm_it++)
1048
        {
1049
            vm = static_cast<VirtualMachineXML*>(vm_it->second);
1050

    
1051
            oss << *vm;
1052
        }
1053

    
1054
        NebulaLog::log("SCHED", Log::DDDEBUG, oss);
1055
    }
1056
}
1057

    
1058
/* -------------------------------------------------------------------------- */
1059
/* -------------------------------------------------------------------------- */
1060

    
1061
/**
 *  Dispatches the pending VMs to the hosts (and system datastores) selected
 *  during the match phase. VMs are processed in priority order (as set by the
 *  VM policies) until the global MAX_DISPATCH limit is reached. For each VM,
 *  hosts are tried from highest to lowest rank; capacity counters for host,
 *  system DS and image DS are updated in-memory as VMs are placed so that
 *  later VMs in the same cycle see the reduced capacity.
 */
void Scheduler::dispatch()
{
    HostXML *           host;
    DatastoreXML *      ds;
    VirtualMachineXML * vm;

    ostringstream dss;    // dispatch summary, logged once at the end
    string        error;

    int cpu, mem;         // VM capacity requirements
    long long dsk;
    vector<VectorAttribute *> pci;

    int hid, dsid, cid;   // selected host, system DS and the host's cluster

    unsigned int dispatched_vms = 0;
    bool dispatched, matched;
    char * estr;          // error string from eval_bool(); must be free()'d

    vector<Resource *>::const_reverse_iterator i, j, k;

    vector<SchedulerPolicy *>::iterator sp_it;

    //--------------------------------------------------------------------------
    // Schedule pending VMs according to the VM policies (e.g. User priority)
    //--------------------------------------------------------------------------
    for (sp_it = vm_policies.begin() ; sp_it != vm_policies.end() ; ++sp_it)
    {
        (*sp_it)->schedule(0);
    }

    vmpool->sort_vm_resources();

    const vector<Resource *> vm_rs = vmpool->get_vm_resources();

    //--------------------------------------------------------------------------
    dss << "Dispatching VMs to hosts:\n" 
        << "\tVMID\tPriority\tHost\tSystem DS\n"
        << "\t--------------------------------------------------------------\n";
    //--------------------------------------------------------------------------

    //--------------------------------------------------------------------------
    // Dispatch each VM till we reach the dispatch limit
    //--------------------------------------------------------------------------
    // Resources are sorted ascending by priority, hence the reverse iteration.
    // dispatch_limit <= 0 means "no limit".
    for (k = vm_rs.rbegin(); k != vm_rs.rend() &&
            ( dispatch_limit <= 0 || dispatched_vms < dispatch_limit ); ++k)
    {
        dispatched = false;

        vm = vmpool->get((*k)->oid);

        const vector<Resource *> resources = vm->get_match_hosts();

        //----------------------------------------------------------------------
        // Test Image Datastore capacity, but not for migrations or resume
        //----------------------------------------------------------------------
        if (!resources.empty() && !vm->is_resched() && !vm->is_resume())
        {
            if (vm->test_image_datastore_capacity(img_dspool, error) == false)
            {
                if (vm->is_public_cloud())//No capacity needed for public cloud
                {
                    vm->set_only_public_cloud();
                }
                else
                {
                    vm->log("Cannot dispatch VM. " + error);

                    vmpool->update(vm);

                    continue;
                }
            }
        }

        vm->get_requirements(cpu, mem, dsk, pci);

        //----------------------------------------------------------------------
        // Get the highest ranked host and best System DS for it
        //----------------------------------------------------------------------
        for (i = resources.rbegin() ; i != resources.rend() ; i++)
        {
            hid  = (*i)->oid;
            host = hpool->get(hid);

            if ( host == 0 )
            {
                continue;
            }

            cid = host->get_cid();

            //------------------------------------------------------------------
            // Check host still match requirements with CURRENT_VMS
            //------------------------------------------------------------------
            // Requirements referencing CURRENT_VMS may have been invalidated by
            // VMs dispatched earlier in this very cycle, so they are
            // re-evaluated here. NOTE(review): regex_match() presumably
            // follows the regexec() convention of returning 0 on match —
            // verify in one_util.
            matched = true;

            if ( one_util::regex_match("CURRENT_VMS",
                        vm->get_requirements().c_str()) == 0 )
            {
                if (host->eval_bool(vm->get_requirements(), matched, &estr)!=0)
                {
                    // Evaluation error: release the error string and try the
                    // next candidate host.
                    free(estr);
                    continue;
                }
            }

            if (matched == false)
            {
                std::ostringstream mss;

                mss << "Host " << hid << " no longer meets requirements for VM "
                    << vm->get_oid();

                NebulaLog::log("SCHED", Log::DEBUG, mss);
                continue;
            }

            //------------------------------------------------------------------
            // Test host capacity
            //------------------------------------------------------------------
            if (host->test_capacity(cpu, mem, pci) != true)
            {
                continue;
            }

            //------------------------------------------------------------------
            // Check that VM can be deployed in local hosts
            //------------------------------------------------------------------
            if (vm->is_only_public_cloud() && !host->is_public_cloud())
            {
                continue;
            }

            //------------------------------------------------------------------
            // Test host dispatch limit
            //------------------------------------------------------------------
            if (host->dispatched() >= host_dispatch_limit)
            {
                continue;
            }

            //------------------------------------------------------------------
            // Get the highest ranked datastore
            //------------------------------------------------------------------
            const vector<Resource *> ds_resources = vm->get_match_datastores();

            dsid = -1;

            // Skip the loop for public cloud hosts, they don't need a system DS
            // (j starts at rend(), so no DS is selected and ds is never
            // dereferenced for this host — the usage block below is guarded by
            // !host->is_public_cloud()).
            if (host->is_public_cloud())
            {
                j = ds_resources.rend();
            }
            else
            {
                j = ds_resources.rbegin();
            }

            for ( ; j != ds_resources.rend() ; j++)
            {
                ds = dspool->get((*j)->oid);

                if ( ds == 0 )
                {
                    continue;
                }

                //--------------------------------------------------------------
                // Test cluster membership for datastore and selected host
                //--------------------------------------------------------------
                if (!ds->is_in_cluster(cid))
                {
                    continue;
                }

                //--------------------------------------------------------------
                // Test datastore capacity
                //   - Shared DS does not need to check capacity if VM is
                //     migrated or resumed
                //   - Non-shared DS will always check host capacity
                //--------------------------------------------------------------
                bool ds_capacity = false;

                if (ds->is_shared())
                {
                    if (!ds->is_monitored())
                    {
                        // No monitoring data yet: capacity unknown, skip DS.
                        ds_capacity = false;
                    }
                    else if (vm->is_resched() || vm->is_resume())
                    {
                        // Disks already live on the shared DS; no extra space
                        // is needed.
                        ds_capacity = true;
                    }
                    else
                    {
                        ds_capacity =  ds->test_capacity(dsk);
                    }
                }
                else
                {
                    // Non-shared system DS consumes the host's local storage.
                    ds_capacity = host->test_ds_capacity(ds->get_oid(), dsk);
                }

                if (!ds_capacity)
                {
                    continue;
                }

                //--------------------------------------------------------------
                //Select this DS to dispatch VM
                //--------------------------------------------------------------
                dsid = (*j)->oid;

                break;
            }

            // A local host without a usable system DS cannot run the VM;
            // public cloud hosts are dispatched with dsid == -1.
            if (dsid == -1 && !host->is_public_cloud())
            {
                continue;
            }

            //------------------------------------------------------------------
            // Dispatch and update host and DS capacity, and dispatch counters
            //------------------------------------------------------------------
            if (vmpool->dispatch((*k)->oid, hid, dsid, vm->is_resched()) != 0)
            {
                continue;
            }

            //------------------------------------------------------------------
            dss << "\t" << (*k)->oid << "\t" << (*k)->priority << "\t\t" << hid
                << "\t" << dsid << "\n";
            //------------------------------------------------------------------

            // DS capacity skip VMs deployed in public cloud hosts
            if (!host->is_public_cloud())
            {
                // ------------ Add system DS usage -------------
                if (ds->is_shared())
                {
                    if (!vm->is_resched() && !vm->is_resume())
                    {
                        ds->add_capacity(dsk);
                    }
                }
                else
                {
                    host->add_ds_capacity(ds->get_oid(), dsk);
                }

                // ---------- Add image DS usage (i.e. clone = self) ----------
                if (!vm->is_resched())
                {
                    vm->add_image_datastore_capacity(img_dspool);
                }
            }

            //------------------------------------------------------------------
            // VM leaders needs to add the select host to the affined VMs
            //------------------------------------------------------------------
            // Affined VMs are pinned to the leader's host/DS so they are
            // co-located when dispatched later in this cycle.
            const set<int>& affined_vms = vm->get_affined_vms();

            if ( affined_vms.size() > 0 )
            {
                set<int>::const_iterator it;

                for ( it = affined_vms.begin(); it != affined_vms.end(); ++it )
                {
                    VirtualMachineXML * avm = vmpool->get(*it);

                    if ( avm == 0 )
                    {
                        continue;
                    }

                    avm->add_match_host(hid);
                    avm->add_match_datastore(dsid);
                }
            }

            //------------------------------------------------------------------
            // Update usage and statistics counters
            //------------------------------------------------------------------
            host->add_capacity(vm->get_oid(), cpu, mem, pci);

            dispatched_vms++;

            dispatched = true;

            break;
        }

        if (!dispatched)
        {
            vm->log("Cannot dispatch VM to any Host. Possible reasons: Not "
                "enough capacity in Host or System DS, or dispatch limit "
                "reached");
            vmpool->update(vm);
        }
    }

    // k stops short of rend() only when the dispatch limit cut the loop off.
    if (k != vm_rs.rend())
    {
        dss << endl << "MAX_DISPATCH limit of " << dispatch_limit << " reached, "
            << std::distance(k, vm_rs.rend())
            << " VMs were not dispatched";
    }

    NebulaLog::log("SCHED", Log::DEBUG, dss);
}
1372

    
1373
/* -------------------------------------------------------------------------- */
1374
/* -------------------------------------------------------------------------- */
1375

    
1376
/**
 *  Executes the per-VM scheduled actions (SCHED_ACTION) whose TIME has
 *  already passed and that have no DONE timestamp yet. On success the
 *  action's DONE attribute is stamped with the current time; on failure a
 *  MESSAGE attribute records the error. Each VM is written back to the pool
 *  after its actions are processed.
 *
 *  @return 0 always
 */
int Scheduler::do_scheduled_actions()
{
    VirtualMachineXML* vm;

    const map<int, ObjectXML*>  vms = vmapool->get_objects();
    map<int, ObjectXML*>::const_iterator vm_it;

    // NOTE(review): this vector is reused across VM iterations without an
    // explicit clear here — presumably get_actions() resets/repopulates it
    // and transfers ownership of the attributes; verify in VirtualMachineXML.
    vector<Attribute *> attributes;
    vector<Attribute *>::iterator it;

    VectorAttribute* vatt;

    int action_time;   // scheduled execution time (TIME)
    int done_time;     // completion timestamp (DONE), if present
    int has_time;      // 0 when TIME was parsed successfully
    int has_done;      // -1 when DONE is absent (action still pending)

    string action_st, error_msg;

    time_t the_time = time(0);
    string time_str = one_util::log_time(the_time);

    for (vm_it=vms.begin(); vm_it != vms.end(); vm_it++)
    {
        vm = static_cast<VirtualMachineXML*>(vm_it->second);

        vm->get_actions(attributes);

        // TODO: Sort actions by TIME
        for (it=attributes.begin(); it != attributes.end(); it++)
        {
            vatt = dynamic_cast<VectorAttribute*>(*it);

            if (vatt == 0)
            {
                // Not a vector attribute: cannot be a scheduled action,
                // discard it (this loop owns the attributes).
                delete *it;

                continue;
            }

            has_time  = vatt->vector_value("TIME", action_time);
            has_done  = vatt->vector_value("DONE", done_time);
            action_st = vatt->vector_value("ACTION");

            // Run only pending (no DONE) actions whose time has passed.
            if (has_time == 0 && has_done == -1 && action_time < the_time)
            {
                ostringstream oss;

                int rc = VirtualMachineXML::parse_action_name(action_st);

                oss << "Executing action '" << action_st << "' for VM "
                    << vm->get_oid() << " : ";

                if ( rc != 0 )
                {
                    error_msg = "This action is not supported.";
                }
                else
                {
                    rc = vmapool->action(vm->get_oid(), action_st, error_msg);
                }

                if (rc == 0)
                {
                    // Success: drop any stale error and stamp completion.
                    vatt->remove("MESSAGE");
                    vatt->replace("DONE", static_cast<int>(the_time));

                    oss << "Success.";
                }
                else
                {
                    ostringstream oss_aux;

                    oss_aux << time_str << " : " << error_msg;

                    // Failure: record the error; DONE stays unset so the
                    // action is retried on the next cycle.
                    vatt->replace("MESSAGE", oss_aux.str());

                    oss << "Failure. " << error_msg;
                }

                NebulaLog::log("VM", Log::INFO, oss);
            }

            // Hand the (possibly updated) attribute back to the VM object.
            vm->set_attribute(vatt);
        }

        vmpool->update(vm);
    }

    return 0;
}
1467

    
1468
/* -------------------------------------------------------------------------- */
1469
/* -------------------------------------------------------------------------- */
1470

    
1471
void Scheduler::do_vm_groups()
1472
{
1473
    map<int, ObjectXML*>::const_iterator it;
1474
    const map<int, ObjectXML*> vmgrps = vmgpool->get_objects();
1475

    
1476
    ostringstream oss;
1477

    
1478
    oss << "VM Group Scheduling information\n";
1479

    
1480
    for (it = vmgrps.begin(); it != vmgrps.end() ; ++it)
1481
    {
1482
        VMGroupXML * grp = static_cast<VMGroupXML*>(it->second);
1483

    
1484
        oss << setfill('*') << setw(80) << '*' << setfill(' ') << "\n"
1485
            << "SCHEDULING RESULTS FOR VM GROUP " << grp->get_oid() << ", "
1486
            << grp->get_name() <<"\n"
1487
            << setfill('*') << setw(80) << '*' << setfill(' ') << "\n";
1488

    
1489
        oss << *grp << "\n";
1490

    
1491
        grp->set_affinity_requirements(vmpool, vm_roles_pool, oss);
1492

    
1493
        grp->set_antiaffinity_requirements(vmpool, oss);
1494

    
1495
        grp->set_host_requirements(vmpool, oss);
1496
    }
1497

    
1498
    NebulaLog::log("VMGRP", Log::DDDEBUG, oss);
1499
}
1500

    
1501
/* -------------------------------------------------------------------------- */
1502
/* -------------------------------------------------------------------------- */
1503

    
1504
/**
 *  Periodic scheduler entry point. First verifies via one.zone.raftstatus
 *  that oned is reachable and able to serve requests; if not, the cycle is
 *  skipped. It then runs the full scheduling pipeline: scheduled actions,
 *  pool setup, VM group constraints, match-making and dispatch.
 *
 *  @param ar the triggering action request (unused here)
 */
void Scheduler::timer_action(const ActionRequest& ar)
{
    int rc;

    try
    {
        xmlrpc_c::value result;

        Client::client()->call("one.zone.raftstatus", "", &result);

        // XML-RPC result is [ success(bool), message(string), ... ]
        vector<xmlrpc_c::value> values =
                        xmlrpc_c::value_array(result).vectorValueValue();

        bool success = xmlrpc_c::value_boolean(values[0]);
        string msg   = xmlrpc_c::value_string(values[1]);

        if ( success )
        {
            int state;

            Template raft(false, '=', "RAFT");

            if ( raft.from_xml(msg) != 0 )
            {
                NebulaLog::log("SCHED", Log::ERROR, "Error parsing oned info");
                return;
            }

           if ( raft.get("STATE", state) == false )
           {
                NebulaLog::log("SCHED", Log::ERROR, "Cannot get oned state");
                return;
           }

           // Only schedule against an oned that can commit changes.
           // NOTE(review): states 3 and 0 presumably map to LEADER and SOLO
           // in oned's raft state enum — confirm against the Raft manager.
           if ( state != 3 && state != 0 )
           {
                NebulaLog::log("SCHED", Log::ERROR, "oned is not leader");
                return;
           }
        }
        else
        {
            NebulaLog::log("SCHED", Log::ERROR, "Cannot contact oned: " + msg);
            return;
        }
    }
    catch (exception const& e)
    {
        // Any XML-RPC transport/parse failure aborts this scheduling cycle.
        ostringstream ess;

        ess << "Cannot contact oned: " << e.what();

        NebulaLog::log("SCHED", Log::ERROR, ess);
        return;
    }

    // ---- Scheduled (time-based) actions ----
    profile(true);
    rc = vmapool->set_up();
    profile(false,"Getting scheduled actions information.");

    if ( rc == 0 )
    {
        profile(true);
        do_scheduled_actions();
        profile(false,"Executing scheduled actions.");
    }

    // ---- Placement pipeline: pools -> groups -> match -> dispatch ----
    profile(true);
    rc = set_up_pools();
    profile(false,"Getting VM and Host information.");

    if ( rc != 0 )
    {
        return;
    }

    profile(true);
    do_vm_groups();
    profile(false,"Setting VM groups placement constraints.");

    match_schedule();

    profile(true);
    dispatch();
    profile(false,"Dispatching VMs to hosts.");
}
}