#ifndef _XENO_NUCLEUS_SCHED_H
#define _XENO_NUCLEUS_SCHED_H

#include <nucleus/thread.h>

#if defined(__KERNEL__) || defined(__XENO_SIM__)

#include <nucleus/schedqueue.h>
#include <nucleus/sched-tp.h>
#include <nucleus/sched-sporadic.h>
#include <nucleus/vfile.h>
/* Sched status flags */
#define XNKCOUT         0x80000000      /* Sched callout context */
#define XNINTCK         0x40000000      /* In master tick handler context */
#define XNINSW          0x20000000      /* In context switch */
#define XNRESCHED       0x10000000      /* Needs rescheduling */

/* Sched local flags */
#define XNHTICK         0x00008000      /* Host tick pending */
#define XNINIRQ         0x00004000      /* In IRQ handling context */
#define XNHDEFER        0x00002000      /* Host tick deferred */
#define XNINLOCK        0x00001000      /* Scheduler locked */

/* Sched RPI status flags */
#define XNRPICK         0x80000000      /* Check RPI state */
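
/*
 * Usage sketch (illustrative, not part of the original header): the
 * status and lflags words below are bitmasks manipulated with the
 * nucleus bit helpers used throughout this file, e.g. checking for a
 * pending rescheduling request, then flagging a pending host tick:
 *
 *      if (testbits(sched->status, XNRESCHED))
 *              do_reschedule();
 *      __setbits(sched->lflags, XNHTICK);
 *
 * do_reschedule() is a placeholder for caller code, not a nucleus
 * service.
 */
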
struct xnsched_rt {
        xnsched_queue_t runnable;       /*!< Runnable thread queue. */
#ifdef CONFIG_XENO_OPT_PRIOCPL
        xnsched_queue_t relaxed;        /*!< Relaxed thread queue. */
#endif /* CONFIG_XENO_OPT_PRIOCPL */
};

/*!
 * \brief Scheduling information structure.
 */
typedef struct xnsched {

        xnflags_t status;               /*!< Scheduler specific status bitmask. */

        xnflags_t lflags;               /*!< Scheduler specific local flags bitmask. */

        int cpu;

        struct xnthread *curr;          /*!< Current thread. */

        xnarch_cpumask_t resched;       /*!< Mask of CPUs needing rescheduling. */

        struct xnsched_rt rt;           /*!< Context of built-in real-time class. */
#ifdef CONFIG_XENO_OPT_SCHED_TP
        struct xnsched_tp tp;           /*!< Context of TP class. */
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        struct xnsched_sporadic pss;    /*!< Context of sporadic scheduling class. */
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */

        xntimerq_t timerqueue;          /*!< Core timer queue. */

        volatile unsigned inesting;     /*!< Interrupt nesting level. */

        struct xntimer htimer;          /*!< Host timer. */

        struct xnthread *zombie;        /*!< Thread pending deletion. */

        struct xnthread rootcb;         /*!< Root thread control block. */

#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
        struct xnthread *last;          /*!< Last context-switched thread. */
#endif /* CONFIG_XENO_HW_UNLOCKED_SWITCH */

#ifdef CONFIG_XENO_HW_FPU
        struct xnthread *fpuholder;     /*!< Thread owning the current FPU context. */
#endif /* CONFIG_XENO_HW_FPU */

#ifdef CONFIG_XENO_OPT_WATCHDOG
        struct xntimer wdtimer;         /*!< Watchdog timer object. */
        int wdcount;                    /*!< Watchdog tick count. */
#endif /* CONFIG_XENO_OPT_WATCHDOG */

#ifdef CONFIG_XENO_OPT_STATS
        xnticks_t last_account_switch;  /*!< Last account switch date (ticks). */
        xnstat_exectime_t *current_account;     /*!< Currently active account. */
#endif /* CONFIG_XENO_OPT_STATS */

#ifdef CONFIG_XENO_OPT_PRIOCPL
        DECLARE_XNLOCK(rpilock);        /*!< RPI list lock. */
#endif /* CONFIG_XENO_OPT_PRIOCPL */

#ifdef CONFIG_XENO_OPT_PERVASIVE
        struct task_struct *gatekeeper; /*!< Gatekeeper kernel task. */
        struct semaphore gksync;        /*!< Gatekeeper synchronization. */
        struct xnthread *gktarget;      /*!< Thread to hand over to the gatekeeper. */
#endif /* CONFIG_XENO_OPT_PERVASIVE */

} xnsched_t;
union xnsched_policy_param;
struct xnsched_class {

        void (*sched_init)(struct xnsched *sched);
        void (*sched_enqueue)(struct xnthread *thread);
        void (*sched_dequeue)(struct xnthread *thread);
        void (*sched_requeue)(struct xnthread *thread);
        struct xnthread *(*sched_pick)(struct xnsched *sched);
        void (*sched_tick)(struct xnthread *curr);
        void (*sched_rotate)(struct xnsched *sched,
                             const union xnsched_policy_param *p);
        void (*sched_migrate)(struct xnthread *thread,
                              struct xnsched *sched);
        void (*sched_setparam)(struct xnthread *thread,
                               const union xnsched_policy_param *p);
        void (*sched_getparam)(struct xnthread *thread,
                               union xnsched_policy_param *p);
        void (*sched_trackprio)(struct xnthread *thread,
                                const union xnsched_policy_param *p);
        int (*sched_declare)(struct xnthread *thread,
                             const union xnsched_policy_param *p);
        void (*sched_forget)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_PRIOCPL
        struct xnthread *(*sched_push_rpi)(struct xnsched *sched,
                                           struct xnthread *thread);
        void (*sched_pop_rpi)(struct xnthread *thread);
        struct xnthread *(*sched_peek_rpi)(struct xnsched *sched);
        void (*sched_suspend_rpi)(struct xnthread *thread);
        void (*sched_resume_rpi)(struct xnthread *thread);
#endif /* CONFIG_XENO_OPT_PRIOCPL */
#ifdef CONFIG_XENO_OPT_VFILE
        int (*sched_init_vfile)(struct xnsched_class *schedclass,
                                struct xnvfile_directory *vfroot);
        void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif /* CONFIG_XENO_OPT_VFILE */

        int nthreads;                   /* Number of threads attached to this class. */
        struct xnsched_class *next;
        int weight;                     /* Priority weight in the global scale. */
        const char *name;
};
#define XNSCHED_CLASS_MAX_PRIO          1024
#define XNSCHED_CLASS_WEIGHT(n)         ((n) * XNSCHED_CLASS_MAX_PRIO)

/* Placeholder for current thread priority. */
#define XNSCHED_RUNPRIO   0x80000000
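
/*
 * Definition sketch (hypothetical "foo" class, for illustration
 * only): an external scheduling class fills in the handlers it
 * implements and picks a unique weight, e.g.:
 *
 *      static struct xnsched_class xnsched_class_foo = {
 *              .sched_init     = xnsched_foo_init,
 *              .sched_enqueue  = xnsched_foo_enqueue,
 *              .sched_dequeue  = xnsched_foo_dequeue,
 *              .sched_requeue  = xnsched_foo_requeue,
 *              .sched_pick     = xnsched_foo_pick,
 *              .weight         = XNSCHED_CLASS_WEIGHT(2),
 *              .name           = "foo",
 *      };
 *
 * Unset optional handlers must be tolerated by callers (e.g.
 * xnsched_forget() below only invokes sched_forget when it is set).
 */
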
#ifdef CONFIG_SMP
#define xnsched_cpu(__sched__)  ((__sched__)->cpu)
#else /* !CONFIG_SMP */
#define xnsched_cpu(__sched__)  ({ (void)__sched__; 0; })
#endif /* CONFIG_SMP */
/* Test the resched flag of the given scheduler. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
        return testbits(sched->status, XNRESCHED);
}
/* Set the self-resched flag of the current scheduler. */
#define xnsched_set_self_resched(__sched__) do {                \
  XENO_BUGON(NUCLEUS, __sched__ != xnpod_current_sched());      \
  __setbits((__sched__)->status, XNRESCHED);                    \
} while (0)
/* Set the resched flag of the given scheduler. */
#ifdef CONFIG_SMP
#define xnsched_set_resched(__sched__) do {                             \
  xnsched_t *current_sched = xnpod_current_sched();                     \
  if (current_sched == (__sched__))                                     \
      __setbits(current_sched->status, XNRESCHED);                      \
  else if (!xnsched_resched_p(__sched__)) {                             \
      xnarch_cpu_set(xnsched_cpu(__sched__), current_sched->resched);   \
      __setbits((__sched__)->status, XNRESCHED);                        \
      __setbits(current_sched->status, XNRESCHED);                      \
  }                                                                     \
} while (0)
#else /* !CONFIG_SMP */
#define xnsched_set_resched     xnsched_set_self_resched
#endif /* !CONFIG_SMP */
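
/*
 * Usage sketch (illustrative): after making a thread runnable on a
 * possibly remote scheduler slot, mark that slot as needing
 * rescheduling, then let the rescheduling procedure kick in:
 *
 *      xnsched_set_resched(thread->sched);
 *      xnpod_schedule();
 */
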
void xnsched_zombie_hooks(struct xnthread *thread);

void __xnsched_finalize_zombie(struct xnsched *sched);

static inline void xnsched_finalize_zombie(struct xnsched *sched)
{
        if (sched->zombie)
                __xnsched_finalize_zombie(sched);
}
#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch() xnpod_schedule()

static inline
int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
        return testbits(sched->status, XNRESCHED);
}

#else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */

#ifdef CONFIG_SMP
#define xnsched_finish_unlocked_switch(__sched__)       \
        ({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());    \
                xnpod_current_sched(); })
#else /* !CONFIG_SMP */
#define xnsched_finish_unlocked_switch(__sched__)       \
        ({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());    \
                (__sched__); })
#endif /* !CONFIG_SMP */

#define xnsched_resched_after_unlocked_switch()         do { } while(0)

#define xnsched_maybe_resched_after_unlocked_switch(sched)      \
        ({ (void)(sched); 0; })

#endif /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */
#ifdef CONFIG_XENO_OPT_WATCHDOG
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
        sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */

#include <nucleus/sched-idle.h>
#include <nucleus/sched-rt.h>
 
int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init(struct xnsched *sched, int cpu);

void xnsched_destroy(struct xnsched *sched);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);
 
int xnsched_set_policy(struct xnthread *thread,
                       struct xnsched_class *sched_class,
                       const union xnsched_policy_param *p);
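
/*
 * Usage sketch (illustrative): switching a thread to the built-in RT
 * class at priority 10; error handling omitted, and the .rt member
 * layout is assumed from sched-rt.h:
 *
 *      union xnsched_policy_param param = { .rt = { .prio = 10 } };
 *      int ret = xnsched_set_policy(thread, &xnsched_class_rt, &param);
 */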
 
void xnsched_track_policy(struct xnthread *thread,
                          struct xnthread *target);

void xnsched_migrate(struct xnthread *curr,
                     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
                             struct xnsched *sched);
 
/*!
 * \brief Rotate a scheduler runqueue.
 *
 * The specified scheduling class is requested to rotate its runqueue
 * for the given scheduler slot, i.e. move the thread leading that
 * runqueue to its end, so that round-robin scheduling takes place.
 */
static inline void xnsched_rotate(struct xnsched *sched,
                                  struct xnsched_class *sched_class,
                                  const union xnsched_policy_param *sched_param)
{
        sched_class->sched_rotate(sched, sched_param);
}
 
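/*
 * Usage sketch (illustrative): round-robin the RT class on the
 * current CPU; XNSCHED_RUNPRIO is assumed to stand for the current
 * thread's priority, as handled by the RT class rotation handler:
 *
 *      union xnsched_policy_param param = { .rt = { .prio = XNSCHED_RUNPRIO } };
 *      xnsched_rotate(xnpod_current_sched(), &xnsched_class_rt, &param);
 */
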
static inline int xnsched_init_tcb(struct xnthread *thread)
{
        int ret = 0;

        xnsched_idle_init_tcb(thread);
        xnsched_rt_init_tcb(thread);
#ifdef CONFIG_XENO_OPT_SCHED_TP
        ret = xnsched_tp_init_tcb(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        ret = xnsched_sporadic_init_tcb(thread);
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
        return ret;
}
 
static inline int xnsched_root_priority(struct xnsched *sched)
{
        return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
        return sched->rootcb.sched_class;
}
 
static inline void xnsched_tick(struct xnthread *curr, struct xntbase *tbase)
{
        struct xnsched_class *sched_class = curr->sched_class;
        /*
         * A thread that undergoes round-robin scheduling only
         * consumes its time slice when it runs within its own
         * scheduling class (i.e. not while boosted into another
         * class), and does not hold the scheduler lock.
         */
        if (xnthread_time_base(curr) == tbase &&
            sched_class != &xnsched_class_idle &&
            sched_class == curr->base_class &&
            xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNLOCK|XNRRB) == XNRRB)
                sched_class->sched_tick(curr);
}
 
#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_enqueue(thread);
}
 
static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_dequeue(thread);
}
 
static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_requeue(thread);
}
 
static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
        return thread->bprio + thread->sched_class->weight;
}
 
static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
        return thread->cprio + thread->sched_class->weight;
}
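
/*
 * Example: with XNSCHED_CLASS_MAX_PRIO = 1024, a thread at current
 * priority 50 in a class of weight XNSCHED_CLASS_WEIGHT(2) yields
 * 2 * 1024 + 50 = 2098, so weighted priorities from distinct classes
 * never collide as long as per-class priorities stay below 1024.
 */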
 
static inline void xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        thread->sched_class->sched_setparam(thread, p);
}
 
static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        thread->sched_class->sched_getparam(thread, p);
}
 
static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        thread->sched_class->sched_trackprio(thread, p);
}
 
static inline void xnsched_forget(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->base_class;

        --sched_class->nthreads;

        if (sched_class->sched_forget)
                sched_class->sched_forget(thread);
}
 
#ifdef CONFIG_XENO_OPT_PRIOCPL

static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
                                                struct xnthread *thread)
{
        return thread->sched_class->sched_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
        thread->sched_class->sched_pop_rpi(thread);
}
 
static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class->sched_suspend_rpi)
                sched_class->sched_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class->sched_resume_rpi)
                sched_class->sched_resume_rpi(thread);
}

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
 
static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_enqueue(thread);
}
 
static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_dequeue(thread);
}
 
static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_requeue(thread);
}
 
static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
        return thread->bprio;
}
 
static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
        return thread->cprio;
}
 
static inline void xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_setparam(thread, p);
        else
                __xnsched_idle_setparam(thread, p);
}
 
static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_getparam(thread, p);
        else
                __xnsched_idle_getparam(thread, p);
}
 
static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_trackprio(thread, p);
        else
                __xnsched_idle_trackprio(thread, p);
}
 
static inline void xnsched_forget(struct xnthread *thread)
{
        --thread->base_class->nthreads;
        __xnsched_rt_forget(thread);
}
 
#ifdef CONFIG_XENO_OPT_PRIOCPL

static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
                                                struct xnthread *thread)
{
        return __xnsched_rt_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
        __xnsched_rt_pop_rpi(thread);
}

static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
        __xnsched_rt_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
        __xnsched_rt_resume_rpi(thread);
}

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */
 
#ifdef CONFIG_XENO_OPT_PRIOCPL

void xnsched_renice_root(struct xnsched *sched,
                         struct xnthread *target);

struct xnthread *xnsched_peek_rpi(struct xnsched *sched);

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#else /* !(__KERNEL__ || __XENO_SIM__) */

#include <nucleus/sched-idle.h>
#include <nucleus/sched-rt.h>

#endif /* __KERNEL__ || __XENO_SIM__ */

#endif /* !_XENO_NUCLEUS_SCHED_H */
 