Xenomai API 2.5.6.1
include/nucleus/sched.h

#ifndef _XENO_NUCLEUS_SCHED_H
#define _XENO_NUCLEUS_SCHED_H

#include <nucleus/thread.h>

#if defined(__KERNEL__) || defined(__XENO_SIM__)

#include <nucleus/schedqueue.h>
#include <nucleus/sched-tp.h>
#include <nucleus/sched-sporadic.h>

/* Sched status flags (shared bits, may be set from remote CPUs). */
#define XNKCOUT         0x80000000      /* Sched callout context */
#define XNINTCK         0x40000000      /* In master tick handler context */
#define XNSWLOCK        0x20000000      /* In context switch */
#define XNRESCHED       0x10000000      /* Needs rescheduling */

/* Sched local flags (only touched by the owner CPU). */
#define XNHTICK         0x00008000      /* Host tick pending */
#define XNINIRQ         0x00004000      /* In IRQ handling context */
#define XNHDEFER        0x00002000      /* Host tick deferred */

/*
 * Sched RPI status flags.  RPI (root priority inheritance) propagates
 * the priority of the leading relaxed shadow thread to the root
 * (Linux) thread.
 */
#define XNRPICK         0x80000000      /* Check RPI state */

/* Per-CPU context of the built-in real-time scheduling class. */
struct xnsched_rt {
        xnsched_queue_t runnable;       /*!< Runnable thread queue. */
#ifdef CONFIG_XENO_OPT_PRIOCPL
        xnsched_queue_t relaxed;        /*!< Relaxed thread queue. */
#endif /* CONFIG_XENO_OPT_PRIOCPL */
};

/*
 * Scheduling information structure; one instance per CPU.
 */
typedef struct xnsched {

        xnflags_t status;               /*!< Scheduler status bitmask. */
        xnflags_t lflags;               /*!< Scheduler local flags bitmask. */
        int cpu;                        /*!< CPU this scheduler runs on. */
        struct xnthread *curr;          /*!< Current thread. */
#ifdef CONFIG_SMP
        xnarch_cpumask_t resched;       /*!< Mask of CPUs needing rescheduling. */
#endif

        struct xnsched_rt rt;           /*!< Context of built-in real-time class. */
#ifdef CONFIG_XENO_OPT_SCHED_TP
        struct xnsched_tp tp;           /*!< Context of TP class. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        struct xnsched_sporadic pss;    /*!< Context of sporadic scheduling class. */
#endif

        xntimerq_t timerqueue;          /*!< Core timer queue. */
        volatile unsigned inesting;     /*!< Interrupt nesting level. */
        struct xntimer htimer;          /*!< Host timer. */
        struct xnthread *zombie;        /*!< Thread pending finalization. */
        struct xnthread rootcb;         /*!< Root thread control block. */

#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
        struct xnthread *last;          /*!< Thread last switched out. */
#endif

#ifdef CONFIG_XENO_HW_FPU
        struct xnthread *fpuholder;     /*!< Thread owning the current FPU context. */
#endif

#ifdef CONFIG_XENO_OPT_WATCHDOG
        struct xntimer wdtimer;         /*!< Watchdog timer object. */
        int wdcount;                    /*!< Watchdog tick count. */
#endif

#ifdef CONFIG_XENO_OPT_STATS
        xnticks_t last_account_switch;  /*!< Last account switch date (ticks). */
        xnstat_exectime_t *current_account;     /*!< Currently active account. */
#endif

#ifdef CONFIG_XENO_OPT_PRIOCPL
        DECLARE_XNLOCK(rpilock);        /*!< RPI list lock. */
        xnflags_t rpistatus;            /*!< RPI-specific status flags. */
#endif

#ifdef CONFIG_XENO_OPT_PERVASIVE
        struct task_struct *gatekeeper; /*!< Gatekeeper kernel thread for this CPU. */
        wait_queue_head_t gkwaitq;      /*!< Gatekeeper wait queue. */
        struct semaphore gksync;        /*!< Gatekeeper synchronization semaphore. */
        struct xnthread *gktarget;      /*!< Thread being hardened by the gatekeeper. */
#endif

} xnsched_t;
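
/*
 * Example (sketch): testing whether the given scheduler is currently
 * handling an interrupt, using the local flags defined above.
 * example_in_irq() is a made-up helper.
 */
static inline int example_in_irq(struct xnsched *sched)
{
        return testbits(sched->lflags, XNINIRQ);
}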

union xnsched_policy_param;

/* Scheduling class descriptor. */
struct xnsched_class {

        void (*sched_init)(struct xnsched *sched);
        void (*sched_enqueue)(struct xnthread *thread);
        void (*sched_dequeue)(struct xnthread *thread);
        void (*sched_requeue)(struct xnthread *thread);
        struct xnthread *(*sched_pick)(struct xnsched *sched);
        void (*sched_tick)(struct xnthread *curr);
        void (*sched_rotate)(struct xnsched *sched,
                             const union xnsched_policy_param *p);
        void (*sched_migrate)(struct xnthread *thread,
                              struct xnsched *sched);
        void (*sched_setparam)(struct xnthread *thread,
                               const union xnsched_policy_param *p);
        void (*sched_getparam)(struct xnthread *thread,
                               union xnsched_policy_param *p);
        void (*sched_trackprio)(struct xnthread *thread,
                                const union xnsched_policy_param *p);
        int (*sched_declare)(struct xnthread *thread,
                             const union xnsched_policy_param *p);
        void (*sched_forget)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_PRIOCPL
        struct xnthread *(*sched_push_rpi)(struct xnsched *sched,
                                           struct xnthread *thread);
        void (*sched_pop_rpi)(struct xnthread *thread);
        struct xnthread *(*sched_peek_rpi)(struct xnsched *sched);
        void (*sched_suspend_rpi)(struct xnthread *thread);
        void (*sched_resume_rpi)(struct xnthread *thread);
#endif
#ifdef CONFIG_PROC_FS
        void (*sched_init_proc)(struct proc_dir_entry *root);
        void (*sched_cleanup_proc)(struct proc_dir_entry *root);
        struct proc_dir_entry *proc;
#endif
        int nthreads;                   /*!< Number of threads attached to this class. */
        struct xnsched_class *next;     /*!< Next class in the registration chain. */
        int weight;                     /*!< Class weight, see XNSCHED_CLASS_WEIGHT(). */
        const char *name;               /*!< Class name. */
};
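
/*
 * Compiled-in classes are chained through their next pointers by
 * xnsched_register_classes(); xnsched_pick_next() then polls them from
 * the highest class weight downward until one hands out a runnable
 * thread.
 */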

/*
 * Class weights space priorities far enough apart that any thread of a
 * higher-weight class outranks every thread of a lower-weight one; see
 * xnsched_weighted_bprio()/xnsched_weighted_cprio() below.
 */
#define XNSCHED_CLASS_MAX_THREADS       32768
#define XNSCHED_CLASS_WEIGHT(n)         ((n) * XNSCHED_CLASS_MAX_THREADS)
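
/*
 * Illustrative sketch only, not part of this header: the skeleton of a
 * hypothetical scheduling class plugged into the ops table above.  All
 * myclass_* names are made up; see the RT class implementation for a
 * real instance.  Extra classes are dispatched generically only when
 * CONFIG_XENO_OPT_SCHED_CLASSES is enabled, and would be linked in by
 * xnsched_register_classes().
 */
static struct xnthread *myclass_pick(struct xnsched *sched)
{
        return NULL;    /* Nothing runnable in this class. */
}

static struct xnsched_class xnsched_class_myclass = {
        .sched_pick     = myclass_pick,         /* Needed to hand out threads. */
        .weight         = XNSCHED_CLASS_WEIGHT(2),
        .name           = "myclass",
        /* Handlers left out default to NULL. */
};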

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO   0x80000000

#ifdef CONFIG_SMP
#define xnsched_cpu(__sched__)  ((__sched__)->cpu)
#else /* !CONFIG_SMP */
#define xnsched_cpu(__sched__)  ({ (void)__sched__; 0; })
#endif /* CONFIG_SMP */
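
/*
 * Example (sketch, for a call site with nucleus/pod.h in scope):
 * fetching the scheduler of the current CPU and querying its CPU
 * number.  example_current_cpu() is a made-up helper.
 */
static inline int example_current_cpu(void)
{
        struct xnsched *sched = xnpod_current_sched();
        return xnsched_cpu(sched);
}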

/* Test resched flag of given sched. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
        return testbits(sched->status, XNRESCHED);
}

/* Set self resched flag for the given scheduler. */
#define xnsched_set_self_resched(__sched__) do {                \
  XENO_BUGON(NUCLEUS, __sched__ != xnpod_current_sched());      \
  __setbits((__sched__)->status, XNRESCHED);                    \
} while (0)

/* Set resched flag for the given scheduler. */
#ifdef CONFIG_SMP
#define xnsched_set_resched(__sched__) do {                             \
  xnsched_t *current_sched = xnpod_current_sched();                     \
  if (current_sched == (__sched__))                                     \
      __setbits(current_sched->status, XNRESCHED);                      \
  else if (!xnsched_resched_p(__sched__)) {                             \
      xnarch_cpu_set(xnsched_cpu(__sched__), current_sched->resched);   \
      __setbits((__sched__)->status, XNRESCHED);                        \
      __setbits(current_sched->status, XNRESCHED);                      \
  }                                                                     \
} while (0)
#else /* !CONFIG_SMP */
#define xnsched_set_resched     xnsched_set_self_resched
#endif /* !CONFIG_SMP */
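
/*
 * Example (sketch, for a call site with nucleus/pod.h in scope):
 * marking a target scheduler for rescheduling, then letting
 * xnpod_schedule() carry out the switch.  The resched flags must be
 * manipulated with the nucleus superlock held.  example_kick() is a
 * made-up helper.
 */
static inline void example_kick(struct xnsched *sched)
{
        spl_t s;

        xnlock_get_irqsave(&nklock, s);
        xnsched_set_resched(sched);
        xnlock_put_irqrestore(&nklock, s);

        xnpod_schedule();
}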

/*
 * Zombie handling: a self-deleting thread cannot release its control
 * block while still running on its own stack, so it is parked in
 * sched->zombie and finalized from the next context switch.
 */
void xnsched_zombie_hooks(struct xnthread *thread);

void __xnsched_finalize_zombie(struct xnsched *sched);

static inline void xnsched_finalize_zombie(struct xnsched *sched)
{
        if (sched->zombie)
                __xnsched_finalize_zombie(sched);
}

/*
 * With CONFIG_XENO_HW_UNLOCKED_SWITCH, the architecture performs
 * context switches with the superlock released; the epilogue below
 * re-resolves the current scheduler and may trigger a deferred round
 * of rescheduling.
 */
#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch() xnpod_schedule()

static inline
int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
        return testbits(sched->status, XNRESCHED);
}

#else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */

#ifdef CONFIG_SMP
#define xnsched_finish_unlocked_switch(__sched__)       \
        ({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());    \
                xnpod_current_sched(); })
#else /* !CONFIG_SMP */
#define xnsched_finish_unlocked_switch(__sched__)       \
        ({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());    \
                (__sched__); })
#endif /* !CONFIG_SMP */

#define xnsched_resched_after_unlocked_switch()         do { } while(0)

#define xnsched_maybe_resched_after_unlocked_switch(sched)      \
        ({ (void)(sched); 0; })

#endif /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */

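/*
 * The scheduler watchdog catches runaway real-time threads that would
 * otherwise lock out Linux; wdcount accumulates watchdog ticks and is
 * cleared below whenever sane activity resumes.
 */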
#ifdef CONFIG_XENO_OPT_WATCHDOG
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
        sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */

#include <nucleus/sched-idle.h>
#include <nucleus/sched-rt.h>

void xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init(struct xnsched *sched, int cpu);

void xnsched_destroy(struct xnsched *sched);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
                       struct xnsched_class *sched_class,
                       const union xnsched_policy_param *p);
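
/*
 * Example (sketch): switching a thread to the RT class at priority 50,
 * assuming the policy parameter union exposes an rt.prio member as set
 * up by the class headers.  example_make_rt() is a made-up helper.
 */
static inline int example_make_rt(struct xnthread *thread)
{
        union xnsched_policy_param param = { .rt = { .prio = 50 } };

        return xnsched_set_policy(thread, &xnsched_class_rt, &param);
}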

void xnsched_track_policy(struct xnthread *thread,
                          struct xnthread *target);

void xnsched_migrate(struct xnthread *thread,
                     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
                             struct xnsched *sched);

/*
 * Ask the given scheduling class to rotate its runqueue on this
 * scheduler: the thread leading the targeted priority group moves to
 * the end of that group.
 */
static inline void xnsched_rotate(struct xnsched *sched,
                                  struct xnsched_class *sched_class,
                                  const union xnsched_policy_param *sched_param)
{
        sched_class->sched_rotate(sched, sched_param);
}
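
/*
 * Example (sketch): round-robin rotation of the RT runqueue at the
 * current thread's priority, using the XNSCHED_RUNPRIO placeholder
 * defined above.  example_rotate_rt() is a made-up helper.
 */
static inline void example_rotate_rt(struct xnsched *sched)
{
        union xnsched_policy_param param = { .rt = { .prio = XNSCHED_RUNPRIO } };

        xnsched_rotate(sched, &xnsched_class_rt, &param);
}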

/*
 * Set up per-class TCB state at thread initialization time; a class
 * whose TCB setup fails aborts thread creation.
 */
static inline int xnsched_init_tcb(struct xnthread *thread)
{
        int ret = 0;

        xnsched_idle_init_tcb(thread);
        xnsched_rt_init_tcb(thread);
#ifdef CONFIG_XENO_OPT_SCHED_TP
        ret = xnsched_tp_init_tcb(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        ret = xnsched_sporadic_init_tcb(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
        return ret;
}

static inline int xnsched_root_priority(struct xnsched *sched)
{
        return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
        return sched->rootcb.sched_class;
}

static inline void xnsched_tick(struct xnthread *curr, struct xntbase *tbase)
{
        struct xnsched_class *sched_class = curr->sched_class;
        /*
         * A thread that undergoes round-robin scheduling only
         * consumes its time slice when it runs within its own
         * scheduling class, which excludes temporary PIP boosts.
         * The masked state test below succeeds only when XNRRB is
         * set while all blocking bits and XNLOCK are clear.
         */
        if (xnthread_time_base(curr) == tbase &&
            sched_class != &xnsched_class_idle &&
            sched_class == curr->base_class &&
            xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNLOCK|XNRRB) == XNRRB)
                sched_class->sched_tick(curr);
}
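
/*
 * Example caller (sketch): per-tick time slice accounting against the
 * master time base (nktbase, from nucleus/timebase.h), charging the
 * thread currently running on this CPU.  example_account_tick() is a
 * made-up helper.
 */
static inline void example_account_tick(struct xnsched *sched)
{
        xnsched_tick(sched->curr, &nktbase);
}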

#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_requeue(thread);
}

static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
        return thread->bprio + thread->sched_class->weight;
}

static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
        return thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        thread->sched_class->sched_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        thread->sched_class->sched_trackprio(thread, p);
}

static inline void xnsched_forget(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->base_class;

        --sched_class->nthreads;

        if (sched_class->sched_forget)
                sched_class->sched_forget(thread);
}

#ifdef CONFIG_XENO_OPT_PRIOCPL

static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
                                                struct xnthread *thread)
{
        return thread->sched_class->sched_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
        thread->sched_class->sched_pop_rpi(thread);
}

static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class->sched_suspend_rpi)
                sched_class->sched_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class->sched_resume_rpi)
                sched_class->sched_resume_rpi(thread);
}

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

/*
 * If only the RT and IDLE scheduling classes are compiled in, we can
 * fully inline common helpers for dealing with those.
 */

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_requeue(thread);
}

static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
        return thread->bprio;
}

static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
        return thread->cprio;
}

static inline void xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_setparam(thread, p);
        else
                __xnsched_idle_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_getparam(thread, p);
        else
                __xnsched_idle_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_trackprio(thread, p);
        else
                __xnsched_idle_trackprio(thread, p);
}

static inline void xnsched_forget(struct xnthread *thread)
{
        --thread->base_class->nthreads;
        __xnsched_rt_forget(thread);
}

#ifdef CONFIG_XENO_OPT_PRIOCPL

static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
                                                struct xnthread *thread)
{
        return __xnsched_rt_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
        __xnsched_rt_pop_rpi(thread);
}

static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
        __xnsched_rt_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
        __xnsched_rt_resume_rpi(thread);
}

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */

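/*
 * Under RPI (root priority inheritance), xnsched_renice_root() below
 * adjusts the root thread priority to track the leading relaxed
 * shadow, as returned by xnsched_peek_rpi().
 */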
void xnsched_renice_root(struct xnsched *sched,
                         struct xnthread *target);

struct xnthread *xnsched_peek_rpi(struct xnsched *sched);

#else /* !(__KERNEL__ || __XENO_SIM__) */

#include <nucleus/sched-idle.h>
#include <nucleus/sched-rt.h>

#endif /* !(__KERNEL__ || __XENO_SIM__) */

#endif /* !_XENO_NUCLEUS_SCHED_H */