Xenomai API  2.6.5
sched.h
/* This file is part of the Xenomai project. */

#ifndef _XENO_NUCLEUS_SCHED_H
#define _XENO_NUCLEUS_SCHED_H

#include <nucleus/thread.h>

#if defined(__KERNEL__) || defined(__XENO_SIM__)

#include <nucleus/schedqueue.h>
#include <nucleus/sched-tp.h>        /* Definitions for the TP scheduling class. */
#include <nucleus/sched-sporadic.h>  /* Definitions for the SSP scheduling class. */
#include <nucleus/vfile.h>

/* Sched status flags */
#define XNKCOUT    0x80000000    /* Sched callout context */
#define XNINTCK    0x40000000    /* In master tick handler context */
#define XNINSW     0x20000000    /* In context switch */
#define XNRESCHED  0x10000000    /* Needs rescheduling */

/* Sched local flags */
#define XNHTICK    0x00008000    /* Host tick pending */
#define XNINIRQ    0x00004000    /* In IRQ handling context */
#define XNHDEFER   0x00002000    /* Host tick deferred */
#define XNINLOCK   0x00001000    /* Scheduler locked */

/* Sched RPI status flags */
#define XNRPICK    0x80000000    /* Check RPI state */

struct xnsched_rt {
        xnsched_queue_t runnable;       /*!< Runnable thread queue. */
#ifdef CONFIG_XENO_OPT_PRIOCPL
        xnsched_queue_t relaxed;        /*!< Relaxed thread queue. */
#endif /* CONFIG_XENO_OPT_PRIOCPL */
};

/*!
 * \brief Scheduling information structure.
 */
typedef struct xnsched {

        xnflags_t status;               /*!< Scheduler specific status bitmask. */
        xnflags_t lflags;               /*!< Scheduler specific local flags bitmask. */
        int cpu;                        /*!< Processor this scheduler runs on. */
        struct xnthread *curr;          /*!< Current thread. */
#ifdef CONFIG_SMP
        xnarch_cpumask_t resched;       /*!< Mask of CPUs needing rescheduling. */
#endif

        struct xnsched_rt rt;           /*!< Context of built-in real-time class. */
#ifdef CONFIG_XENO_OPT_SCHED_TP
        struct xnsched_tp tp;           /*!< Context of TP class. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        struct xnsched_sporadic pss;    /*!< Context of sporadic scheduling class. */
#endif

        xntimerq_t timerqueue;          /*!< Core timer queue. */
        volatile unsigned inesting;     /*!< Interrupt nesting level. */
        struct xntimer htimer;          /*!< Host timer. */
        struct xnthread *zombie;        /*!< Thread pending finalization. */
        struct xnthread rootcb;         /*!< Root thread control block. */
#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
        struct xnthread *last;          /*!< Last switched-out thread. */
#endif

#ifdef CONFIG_XENO_HW_FPU
        struct xnthread *fpuholder;     /*!< Thread owning the current FPU context. */
#endif

#ifdef CONFIG_XENO_OPT_WATCHDOG
        struct xntimer wdtimer;         /*!< Watchdog timer object. */
        int wdcount;                    /*!< Watchdog tick count. */
#endif

#ifdef CONFIG_XENO_OPT_STATS
        xnticks_t last_account_switch;  /*!< Last account switch date (ticks). */
        xnstat_exectime_t *current_account; /*!< Currently active account. */
#endif

#ifdef CONFIG_XENO_OPT_PRIOCPL
        DECLARE_XNLOCK(rpilock);        /*!< RPI lock. */
        xnflags_t rpistatus;            /*!< RPI status flags. */
#endif

#ifdef CONFIG_XENO_OPT_PERVASIVE
        struct task_struct *gatekeeper; /*!< Gatekeeper kernel task. */
        struct semaphore gksync;        /*!< Gatekeeper synchronization semaphore. */
        struct xnthread *gktarget;      /*!< Target thread of the gatekeeper. */
#endif

} xnsched_t;

union xnsched_policy_param;

struct xnsched_class {

        void (*sched_init)(struct xnsched *sched);
        void (*sched_enqueue)(struct xnthread *thread);
        void (*sched_dequeue)(struct xnthread *thread);
        void (*sched_requeue)(struct xnthread *thread);
        struct xnthread *(*sched_pick)(struct xnsched *sched);
        void (*sched_tick)(struct xnthread *curr);
        void (*sched_rotate)(struct xnsched *sched,
                             const union xnsched_policy_param *p);
        void (*sched_migrate)(struct xnthread *thread,
                              struct xnsched *sched);
        void (*sched_setparam)(struct xnthread *thread,
                               const union xnsched_policy_param *p);
        void (*sched_getparam)(struct xnthread *thread,
                               union xnsched_policy_param *p);
        void (*sched_trackprio)(struct xnthread *thread,
                                const union xnsched_policy_param *p);
        int (*sched_declare)(struct xnthread *thread,
                             const union xnsched_policy_param *p);
        void (*sched_forget)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_PRIOCPL
        struct xnthread *(*sched_push_rpi)(struct xnsched *sched,
                                           struct xnthread *thread);
        void (*sched_pop_rpi)(struct xnthread *thread);
        struct xnthread *(*sched_peek_rpi)(struct xnsched *sched);
        void (*sched_suspend_rpi)(struct xnthread *thread);
        void (*sched_resume_rpi)(struct xnthread *thread);
#endif
#ifdef CONFIG_XENO_OPT_VFILE
        int (*sched_init_vfile)(struct xnsched_class *schedclass,
                                struct xnvfile_directory *vfroot);
        void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif
        int nthreads;
        struct xnsched_class *next;
        int weight;
        const char *name;
};

#define XNSCHED_CLASS_MAX_PRIO  1024
#define XNSCHED_CLASS_WEIGHT(n) (n * XNSCHED_CLASS_MAX_PRIO)

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO   0x80000000

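/*
 * Illustration (not part of the original header): an external policy
 * plugs into the nucleus by filling the xnsched_class hook table
 * above.  The class, hook and weight below are made-up placeholders
 * meant only to show the shape of such a declaration; the real
 * in-tree classes (RT, TP, sporadic, idle) live in their respective
 * sched-*.h/.c files, and which hooks are mandatory is not spelled
 * out here.
 */
static struct xnthread *example_pick(struct xnsched *sched)
{
        return &sched->rootcb;  /* trivial pick: hand the CPU back to the root thread */
}

struct xnsched_class xnsched_class_example = {
        .sched_pick = example_pick,
        .weight     = XNSCHED_CLASS_WEIGHT(1),  /* priority band of this class */
        .name       = "example",
};
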
#ifdef CONFIG_SMP
#define xnsched_cpu(__sched__)  ((__sched__)->cpu)
#else /* !CONFIG_SMP */
#define xnsched_cpu(__sched__)  ({ (void)__sched__; 0; })
#endif /* CONFIG_SMP */

/* Test resched flag of given sched. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
        return testbits(sched->status, XNRESCHED);
}

/* Set self resched flag for the given scheduler. */
#define xnsched_set_self_resched(__sched__) do {                        \
        XENO_BUGON(NUCLEUS, __sched__ != xnpod_current_sched());        \
        __setbits((__sched__)->status, XNRESCHED);                       \
} while (0)

/* Set resched flag for the given scheduler. */
#ifdef CONFIG_SMP
#define xnsched_set_resched(__sched__) do {                              \
        xnsched_t *current_sched = xnpod_current_sched();                \
        if (current_sched == (__sched__))                                \
                __setbits(current_sched->status, XNRESCHED);             \
        else if (!xnsched_resched_p(__sched__)) {                        \
                xnarch_cpu_set(xnsched_cpu(__sched__), current_sched->resched); \
                __setbits((__sched__)->status, XNRESCHED);               \
                __setbits(current_sched->status, XNRESCHED);             \
        }                                                                \
} while (0)
#else /* !CONFIG_SMP */
#define xnsched_set_resched xnsched_set_self_resched
#endif /* !CONFIG_SMP */
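
/*
 * Usage sketch (not from the nucleus sources): after making a thread
 * runnable on another scheduler, a caller would typically flag that
 * scheduler and then run the rescheduling procedure, which honours
 * XNRESCHED locally and kicks remote CPUs as needed.  xnpod_schedule()
 * is declared in nucleus/pod.h; the thread->sched backlink is assumed
 * from nucleus/thread.h, and the function name is a placeholder.
 */
static inline void example_resched_thread_cpu(struct xnthread *thread)
{
        xnsched_set_resched(thread->sched);     /* mark the target scheduler */
        xnpod_schedule();                       /* apply XNRESCHED now */
}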

void xnsched_zombie_hooks(struct xnthread *thread);

void __xnsched_finalize_zombie(struct xnsched *sched);

static inline void xnsched_finalize_zombie(struct xnsched *sched)
{
        if (sched->zombie)
                __xnsched_finalize_zombie(sched);
}

#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch() xnpod_schedule()

static inline
int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
        return testbits(sched->status, XNRESCHED);
}

#else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */

#ifdef CONFIG_SMP
#define xnsched_finish_unlocked_switch(__sched__)        \
        ({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());     \
           xnpod_current_sched(); })
#else /* !CONFIG_SMP */
#define xnsched_finish_unlocked_switch(__sched__)        \
        ({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());     \
           (__sched__); })
#endif /* !CONFIG_SMP */

#define xnsched_resched_after_unlocked_switch() do { } while (0)

#define xnsched_maybe_resched_after_unlocked_switch(sched) \
        ({ (void)(sched); 0; })

#endif /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */

#ifdef CONFIG_XENO_OPT_WATCHDOG
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
        sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */

#include <nucleus/sched-idle.h>  /* Definitions for the IDLE scheduling class. */
#include <nucleus/sched-rt.h>    /* Definitions for the RT scheduling class. */

int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init(struct xnsched *sched, int cpu);

void xnsched_destroy(struct xnsched *sched);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
                       struct xnsched_class *sched_class,
                       const union xnsched_policy_param *p);

void xnsched_track_policy(struct xnthread *thread,
                          struct xnthread *target);

void xnsched_migrate(struct xnthread *curr,
                     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
                             struct xnsched *sched);

/*!
 * \brief Rotate a scheduler runqueue.
 */
static inline void xnsched_rotate(struct xnsched *sched,
                                  struct xnsched_class *sched_class,
                                  const union xnsched_policy_param *sched_param)
{
        sched_class->sched_rotate(sched, sched_param);
}

static inline int xnsched_init_tcb(struct xnthread *thread)
{
        int ret = 0;

        xnsched_idle_init_tcb(thread);
        xnsched_rt_init_tcb(thread);
#ifdef CONFIG_XENO_OPT_SCHED_TP
        ret = xnsched_tp_init_tcb(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        ret = xnsched_sporadic_init_tcb(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
        return ret;
}

static inline int xnsched_root_priority(struct xnsched *sched)
{
        return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
        return sched->rootcb.sched_class;
}

static inline void xnsched_tick(struct xnthread *curr, struct xntbase *tbase)
{
        struct xnsched_class *sched_class = curr->sched_class;
        /*
         * A thread that undergoes round-robin scheduling only
         * consumes its time slice when it runs within its own
         * scheduling class, which excludes temporary PIP boosts.
         * The state test below requires XNRRB to be set while none
         * of the blocking bits nor XNLOCK (scheduler lock) is.
         */
        if (xnthread_time_base(curr) == tbase &&
            sched_class != &xnsched_class_idle &&
            sched_class == curr->base_class &&
            xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNLOCK|XNRRB) == XNRRB)
                sched_class->sched_tick(curr);
}
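
/*
 * Usage sketch (illustrative, not from the nucleus sources): a
 * periodic tick handler charges the running thread against the time
 * base that just ticked.  xnpod_current_sched() comes from
 * nucleus/pod.h; the handler name is a placeholder.
 */
static inline void example_charge_quantum(struct xntbase *tbase)
{
        struct xnsched *sched = xnpod_current_sched();

        xnsched_tick(sched->curr, tbase);   /* may invoke sched_class->sched_tick() */
}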

#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_requeue(thread);
}

static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
        return thread->bprio + thread->sched_class->weight;
}

static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
        return thread->cprio + thread->sched_class->weight;
}
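
/*
 * Worked example of the weighting scheme (numbers are illustrative):
 * with XNSCHED_CLASS_MAX_PRIO == 1024, a class declared with
 * XNSCHED_CLASS_WEIGHT(2) has weight 2048, so one of its threads at
 * cprio 10 gets a weighted cprio of 2058.  Any thread of a class
 * weighted XNSCHED_CLASS_WEIGHT(1) stays below 1024 + 1024 = 2048 as
 * long as priorities remain under XNSCHED_CLASS_MAX_PRIO, so the
 * heavier class always outranks the lighter one regardless of the
 * threads' native priorities.
 */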

static inline void xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        thread->sched_class->sched_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        thread->sched_class->sched_trackprio(thread, p);
}

static inline void xnsched_forget(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->base_class;

        --sched_class->nthreads;

        if (sched_class->sched_forget)
                sched_class->sched_forget(thread);
}

#ifdef CONFIG_XENO_OPT_PRIOCPL

static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
                                                struct xnthread *thread)
{
        return thread->sched_class->sched_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
        thread->sched_class->sched_pop_rpi(thread);
}

static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class->sched_suspend_rpi)
                sched_class->sched_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class->sched_resume_rpi)
                sched_class->sched_resume_rpi(thread);
}

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

/*
 * If only the RT and IDLE scheduling classes are compiled in, we can
 * fully inline common helpers for dealing with those.
 */

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_requeue(thread);
}

static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
        return thread->bprio;
}

static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
        return thread->cprio;
}

static inline void xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_setparam(thread, p);
        else
                __xnsched_idle_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_getparam(thread, p);
        else
                __xnsched_idle_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_trackprio(thread, p);
        else
                __xnsched_idle_trackprio(thread, p);
}

static inline void xnsched_forget(struct xnthread *thread)
{
        --thread->base_class->nthreads;
        __xnsched_rt_forget(thread);
}

#ifdef CONFIG_XENO_OPT_PRIOCPL

static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
                                                struct xnthread *thread)
{
        return __xnsched_rt_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
        __xnsched_rt_pop_rpi(thread);
}

static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
        __xnsched_rt_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
        __xnsched_rt_resume_rpi(thread);
}

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */

void xnsched_renice_root(struct xnsched *sched,
                         struct xnthread *target);

struct xnthread *xnsched_peek_rpi(struct xnsched *sched);

#else /* !(__KERNEL__ || __XENO_SIM__) */

#include <nucleus/sched-idle.h>
#include <nucleus/sched-rt.h>

#endif /* !(__KERNEL__ || __XENO_SIM__) */

#endif /* !_XENO_NUCLEUS_SCHED_H */