/*
 * Xenomai API 2.6.5 — nucleus/sched-rt.h
 * (Text extract of the generated documentation for this file.)
 */
1 
23 #ifndef _XENO_NUCLEUS_SCHED_RT_H
24 #define _XENO_NUCLEUS_SCHED_RT_H
25 
26 #ifndef _XENO_NUCLEUS_SCHED_H
27 #error "please don't include nucleus/sched-rt.h directly"
28 #endif
29 
/* Priority scale for the RT scheduling class. */
31 #define XNSCHED_RT_MIN_PRIO 0
32 #define XNSCHED_RT_MAX_PRIO 257
33 #define XNSCHED_RT_NR_PRIO (XNSCHED_RT_MAX_PRIO - XNSCHED_RT_MIN_PRIO + 1)
34 
/*
 * Builtin priorities shared by all core APIs. Those APIs, namely
 * POSIX, native and RTDM, only use a sub-range of the available
 * priority levels from the RT scheduling class, in order to exhibit a
 * 1:1 mapping with Linux's SCHED_FIFO ascending priority scale
 * [1..99]. Non-core APIs with inverted priority scales (e.g. VxWorks,
 * VRTX), normalize the priority values internally when calling the
 * priority-sensitive services of the nucleus, so that they fit into
 * the RT priority scale.
 */
45 #define XNSCHED_LOW_PRIO 0
46 #define XNSCHED_HIGH_PRIO 99
47 #define XNSCHED_IRQ_PRIO XNSCHED_RT_MAX_PRIO /* For IRQ servers. */
48 
49 #if defined(__KERNEL__) || defined(__XENO_SIM__)
50 
51 #if XNSCHED_RT_NR_PRIO > XNSCHED_CLASS_MAX_PRIO || \
52  (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) && \
53  XNSCHED_RT_NR_PRIO > XNSCHED_MLQ_LEVELS)
54 #error "RT class has too many priority levels"
55 #endif
56 
57 extern struct xnsched_class xnsched_class_rt;
58 
59 extern struct xnsched_class xnsched_class_idle;
60 
61 #define xnsched_class_default xnsched_class_rt
62 
63 static inline void __xnsched_rt_requeue(struct xnthread *thread)
64 {
65  sched_insertpql(&thread->sched->rt.runnable,
66  &thread->rlink, thread->cprio);
67 }
68 
69 static inline void __xnsched_rt_enqueue(struct xnthread *thread)
70 {
71  sched_insertpqf(&thread->sched->rt.runnable,
72  &thread->rlink, thread->cprio);
73 }
74 
75 static inline void __xnsched_rt_dequeue(struct xnthread *thread)
76 {
77  sched_removepq(&thread->sched->rt.runnable, &thread->rlink);
78 }
79 
80 static inline struct xnthread *__xnsched_rt_pick(struct xnsched *sched)
81 {
82  struct xnpholder *h = sched_getpq(&sched->rt.runnable);
83  return h ? link2thread(h, rlink) : NULL;
84 }
85 
86 static inline void __xnsched_rt_setparam(struct xnthread *thread,
87  const union xnsched_policy_param *p)
88 {
89  thread->cprio = p->rt.prio;
90  if (xnthread_test_state(thread, XNSHADOW | XNBOOST) == XNSHADOW) {
91  if (thread->cprio)
92  xnthread_clear_state(thread, XNOTHER);
93  else
94  xnthread_set_state(thread, XNOTHER);
95  }
96 }
97 
/*
 * Report the RT policy parameters of @thread: copy out the current
 * priority (cprio, which may differ from the base priority while
 * boosted) into the caller-supplied parameter union.
 */
static inline void __xnsched_rt_getparam(struct xnthread *thread,
					 union xnsched_policy_param *p)
{
	p->rt.prio = thread->cprio;
}
103 
104 static inline void __xnsched_rt_trackprio(struct xnthread *thread,
105  const union xnsched_policy_param *p)
106 {
107  if (p)
108  __xnsched_rt_setparam(thread, p);
109  else
110  thread->cprio = thread->bprio;
111 }
112 
/*
 * Per-class "forget" hook for the RT class. Intentionally a no-op:
 * this class keeps no per-thread bookkeeping to release here.
 */
static inline void __xnsched_rt_forget(struct xnthread *thread)
{
}
116 
/*
 * Per-class TCB initialization hook for the RT class. There is no
 * RT-specific state to set up, so this always reports success (0).
 */
static inline int xnsched_rt_init_tcb(struct xnthread *thread)
{
	return 0;
}
121 
122 void xnsched_rt_tick(struct xnthread *curr);
123 
124 #ifdef CONFIG_XENO_OPT_PRIOCPL
125 
126 static inline struct xnthread *__xnsched_rt_push_rpi(struct xnsched *sched,
127  struct xnthread *thread)
128 {
129  sched_insertpqf(&sched->rt.relaxed, &thread->xlink, thread->cprio);
130  return link2thread(sched_getheadpq(&sched->rt.relaxed), xlink);
131 }
132 
133 static inline void __xnsched_rt_pop_rpi(struct xnthread *thread)
134 {
135  struct xnsched *sched = thread->rpi;
136  sched_removepq(&sched->rt.relaxed, &thread->xlink);
137 }
138 
139 static inline struct xnthread *__xnsched_rt_peek_rpi(struct xnsched *sched)
140 {
141  struct xnpholder *h = sched_getheadpq(&sched->rt.relaxed);
142  return h ? link2thread(h, xlink) : NULL;
143 }
144 
/*
 * RPI suspend hook for the RT class. Intentionally empty: no
 * class-specific action is needed when a relaxed thread suspends.
 */
static inline void __xnsched_rt_suspend_rpi(struct xnthread *thread)
{
}
148 
/*
 * RPI resume hook for the RT class. Intentionally empty: no
 * class-specific action is needed when a relaxed thread resumes.
 */
static inline void __xnsched_rt_resume_rpi(struct xnthread *thread)
{
}
152 
153 #endif /* CONFIG_XENO_OPT_PRIOCPL */
154 
155 #endif /* __KERNEL__ || __XENO_SIM__ */
156 
157 #endif /* !_XENO_NUCLEUS_SCHED_RT_H */
/*
 * Doxygen cross-reference residue, preserved as notes:
 *  XNOTHER  — Non real-time shadow (prio=0); defined in thread.h:65
 *  XNSHADOW — Shadow thread; defined in thread.h:63
 *  XNBOOST  — Undergoes a PIP boost; defined in thread.h:48
 *  struct xnsched — Scheduling information structure; defined in sched.h:66
 *  struct xnsched_rt rt — member of struct xnsched; defined in sched.h:76
 */