Xenomai API  2.6.5
pod.h
Go to the documentation of this file.
1 
28 #ifndef _XENO_NUCLEUS_POD_H
29 #define _XENO_NUCLEUS_POD_H
30 
34 #include <nucleus/sched.h>
35 
36 /* Pod status flags */
37 #define XNFATAL 0x00000001 /* Fatal error in progress */
38 #define XNPEXEC 0x00000002 /* Pod is active (a skin is attached) */
39 
40 /* These flags are available to the real-time interfaces */
41 #define XNPOD_SPARE0 0x01000000
42 #define XNPOD_SPARE1 0x02000000
43 #define XNPOD_SPARE2 0x04000000
44 #define XNPOD_SPARE3 0x08000000
45 #define XNPOD_SPARE4 0x10000000
46 #define XNPOD_SPARE5 0x20000000
47 #define XNPOD_SPARE6 0x40000000
48 #define XNPOD_SPARE7 0x80000000
49 
50 #define XNPOD_NORMAL_EXIT 0x0
51 #define XNPOD_FATAL_EXIT 0x1
52 
53 #define XNPOD_ALL_CPUS XNARCH_CPU_MASK_ALL
54 
55 #define XNPOD_FATAL_BUFSZ 16384
56 
57 #define nkpod (&nkpod_struct)
58 
59 struct xnsynch;
60 
/*
 * Real-time pod descriptor: top-level nucleus state shared by all
 * CPUs managed by Xenomai (single instance: nkpod_struct, aliased
 * by the nkpod macro above).
 */
struct xnpod {

	xnflags_t status;	/* Pod status bitmask (XNFATAL, XNPEXEC). */

	xnsched_t sched[XNARCH_NR_CPUS]; /* One scheduler slot per CPU. */

	xnqueue_t threadq;	/* Queue of all existing threads. */
#ifdef CONFIG_XENO_OPT_VFILE
	struct xnvfile_rev_tag threadlist_tag; /* Revision tag for the thread-list vfile snapshot. */
#endif
	xnqueue_t tstartq,	/* Thread start hook queue. */
		tswitchq,	/* Thread switch hook queue. */
		tdeleteq;	/* Thread delete hook queue. */

	atomic_counter_t timerlck; /* Timer lock count — presumably a nesting depth; confirm in timer code. */

	xntimer_t tslicer;	/* Time-slicing (round-robin) timer. */
	int tsliced;		/* Count of time-sliced threads — TODO confirm against pod.c. */

	int refcnt;		/* Pod reference count — presumably one per attached skin; confirm. */

#ifdef __XENO_SIM__
	void (*schedhook) (xnthread_t *thread, xnflags_t mask); /* Simulator-only scheduling hook. */
#endif /* __XENO_SIM__ */
};
92 
93 typedef struct xnpod xnpod_t;
94 
95 DECLARE_EXTERN_XNLOCK(nklock);
96 
97 extern u_long nklatency;
98 
99 extern u_long nktimerlat;
100 
101 extern xnarch_cpumask_t nkaffinity;
102 
103 extern xnpod_t nkpod_struct;
104 
#ifdef CONFIG_XENO_OPT_VFILE
int xnpod_init_proc(void);
void xnpod_cleanup_proc(void);
#else /* !CONFIG_XENO_OPT_VFILE */
/* No vfile support compiled in: provide no-op stubs so that callers
   (e.g. xnpod_mount/xnpod_umount below) need no #ifdefs. */
static inline int xnpod_init_proc(void) { return 0; }
static inline void xnpod_cleanup_proc(void) {}
#endif /* !CONFIG_XENO_OPT_VFILE */
112 
/*
 * Mount-time initialization: register the scheduling classes, then
 * create the vfile/proc support. Returns the xnpod_init_proc()
 * status (0 on success).
 */
static inline int xnpod_mount(void)
{
	int ret;

	xnsched_register_classes();
	ret = xnpod_init_proc();

	return ret;
}
118 
/* Unmount-time cleanup: tear down the vfile/proc support created by
   xnpod_mount(). */
static inline void xnpod_umount(void)
{
	xnpod_cleanup_proc();
}
123 
124 #ifdef __cplusplus
125 extern "C" {
126 #endif
127 
128 int __xnpod_set_thread_schedparam(struct xnthread *thread,
129  struct xnsched_class *sched_class,
130  const union xnsched_policy_param *sched_param,
131  int propagate);
132 
133 #ifdef CONFIG_XENO_HW_FPU
134 void xnpod_switch_fpu(xnsched_t *sched);
135 #endif /* CONFIG_XENO_HW_FPU */
136 
137 void __xnpod_schedule(struct xnsched *sched);
138 
139  /* -- Beginning of the exported interface */
140 
/* Scheduler slot (xnsched_t) assigned to the given CPU number. */
#define xnpod_sched_slot(cpu) \
	(&nkpod->sched[cpu])

/* Scheduler slot of the CPU this code is currently running on. */
#define xnpod_current_sched() \
	xnpod_sched_slot(xnarch_current_cpu())

/* Non-zero once the pod is active, i.e. a skin is attached (XNPEXEC). */
#define xnpod_active_p() \
	testbits(nkpod->status, XNPEXEC)

/* Non-zero while a fatal error is in progress (XNFATAL). */
#define xnpod_fatal_p() \
	testbits(nkpod->status, XNFATAL)

/* Non-zero when called from interrupt context (XNINIRQ in lflags). */
#define xnpod_interrupt_p() \
	testbits(xnpod_current_sched()->lflags, XNINIRQ)

/* Non-zero while a nucleus callout (hook) is running (XNKCOUT). */
#define xnpod_callout_p() \
	testbits(xnpod_current_sched()->status, XNKCOUT)

/* Non-zero in any asynchronous context: ISR or callout. */
#define xnpod_asynch_p() \
	({ \
		xnsched_t *sched = xnpod_current_sched(); \
		testbits(sched->status | sched->lflags, XNKCOUT|XNINIRQ); \
	})

/* Thread currently running on this CPU's scheduler slot. */
#define xnpod_current_thread() \
	(xnpod_current_sched()->curr)

/* Root thread control block of the current CPU. */
#define xnpod_current_root() \
	(&xnpod_current_sched()->rootcb)

#ifdef CONFIG_XENO_OPT_PERVASIVE
/*
 * Is "thread" the current one? A shadow (userland-mapped) thread is
 * compared against the shadow TCB of the current Linux task; a
 * kernel-based thread is compared against the scheduler's current
 * thread.
 */
#define xnpod_current_p(thread) \
	({ int __shadow_p = xnthread_test_state(thread, XNSHADOW); \
	   int __curr_p = __shadow_p ? xnshadow_thread(current) == thread \
		   : thread == xnpod_current_thread(); \
	   __curr_p;})
#else
#define xnpod_current_p(thread) \
	(xnpod_current_thread() == (thread))
#endif

/* Non-zero when the current thread holds the scheduler lock (XNLOCK). */
#define xnpod_locked_p() \
	xnthread_test_state(xnpod_current_thread(), XNLOCK)

/* Non-zero when blocking is not an option: async context or root thread. */
#define xnpod_unblockable_p() \
	(xnpod_asynch_p() || xnthread_test_state(xnpod_current_thread(), XNROOT))

/* Non-zero when the root (Linux) thread is current. */
#define xnpod_root_p() \
	xnthread_test_state(xnpod_current_thread(),XNROOT)

/* Non-zero when the current thread is a shadow (userland-mapped) one. */
#define xnpod_shadow_p() \
	xnthread_test_state(xnpod_current_thread(),XNSHADOW)

/* Non-zero when current is backed by a userspace context (root or shadow). */
#define xnpod_userspace_p() \
	xnthread_test_state(xnpod_current_thread(),XNROOT|XNSHADOW)

/* Non-zero when running a real-time thread in primary mode
   (neither asynchronous context nor the root thread). */
#define xnpod_primary_p() \
	(!(xnpod_asynch_p() || xnpod_root_p()))

/* Secondary mode == running over the root thread. */
#define xnpod_secondary_p() xnpod_root_p()

/* The pod is idle when the root thread is current. */
#define xnpod_idle_p() xnpod_root_p()
203 
204 int xnpod_init(void);
205 
206 int xnpod_enable_timesource(void);
207 
208 void xnpod_disable_timesource(void);
209 
210 void xnpod_shutdown(int xtype);
211 
212 int xnpod_init_thread(struct xnthread *thread,
213  const struct xnthread_init_attr *attr,
214  struct xnsched_class *sched_class,
215  const union xnsched_policy_param *sched_param);
216 
217 int xnpod_start_thread(xnthread_t *thread,
218  const struct xnthread_start_attr *attr);
219 
220 void xnpod_stop_thread(xnthread_t *thread);
221 
222 void xnpod_restart_thread(xnthread_t *thread);
223 
224 void xnpod_delete_thread(xnthread_t *thread);
225 
226 void xnpod_abort_thread(xnthread_t *thread);
227 
228 xnflags_t xnpod_set_thread_mode(xnthread_t *thread,
229  xnflags_t clrmask,
230  xnflags_t setmask);
231 
232 void xnpod_suspend_thread(xnthread_t *thread,
233  xnflags_t mask,
234  xnticks_t timeout,
235  xntmode_t timeout_mode,
236  struct xnsynch *wchan);
237 
238 void xnpod_resume_thread(xnthread_t *thread,
239  xnflags_t mask);
240 
241 int xnpod_unblock_thread(xnthread_t *thread);
242 
243 int xnpod_set_thread_schedparam(struct xnthread *thread,
244  struct xnsched_class *sched_class,
245  const union xnsched_policy_param *sched_param);
246 
247 int xnpod_migrate_thread(int cpu);
248 
249 void xnpod_dispatch_signals(void);
250 
251 static inline void xnpod_schedule(void)
252 {
253  struct xnsched *sched;
254  /*
255  * NOTE: Since __xnpod_schedule() won't run if an escalation
256  * to primary domain is needed, we won't use critical
257  * scheduler information before we actually run in primary
258  * mode; therefore we can first test the scheduler status then
259  * escalate. Running in the primary domain means that no
260  * Linux-triggered CPU migration may occur from that point
261  * either. Finally, since migration is always a self-directed
262  * operation for Xenomai threads, we can safely read the
263  * scheduler state bits without holding the nklock.
264  *
265  * Said differently, if we race here because of a CPU
266  * migration, it must have been Linux-triggered because we run
267  * in secondary mode; in which case we will escalate to the
268  * primary domain, then unwind the current call frame without
269  * running the rescheduling procedure in
270  * __xnpod_schedule(). Therefore, the scheduler pointer will
271  * be either valid, or unused.
272  */
273  sched = xnpod_current_sched();
274  /*
275  * No immediate rescheduling is possible if an ISR or callout
276  * context is active, or if we are caught in the middle of a
277  * unlocked context switch.
278  */
279 #if XENO_DEBUG(NUCLEUS)
280  if (testbits(sched->status | sched->lflags,
282  return;
283 #else /* !XENO_DEBUG(NUCLEUS) */
284  if (testbits(sched->status | sched->lflags,
286  return;
287 #endif /* !XENO_DEBUG(NUCLEUS) */
288 
289  __xnpod_schedule(sched);
290 }
291 
292 void ___xnpod_lock_sched(xnsched_t *sched);
293 
294 void ___xnpod_unlock_sched(xnsched_t *sched);
295 
/* Lock the scheduler of the current CPU; the compiler barrier keeps
   the CPU lookup from being hoisted across preceding code. */
static inline void __xnpod_lock_sched(void)
{
	barrier();
	___xnpod_lock_sched(xnpod_current_sched());
}
304 
/* Unlock the scheduler of the current CPU; counterpart of
   __xnpod_lock_sched(), same barrier rationale. */
static inline void __xnpod_unlock_sched(void)
{
	barrier();
	___xnpod_unlock_sched(xnpod_current_sched());
}
313 
314 static inline void xnpod_lock_sched(void)
315 {
316  xnsched_t *sched;
317  spl_t s;
318 
319  xnlock_get_irqsave(&nklock, s);
320  sched = xnpod_current_sched();
321  ___xnpod_lock_sched(sched);
322  xnlock_put_irqrestore(&nklock, s);
323 }
324 
325 static inline void xnpod_unlock_sched(void)
326 {
327  xnsched_t *sched;
328  spl_t s;
329 
330  xnlock_get_irqsave(&nklock, s);
331  sched = xnpod_current_sched();
332  ___xnpod_unlock_sched(sched);
333  xnlock_put_irqrestore(&nklock, s);
334 }
335 
336 void xnpod_fire_callouts(xnqueue_t *hookq,
337  xnthread_t *thread);
338 
339 static inline void xnpod_run_hooks(struct xnqueue *q,
340  struct xnthread *thread, const char *type)
341 {
342  if (!emptyq_p(q)) {
343  trace_mark(xn_nucleus, thread_callout,
344  "thread %p thread_name %s hook %s",
345  thread, xnthread_name(thread), type);
346  xnpod_fire_callouts(q, thread);
347  }
348 }
349 
350 int xnpod_set_thread_periodic(xnthread_t *thread,
351  xnticks_t idate,
352  xnticks_t period);
353 
354 int xnpod_wait_thread_period(unsigned long *overruns_r);
355 
356 int xnpod_set_thread_tslice(struct xnthread *thread,
357  xnticks_t quantum);
358 
/* Thin wrapper returning the architecture-level CPU time
   (presumably in nanoseconds — confirm against xnarch docs). */
static inline xntime_t xnpod_get_cpu_time(void)
{
	return xnarch_get_cpu_time();
}
363 
364 int xnpod_add_hook(int type, void (*routine) (xnthread_t *));
365 
366 int xnpod_remove_hook(int type, void (*routine) (xnthread_t *));
367 
368 static inline void xnpod_yield(void)
369 {
370  xnpod_resume_thread(xnpod_current_thread(), 0);
371  xnpod_schedule();
372 }
373 
374 static inline void xnpod_delay(xnticks_t timeout)
375 {
376  xnpod_suspend_thread(xnpod_current_thread(), XNDELAY, timeout, XN_RELATIVE, NULL);
377 }
378 
379 static inline void xnpod_suspend_self(void)
380 {
381  xnpod_suspend_thread(xnpod_current_thread(), XNSUSP, XN_INFINITE, XN_RELATIVE, NULL);
382 }
383 
384 static inline void xnpod_delete_self(void)
385 {
386  xnpod_delete_thread(xnpod_current_thread());
387 }
388 
389 #ifdef __cplusplus
390 }
391 #endif
392 
395 #endif /* !_XENO_NUCLEUS_POD_H */
Real-time pod descriptor.
Definition: pod.h:67
void xnpod_shutdown(int xtype)
Shutdown the current pod.
Definition: pod.c:453
int xnpod_wait_thread_period(unsigned long *overruns_r)
Wait for the next periodic release point.
Definition: pod.c:2951
#define XNINSW
Definition: sched.h:43
xnsched_t sched[XNARCH_NR_CPUS]
Definition: pod.h:71
void xnpod_suspend_thread(xnthread_t *thread, xnflags_t mask, xnticks_t timeout, xntmode_t timeout_mode, struct xnsynch *wchan)
Suspend a thread.
Definition: pod.c:1335
#define XNINIRQ
Definition: sched.h:48
int xnpod_remove_hook(int type, void(*routine)(xnthread_t *))
Remove a nucleus hook.
Definition: pod.c:2466
int xnpod_set_thread_periodic(xnthread_t *thread, xnticks_t idate, xnticks_t period)
Make a thread periodic.
Definition: pod.c:2850
void xnpod_stop_thread(xnthread_t *thread)
Stop a thread.
Definition: pod.c:875
Snapshot revision tag.
Definition: vfile.h:490
Scheduler interface header.
xnqueue_t tdeleteq
Definition: pod.h:77
#define XNINLOCK
Definition: sched.h:50
int xnpod_unblock_thread(xnthread_t *thread)
Unblock a thread.
Definition: pod.c:1713
#define XNKCOUT
Definition: sched.h:41
int xnpod_enable_timesource(void)
Activate the core time source.
Definition: pod.c:2659
xnqueue_t threadq
Definition: pod.h:73
int xnpod_set_thread_schedparam(struct xnthread *thread, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
Change the base scheduling parameters of a thread.
Definition: pod.c:1817
xnflags_t status
Definition: pod.h:69
atomic_counter_t timerlck
Definition: pod.h:81
xnflags_t lflags
Definition: sched.h:69
int xnpod_init(void)
Initialize the core pod.
Definition: pod.c:331
int xnpod_set_thread_tslice(struct xnthread *thread, xnticks_t quantum)
Set thread time-slicing information.
Definition: pod.c:3048
Scheduling information structure.
Definition: sched.h:66
int xnpod_add_hook(int type, void(*routine)(xnthread_t *))
Install a nucleus hook.
Definition: pod.c:2395
int xnpod_start_thread(xnthread_t *thread, const struct xnthread_start_attr *attr)
Initial start of a newly created thread.
Definition: pod.c:728
static void xnpod_schedule(void)
Rescheduling procedure entry point.
Definition: pod.h:251
void xnpod_delete_thread(xnthread_t *thread)
Delete a thread.
Definition: pod.c:1070
xnqueue_t tswitchq
Definition: pod.h:77
void xnpod_abort_thread(xnthread_t *thread)
Abort a thread.
Definition: pod.c:1247
int xnpod_init_thread(struct xnthread *thread, const struct xnthread_init_attr *attr, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
Initialize a new thread.
Definition: pod.c:624
int xnpod_migrate_thread(int cpu)
Migrate the current thread.
Definition: pod.c:1915
#define XNRESCHED
Definition: sched.h:44
xnflags_t xnpod_set_thread_mode(xnthread_t *thread, xnflags_t clrmask, xnflags_t setmask)
Change a thread's control mode.
Definition: pod.c:1002
xnflags_t status
Definition: sched.h:68
xnqueue_t tstartq
Definition: pod.h:77
void xnpod_disable_timesource(void)
Stop the core time source.
Definition: pod.c:2764
void xnpod_restart_thread(xnthread_t *thread)
Restart a thread.
Definition: pod.c:927
int refcnt
Definition: pod.h:86
#define XNDELAY
Delayed.
Definition: thread.h:37
void xnpod_resume_thread(xnthread_t *thread, xnflags_t mask)
Resume a thread.
Definition: pod.c:1576
#define XNSUSP
Suspended.
Definition: thread.h:35
void xnpod_dispatch_signals(void)
Deliver pending asynchronous signals to the running thread.
Definition: pod.c:1981
int tsliced
Definition: pod.h:84
xntimer_t tslicer
Definition: pod.h:83