Xenomai 3.0.15
sched.h
/*
 * Copyright (C) 2008 Philippe Gerum <[email protected]>.
 *
 * Xenomai is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * Xenomai is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xenomai; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#ifndef _COBALT_KERNEL_SCHED_H
#define _COBALT_KERNEL_SCHED_H

#include <linux/percpu.h>
#include <cobalt/kernel/lock.h>
#include <cobalt/kernel/thread.h>
#include <cobalt/kernel/schedqueue.h>
#include <cobalt/kernel/sched-tp.h>
#include <cobalt/kernel/sched-weak.h>
#include <cobalt/kernel/sched-sporadic.h>
#include <cobalt/kernel/sched-quota.h>
#include <cobalt/kernel/vfile.h>
#include <cobalt/kernel/assert.h>
#include <asm/xenomai/machine.h>

/* Sched status flags */
#define XNRESCHED	0x10000000	/* Needs rescheduling */
#define XNINSW		0x20000000	/* In context switch */
#define XNINTCK	0x40000000	/* In master tick handler context */

/* Sched local flags */
#define XNIDLE		0x00010000	/* Idle (no outstanding timer) */
#define XNHTICK	0x00008000	/* Host tick pending */
#define XNINIRQ	0x00004000	/* In IRQ handling context */
#define XNHDEFER	0x00002000	/* Host tick deferred */

struct xnsched_rt {
	xnsched_queue_t runnable;	/* Runnable thread queue. */
};

/* Scheduling information structure, one instance per CPU. */
struct xnsched {
	/* Scheduler-specific status bitmask. */
	unsigned long status;
	/* Scheduler-specific local (per-CPU private) flags bitmask. */
	unsigned long lflags;
	/* Current thread. */
	struct xnthread *curr;
#ifdef CONFIG_SMP
	/* CPU this scheduler instance runs on. */
	int cpu;
	/* Mask of CPUs needing rescheduling. */
	cpumask_t resched;
#endif
	/* Context of the built-in real-time class. */
	struct xnsched_rt rt;
#ifdef CONFIG_XENO_OPT_SCHED_WEAK
	/* Context of the weak scheduling class. */
	struct xnsched_weak weak;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_TP
	/* Context of the temporal partitioning (TP) class. */
	struct xnsched_tp tp;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	/* Context of the sporadic scheduling class. */
	struct xnsched_sporadic pss;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	/* Context of the runtime quota class. */
	struct xnsched_quota quota;
#endif
	/* Interrupt nesting level. */
	volatile unsigned inesting;
	/* Host timer. */
	struct xntimer htimer;
	/* Round-robin timer. */
	struct xntimer rrbtimer;
	/* Root thread control block. */
	struct xnthread rootcb;
#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
	struct xnthread *last;
#endif
#ifdef CONFIG_XENO_ARCH_FPU
	/* Thread owning the current FPU context. */
	struct xnthread *fpuholder;
#endif
#ifdef CONFIG_XENO_OPT_WATCHDOG
	/* Watchdog timer. */
	struct xntimer wdtimer;
	/* Watchdog tick count. */
	int wdcount;
#endif
#ifdef CONFIG_XENO_OPT_STATS
	/* Last account switch date (ticks). */
	xnticks_t last_account_switch;
	/* Currently active execution time account. */
	xnstat_exectime_t *current_account;
#endif
};

DECLARE_PER_CPU(struct xnsched, nksched);

extern cpumask_t cobalt_cpu_affinity;

extern struct list_head nkthreadq;

extern int cobalt_nrthreads;

#ifdef CONFIG_XENO_OPT_VFILE
extern struct xnvfile_rev_tag nkthreadlist_tag;
#endif

union xnsched_policy_param;

struct xnsched_class {
	void (*sched_init)(struct xnsched *sched);
	void (*sched_enqueue)(struct xnthread *thread);
	void (*sched_dequeue)(struct xnthread *thread);
	void (*sched_requeue)(struct xnthread *thread);
	struct xnthread *(*sched_pick)(struct xnsched *sched);
	void (*sched_tick)(struct xnsched *sched);
	void (*sched_rotate)(struct xnsched *sched,
			     const union xnsched_policy_param *p);
	void (*sched_migrate)(struct xnthread *thread,
			      struct xnsched *sched);
	int (*sched_chkparam)(struct xnthread *thread,
			      const union xnsched_policy_param *p);
	void (*sched_setparam)(struct xnthread *thread,
			       const union xnsched_policy_param *p);
	void (*sched_getparam)(struct xnthread *thread,
			       union xnsched_policy_param *p);
	void (*sched_trackprio)(struct xnthread *thread,
				const union xnsched_policy_param *p);
	int (*sched_declare)(struct xnthread *thread,
			     const union xnsched_policy_param *p);
	void (*sched_forget)(struct xnthread *thread);
	void (*sched_kick)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_VFILE
	int (*sched_init_vfile)(struct xnsched_class *schedclass,
				struct xnvfile_directory *vfroot);
	void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif
	int nthreads;
	struct xnsched_class *next;
	int weight;
	int policy;
	const char *name;
};

#define XNSCHED_CLASS_WEIGHT(n)	(n * XNSCHED_CLASS_WEIGHT_FACTOR)

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO		0x80000000
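
/*
 * Illustrative sketch (not part of the original header): a scheduling
 * class is a filled-in struct xnsched_class chained on the class list.
 * The hook and field names below match the structure above, but
 * "xnsched_class_demo" and its demo_* handlers are hypothetical.
 *
 *	static struct xnthread *demo_pick(struct xnsched *sched);
 *	static void demo_enqueue(struct xnthread *thread);
 *	static void demo_dequeue(struct xnthread *thread);
 *	static void demo_requeue(struct xnthread *thread);
 *	static void demo_setparam(struct xnthread *thread,
 *				  const union xnsched_policy_param *p);
 *	static void demo_getparam(struct xnthread *thread,
 *				  union xnsched_policy_param *p);
 *	static void demo_trackprio(struct xnthread *thread,
 *				   const union xnsched_policy_param *p);
 *
 *	struct xnsched_class xnsched_class_demo = {
 *		.sched_pick	 = demo_pick,
 *		.sched_enqueue	 = demo_enqueue,
 *		.sched_dequeue	 = demo_dequeue,
 *		.sched_requeue	 = demo_requeue,
 *		.sched_setparam	 = demo_setparam,
 *		.sched_getparam	 = demo_getparam,
 *		.sched_trackprio = demo_trackprio,
 *		.weight		 = XNSCHED_CLASS_WEIGHT(1),
 *		.name		 = "demo",
 *	};
 */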

#define xnsched_for_each_thread(__thread)	\
	list_for_each_entry(__thread, &nkthreadq, glink)
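
/*
 * Usage sketch (illustrative only): the global thread queue is
 * normally traversed with nklock held and hard IRQs off, e.g.:
 *
 *	struct xnthread *thread;
 *	spl_t s;
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	xnsched_for_each_thread(thread) {
 *		// inspect thread state, statistics, ...
 *	}
 *	xnlock_put_irqrestore(&nklock, s);
 */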

#ifdef CONFIG_SMP
static inline int xnsched_cpu(struct xnsched *sched)
{
	return sched->cpu;
}
#else /* !CONFIG_SMP */
static inline int xnsched_cpu(struct xnsched *sched)
{
	return 0;
}
#endif /* CONFIG_SMP */

static inline struct xnsched *xnsched_struct(int cpu)
{
	return &per_cpu(nksched, cpu);
}

static inline struct xnsched *xnsched_current(void)
{
	/* IRQs off */
	return raw_cpu_ptr(&nksched);
}

static inline struct xnthread *xnsched_current_thread(void)
{
	return xnsched_current()->curr;
}
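
/*
 * Illustrative only: as the "IRQs off" note above implies,
 * xnsched_current() and xnsched_current_thread() are meant to be
 * called with hard IRQs disabled (e.g. under nklock), so that the
 * caller cannot migrate to another CPU while using the result:
 *
 *	struct xnthread *curr;
 *	spl_t s;
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	curr = xnsched_current_thread();
 *	// ... use curr ...
 *	xnlock_put_irqrestore(&nklock, s);
 */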

/* Test resched flag of given sched. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
	return sched->status & XNRESCHED;
}

/* Set self resched flag for the current scheduler. */
static inline void xnsched_set_self_resched(struct xnsched *sched)
{
	sched->status |= XNRESCHED;
}

#define xnsched_realtime_domain  cobalt_pipeline.domain

/* Set resched flag for the given scheduler. */
#ifdef CONFIG_SMP

static inline void xnsched_set_resched(struct xnsched *sched)
{
	struct xnsched *current_sched = xnsched_current();

	if (current_sched == sched)
		current_sched->status |= XNRESCHED;
	else if (!xnsched_resched_p(sched)) {
		cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
		sched->status |= XNRESCHED;
		current_sched->status |= XNRESCHED;
	}
}

#define xnsched_realtime_cpus  cobalt_pipeline.supported_cpus

static inline int xnsched_supported_cpu(int cpu)
{
	return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
}

static inline int xnsched_threading_cpu(int cpu)
{
	return cpumask_test_cpu(cpu, &cobalt_cpu_affinity);
}

#else /* !CONFIG_SMP */

static inline void xnsched_set_resched(struct xnsched *sched)
{
	xnsched_set_self_resched(sched);
}

#define xnsched_realtime_cpus  CPU_MASK_ALL

static inline int xnsched_supported_cpu(int cpu)
{
	return 1;
}

static inline int xnsched_threading_cpu(int cpu)
{
	return 1;
}

#endif /* !CONFIG_SMP */

#define for_each_realtime_cpu(cpu)		\
	for_each_online_cpu(cpu)		\
		if (xnsched_supported_cpu(cpu))	\

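
/*
 * Usage sketch (illustrative only): for_each_realtime_cpu() expands
 * into a loop header, so it is used like for_each_online_cpu():
 *
 *	int cpu;
 *
 *	for_each_realtime_cpu(cpu) {
 *		struct xnsched *sched = xnsched_struct(cpu);
 *		// ... per-CPU work on sched ...
 *	}
 */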
int ___xnsched_run(struct xnsched *sched);

void __xnsched_run_handler(void);

static inline int __xnsched_run(struct xnsched *sched)
{
	/*
	 * Reschedule if XNRESCHED is pending, but never over an IRQ
	 * handler or in the middle of unlocked context switch.
	 */
	if (((sched->status|sched->lflags) &
	     (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
		return 0;

	return ___xnsched_run(sched);
}

/* The rescheduling procedure. */
static inline int xnsched_run(void)
{
	struct xnsched *sched = xnsched_current();
	/*
	 * sched->curr is shared locklessly with ___xnsched_run().
	 * READ_ONCE() makes sure the compiler never uses load tearing
	 * for reading this pointer piecemeal, so that multiple stores
	 * occurring concurrently on remote CPUs never yield a
	 * spurious merged value on the local one.
	 */
	struct xnthread *curr = READ_ONCE(sched->curr);

	/*
	 * If running over the root thread, hard irqs must be off
	 * (asserted out of line in ___xnsched_run()).
	 */
	return curr->lock_count > 0 ? 0 : __xnsched_run(sched);
}
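
/*
 * Usage sketch (illustrative only): the typical pattern is to change
 * thread or scheduler state under nklock, then call xnsched_run() to
 * fold any pending XNRESCHED request, e.g.:
 *
 *	spl_t s;
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	xnthread_resume(thread, XNDELAY);
 *	xnsched_run();
 *	xnlock_put_irqrestore(&nklock, s);
 */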

void xnsched_lock(void);

void xnsched_unlock(void);

static inline int xnsched_interrupt_p(void)
{
	return xnsched_current()->lflags & XNINIRQ;
}

static inline int xnsched_root_p(void)
{
	return xnthread_test_state(xnsched_current_thread(), XNROOT);
}

static inline int xnsched_unblockable_p(void)
{
	return xnsched_interrupt_p() || xnsched_root_p();
}

static inline int xnsched_primary_p(void)
{
	return !xnsched_unblockable_p();
}
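
/*
 * Illustrative only: these predicates are commonly used to guard
 * services which may block the caller, since neither an interrupt
 * context nor the root (Linux) thread may sleep on a Cobalt
 * synchronization object:
 *
 *	if (xnsched_unblockable_p())
 *		return -EPERM;
 *
 *	// ... safe to suspend the current thread from here on ...
 */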

#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch()  xnsched_run()

static inline
int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return sched->status & XNRESCHED;
}

#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */

static inline struct xnsched *
xnsched_finish_unlocked_switch(struct xnsched *sched)
{
	XENO_BUG_ON(COBALT, !hard_irqs_disabled());
	return xnsched_current();
}

static inline void xnsched_resched_after_unlocked_switch(void) { }

static inline int
xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return 0;
}

#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */

#ifdef CONFIG_XENO_OPT_WATCHDOG
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
	sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */

#include <cobalt/kernel/sched-idle.h>
#include <cobalt/kernel/sched-rt.h>

int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init(struct xnsched *sched, int cpu);

void xnsched_destroy(struct xnsched *sched);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
		       struct xnsched_class *sched_class,
		       const union xnsched_policy_param *p);
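
/*
 * Usage sketch (illustrative only; the priority value is arbitrary):
 * moving a thread to the built-in real-time class typically looks
 * like this, with nklock held:
 *
 *	union xnsched_policy_param param;
 *	int ret;
 *
 *	param.rt.prio = 50;
 *	ret = xnsched_set_policy(thread, &xnsched_class_rt, &param);
 *	if (ret)
 *		// handle invalid parameter or class rejection
 *	xnsched_run();
 */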

void xnsched_track_policy(struct xnthread *thread,
			  struct xnthread *target);

void xnsched_migrate(struct xnthread *thread,
		     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
			     struct xnsched *sched);

/* Rotate a scheduler runqueue. */
static inline void xnsched_rotate(struct xnsched *sched,
				  struct xnsched_class *sched_class,
				  const union xnsched_policy_param *sched_param)
{
	sched_class->sched_rotate(sched, sched_param);
}

static inline int xnsched_init_thread(struct xnthread *thread)
{
	int ret = 0;

	xnsched_idle_init_thread(thread);
	xnsched_rt_init_thread(thread);

#ifdef CONFIG_XENO_OPT_SCHED_TP
	ret = xnsched_tp_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	ret = xnsched_sporadic_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	ret = xnsched_quota_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */

	return ret;
}

static inline int xnsched_root_priority(struct xnsched *sched)
{
	return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
	return sched->rootcb.sched_class;
}

static inline void xnsched_tick(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;
	struct xnsched_class *sched_class = curr->sched_class;
	/*
	 * A thread that undergoes round-robin scheduling only
	 * consumes its time slice when it runs within its own
	 * scheduling class, which excludes temporary PIP boosts, and
	 * does not hold the scheduler lock.
	 */
	if (sched_class == curr->base_class &&
	    sched_class->sched_tick &&
	    xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
	    curr->lock_count == 0)
		sched_class->sched_tick(sched);
}

static inline int xnsched_chkparam(struct xnsched_class *sched_class,
				   struct xnthread *thread,
				   const union xnsched_policy_param *p)
{
	if (sched_class->sched_chkparam)
		return sched_class->sched_chkparam(thread, p);

	return 0;
}

static inline int xnsched_declare(struct xnsched_class *sched_class,
				  struct xnthread *thread,
				  const union xnsched_policy_param *p)
{
	int ret;

	if (sched_class->sched_declare) {
		ret = sched_class->sched_declare(thread, p);
		if (ret)
			return ret;
	}
	if (sched_class != thread->base_class)
		sched_class->nthreads++;

	return 0;
}
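
/*
 * Illustrative only, assuming the usual policy-change sequence
 * (xnsched_set_policy() in sched.c works roughly along these lines):
 * the new parameters are validated first, the target class is then
 * declared for the thread, and only afterwards are the parameters
 * actually applied.
 *
 *	ret = xnsched_chkparam(sched_class, thread, p);
 *	if (ret)
 *		return ret;
 *
 *	ret = xnsched_declare(sched_class, thread, p);
 *	if (ret)
 *		return ret;
 *
 *	xnsched_setparam(thread, p);
 */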

#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_requeue(thread);
}

static inline void xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	thread->sched_class->sched_setparam(thread, p);
	thread->wprio = thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	thread->sched_class->sched_trackprio(thread, p);
	thread->wprio = thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_forget(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	--sched_class->nthreads;

	if (sched_class->sched_forget)
		sched_class->sched_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	xnthread_set_info(thread, XNKICKED);

	if (sched_class->sched_kick)
		sched_class->sched_kick(thread);

	xnsched_set_resched(thread->sched);
}

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

/*
 * If only the RT and IDLE scheduling classes are compiled in, we can
 * fully inline common helpers for dealing with those.
 */

static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_requeue(thread);
}

static inline void xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_setparam(thread, p);
	else
		__xnsched_idle_setparam(thread, p);

	thread->wprio = thread->cprio + sched_class->weight;
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_getparam(thread, p);
	else
		__xnsched_idle_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_trackprio(thread, p);
	else
		__xnsched_idle_trackprio(thread, p);

	thread->wprio = thread->cprio + sched_class->weight;
}

static inline void xnsched_forget(struct xnthread *thread)
{
	--thread->base_class->nthreads;
	__xnsched_rt_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
	xnthread_set_info(thread, XNKICKED);
	xnsched_set_resched(thread->sched);
}

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */

#endif /* !_COBALT_KERNEL_SCHED_H */