#ifndef _COBALT_KERNEL_SCHED_H
#define _COBALT_KERNEL_SCHED_H

#include <linux/percpu.h>
#include <cobalt/kernel/lock.h>
#include <cobalt/kernel/thread.h>
#include <cobalt/kernel/schedqueue.h>
#include <cobalt/kernel/sched-tp.h>
#include <cobalt/kernel/sched-weak.h>
#include <cobalt/kernel/sched-sporadic.h>
#include <cobalt/kernel/sched-quota.h>
#include <cobalt/kernel/vfile.h>
#include <cobalt/kernel/assert.h>
#include <asm/xenomai/machine.h>
/* Sched status flags */
#define XNRESCHED	0x10000000	/* Needs rescheduling */
#define XNINSW		0x20000000	/* In context switch */
#define XNINTCK		0x40000000	/* In master tick handler context */

/* Sched local flags */
#define XNIDLE		0x00010000	/* Idle (no outstanding timer) */
#define XNHTICK		0x00008000	/* Host tick pending */
#define XNINIRQ		0x00004000	/* In IRQ handling context */
#define XNHDEFER	0x00002000	/* Host tick deferred */
struct xnsched_rt {
	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
};
/*!
 * \brief Scheduling information structure.
 */
struct xnsched {
	/*!< Scheduler specific status bitmask. */
	unsigned long status;
	/*!< Scheduler specific local flags bitmask. */
	unsigned long lflags;
	/*!< Current thread. */
	struct xnthread *curr;
#ifdef CONFIG_SMP
	/*!< Owner CPU id. */
	int cpu;
	/*!< Mask of CPUs needing rescheduling. */
	cpumask_t resched;
#endif
	/*!< Context of built-in real-time class. */
	struct xnsched_rt rt;
#ifdef CONFIG_XENO_OPT_SCHED_WEAK
	/*!< Context of weak scheduling class. */
	struct xnsched_weak weak;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_TP
	/*!< Context of TP class. */
	struct xnsched_tp tp;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	/*!< Context of sporadic scheduling class. */
	struct xnsched_sporadic pss;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	/*!< Context of runtime quota scheduling. */
	struct xnsched_quota quota;
#endif
	/*!< Interrupt nesting level. */
	volatile unsigned inesting;
	/*!< Host timer. */
	struct xntimer htimer;
	/*!< Round-robin timer. */
	struct xntimer rrbtimer;
	/*!< Root thread control block. */
	struct xnthread rootcb;
#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
	/*!< Thread which last switched out, pending cleanup. */
	struct xnthread *last;
#endif
#ifdef CONFIG_XENO_ARCH_FPU
	/*!< Thread owning the current FPU context. */
	struct xnthread *fpuholder;
#endif
#ifdef CONFIG_XENO_OPT_WATCHDOG
	/*!< Watchdog timer object. */
	struct xntimer wdtimer;
	/*!< Watchdog tick count. */
	int wdcount;
#endif
#ifdef CONFIG_XENO_OPT_STATS
	/*!< Last account switch date (ticks). */
	xnticks_t last_account_switch;
	/*!< Currently active account */
	xnstat_exectime_t *current_account;
#endif
};
DECLARE_PER_CPU(struct xnsched, nksched);

extern cpumask_t cobalt_cpu_affinity;

extern struct list_head nkthreadq;

extern int cobalt_nrthreads;
#ifdef CONFIG_XENO_OPT_VFILE
extern struct xnvfile_rev_tag nkthreadlist_tag;
#endif
union xnsched_policy_param;
struct xnsched_class {
	void (*sched_init)(struct xnsched *sched);
	void (*sched_enqueue)(struct xnthread *thread);
	void (*sched_dequeue)(struct xnthread *thread);
	void (*sched_requeue)(struct xnthread *thread);
	struct xnthread *(*sched_pick)(struct xnsched *sched);
	void (*sched_tick)(struct xnsched *sched);
	void (*sched_rotate)(struct xnsched *sched,
			     const union xnsched_policy_param *p);
	void (*sched_migrate)(struct xnthread *thread,
			      struct xnsched *sched);
	int (*sched_chkparam)(struct xnthread *thread,
			      const union xnsched_policy_param *p);
	/* Set base scheduling parameters (see xnsched_set_policy()). */
	void (*sched_setparam)(struct xnthread *thread,
			       const union xnsched_policy_param *p);
	void (*sched_getparam)(struct xnthread *thread,
			       union xnsched_policy_param *p);
	void (*sched_trackprio)(struct xnthread *thread,
				const union xnsched_policy_param *p);
	int (*sched_declare)(struct xnthread *thread,
			     const union xnsched_policy_param *p);
	void (*sched_forget)(struct xnthread *thread);
	void (*sched_kick)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_VFILE
	int (*sched_init_vfile)(struct xnsched_class *schedclass,
				struct xnvfile_directory *vfroot);
	void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif
	int nthreads;
	struct xnsched_class *next;
	int weight;
	int policy;
	const char *name;
};
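/*
 * Illustration only, not part of the upstream header: a scheduling
 * class is a statically initialized instance of the descriptor above,
 * registered through xnsched_register_classes(). The names below
 * (demo_*, xnsched_class_demo) are hypothetical:
 *
 *	static struct xnsched_class xnsched_class_demo = {
 *		.sched_init	= demo_init,
 *		.sched_enqueue	= demo_enqueue,
 *		.sched_dequeue	= demo_dequeue,
 *		.sched_requeue	= demo_requeue,
 *		.sched_pick	= demo_pick,
 *		.weight		= XNSCHED_CLASS_WEIGHT(1),
 *		.name		= "demo",
 *	};
 *
 * Optional handlers (sched_chkparam, sched_declare, sched_forget,
 * sched_kick, ...) may be left NULL; the inline wrappers further down
 * check for NULL before invoking them.
 */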
#define XNSCHED_CLASS_WEIGHT(n)	(n * XNSCHED_CLASS_WEIGHT_FACTOR)

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO		0x80000000

#define xnsched_for_each_thread(__thread)	\
	list_for_each_entry(__thread, &nkthreadq, glink)
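/*
 * Usage sketch (not from the original file): nkthreadq is shared
 * core state, so the global thread list should be walked with the
 * nklock held, e.g.:
 *
 *	struct xnthread *thread;
 *	int count = 0;
 *	spl_t s;
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	xnsched_for_each_thread(thread)
 *		count++;	// should match cobalt_nrthreads
 *	xnlock_put_irqrestore(&nklock, s);
 */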
#ifdef CONFIG_SMP
static inline int xnsched_cpu(struct xnsched *sched)
{
	return sched->cpu;
}
#else /* !CONFIG_SMP */
static inline int xnsched_cpu(struct xnsched *sched)
{
	return 0;
}
#endif /* CONFIG_SMP */
static inline struct xnsched *xnsched_struct(int cpu)
{
	return &per_cpu(nksched, cpu);
}

static inline struct xnsched *xnsched_current(void)
{
	/* IRQs off */
	return raw_cpu_ptr(&nksched);
}

static inline struct xnthread *xnsched_current_thread(void)
{
	return xnsched_current()->curr;
}

/* Test resched flag of given sched. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
	return sched->status & XNRESCHED;
}

/* Set self resched flag for the given scheduler. */
static inline void xnsched_set_self_resched(struct xnsched *sched)
{
	sched->status |= XNRESCHED;
}
#define xnsched_realtime_domain	cobalt_pipeline.domain
/* Set resched flag for the given scheduler. */
#ifdef CONFIG_SMP

static inline void xnsched_set_resched(struct xnsched *sched)
{
	struct xnsched *current_sched = xnsched_current();

	if (current_sched == sched)
		current_sched->status |= XNRESCHED;
	else if (!xnsched_resched_p(sched)) {
		cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
		sched->status |= XNRESCHED;
		current_sched->status |= XNRESCHED;
	}
}

#define xnsched_realtime_cpus cobalt_pipeline.supported_cpus

static inline int xnsched_supported_cpu(int cpu)
{
	return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
}

static inline int xnsched_threading_cpu(int cpu)
{
	return cpumask_test_cpu(cpu, &cobalt_cpu_affinity);
}
#else /* !CONFIG_SMP */

static inline void xnsched_set_resched(struct xnsched *sched)
{
	xnsched_set_self_resched(sched);
}

#define xnsched_realtime_cpus CPU_MASK_ALL

static inline int xnsched_supported_cpu(int cpu)
{
	return 1;
}

static inline int xnsched_threading_cpu(int cpu)
{
	return 1;
}

#endif /* !CONFIG_SMP */
#define for_each_realtime_cpu(cpu)		\
	for_each_online_cpu(cpu)		\
		if (xnsched_supported_cpu(cpu))	\
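/*
 * Note the trailing backslash: for_each_realtime_cpu() expands to a
 * bare loop header and must be followed by a statement, just like a
 * plain for_each_online_cpu(). Sketch (assumed usage, not from this
 * file):
 *
 *	int cpu;
 *
 *	for_each_realtime_cpu(cpu)
 *		xnsched_init(xnsched_struct(cpu), cpu);
 */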
int ___xnsched_run(struct xnsched *sched);

void __xnsched_run_handler(void);

static inline int __xnsched_run(struct xnsched *sched)
{
	/*
	 * Reschedule if XNRESCHED is pending, but never over an IRQ
	 * handler or in the middle of an unlocked context switch.
	 */
	if (((sched->status|sched->lflags) &
	     (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
		return 0;

	return ___xnsched_run(sched);
}
static inline int xnsched_run(void)
{
	struct xnsched *sched = xnsched_current();
	/*
	 * READ_ONCE() prevents the compiler from refetching or
	 * tearing the load of sched->curr, which is shared locklessly
	 * with the rescheduling core.
	 */
	struct xnthread *curr = READ_ONCE(sched->curr);

	/*
	 * No rescheduling takes place while the current thread holds
	 * the scheduler lock; it is deferred until the lock count
	 * drops to zero.
	 */
	return curr->lock_count > 0 ? 0 : __xnsched_run(sched);
}
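/*
 * Typical calling pattern (a sketch under assumptions, not lifted
 * from this file): operations which may raise XNRESCHED run under
 * nklock, then xnsched_run() folds the pending rescheduling:
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	xnthread_resume(thread, XNDELAY);
 *	xnsched_run();
 *	xnlock_put_irqrestore(&nklock, s);
 */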
void xnsched_lock(void);

void xnsched_unlock(void);

static inline int xnsched_interrupt_p(void)
{
	return xnsched_current()->lflags & XNINIRQ;
}

static inline int xnsched_root_p(void)
{
	return xnthread_test_state(xnsched_current_thread(), XNROOT);
}

static inline int xnsched_unblockable_p(void)
{
	return xnsched_interrupt_p() || xnsched_root_p();
}

static inline int xnsched_primary_p(void)
{
	return !xnsched_unblockable_p();
}
#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch() xnsched_run()

static inline
int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return sched->status & XNRESCHED;
}

#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */

static inline struct xnsched *
xnsched_finish_unlocked_switch(struct xnsched *sched)
{
	XENO_BUG_ON(COBALT, !hard_irqs_disabled());
	return xnsched_current();
}

static inline void xnsched_resched_after_unlocked_switch(void) { }

static inline int
xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return 0;
}

#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
#ifdef CONFIG_XENO_OPT_WATCHDOG
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
	sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */
#include <cobalt/kernel/sched-idle.h>
#include <cobalt/kernel/sched-rt.h>
int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init(struct xnsched *sched, int cpu);

void xnsched_destroy(struct xnsched *sched);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
		       struct xnsched_class *sched_class,
		       const union xnsched_policy_param *p);

void xnsched_track_policy(struct xnthread *thread,
			  struct xnthread *target);

void xnsched_migrate(struct xnthread *thread,
		     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
			     struct xnsched *sched);
/**
 * @brief Rotate a scheduler runqueue.
 *
 * The specified scheduling class is requested to rotate its runqueue
 * for the given scheduler. Rotation is performed on the priority
 * group the scheduling parameter block refers to.
 */
static inline void xnsched_rotate(struct xnsched *sched,
				  struct xnsched_class *sched_class,
				  const union xnsched_policy_param *sched_param)
{
	sched_class->sched_rotate(sched, sched_param);
}
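/*
 * Illustration (assumed, not from this file): request a manual
 * round-robin within the RT class, at the priority level of the
 * thread currently running on @sched, using the XNSCHED_RUNPRIO
 * placeholder defined earlier:
 *
 *	union xnsched_policy_param param;
 *
 *	param.rt.prio = XNSCHED_RUNPRIO;
 *	xnsched_rotate(sched, &xnsched_class_rt, &param);
 */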
static inline int xnsched_init_thread(struct xnthread *thread)
{
	int ret = 0;

	xnsched_idle_init_thread(thread);
	xnsched_rt_init_thread(thread);

#ifdef CONFIG_XENO_OPT_SCHED_TP
	ret = xnsched_tp_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	ret = xnsched_sporadic_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	ret = xnsched_quota_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */

	return ret;
}
static inline int xnsched_root_priority(struct xnsched *sched)
{
	return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
	return sched->rootcb.sched_class;
}
static inline void xnsched_tick(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;
	struct xnsched_class *sched_class = curr->sched_class;
	/*
	 * A thread that undergoes round-robin scheduling only
	 * consumes its time slice when it runs within its own
	 * scheduling class, which excludes temporary priority boosts,
	 * and does not hold the scheduler lock.
	 */
	if (sched_class == curr->base_class &&
	    sched_class->sched_tick &&
	    xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
	    curr->lock_count == 0)
		sched_class->sched_tick(sched);
}
static inline int xnsched_chkparam(struct xnsched_class *sched_class,
				   struct xnthread *thread,
				   const union xnsched_policy_param *p)
{
	if (sched_class->sched_chkparam)
		return sched_class->sched_chkparam(thread, p);

	return 0;
}
static inline int xnsched_declare(struct xnsched_class *sched_class,
				  struct xnthread *thread,
				  const union xnsched_policy_param *p)
{
	int ret;

	if (sched_class->sched_declare) {
		ret = sched_class->sched_declare(thread, p);
		if (ret)
			return ret;
	}
	if (sched_class != thread->base_class)
		sched_class->nthreads++;

	return 0;
}
#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_requeue(thread);
}

static inline void xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	thread->sched_class->sched_setparam(thread, p);
	/* The class weight makes cprio comparable across classes. */
	thread->wprio = thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	thread->sched_class->sched_trackprio(thread, p);
	thread->wprio = thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_forget(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	--sched_class->nthreads;

	if (sched_class->sched_forget)
		sched_class->sched_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	xnthread_set_info(thread, XNKICKED);

	if (sched_class->sched_kick)
		sched_class->sched_kick(thread);

	xnsched_set_resched(thread->sched);
}
#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_requeue(thread);
}

static inline void xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_setparam(thread, p);
	else
		__xnsched_idle_setparam(thread, p);

	thread->wprio = thread->cprio + sched_class->weight;
}
static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_getparam(thread, p);
	else
		__xnsched_idle_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_trackprio(thread, p);
	else
		__xnsched_idle_trackprio(thread, p);

	thread->wprio = thread->cprio + sched_class->weight;
}
static inline void xnsched_forget(struct xnthread *thread)
{
	--thread->base_class->nthreads;
	__xnsched_rt_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
	xnthread_set_info(thread, XNKICKED);
	xnsched_set_resched(thread->sched);
}

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */

#endif /* !_COBALT_KERNEL_SCHED_H */