#ifndef _COBALT_KERNEL_SCHED_H
#define _COBALT_KERNEL_SCHED_H

#include <linux/percpu.h>
#include <cobalt/kernel/lock.h>
#include <cobalt/kernel/thread.h>
#include <cobalt/kernel/schedqueue.h>
#include <cobalt/kernel/sched-tp.h>
#include <cobalt/kernel/sched-weak.h>
#include <cobalt/kernel/sched-sporadic.h>
#include <cobalt/kernel/sched-quota.h>
#include <cobalt/kernel/vfile.h>
#include <cobalt/kernel/assert.h>
#include <asm/xenomai/machine.h>
#include <pipeline/sched.h>

/* Sched status flags */
#define XNRESCHED   0x10000000  /* Needs rescheduling */
#define XNINSW      0x20000000  /* In context switch */
#define XNINTCK     0x40000000  /* In master tick handler context */

/* Sched local flags */
#define XNIDLE      0x00010000  /* Idle (no outstanding timer) */
#define XNHTICK     0x00008000  /* Host tick pending */
#define XNINIRQ     0x00004000  /* In IRQ handling context */
#define XNHDEFER    0x00002000  /* Host tick deferred */
#define XNTSTOP     0x00000800

struct xnsched_rt {
        xnsched_queue_t runnable;       /*!< Runnable thread queue. */
};

/*!
 * \brief Scheduling information structure.
 */
struct xnsched {
        unsigned long status;           /*!< Scheduler specific status bitmask. */
        unsigned long lflags;           /*!< Scheduler specific local flags bitmask. */
        struct xnthread *curr;          /*!< Current thread. */
#ifdef CONFIG_SMP
        int cpu;                        /*!< Owner CPU id. */
        cpumask_t resched;              /*!< Mask of CPUs needing rescheduling. */
#endif
        struct xnsched_rt rt;           /*!< Context of built-in real-time class. */
#ifdef CONFIG_XENO_OPT_SCHED_WEAK
        struct xnsched_weak weak;       /*!< Context of weak scheduling class. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_TP
        struct xnsched_tp tp;           /*!< Context of TP class. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        struct xnsched_sporadic pss;    /*!< Context of sporadic scheduling class. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
        struct xnsched_quota quota;     /*!< Context of runtime quota scheduling. */
#endif
        volatile unsigned inesting;     /*!< Interrupt nesting level. */
        struct xntimer htimer;          /*!< Host timer. */
        struct xntimer rrbtimer;        /*!< Round-robin timer. */
        struct xnthread rootcb;         /*!< Root thread control block. */
#ifdef CONFIG_XENO_ARCH_FPU
        struct xnthread *fpuholder;     /*!< Thread owning the current FPU context. */
#endif
#ifdef CONFIG_XENO_OPT_WATCHDOG
        struct xntimer wdtimer;         /*!< Watchdog timer object. */
#endif
#ifdef CONFIG_XENO_OPT_STATS
        xnticks_t last_account_switch;  /*!< Last account switch date (ticks). */
        xnstat_exectime_t *current_account;     /*!< Currently active account. */
#endif
};

DECLARE_PER_CPU(struct xnsched, nksched);

extern cpumask_t cobalt_cpu_affinity;

extern struct list_head nkthreadq;

extern int cobalt_nrthreads;

#ifdef CONFIG_XENO_OPT_VFILE
extern struct xnvfile_rev_tag nkthreadlist_tag;
#endif /* CONFIG_XENO_OPT_VFILE */

union xnsched_policy_param;

struct xnsched_class {
        void (*sched_init)(struct xnsched *sched);
        void (*sched_enqueue)(struct xnthread *thread);
        void (*sched_dequeue)(struct xnthread *thread);
        void (*sched_requeue)(struct xnthread *thread);
        struct xnthread *(*sched_pick)(struct xnsched *sched);
        void (*sched_tick)(struct xnsched *sched);
        void (*sched_rotate)(struct xnsched *sched,
                        const union xnsched_policy_param *p);
        void (*sched_migrate)(struct xnthread *thread,
                        struct xnsched *sched);
        int (*sched_chkparam)(struct xnthread *thread,
                        const union xnsched_policy_param *p);
        bool (*sched_setparam)(struct xnthread *thread,
                        const union xnsched_policy_param *p);
        void (*sched_getparam)(struct xnthread *thread,
                        union xnsched_policy_param *p);
        void (*sched_trackprio)(struct xnthread *thread,
                        const union xnsched_policy_param *p);
        void (*sched_protectprio)(struct xnthread *thread, int prio);
        int (*sched_declare)(struct xnthread *thread,
                        const union xnsched_policy_param *p);
        void (*sched_forget)(struct xnthread *thread);
        void (*sched_kick)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_VFILE
        int (*sched_init_vfile)(struct xnsched_class *schedclass,
                        struct xnvfile_directory *vfroot);
        void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif
        int nthreads;
        struct xnsched_class *next;
        int weight;
        int policy;
        const char *name;
};

#define XNSCHED_CLASS_WEIGHT(n)         (n * XNSCHED_CLASS_WEIGHT_FACTOR)

#define XNSCHED_RUNPRIO         0x80000000
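
/*
 * Illustrative sketch (not part of this header): an external scheduling
 * class plugs into the core by filling the xnsched_class hook table.
 * The demo_* handlers and the "demo" name below are hypothetical.
 *
 *	static struct xnsched_class xnsched_class_demo = {
 *		.sched_init	= demo_init,		// per-CPU runqueue setup
 *		.sched_pick	= demo_pick,		// elect next runnable thread
 *		.sched_enqueue	= demo_enqueue,		// add thread to the runqueue
 *		.sched_dequeue	= demo_dequeue,		// remove thread from the runqueue
 *		.sched_requeue	= demo_requeue,		// put thread back at queue head
 *		.sched_setparam	= demo_setparam,	// apply scheduling parameters
 *		.sched_getparam	= demo_getparam,	// retrieve current parameters
 *		.sched_trackprio = demo_trackprio,	// follow PI/PP priority changes
 *		.weight		= XNSCHED_CLASS_WEIGHT(1),
 *		.name		= "demo",
 *	};
 */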

#define xnsched_for_each_thread(__thread)       \
        list_for_each_entry(__thread, &nkthreadq, glink)
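
/*
 * Usage sketch (illustrative only): walk every Cobalt thread. The
 * global thread queue is assumed to be protected by nklock, so the
 * traversal runs with that lock held.
 *
 *	struct xnthread *thread;
 *	spl_t s;
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	xnsched_for_each_thread(thread) {
 *		// inspect thread state here
 *	}
 *	xnlock_put_irqrestore(&nklock, s);
 */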

#ifdef CONFIG_SMP
static inline int xnsched_cpu(struct xnsched *sched)
{
        return sched->cpu;
}
#else /* !CONFIG_SMP */
static inline int xnsched_cpu(struct xnsched *sched)
{
        return 0;
}
#endif /* CONFIG_SMP */

static inline struct xnsched *xnsched_struct(int cpu)
{
        return &per_cpu(nksched, cpu);
}

static inline struct xnsched *xnsched_current(void)
{
        return raw_cpu_ptr(&nksched);
}

static inline struct xnthread *xnsched_current_thread(void)
{
        return xnsched_current()->curr;
}

/* Test the rescheduling bit of the given scheduler slot. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
        return sched->status & XNRESCHED;
}

/* Set the self-resched flag for the given scheduler slot. */
static inline void xnsched_set_self_resched(struct xnsched *sched)
{
        sched->status |= XNRESCHED;
}

#ifdef CONFIG_SMP

/* Set the resched flag for a possibly remote scheduler slot. */
static inline void xnsched_set_resched(struct xnsched *sched)
{
        struct xnsched *current_sched = xnsched_current();

        if (current_sched == sched)
                current_sched->status |= XNRESCHED;
        else if (!xnsched_resched_p(sched)) {
                cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
                sched->status |= XNRESCHED;
                current_sched->status |= XNRESCHED;
        }
}

#define xnsched_realtime_cpus cobalt_pipeline.supported_cpus

static inline int xnsched_supported_cpu(int cpu)
{
        return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
}

static inline int xnsched_threading_cpu(int cpu)
{
        return cpumask_test_cpu(cpu, &cobalt_cpu_affinity);
}

#else /* !CONFIG_SMP */

static inline void xnsched_set_resched(struct xnsched *sched)
{
        xnsched_set_self_resched(sched);
}

#define xnsched_realtime_cpus CPU_MASK_ALL

static inline int xnsched_supported_cpu(int cpu)
{
        return 1;
}

static inline int xnsched_threading_cpu(int cpu)
{
        return 1;
}

#endif /* !CONFIG_SMP */

#define for_each_realtime_cpu(cpu)              \
        for_each_online_cpu(cpu)                \
                if (xnsched_supported_cpu(cpu))
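
/*
 * Usage sketch (illustrative only): visit the per-CPU scheduler slot of
 * every CPU Cobalt may run real-time threads on.
 *
 *	int cpu;
 *
 *	for_each_realtime_cpu(cpu) {
 *		struct xnsched *sched = xnsched_struct(cpu);
 *		// e.g. test sched->status for XNRESCHED
 *	}
 */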

int ___xnsched_run(struct xnsched *sched);

void __xnsched_run_handler(void);

static inline int __xnsched_run(struct xnsched *sched)
{
        /*
         * Reschedule if XNRESCHED is pending, but never over an IRQ
         * handler or in the middle of an unlocked context switch.
         */
        if (((sched->status|sched->lflags) &
             (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
                return 0;

        return pipeline_schedule(sched);
}

/**
 * @brief The rescheduling procedure.
 *
 * Commits any pending XNRESCHED request for the current CPU, unless
 * rescheduling is not possible yet (e.g. the caller still holds the
 * scheduler lock).
 */
static inline int xnsched_run(void)
{
        struct xnsched *sched = xnsched_current();
        /*
         * sched->curr is shared locklessly with ___xnsched_run();
         * READ_ONCE() prevents the compiler from reading the pointer
         * piecemeal.
         */
        struct xnthread *curr = READ_ONCE(sched->curr);

        /* No rescheduling while the current thread holds the scheduler lock. */
        return curr->lock_count > 0 ? 0 : __xnsched_run(sched);
}
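
/*
 * Usage sketch (illustrative only): callers normally update the
 * scheduler state under nklock, then call xnsched_run() to commit any
 * pending XNRESCHED request. Resuming a hypothetical delayed thread
 * named "sleeper" could look like this:
 *
 *	spl_t s;
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	xnthread_resume(sleeper, XNDELAY);
 *	xnsched_run();
 *	xnlock_put_irqrestore(&nklock, s);
 */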

void xnsched_lock(void);

void xnsched_unlock(void);

static inline int xnsched_interrupt_p(void)
{
        return xnsched_current()->lflags & XNINIRQ;
}

static inline int xnsched_root_p(void)
{
        return xnthread_test_state(xnsched_current_thread(), XNROOT);
}

static inline int xnsched_unblockable_p(void)
{
        return xnsched_interrupt_p() || xnsched_root_p();
}

static inline int xnsched_primary_p(void)
{
        return !xnsched_unblockable_p();
}

bool xnsched_set_effective_priority(struct xnthread *thread, int prio);

#include <cobalt/kernel/sched-idle.h>
#include <cobalt/kernel/sched-rt.h>

int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init_all(void);

void xnsched_destroy_all(void);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
                       struct xnsched_class *sched_class,
                       const union xnsched_policy_param *p);
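
/*
 * Usage sketch (illustrative only): switching a thread to the built-in
 * RT class at a given priority, assuming the rt member of union
 * xnsched_policy_param carries the priority for that class.
 *
 *	union xnsched_policy_param param = { .rt = { .prio = 50 } };
 *	int ret = xnsched_set_policy(thread, &xnsched_class_rt, &param);
 *	if (ret)
 *		// the class rejected the parameters
 */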

void xnsched_track_policy(struct xnthread *thread, struct xnthread *target);

void xnsched_protect_priority(struct xnthread *thread, int prio);

void xnsched_migrate(struct xnthread *thread, struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread, struct xnsched *sched);

/**
 * @brief Rotate a scheduler runqueue.
 */
static inline void xnsched_rotate(struct xnsched *sched,
                                  struct xnsched_class *sched_class,
                                  const union xnsched_policy_param *sched_param)
{
        sched_class->sched_rotate(sched, sched_param);
}

static inline int xnsched_init_thread(struct xnthread *thread)
{
        int ret = 0;

        xnsched_idle_init_thread(thread);
        xnsched_rt_init_thread(thread);

#ifdef CONFIG_XENO_OPT_SCHED_TP
        ret = xnsched_tp_init_thread(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        ret = xnsched_sporadic_init_thread(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
        ret = xnsched_quota_init_thread(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */

        return ret;
}

static inline int xnsched_root_priority(struct xnsched *sched)
{
        return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
        return sched->rootcb.sched_class;
}

static inline void xnsched_tick(struct xnsched *sched)
{
        struct xnthread *curr = sched->curr;
        struct xnsched_class *sched_class = curr->sched_class;
        /*
         * A thread that undergoes round-robin scheduling only
         * consumes its time slice when it runs within its own
         * scheduling class, which excludes temporary PI boosts, and
         * does not hold the scheduler lock.
         */
        if (sched_class == curr->base_class &&
            sched_class->sched_tick &&
            xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
            curr->lock_count == 0)
                sched_class->sched_tick(sched);
}

static inline int xnsched_chkparam(struct xnsched_class *sched_class,
                                   struct xnthread *thread,
                                   const union xnsched_policy_param *p)
{
        if (sched_class->sched_chkparam)
                return sched_class->sched_chkparam(thread, p);

        return 0;
}

static inline int xnsched_declare(struct xnsched_class *sched_class,
                                  struct xnthread *thread,
                                  const union xnsched_policy_param *p)
{
        int ret;

        if (sched_class->sched_declare) {
                ret = sched_class->sched_declare(thread, p);
                if (ret)
                        return ret;
        }
        if (sched_class != thread->base_class)
                sched_class->nthreads++;

        return 0;
}

static inline int xnsched_calc_wprio(struct xnsched_class *sched_class,
                                     int prio)
{
        return prio + sched_class->weight;
}
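
/*
 * Note: the weighted priority used for runqueue ordering is simply the
 * class weight added to the thread priority. For instance, a thread at
 * priority 1 in a class of weight XNSCHED_CLASS_WEIGHT(2) always
 * outranks a thread at priority 99 in a class of weight
 * XNSCHED_CLASS_WEIGHT(1), provided priority ranges stay below
 * XNSCHED_CLASS_WEIGHT_FACTOR.
 */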

#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_requeue(thread);
}

static inline bool xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        return thread->base_class->sched_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        thread->sched_class->sched_trackprio(thread, p);
        thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
}

static inline void xnsched_protectprio(struct xnthread *thread, int prio)
{
        thread->sched_class->sched_protectprio(thread, prio);
        thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
}

static inline void xnsched_forget(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->base_class;

        --sched_class->nthreads;

        if (sched_class->sched_forget)
                sched_class->sched_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->base_class;

        xnthread_set_info(thread, XNKICKED);

        if (sched_class->sched_kick)
                sched_class->sched_kick(thread);

        xnsched_set_resched(thread->sched);
}

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

/*
 * When only the RT and idle scheduling classes are compiled in, the
 * class handlers can be resolved at build time.
 */
static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_requeue(thread);
}

static inline bool xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->base_class;

        if (sched_class == &xnsched_class_idle)
                return __xnsched_idle_setparam(thread, p);

        return __xnsched_rt_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class == &xnsched_class_idle)
                __xnsched_idle_getparam(thread, p);
        else
                __xnsched_rt_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class == &xnsched_class_idle)
                __xnsched_idle_trackprio(thread, p);
        else
                __xnsched_rt_trackprio(thread, p);

        thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
}

static inline void xnsched_protectprio(struct xnthread *thread, int prio)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class == &xnsched_class_idle)
                __xnsched_idle_protectprio(thread, prio);
        else
                __xnsched_rt_protectprio(thread, prio);

        thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
}

static inline void xnsched_forget(struct xnthread *thread)
{
        --thread->base_class->nthreads;
        __xnsched_rt_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
        xnthread_set_info(thread, XNKICKED);
        xnsched_set_resched(thread->sched);
}

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */