#ifndef _COBALT_KERNEL_SCHED_H
#define _COBALT_KERNEL_SCHED_H

#include <linux/percpu.h>
#include <cobalt/kernel/lock.h>
#include <cobalt/kernel/thread.h>
#include <cobalt/kernel/schedqueue.h>
#include <cobalt/kernel/sched-tp.h>
#include <cobalt/kernel/sched-weak.h>
#include <cobalt/kernel/sched-sporadic.h>
#include <cobalt/kernel/sched-quota.h>
#include <cobalt/kernel/vfile.h>
#include <cobalt/kernel/assert.h>
#include <asm/xenomai/machine.h>
/* Sched status flags */
#define XNRESCHED	0x10000000	/* Needs rescheduling */
#define XNINSW		0x20000000	/* In context switch */
#define XNINTCK		0x40000000	/* In master tick handler context */

/* Sched local flags */
#define XNHTICK		0x00008000	/* Host tick pending */
#define XNINIRQ		0x00004000	/* In IRQ handling context */
#define XNHDEFER	0x00002000	/* Host tick deferred */
#define XNINLOCK	0x00001000	/* Scheduler locked */
struct xnsched_rt {
	xnsched_queue_t runnable;	/* Runnable thread queue */
};
/* Scheduling information structure (per-CPU). */
struct xnsched {
	unsigned long status;		/* Scheduler specific status bitmask */
	unsigned long lflags;		/* Scheduler specific local flags bitmask */
	struct xnthread *curr;		/* Current thread */
#ifdef CONFIG_SMP
	int cpu;			/* Owner CPU id */
	cpumask_t resched;		/* Mask of CPUs needing rescheduling */
#endif
	struct xnsched_rt rt;		/* Context of built-in real-time class */
#ifdef CONFIG_XENO_OPT_SCHED_WEAK
	struct xnsched_weak weak;	/* Context of weak scheduling class */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_TP
	struct xnsched_tp tp;		/* Context of TP class */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	struct xnsched_sporadic pss;	/* Context of sporadic scheduling class */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	struct xnsched_quota quota;	/* Context of runtime quota scheduling */
#endif
	volatile unsigned inesting;	/* Interrupt nesting level */
	struct xntimer htimer;		/* Host timer */
	struct xntimer rrbtimer;	/* Round-robin timer */
	struct xnthread rootcb;		/* Root thread control block */
#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
	struct xnthread *last;
#endif
#ifdef CONFIG_XENO_ARCH_FPU
	struct xnthread *fpuholder;	/* Thread owning the current FPU context */
#endif
#ifdef CONFIG_XENO_OPT_WATCHDOG
	struct xntimer wdtimer;		/* Watchdog timer object */
	int wdcount;			/* Watchdog tick count */
#endif
#ifdef CONFIG_XENO_OPT_STATS
	xnticks_t last_account_switch;	/* Last account switch date (ticks) */
	xnstat_exectime_t *current_account; /* Currently active account */
#endif
};
DECLARE_PER_CPU(struct xnsched, nksched);

extern cpumask_t nkaffinity;

extern struct list_head nkthreadq;

extern int nknrthreads;
#ifdef CONFIG_XENO_OPT_VFILE
extern struct xnvfile_rev_tag nkthreadlist_tag;
#endif
union xnsched_policy_param;
struct xnsched_class {
	void (*sched_init)(struct xnsched *sched);
	void (*sched_enqueue)(struct xnthread *thread);
	void (*sched_dequeue)(struct xnthread *thread);
	void (*sched_requeue)(struct xnthread *thread);
	struct xnthread *(*sched_pick)(struct xnsched *sched);
	void (*sched_tick)(struct xnsched *sched);
	void (*sched_rotate)(struct xnsched *sched,
			     const union xnsched_policy_param *p);
	void (*sched_migrate)(struct xnthread *thread,
			      struct xnsched *sched);
	void (*sched_setparam)(struct xnthread *thread,
			       const union xnsched_policy_param *p);
	void (*sched_getparam)(struct xnthread *thread,
			       union xnsched_policy_param *p);
	void (*sched_trackprio)(struct xnthread *thread,
				const union xnsched_policy_param *p);
	int (*sched_declare)(struct xnthread *thread,
			     const union xnsched_policy_param *p);
	void (*sched_forget)(struct xnthread *thread);
	void (*sched_kick)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_VFILE
	int (*sched_init_vfile)(struct xnsched_class *schedclass,
				struct xnvfile_directory *vfroot);
	void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif
	int nthreads;
	struct xnsched_class *next;
	int weight;
	int policy;
	const char *name;
};
#define XNSCHED_CLASS_WEIGHT(n)		(n * XNSCHED_CLASS_WEIGHT_FACTOR)

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO   0x80000000
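
/*
 * Illustration only (not part of the original header): a sketch of how a
 * scheduling class descriptor might be filled in.  The demo_* handlers and
 * the weight value are hypothetical; real classes (sched-rt.c, sched-tp.c,
 * sched-quota.c, ...) provide the handlers they need and leave the rest
 * NULL.
 */
#if 0
static struct xnsched_class xnsched_class_demo = {
	.sched_init	= demo_init,
	.sched_enqueue	= demo_enqueue,
	.sched_dequeue	= demo_dequeue,
	.sched_requeue	= demo_requeue,
	.sched_pick	= demo_pick,
	.sched_setparam	= demo_setparam,
	.sched_getparam	= demo_getparam,
	.sched_trackprio = demo_trackprio,
	.weight		= XNSCHED_CLASS_WEIGHT(1),	/* hypothetical weight */
	.name		= "demo",
};
#endif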
#define xnsched_for_each_thread(__thread)	\
	list_for_each_entry(__thread, &nkthreadq, glink)
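
/*
 * Illustration only: xnsched_for_each_thread() walks the global thread
 * queue (nkthreadq), which is protected by nklock, so the walk must run
 * with that lock held.  The ready-thread count computed below is a
 * hypothetical use case.
 */
#if 0
static int demo_count_ready_threads(void)
{
	struct xnthread *thread;
	int count = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	xnsched_for_each_thread(thread) {
		if (xnthread_test_state(thread, XNREADY))
			count++;
	}

	xnlock_put_irqrestore(&nklock, s);

	return count;
}
#endif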
#ifdef CONFIG_SMP
static inline int xnsched_cpu(struct xnsched *sched)
{
	return sched->cpu;
}
#else /* !CONFIG_SMP */
static inline int xnsched_cpu(struct xnsched *sched)
{
	return 0;
}
#endif /* CONFIG_SMP */
static inline struct xnsched *xnsched_struct(int cpu)
{
	return &per_cpu(nksched, cpu);
}

static inline struct xnsched *xnsched_current(void)
{
	/* IRQs off */
	return __this_cpu_ptr(&nksched);
}

static inline struct xnthread *xnsched_current_thread(void)
{
	return xnsched_current()->curr;
}
/* Test the resched flag of the given scheduler. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
	return sched->status & XNRESCHED;
}

/* Set the self-resched flag for the given scheduler. */
static inline void xnsched_set_self_resched(struct xnsched *sched)
{
	sched->status |= XNRESCHED;
}
#define xnsched_realtime_domain  xnarch_machdata.domain
/* Set the resched flag for the given scheduler. */
#ifdef CONFIG_SMP

static inline void xnsched_set_resched(struct xnsched *sched)
{
	struct xnsched *current_sched = xnsched_current();

	if (current_sched == sched)
		current_sched->status |= XNRESCHED;
	else if (!xnsched_resched_p(sched)) {
		cpu_set(xnsched_cpu(sched), current_sched->resched);
		sched->status |= XNRESCHED;
		current_sched->status |= XNRESCHED;
	}
}

#define xnsched_realtime_cpus xnarch_machdata.supported_cpus

static inline int xnsched_supported_cpu(int cpu)
{
	return cpu_isset(cpu, xnsched_realtime_cpus);
}
#else /* !CONFIG_SMP */

static inline void xnsched_set_resched(struct xnsched *sched)
{
	xnsched_set_self_resched(sched);
}

#define xnsched_realtime_cpus CPU_MASK_ALL

static inline int xnsched_supported_cpu(int cpu)
{
	return 1;
}

#endif /* !CONFIG_SMP */
#define for_each_realtime_cpu(cpu)		\
	for_each_online_cpu(cpu)		\
		if (xnsched_supported_cpu(cpu))	\
int __xnsched_run(struct xnsched *sched);

void __xnsched_run_handler(void);
/* The rescheduling procedure. */
static inline int xnsched_run(void)
{
	struct xnsched *sched;

	sched = xnsched_current();
	/*
	 * No immediate rescheduling can take place if an interrupt
	 * context is active, the scheduler is locked, or a context
	 * switch is in progress on this CPU.
	 */
	if (((sched->status|sched->lflags) &
	     (XNINIRQ|XNINSW|XNRESCHED|XNINLOCK)) != XNRESCHED)
		return 0;

	return __xnsched_run(sched);
}
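
/*
 * Illustration only: the typical calling pattern for xnsched_run().  The
 * scheduler state is changed under nklock (here by resuming a thread,
 * which raises XNRESCHED as needed), then xnsched_run() is called to
 * apply the pending rescheduling.  demo_resume() is a hypothetical helper.
 */
#if 0
static void demo_resume(struct xnthread *thread)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	xnthread_resume(thread, XNSUSP);
	xnsched_run();
	xnlock_put_irqrestore(&nklock, s);
}
#endif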
void ___xnsched_lock(struct xnsched *sched);

void ___xnsched_unlock(struct xnsched *sched);

void ___xnsched_unlock_fully(struct xnsched *sched);
static inline void __xnsched_lock(void)
{
	struct xnsched *sched;

	sched = xnsched_current();
	___xnsched_lock(sched);
}

static inline void __xnsched_unlock(void)
{
	struct xnsched *sched;

	sched = xnsched_current();
	___xnsched_unlock(sched);
}

static inline void __xnsched_unlock_fully(void)
{
	struct xnsched *sched;

	sched = xnsched_current();
	___xnsched_unlock_fully(sched);
}
static inline void xnsched_lock(void)
{
	struct xnsched *sched;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	sched = xnsched_current();
	___xnsched_lock(sched);
	xnlock_put_irqrestore(&nklock, s);
}

static inline void xnsched_unlock(void)
{
	struct xnsched *sched;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	sched = xnsched_current();
	___xnsched_unlock(sched);
	xnlock_put_irqrestore(&nklock, s);
}
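
/*
 * Illustration only: xnsched_lock()/xnsched_unlock() bracket a section the
 * current thread wants to run without being preempted by other Cobalt
 * threads.  The lock nests; rescheduling is deferred until the outermost
 * unlock.  The work done inside the section is a placeholder.
 */
#if 0
static void demo_preemption_free_section(void)
{
	xnsched_lock();

	/* Non-blocking work which must not be preempted by other
	   Cobalt threads goes here. */

	xnsched_unlock();
}
#endif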
static inline int xnsched_interrupt_p(void)
{
	return xnsched_current()->lflags & XNINIRQ;
}

static inline int xnsched_locked_p(void)
{
	return xnthread_test_state(xnsched_current_thread(), XNLOCK);
}

static inline int xnsched_root_p(void)
{
	return xnthread_test_state(xnsched_current_thread(), XNROOT);
}

static inline int xnsched_unblockable_p(void)
{
	return xnsched_interrupt_p() || xnsched_root_p();
}

static inline int xnsched_primary_p(void)
{
	return !xnsched_unblockable_p();
}
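
/*
 * Illustration only: xnsched_primary_p() tells whether the caller runs
 * over a context which may block, i.e. neither over an interrupt frame
 * nor over the root (Linux) thread.  demo_may_sleep() is a hypothetical
 * helper using it as a guard.
 */
#if 0
static int demo_may_sleep(void)
{
	if (!xnsched_primary_p())
		return -EPERM;	/* no blocking from IRQ or root context */

	return 0;
}
#endif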
#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch() xnsched_run()

static inline
int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return sched->status & XNRESCHED;
}

#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */

static inline struct xnsched *
xnsched_finish_unlocked_switch(struct xnsched *sched)
{
	XENO_BUGON(COBALT, !hard_irqs_disabled());
	return xnsched_current();
}

static inline void xnsched_resched_after_unlocked_switch(void) { }

static inline int
xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return 0;
}

#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
#ifdef CONFIG_XENO_OPT_WATCHDOG
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
	sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */
#include <cobalt/kernel/sched-idle.h>
#include <cobalt/kernel/sched-rt.h>
int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init(struct xnsched *sched, int cpu);

void xnsched_destroy(struct xnsched *sched);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);
int xnsched_set_policy(struct xnthread *thread,
		       struct xnsched_class *sched_class,
		       const union xnsched_policy_param *p);
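
/*
 * Illustration only: switching a thread to the built-in RT class at a
 * given priority through xnsched_set_policy().  Must be called under
 * nklock; demo_set_rt_prio() is a hypothetical helper and error handling
 * is left to the caller.
 */
#if 0
static int demo_set_rt_prio(struct xnthread *thread, int prio)
{
	union xnsched_policy_param param;

	param.rt.prio = prio;

	return xnsched_set_policy(thread, &xnsched_class_rt, &param);
}
#endif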
void xnsched_track_policy(struct xnthread *thread,
			  struct xnthread *target);
void xnsched_migrate(struct xnthread *thread,
		     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
			     struct xnsched *sched);
/* Rotate a scheduler runqueue. */
static inline void xnsched_rotate(struct xnsched *sched,
				  struct xnsched_class *sched_class,
				  const union xnsched_policy_param *sched_param)
{
	sched_class->sched_rotate(sched, sched_param);
}
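
/*
 * Illustration only: rotating the runqueue of the built-in RT class at the
 * current thread's priority level, e.g. from round-robin handling code.
 * Here XNSCHED_RUNPRIO is assumed to stand for "current thread priority";
 * the call is assumed to run under nklock.  demo_round_robin() is a
 * hypothetical helper.
 */
#if 0
static void demo_round_robin(struct xnsched *sched)
{
	union xnsched_policy_param param;

	param.rt.prio = XNSCHED_RUNPRIO;
	xnsched_rotate(sched, &xnsched_class_rt, &param);
}
#endif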
static inline int xnsched_init_thread(struct xnthread *thread)
{
	int ret = 0;

	xnsched_idle_init_thread(thread);
	xnsched_rt_init_thread(thread);
#ifdef CONFIG_XENO_OPT_SCHED_TP
	ret = xnsched_tp_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	ret = xnsched_sporadic_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	ret = xnsched_quota_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */

	return ret;
}
static inline int xnsched_root_priority(struct xnsched *sched)
{
	return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
	return sched->rootcb.sched_class;
}
static inline void xnsched_tick(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;
	struct xnsched_class *sched_class = curr->sched_class;
	/*
	 * A thread undergoing round-robin scheduling only consumes
	 * its time slice when it runs within its own scheduling
	 * class (i.e. no temporary boost) and does not hold the
	 * scheduler lock.
	 */
	if (sched_class == curr->base_class &&
	    sched_class->sched_tick &&
	    xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB|XNLOCK) == XNRRB)
		sched_class->sched_tick(sched);
}
static inline int xnsched_declare(struct xnsched_class *sched_class,
				  struct xnthread *thread,
				  const union xnsched_policy_param *p)
{
	int ret;

	if (sched_class->sched_declare) {
		ret = sched_class->sched_declare(thread, p);
		if (ret)
			return ret;
	}
	if (sched_class != thread->base_class)
		sched_class->nthreads++;

	return 0;
}
#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_requeue(thread);
}
static inline void xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	thread->sched_class->sched_setparam(thread, p);
	thread->wprio = thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	thread->sched_class->sched_trackprio(thread, p);
	thread->wprio = thread->cprio + thread->sched_class->weight;
}
static inline void xnsched_forget(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	--sched_class->nthreads;

	if (sched_class->sched_forget)
		sched_class->sched_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	xnthread_set_info(thread, XNKICKED);

	if (sched_class->sched_kick)
		sched_class->sched_kick(thread);

	xnsched_set_resched(thread->sched);
}
#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_requeue(thread);
}
static inline void xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_setparam(thread, p);
	else
		__xnsched_idle_setparam(thread, p);

	thread->wprio = thread->cprio + sched_class->weight;
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_getparam(thread, p);
	else
		__xnsched_idle_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_trackprio(thread, p);
	else
		__xnsched_idle_trackprio(thread, p);

	thread->wprio = thread->cprio + sched_class->weight;
}
static inline void xnsched_forget(struct xnthread *thread)
{
	--thread->base_class->nthreads;
	__xnsched_rt_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
	xnthread_set_info(thread, XNKICKED);
	xnsched_set_resched(thread->sched);
}

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */

#endif /* !_COBALT_KERNEL_SCHED_H */