Xenomai 3.0-rc3
sched.h
1 /*
2  * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
3  *
4  * Xenomai is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published
6  * by the Free Software Foundation; either version 2 of the License,
7  * or (at your option) any later version.
8  *
9  * Xenomai is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with Xenomai; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
17  * 02111-1307, USA.
18  */
19 #ifndef _COBALT_KERNEL_SCHED_H
20 #define _COBALT_KERNEL_SCHED_H
21 
22 #include <linux/percpu.h>
23 #include <cobalt/kernel/lock.h>
24 #include <cobalt/kernel/thread.h>
25 #include <cobalt/kernel/schedqueue.h>
26 #include <cobalt/kernel/sched-tp.h>
27 #include <cobalt/kernel/sched-weak.h>
28 #include <cobalt/kernel/sched-sporadic.h>
29 #include <cobalt/kernel/sched-quota.h>
30 #include <cobalt/kernel/vfile.h>
31 #include <cobalt/kernel/assert.h>
32 #include <asm/xenomai/machine.h>
33 
39 /* Sched status flags */
40 #define XNRESCHED 0x10000000 /* Needs rescheduling */
41 #define XNINSW 0x20000000 /* In context switch */
42 #define XNINTCK 0x40000000 /* In master tick handler context */
43 
44 /* Sched local flags */
45 #define XNHTICK 0x00008000 /* Host tick pending */
46 #define XNINIRQ 0x00004000 /* In IRQ handling context */
47 #define XNHDEFER 0x00002000 /* Host tick deferred */
48 #define XNINLOCK 0x00001000 /* Scheduler locked */
49 
50 struct xnsched_rt {
51  xnsched_queue_t runnable;
52 };
53 
58 struct xnsched {
60  unsigned long status;
62  unsigned long lflags;
64  struct xnthread *curr;
65 #ifdef CONFIG_SMP
66 
67  int cpu;
69  cpumask_t resched;
70 #endif
71 
72  struct xnsched_rt rt;
73 #ifdef CONFIG_XENO_OPT_SCHED_WEAK
74 
75  struct xnsched_weak weak;
76 #endif
77 #ifdef CONFIG_XENO_OPT_SCHED_TP
78 
79  struct xnsched_tp tp;
80 #endif
81 #ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
82 
83  struct xnsched_sporadic pss;
84 #endif
85 #ifdef CONFIG_XENO_OPT_SCHED_QUOTA
86 
87  struct xnsched_quota quota;
88 #endif
89 
90  volatile unsigned inesting;
92  struct xntimer htimer;
94  struct xntimer rrbtimer;
96  struct xnthread rootcb;
97 #ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
98  struct xnthread *last;
99 #endif
100 #ifdef CONFIG_XENO_ARCH_FPU
101 
102  struct xnthread *fpuholder;
103 #endif
104 #ifdef CONFIG_XENO_OPT_WATCHDOG
105 
106  struct xntimer wdtimer;
108  int wdcount;
109 #endif
110 #ifdef CONFIG_XENO_OPT_STATS
111 
112  xnticks_t last_account_switch;
114  xnstat_exectime_t *current_account;
115 #endif
116 };
117 
118 DECLARE_PER_CPU(struct xnsched, nksched);
119 
120 extern cpumask_t nkaffinity;
121 
122 extern struct list_head nkthreadq;
123 
124 extern int nknrthreads;
125 
126 #ifdef CONFIG_XENO_OPT_VFILE
127 extern struct xnvfile_rev_tag nkthreadlist_tag;
128 #endif
129 
130 union xnsched_policy_param;
131 
132 struct xnsched_class {
133  void (*sched_init)(struct xnsched *sched);
134  void (*sched_enqueue)(struct xnthread *thread);
135  void (*sched_dequeue)(struct xnthread *thread);
136  void (*sched_requeue)(struct xnthread *thread);
137  struct xnthread *(*sched_pick)(struct xnsched *sched);
138  void (*sched_tick)(struct xnsched *sched);
139  void (*sched_rotate)(struct xnsched *sched,
140  const union xnsched_policy_param *p);
141  void (*sched_migrate)(struct xnthread *thread,
142  struct xnsched *sched);
143  void (*sched_setparam)(struct xnthread *thread,
144  const union xnsched_policy_param *p);
145  void (*sched_getparam)(struct xnthread *thread,
146  union xnsched_policy_param *p);
147  void (*sched_trackprio)(struct xnthread *thread,
148  const union xnsched_policy_param *p);
149  int (*sched_declare)(struct xnthread *thread,
150  const union xnsched_policy_param *p);
151  void (*sched_forget)(struct xnthread *thread);
152  void (*sched_kick)(struct xnthread *thread);
153 #ifdef CONFIG_XENO_OPT_VFILE
154  int (*sched_init_vfile)(struct xnsched_class *schedclass,
155  struct xnvfile_directory *vfroot);
156  void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
157 #endif
158  int nthreads;
159  struct xnsched_class *next;
160  int weight;
161  int policy;
162  const char *name;
163 };
164 
165 #define XNSCHED_CLASS_WEIGHT(n) (n * XNSCHED_CLASS_WEIGHT_FACTOR)
166 
167 /* Placeholder for current thread priority */
168 #define XNSCHED_RUNPRIO 0x80000000
169 
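For illustration only (this block is not part of sched.h): a scheduling class could be described by a static instance of struct xnsched_class with the relevant hooks filled in. The my_* handlers below are hypothetical placeholders; only the field names and XNSCHED_CLASS_WEIGHT() come from the definitions above.

        /* Sketch only: a skeletal class descriptor built on the hooks above. */
        static struct xnsched_class my_sched_class = {
                .sched_init      = my_sched_init,       /* per-CPU runqueue setup */
                .sched_enqueue   = my_sched_enqueue,    /* add a thread to the runqueue */
                .sched_dequeue   = my_sched_dequeue,    /* remove a thread from the runqueue */
                .sched_requeue   = my_sched_requeue,    /* put a thread back in front of its priority group */
                .sched_pick      = my_sched_pick,       /* elect the next runnable thread */
                .sched_setparam  = my_sched_setparam,   /* apply scheduling parameters */
                .sched_getparam  = my_sched_getparam,   /* read back scheduling parameters */
                .sched_trackprio = my_sched_trackprio,  /* follow a temporary priority boost */
                .weight          = XNSCHED_CLASS_WEIGHT(1),
                .name            = "my_class",
        };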
170 #define xnsched_for_each_thread(__thread) \
171  list_for_each_entry(__thread, &nkthreadq, glink)
172 
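A minimal usage sketch for the iterator above, not part of the header; it assumes nkthreadq is protected by nklock, so the walk is done with the lock held.

        /* Sketch only: count every Cobalt thread in the global queue. */
        static int count_cobalt_threads(void)
        {
                struct xnthread *thread;
                int count = 0;
                spl_t s;

                xnlock_get_irqsave(&nklock, s);
                xnsched_for_each_thread(thread)
                        count++;
                xnlock_put_irqrestore(&nklock, s);

                return count;
        }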
173 #ifdef CONFIG_SMP
174 static inline int xnsched_cpu(struct xnsched *sched)
175 {
176  return sched->cpu;
177 }
178 #else /* !CONFIG_SMP */
179 static inline int xnsched_cpu(struct xnsched *sched)
180 {
181  return 0;
182 }
183 #endif /* CONFIG_SMP */
184 
185 static inline struct xnsched *xnsched_struct(int cpu)
186 {
187  return &per_cpu(nksched, cpu);
188 }
189 
190 static inline struct xnsched *xnsched_current(void)
191 {
192  /* IRQs off */
193  return __this_cpu_ptr(&nksched);
194 }
195 
196 static inline struct xnthread *xnsched_current_thread(void)
197 {
198  return xnsched_current()->curr;
199 }
200 
201 /* Test resched flag of given sched. */
202 static inline int xnsched_resched_p(struct xnsched *sched)
203 {
204  return sched->status & XNRESCHED;
205 }
206 
207 /* Set self resched flag for the current scheduler. */
208 static inline void xnsched_set_self_resched(struct xnsched *sched)
209 {
210  sched->status |= XNRESCHED;
211 }
212 
213 #define xnsched_realtime_domain xnarch_machdata.domain
214 
215 /* Set resched flag for the given scheduler. */
216 #ifdef CONFIG_SMP
217 
218 static inline void xnsched_set_resched(struct xnsched *sched)
219 {
220  struct xnsched *current_sched = xnsched_current();
221 
222  if (current_sched == sched)
223  current_sched->status |= XNRESCHED;
224  else if (!xnsched_resched_p(sched)) {
225  cpu_set(xnsched_cpu(sched), current_sched->resched);
226  sched->status |= XNRESCHED;
227  current_sched->status |= XNRESCHED;
228  }
229 }
230 
231 #define xnsched_realtime_cpus xnarch_machdata.supported_cpus
232 
233 static inline int xnsched_supported_cpu(int cpu)
234 {
235  return cpu_isset(cpu, xnsched_realtime_cpus);
236 }
237 
238 #else /* !CONFIG_SMP */
239 
240 static inline void xnsched_set_resched(struct xnsched *sched)
241 {
242  xnsched_set_self_resched(sched);
243 }
244 
245 #define xnsched_realtime_cpus CPU_MASK_ALL
246 
247 static inline int xnsched_supported_cpu(int cpu)
248 {
249  return 1;
250 }
251 
252 #endif /* !CONFIG_SMP */
253 
254 #define for_each_realtime_cpu(cpu) \
255  for_each_online_cpu(cpu) \
256  if (xnsched_supported_cpu(cpu)) \
257 
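As an illustration, this iterator is the natural way to reach every per-CPU scheduler slot through xnsched_struct(). The init-time loop below is a sketch built only from helpers declared in this header; the surrounding function is hypothetical.

        /* Sketch only: set up one scheduler slot per real-time capable CPU. */
        static void init_realtime_scheds(void)
        {
                int cpu;

                for_each_realtime_cpu(cpu)
                        xnsched_init(xnsched_struct(cpu), cpu);
        }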
258 int __xnsched_run(struct xnsched *sched);
259 
260 void __xnsched_run_handler(void);
261 
262 static inline int xnsched_run(void)
263 {
264  struct xnsched *sched;
265  /*
266  * NOTE: Since __xnsched_run() won't run if an escalation to
267  * primary domain is needed, we won't use critical scheduler
268  * information before we actually run in primary mode;
269  * therefore we can first test the scheduler status then
270  * escalate.
271  *
272  * Running in the primary domain means that no Linux-triggered
273  * CPU migration may occur from that point either. Finally,
274  * since migration is always a self-directed operation for
275  * Xenomai threads, we can safely read the scheduler state
276  * bits without holding the nklock.
277  *
278  * Said differently, if we race here because of a CPU
279  * migration, it must have been Linux-triggered because we run
280  * in secondary mode; in which case we will escalate to the
281  * primary domain, then unwind the current call frame without
282  * running the rescheduling procedure in
283  * __xnsched_run(). Therefore, the scheduler slot
284  * (i.e. "sched") will be either valid, or unused.
285  */
286  sched = xnsched_current();
287  smp_rmb();
288  /*
289  * No immediate rescheduling is possible if an ISR context is
290  * active, the current thread holds the scheduler lock, or if
291  * we are caught in the middle of an unlocked context switch.
292  */
293  if (((sched->status|sched->lflags) &
294  (XNINIRQ|XNINSW|XNRESCHED|XNINLOCK)) != XNRESCHED)
295  return 0;
296 
297  return __xnsched_run(sched);
298 }
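A sketch of a typical calling pattern, with a hypothetical caller: the thread state is changed while holding nklock, which raises XNRESCHED on the target scheduler, then xnsched_run() commits the pending switch. xnthread_resume() is assumed to be the wake-up helper declared in cobalt/kernel/thread.h.

        /* Sketch only: wake a suspended thread, then trigger rescheduling. */
        static void wakeup_and_reschedule(struct xnthread *thread)
        {
                spl_t s;

                xnlock_get_irqsave(&nklock, s);
                xnthread_resume(thread, XNSUSP); /* clears XNSUSP, raises XNRESCHED if runnable */
                xnsched_run();                   /* returns 0 immediately if nothing is pending */
                xnlock_put_irqrestore(&nklock, s);
        }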
299 
300 void ___xnsched_lock(struct xnsched *sched);
301 
302 void ___xnsched_unlock(struct xnsched *sched);
303 
304 void ___xnsched_unlock_fully(struct xnsched *sched);
305 
306 static inline void __xnsched_lock(void)
307 {
308  struct xnsched *sched;
309 
310  barrier();
311  sched = xnsched_current();
312  ___xnsched_lock(sched);
313 }
314 
315 static inline void __xnsched_unlock(void)
316 {
317  struct xnsched *sched;
318 
319  barrier();
320  sched = xnsched_current();
321  ___xnsched_unlock(sched);
322 }
323 
324 static inline void __xnsched_unlock_fully(void)
325 {
326  struct xnsched *sched;
327 
328  barrier();
329  sched = xnsched_current();
330  ___xnsched_unlock_fully(sched);
331 }
332 
333 static inline void xnsched_lock(void)
334 {
335  struct xnsched *sched;
336  spl_t s;
337 
338  xnlock_get_irqsave(&nklock, s);
339  sched = xnsched_current();
340  ___xnsched_lock(sched);
341  xnlock_put_irqrestore(&nklock, s);
342 }
343 
344 static inline void xnsched_unlock(void)
345 {
346  struct xnsched *sched;
347  spl_t s;
348 
349  xnlock_get_irqsave(&nklock, s);
350  sched = xnsched_current();
351  ___xnsched_unlock(sched);
352  xnlock_put_irqrestore(&nklock, s);
353 }
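The two wrappers above only grab nklock around the underlying ___xnsched_lock()/___xnsched_unlock() calls; between a matched pair the current thread is not preempted by other Cobalt threads. A trivial usage sketch, with a hypothetical caller:

        /* Sketch only: a section the Cobalt scheduler must not preempt. */
        static void touch_percpu_state(void)
        {
                xnsched_lock();
                /* ... work that must not be preempted by other Cobalt threads ... */
                xnsched_unlock();
        }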
354 
355 static inline int xnsched_interrupt_p(void)
356 {
357  return xnsched_current()->lflags & XNINIRQ;
358 }
359 
360 static inline int xnsched_locked_p(void)
361 {
362  return xnthread_test_state(xnsched_current_thread(), XNLOCK);
363 }
364 
365 static inline int xnsched_root_p(void)
366 {
367  return xnthread_test_state(xnsched_current_thread(), XNROOT);
368 }
369 
370 static inline int xnsched_unblockable_p(void)
371 {
372  return xnsched_interrupt_p() || xnsched_root_p();
373 }
374 
375 static inline int xnsched_primary_p(void)
376 {
377  return !xnsched_unblockable_p();
378 }
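These predicates are typically used to guard services that may block; a minimal sketch follows, where the function and the error code are illustrative rather than mandated by this header.

        /* Sketch only: refuse to sleep from IRQ context or on behalf of the root thread. */
        static int blocking_service_entry(void)
        {
                if (!xnsched_primary_p())
                        return -EPERM;

                /* ... safe to wait or suspend from here ... */
                return 0;
        }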
379 
380 #ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
381 
382 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
383 
384 #define xnsched_resched_after_unlocked_switch() xnsched_run()
385 
386 static inline
387 int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
388 {
389  return sched->status & XNRESCHED;
390 }
391 
392 #else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
393 
394 static inline struct xnsched *
395 xnsched_finish_unlocked_switch(struct xnsched *sched)
396 {
397  XENO_BUGON(COBALT, !hard_irqs_disabled());
398  return xnsched_current();
399 }
400 
401 static inline void xnsched_resched_after_unlocked_switch(void) { }
402 
403 static inline int
404 xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
405 {
406  return 0;
407 }
408 
409 #endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
410 
411 #ifdef CONFIG_XENO_OPT_WATCHDOG
412 static inline void xnsched_reset_watchdog(struct xnsched *sched)
413 {
414  sched->wdcount = 0;
415 }
416 #else /* !CONFIG_XENO_OPT_WATCHDOG */
417 static inline void xnsched_reset_watchdog(struct xnsched *sched)
418 {
419 }
420 #endif /* CONFIG_XENO_OPT_WATCHDOG */
421 
422 #include <cobalt/kernel/sched-idle.h>
423 #include <cobalt/kernel/sched-rt.h>
424 
425 int xnsched_init_proc(void);
426 
427 void xnsched_cleanup_proc(void);
428 
429 void xnsched_register_classes(void);
430 
431 void xnsched_init(struct xnsched *sched, int cpu);
432 
433 void xnsched_destroy(struct xnsched *sched);
434 
435 struct xnthread *xnsched_pick_next(struct xnsched *sched);
436 
437 void xnsched_putback(struct xnthread *thread);
438 
439 int xnsched_set_policy(struct xnthread *thread,
440  struct xnsched_class *sched_class,
441  const union xnsched_policy_param *p);
442 
443 void xnsched_track_policy(struct xnthread *thread,
444  struct xnthread *target);
445 
446 void xnsched_migrate(struct xnthread *thread,
447  struct xnsched *sched);
448 
449 void xnsched_migrate_passive(struct xnthread *thread,
450  struct xnsched *sched);
451 
474 static inline void xnsched_rotate(struct xnsched *sched,
475  struct xnsched_class *sched_class,
476  const union xnsched_policy_param *sched_param)
477 {
478  sched_class->sched_rotate(sched, sched_param);
479 }
480 
481 static inline int xnsched_init_thread(struct xnthread *thread)
482 {
483  int ret = 0;
484 
485  xnsched_idle_init_thread(thread);
486  xnsched_rt_init_thread(thread);
487 
488 #ifdef CONFIG_XENO_OPT_SCHED_TP
489  ret = xnsched_tp_init_thread(thread);
490  if (ret)
491  return ret;
492 #endif /* CONFIG_XENO_OPT_SCHED_TP */
493 #ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
494  ret = xnsched_sporadic_init_thread(thread);
495  if (ret)
496  return ret;
497 #endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
498 #ifdef CONFIG_XENO_OPT_SCHED_QUOTA
499  ret = xnsched_quota_init_thread(thread);
500  if (ret)
501  return ret;
502 #endif /* CONFIG_XENO_OPT_SCHED_QUOTA */
503 
504  return ret;
505 }
506 
507 static inline int xnsched_root_priority(struct xnsched *sched)
508 {
509  return sched->rootcb.cprio;
510 }
511 
512 static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
513 {
514  return sched->rootcb.sched_class;
515 }
516 
517 static inline void xnsched_tick(struct xnsched *sched)
518 {
519  struct xnthread *curr = sched->curr;
520  struct xnsched_class *sched_class = curr->sched_class;
521  /*
522  * A thread that undergoes round-robin scheduling only
523  * consumes its time slice when it runs within its own
524  * scheduling class, which excludes temporary PIP boosts, and
525  * does not hold the scheduler lock.
526  */
527  if (sched_class == curr->base_class &&
528  sched_class->sched_tick &&
529  xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNLOCK|XNRRB) == XNRRB)
530  sched_class->sched_tick(sched);
531 }
532 
533 static inline int xnsched_declare(struct xnsched_class *sched_class,
534  struct xnthread *thread,
535  const union xnsched_policy_param *p)
536 {
537  int ret;
538 
539  if (sched_class->sched_declare) {
540  ret = sched_class->sched_declare(thread, p);
541  if (ret)
542  return ret;
543  }
544  if (sched_class != thread->base_class)
545  sched_class->nthreads++;
546 
547  return 0;
548 }
549 
550 #ifdef CONFIG_XENO_OPT_SCHED_CLASSES
551 
552 static inline void xnsched_enqueue(struct xnthread *thread)
553 {
554  struct xnsched_class *sched_class = thread->sched_class;
555 
556  if (sched_class != &xnsched_class_idle)
557  sched_class->sched_enqueue(thread);
558 }
559 
560 static inline void xnsched_dequeue(struct xnthread *thread)
561 {
562  struct xnsched_class *sched_class = thread->sched_class;
563 
564  if (sched_class != &xnsched_class_idle)
565  sched_class->sched_dequeue(thread);
566 }
567 
568 static inline void xnsched_requeue(struct xnthread *thread)
569 {
570  struct xnsched_class *sched_class = thread->sched_class;
571 
572  if (sched_class != &xnsched_class_idle)
573  sched_class->sched_requeue(thread);
574 }
575 
576 static inline void xnsched_setparam(struct xnthread *thread,
577  const union xnsched_policy_param *p)
578 {
579  thread->sched_class->sched_setparam(thread, p);
580  thread->wprio = thread->cprio + thread->sched_class->weight;
581 }
582 
583 static inline void xnsched_getparam(struct xnthread *thread,
584  union xnsched_policy_param *p)
585 {
586  thread->sched_class->sched_getparam(thread, p);
587 }
588 
589 static inline void xnsched_trackprio(struct xnthread *thread,
590  const union xnsched_policy_param *p)
591 {
592  thread->sched_class->sched_trackprio(thread, p);
593  thread->wprio = thread->cprio + thread->sched_class->weight;
594 }
595 
596 static inline void xnsched_forget(struct xnthread *thread)
597 {
598  struct xnsched_class *sched_class = thread->base_class;
599 
600  --sched_class->nthreads;
601 
602  if (sched_class->sched_forget)
603  sched_class->sched_forget(thread);
604 }
605 
606 static inline void xnsched_kick(struct xnthread *thread)
607 {
608  struct xnsched_class *sched_class = thread->base_class;
609 
610  xnthread_set_info(thread, XNKICKED);
611 
612  if (sched_class->sched_kick)
613  sched_class->sched_kick(thread);
614 
615  xnsched_set_resched(thread->sched);
616 }
617 
618 #else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
619 
620 /*
621  * If only the RT and IDLE scheduling classes are compiled in, we can
622  * fully inline common helpers for dealing with those.
623  */
624 
625 static inline void xnsched_enqueue(struct xnthread *thread)
626 {
627  struct xnsched_class *sched_class = thread->sched_class;
628 
629  if (sched_class != &xnsched_class_idle)
630  __xnsched_rt_enqueue(thread);
631 }
632 
633 static inline void xnsched_dequeue(struct xnthread *thread)
634 {
635  struct xnsched_class *sched_class = thread->sched_class;
636 
637  if (sched_class != &xnsched_class_idle)
638  __xnsched_rt_dequeue(thread);
639 }
640 
641 static inline void xnsched_requeue(struct xnthread *thread)
642 {
643  struct xnsched_class *sched_class = thread->sched_class;
644 
645  if (sched_class != &xnsched_class_idle)
646  __xnsched_rt_requeue(thread);
647 }
648 
649 static inline void xnsched_setparam(struct xnthread *thread,
650  const union xnsched_policy_param *p)
651 {
652  struct xnsched_class *sched_class = thread->sched_class;
653 
654  if (sched_class != &xnsched_class_idle)
655  __xnsched_rt_setparam(thread, p);
656  else
657  __xnsched_idle_setparam(thread, p);
658 
659  thread->wprio = thread->cprio + sched_class->weight;
660 }
661 
662 static inline void xnsched_getparam(struct xnthread *thread,
663  union xnsched_policy_param *p)
664 {
665  struct xnsched_class *sched_class = thread->sched_class;
666 
667  if (sched_class != &xnsched_class_idle)
668  __xnsched_rt_getparam(thread, p);
669  else
670  __xnsched_idle_getparam(thread, p);
671 }
672 
673 static inline void xnsched_trackprio(struct xnthread *thread,
674  const union xnsched_policy_param *p)
675 {
676  struct xnsched_class *sched_class = thread->sched_class;
677 
678  if (sched_class != &xnsched_class_idle)
679  __xnsched_rt_trackprio(thread, p);
680  else
681  __xnsched_idle_trackprio(thread, p);
682 
683  thread->wprio = thread->cprio + sched_class->weight;
684 }
685 
686 static inline void xnsched_forget(struct xnthread *thread)
687 {
688  --thread->base_class->nthreads;
689  __xnsched_rt_forget(thread);
690 }
691 
692 static inline void xnsched_kick(struct xnthread *thread)
693 {
694  xnthread_set_info(thread, XNKICKED);
695  xnsched_set_resched(thread->sched);
696 }
697 
698 #endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */
699 
702 #endif /* !_COBALT_KERNEL_SCHED_H */