Xenomai 3.0-rc3
thread.h
1 /*
2  * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
3  *
4  * Xenomai is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published
6  * by the Free Software Foundation; either version 2 of the License,
7  * or (at your option) any later version.
8  *
9  * Xenomai is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with Xenomai; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
17  * 02111-1307, USA.
18  */
19 #ifndef _COBALT_KERNEL_THREAD_H
20 #define _COBALT_KERNEL_THREAD_H
21 
22 #include <linux/sched.h>
23 #include <linux/sched/rt.h>
24 #include <cobalt/kernel/list.h>
25 #include <cobalt/kernel/stat.h>
26 #include <cobalt/kernel/timer.h>
27 #include <cobalt/kernel/registry.h>
28 #include <cobalt/kernel/schedparam.h>
29 #include <cobalt/kernel/trace.h>
30 #include <cobalt/kernel/synch.h>
31 #include <cobalt/uapi/kernel/thread.h>
32 #include <asm/xenomai/machine.h>
33 #include <asm/xenomai/thread.h>
34 
39 #define XNTHREAD_BLOCK_BITS (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNMIGRATE|XNHELD)
40 #define XNTHREAD_MODE_BITS (XNLOCK|XNRRB|XNWARN|XNTRAPLB)
41 
42 struct xnthread;
43 struct xnsched;
44 struct xnselector;
45 struct xnsched_class;
46 struct xnsched_tpslot;
47 struct xnthread_personality;
48 struct completion;
49 
50 struct xnthread_init_attr {
51  struct xnthread_personality *personality;
52  cpumask_t affinity;
53  int flags;
54  const char *name;
55 };
56 
57 struct xnthread_start_attr {
58  int mode;
59  void (*entry)(void *cookie);
60  void *cookie;
61 };
62 
63 struct xnthread_wait_context {
64  int posted;
65 };
66 
67 struct xnthread_personality {
68  const char *name;
69  unsigned int magic;
70  int xid;
71  atomic_t refcnt;
72  struct {
73  void *(*attach_process)(void);
74  void (*detach_process)(void *arg);
75  void (*map_thread)(struct xnthread *thread);
76  struct xnthread_personality *(*relax_thread)(struct xnthread *thread);
77  struct xnthread_personality *(*harden_thread)(struct xnthread *thread);
78  struct xnthread_personality *(*move_thread)(struct xnthread *thread,
79  int dest_cpu);
80  struct xnthread_personality *(*exit_thread)(struct xnthread *thread);
81  struct xnthread_personality *(*finalize_thread)(struct xnthread *thread);
82  } ops;
83  struct module *module;
84 };
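As an illustration of how this descriptor is filled in, a minimal personality could look like the sketch below. The "demo" name, the magic value and the choice to leave most hooks unset are hypothetical and not taken from the Xenomai sources; unset hooks are simply skipped by the handler macros defined later in this header.

static struct xnthread_personality demo_personality = {
        .name = "demo",              /* hypothetical personality name */
        .magic = 0x44454d4f,         /* hypothetical magic tag */
        .ops = {
                /* Only the hooks a personality actually needs are set;
                   the remaining function pointers stay NULL. */
                .map_thread = NULL,
        },
        .module = THIS_MODULE,       /* requires <linux/module.h> */
};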
85 
86 struct xnthread {
87  struct xnarchtcb tcb; /* Architecture-dependent block */
88 
89  __u32 state; /* Thread state flags */
90  __u32 info; /* Thread information flags */
91 
92  struct xnsched *sched; /* Thread scheduler */
93  struct xnsched_class *sched_class; /* Current scheduling class */
94  struct xnsched_class *base_class; /* Base scheduling class */
95 
96 #ifdef CONFIG_XENO_OPT_SCHED_TP
97  struct xnsched_tpslot *tps; /* Current partition slot for TP scheduling */
98  struct list_head tp_link; /* Link in per-sched TP thread queue */
99 #endif
100 #ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
101  struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */
102 #endif
103 #ifdef CONFIG_XENO_OPT_SCHED_QUOTA
104  struct xnsched_quota_group *quota; /* Quota scheduling group. */
105  struct list_head quota_expired;
106  struct list_head quota_next;
107 #endif
108 
109  unsigned int idtag; /* Unique ID tag */
110 
111  cpumask_t affinity; /* Processor affinity. */
112 
113  int bprio; /* Base priority (before PIP boost) */
114 
115  int cprio; /* Current priority */
116 
120  int wprio;
121 
122  int lock_count;
128  struct list_head rlink;
129 
134  struct list_head plink;
135 
137  struct list_head glink;
138 
143  struct list_head claimq;
144 
145  struct xnsynch *wchan; /* Resource the thread pends on */
146 
147  struct xnsynch *wwake; /* Wait channel the thread was resumed from */
148 
149  int res_count; /* Held resources count */
150 
151  struct xntimer rtimer; /* Resource timer */
152 
153  struct xntimer ptimer; /* Periodic timer */
154 
155  xnticks_t rrperiod; /* Allotted round-robin period (ns) */
156 
157  struct xnthread_wait_context *wcontext; /* Active wait context. */
158 
159  struct {
160  xnstat_counter_t ssw; /* Primary -> secondary mode switch count */
161  xnstat_counter_t csw; /* Context switches (includes secondary -> primary switches) */
162  xnstat_counter_t xsc; /* Xenomai syscalls */
163  xnstat_counter_t pf; /* Number of page faults */
164  xnstat_exectime_t account; /* Execution time accounting entity */
165  xnstat_exectime_t lastperiod; /* Interval marker for execution time reports */
166  } stat;
167 
168  struct xnselector *selector; /* For select. */
169 
170  xnhandle_t handle; /* Handle in registry */
171 
172  char name[XNOBJECT_NAME_LEN]; /* Symbolic name of thread */
173 
174  void (*entry)(void *cookie); /* Thread entry routine */
175  void *cookie; /* Cookie to pass to the entry routine */
176 
177  struct xnthread_user_window *u_window; /* Data visible from userland. */
178 
179  struct xnthread_personality *personality;
180 
181 #ifdef CONFIG_XENO_OPT_DEBUG
182  const char *exe_path; /* Executable path */
183  u32 proghash; /* Hash value for exe_path */
184 #endif
185 
186  struct xnsynch join_synch;
187 };
188 
189 static inline int xnthread_get_state(const struct xnthread *thread)
190 {
191  return thread->state;
192 }
193 
194 static inline int xnthread_test_state(struct xnthread *thread, int bits)
195 {
196  return thread->state & bits;
197 }
198 
199 static inline void xnthread_set_state(struct xnthread *thread, int bits)
200 {
201  thread->state |= bits;
202 }
203 
204 static inline void xnthread_clear_state(struct xnthread *thread, int bits)
205 {
206  thread->state &= ~bits;
207 }
208 
209 static inline int xnthread_test_info(struct xnthread *thread, int bits)
210 {
211  return thread->info & bits;
212 }
213 
214 static inline void xnthread_set_info(struct xnthread *thread, int bits)
215 {
216  thread->info |= bits;
217 }
218 
219 static inline void xnthread_clear_info(struct xnthread *thread, int bits)
220 {
221  thread->info &= ~bits;
222 }
223 
224 static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread)
225 {
226  return &thread->tcb;
227 }
228 
229 static inline int xnthread_base_priority(const struct xnthread *thread)
230 {
231  return thread->bprio;
232 }
233 
234 static inline int xnthread_current_priority(const struct xnthread *thread)
235 {
236  return thread->cprio;
237 }
238 
239 static inline struct task_struct *xnthread_host_task(struct xnthread *thread)
240 {
241  return xnthread_archtcb(thread)->core.host_task;
242 }
243 
244 static inline pid_t xnthread_host_pid(struct xnthread *thread)
245 {
246  if (xnthread_test_state(thread, XNROOT))
247  return 0;
248 
249  return xnthread_host_task(thread)->pid;
250 }
251 
252 #define xnthread_for_each_claimed(__pos, __thread) \
253  list_for_each_entry(__pos, &(__thread)->claimq, link)
254 
255 #define xnthread_for_each_claimed_safe(__pos, __tmp, __thread) \
256  list_for_each_entry_safe(__pos, __tmp, &(__thread)->claimq, link)
257 
258 #define xnthread_run_handler(__t, __h, __a...) \
259  do { \
260  struct xnthread_personality *__p__ = (__t)->personality; \
261  if ((__p__)->ops.__h) \
262  (__p__)->ops.__h(__t, ##__a); \
263  } while (0)
264 
265 #define xnthread_run_handler_stack(__t, __h, __a...) \
266  do { \
267  struct xnthread_personality *__p__ = (__t)->personality; \
268  do { \
269  if ((__p__)->ops.__h == NULL) \
270  break; \
271  __p__ = (__p__)->ops.__h(__t, ##__a); \
272  } while (__p__); \
273  } while (0)
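For reference, xnthread_run_handler() fires the named hook once, for the thread's current personality only, whereas xnthread_run_handler_stack() keeps invoking the hook down the personality chain for as long as each call returns the next (non-NULL) personality. A hedged usage sketch, with thread standing for any valid struct xnthread pointer:

/* Illustration only: run every stacked relax_thread hook for thread. */
xnthread_run_handler_stack(thread, relax_thread);

/* Illustration only: run the finalize_thread hook of the current
   personality, if it is implemented. */
xnthread_run_handler(thread, finalize_thread);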
274 
275 static inline
276 struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread)
277 {
278  return thread->wcontext;
279 }
280 
281 static inline
282 int xnthread_register(struct xnthread *thread, const char *name)
283 {
284  return xnregistry_enter(name, thread, &thread->handle, NULL);
285 }
286 
287 static inline
288 struct xnthread *xnthread_lookup(xnhandle_t threadh)
289 {
290  struct xnthread *thread = xnregistry_lookup(threadh, NULL);
291  return thread && thread->handle == xnhandle_get_index(threadh) ? thread : NULL;
292 }
293 
294 static inline void xnthread_sync_window(struct xnthread *thread)
295 {
296  if (thread->u_window) {
297  thread->u_window->state = thread->state;
298  thread->u_window->info = thread->info;
299  }
300 }
301 
302 static inline
303 void xnthread_clear_sync_window(struct xnthread *thread, int state_bits)
304 {
305  if (thread->u_window) {
306  thread->u_window->state = thread->state & ~state_bits;
307  thread->u_window->info = thread->info;
308  }
309 }
310 
311 static inline
312 void xnthread_set_sync_window(struct xnthread *thread, int state_bits)
313 {
314  if (thread->u_window) {
315  thread->u_window->state = thread->state | state_bits;
316  thread->u_window->info = thread->info;
317  }
318 }
319 
320 static inline int normalize_priority(int prio)
321 {
322  return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
323 }
324 
325 int __xnthread_init(struct xnthread *thread,
326  const struct xnthread_init_attr *attr,
327  struct xnsched *sched,
328  struct xnsched_class *sched_class,
329  const union xnsched_policy_param *sched_param);
330 
331 void __xnthread_test_cancel(struct xnthread *curr);
332 
333 void __xnthread_cleanup(struct xnthread *curr);
334 
335 void __xnthread_discard(struct xnthread *thread);
336 
352 static inline struct xnthread *xnthread_current(void)
353 {
354  return ipipe_current_threadinfo()->thread;
355 }
356 
368 static inline struct xnthread *xnthread_from_task(struct task_struct *p)
369 {
370  return ipipe_task_threadinfo(p)->thread;
371 }
372 
382 static inline void xnthread_test_cancel(void)
383 {
384  struct xnthread *curr = xnthread_current();
385 
386  if (curr && xnthread_test_info(curr, XNCANCELD))
387  __xnthread_test_cancel(curr);
388 }
389 
390 static inline
391 void xnthread_complete_wait(struct xnthread_wait_context *wc)
392 {
393  wc->posted = 1;
394 }
395 
396 static inline
397 int xnthread_wait_complete_p(struct xnthread_wait_context *wc)
398 {
399  return wc->posted;
400 }
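The wait-context helpers above, together with xnthread_prepare_wait() declared further down, implement a small handshake between a sleeper and its waker: the sleeper installs a context before blocking, the waker marks it posted, and the sleeper can then tell a genuine completion from a timeout or an interrupted wait. The sketch below assumes the xnsynch sleep/wakeup services and xnsched_run() from the synch and sched headers, and the usual locking around them; some_synch and sleeper are placeholders.

/* Sleeper side (sketch). */
struct xnthread_wait_context wc;
int info;

xnthread_prepare_wait(&wc);
info = xnsynch_sleep_on(&some_synch, XN_INFINITE, XN_RELATIVE);
/* info carries XNRMID/XNTIMEO/XNBREAK bits on abnormal wakeup. */
if (xnthread_wait_complete_p(&wc)) {
        /* The waker posted our wait context before we resumed. */
}

/* Waker side (sketch). */
struct xnthread *sleeper = xnsynch_wakeup_one_sleeper(&some_synch);
if (sleeper)
        xnthread_complete_wait(xnthread_get_wait_context(sleeper));
xnsched_run(); /* let the rescheduling procedure run the sleeper */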
401 
402 #ifdef CONFIG_XENO_ARCH_FPU
403 void xnthread_switch_fpu(struct xnsched *sched);
404 #else
405 static inline void xnthread_switch_fpu(struct xnsched *sched) { }
406 #endif /* CONFIG_XENO_ARCH_FPU */
407 
408 void xnthread_init_shadow_tcb(struct xnthread *thread);
409 
410 void xnthread_init_root_tcb(struct xnthread *thread);
411 
412 void xnthread_deregister(struct xnthread *thread);
413 
414 char *xnthread_format_status(unsigned long status, char *buf, int size);
415 
416 xnticks_t xnthread_get_timeout(struct xnthread *thread, xnticks_t ns);
417 
418 xnticks_t xnthread_get_period(struct xnthread *thread);
419 
420 void xnthread_prepare_wait(struct xnthread_wait_context *wc);
421 
422 int xnthread_init(struct xnthread *thread,
423  const struct xnthread_init_attr *attr,
424  struct xnsched_class *sched_class,
425  const union xnsched_policy_param *sched_param);
426 
427 int xnthread_start(struct xnthread *thread,
428  const struct xnthread_start_attr *attr);
429 
430 int xnthread_set_mode(struct xnthread *thread,
431  int clrmask,
432  int setmask);
433 
434 void xnthread_suspend(struct xnthread *thread,
435  int mask,
436  xnticks_t timeout,
437  xntmode_t timeout_mode,
438  struct xnsynch *wchan);
439 
440 void xnthread_resume(struct xnthread *thread,
441  int mask);
442 
443 int xnthread_unblock(struct xnthread *thread);
444 
445 int xnthread_set_periodic(struct xnthread *thread,
446  xnticks_t idate,
447  xntmode_t timeout_mode,
448  xnticks_t period);
449 
450 int xnthread_wait_period(unsigned long *overruns_r);
451 
452 int xnthread_set_slice(struct xnthread *thread,
453  xnticks_t quantum);
454 
455 void xnthread_cancel(struct xnthread *thread);
456 
457 int xnthread_join(struct xnthread *thread, bool uninterruptible);
458 
459 int xnthread_harden(void);
460 
461 void xnthread_relax(int notify, int reason);
462 
463 void __xnthread_kick(struct xnthread *thread);
464 
465 void xnthread_kick(struct xnthread *thread);
466 
467 void __xnthread_demote(struct xnthread *thread);
468 
469 void xnthread_demote(struct xnthread *thread);
470 
471 void xnthread_signal(struct xnthread *thread,
472  int sig, int arg);
473 
474 void xnthread_pin_initial(struct xnthread *thread);
475 
476 int xnthread_map(struct xnthread *thread,
477  struct completion *done);
478 
479 void xnthread_call_mayday(struct xnthread *thread, int reason);
480 
481 #ifdef CONFIG_SMP
482 int xnthread_migrate(int cpu);
483 
484 void xnthread_migrate_passive(struct xnthread *thread,
485  struct xnsched *sched);
486 #else
487 
488 static inline int xnthread_migrate(int cpu)
489 {
490  return cpu ? -EINVAL : 0;
491 }
492 
493 static inline void xnthread_migrate_passive(struct xnthread *thread,
494  struct xnsched *sched)
495 { }
496 
497 #endif
498 
499 int __xnthread_set_schedparam(struct xnthread *thread,
500  struct xnsched_class *sched_class,
501  const union xnsched_policy_param *sched_param);
502 
503 int xnthread_set_schedparam(struct xnthread *thread,
504  struct xnsched_class *sched_class,
505  const union xnsched_policy_param *sched_param);
506 
507 int xnthread_killall(int grace, int mask);
508 
509 extern struct xnthread_personality xenomai_personality;
510 
513 #endif /* !_COBALT_KERNEL_THREAD_H */
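For reference, creating and starting a Cobalt kernel thread with the services declared above typically chains xnthread_init() and xnthread_start(), using the attribute blocks defined near the top of the header. This is a hedged sketch only: the RT scheduling class (xnsched_class_rt, from the scheduler headers), the priority value, the affinity mask and the "demo" names are assumptions made for illustration.

static void demo_entry(void *cookie)
{
        for (;;) {
                /* real-time work, e.g. paced by xnthread_wait_period(NULL) */
        }
}

static struct xnthread demo_thread;

static int demo_spawn(void)
{
        struct xnthread_init_attr iattr = {
                .personality = &xenomai_personality,
                .affinity = CPU_MASK_ALL,
                .flags = 0,
                .name = "demo",
        };
        struct xnthread_start_attr sattr = {
                .mode = 0,
                .entry = demo_entry,
                .cookie = NULL,
        };
        union xnsched_policy_param param = { .rt = { .prio = 50 } };
        int ret;

        ret = xnthread_init(&demo_thread, &iattr, &xnsched_class_rt, &param);
        if (ret)
                return ret;

        return xnthread_start(&demo_thread, &sattr);
}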
Symbols referenced in this file (brief description and definition site):

- void xnthread_relax(int notify, int reason): Switch a shadow thread back to the Linux domain. (thread.c:1974)
- void xnthread_cancel(struct xnthread *thread): Cancel a thread. (thread.c:1468)
- XNCANCELD: Cancellation request is pending. (thread.h:72)
- static inline void xnthread_test_cancel(void): Introduce a thread cancellation point. (thread.h:382)
- int xnregistry_enter(const char *key, void *objaddr, xnhandle_t *phandle, struct xnpnode *pnode): Register a real-time object. (registry.c:627)
- void xnthread_suspend(struct xnthread *thread, int mask, xnticks_t timeout, xntmode_t timeout_mode, struct xnsynch *wchan): Suspend a thread. (thread.c:833)
- int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum): Set thread time-slicing information. (thread.c:1409)
- XNROOT: Root thread (that is, Linux/IDLE). (thread.h:49)
- static inline struct xnthread *xnthread_current(void): Retrieve the current Cobalt core TCB. (thread.h:352)
- int xnthread_wait_period(unsigned long *overruns_r): Wait for the next periodic release point. (thread.c:1335)
- struct xnsched: Scheduling information structure. (sched.h:58)
- int xnthread_map(struct xnthread *thread, struct completion *done): Create a shadow thread context over a kernel task. (thread.c:2385)
- static void *xnregistry_lookup(xnhandle_t handle, unsigned long *cstamp_r): Find a real-time object in the registry. (registry.h:175)
- int xnthread_init(struct xnthread *thread, const struct xnthread_init_attr *attr, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param): Initialize a new thread. (thread.c:579)
- int xnthread_harden(void): Migrate a Linux task to the Xenomai domain. (thread.c:1865)
- int xnthread_set_periodic(struct xnthread *thread, xnticks_t idate, xntmode_t timeout_mode, xnticks_t period): Make a thread periodic. (thread.c:1254)
- int xnthread_start(struct xnthread *thread, const struct xnthread_start_attr *attr): Start a newly created thread. (thread.c:660)
- int xnthread_unblock(struct xnthread *thread): Unblock a thread. (thread.c:1167)
- static inline struct xnthread *xnthread_from_task(struct task_struct *p): Retrieve the Cobalt core TCB attached to a Linux task. (thread.h:368)
- int xnthread_join(struct xnthread *thread, bool uninterruptible): Join with a terminated thread. (thread.c:1554)
- int xnthread_set_mode(struct xnthread *thread, int clrmask, int setmask): Change thread control mode. (thread.c:737)
- int xnthread_set_schedparam(struct xnthread *thread, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param): Change the base scheduling parameters of a thread. (thread.c:1764)
- void xnthread_resume(struct xnthread *thread, int mask): Resume a thread. (thread.c:1049)
- int xnthread_migrate(int cpu): Migrate the current thread. (thread.c:1639)