/*
 * Xenomai 3.0-rc3 — cobalt-core.h
 * Cobalt core tracepoint definitions (ftrace TRACE_EVENT format).
 */
#undef TRACE_SYSTEM
/*
 * NOTE(review): TRACE_SYSTEM here contains a '-', which is not a valid
 * C identifier. Later kernels enforce that TRACE_SYSTEM be a legal
 * identifier (or require TRACE_SYSTEM_VAR) — confirm this builds on the
 * targeted kernel, or rename to cobalt_core.
 */
#define TRACE_SYSTEM cobalt-core

/* Multi-read guard: this header is intentionally re-included by the
 * tracepoint machinery (define_trace.h), hence TRACE_HEADER_MULTI_READ. */
#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_COBALT_CORE_H

#include <linux/tracepoint.h>

/*
 * thread_event - common class for tracepoints keyed on a Cobalt thread.
 *
 * Records the thread descriptor address, its name, the associated pid,
 * and snapshots of the state/info flag words at trace time.
 */
DECLARE_EVENT_CLASS(thread_event,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread),

 TP_STRUCT__entry(
 __field(struct xnthread *, thread)
 __string(name, thread->name)
 __field(pid_t, pid)
 __field(unsigned long, state)
 __field(unsigned long, info)
 ),

 TP_fast_assign(
 __entry->thread = thread;
 __assign_str(name, thread->name);
 __entry->state = thread->state;
 __entry->info = thread->info;
 /* NOTE(review): presumably the pid of the mated Linux task — confirm
  * against xnthread_host_pid(). */
 __entry->pid = xnthread_host_pid(thread);
 ),

 TP_printk("thread=%p(%s) pid=%d state=0x%lx info=0x%lx",
 __entry->thread, __get_str(name), __entry->pid,
 __entry->state, __entry->info)
);
33 
/*
 * synch_wait_event - class for tracepoints pairing a synchronization
 * object with the thread operating on it (sleep/try-acquire/acquire).
 */
DECLARE_EVENT_CLASS(synch_wait_event,
 TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
 TP_ARGS(synch, thread),

 TP_STRUCT__entry(
 __field(struct xnthread *, thread)
 __string(name, thread->name)
 __field(struct xnsynch *, synch)
 ),

 TP_fast_assign(
 __entry->thread = thread;
 __assign_str(name, thread->name);
 __entry->synch = synch;
 ),

 TP_printk("synch=%p thread=%p(%s)",
 __entry->synch, __entry->thread, __get_str(name))
);
53 
/*
 * synch_post_event - class for tracepoints that only involve a
 * synchronization object (release/wakeup/flush/forget); records its
 * address alone.
 */
DECLARE_EVENT_CLASS(synch_post_event,
 TP_PROTO(struct xnsynch *synch),
 TP_ARGS(synch),

 TP_STRUCT__entry(
 __field(struct xnsynch *, synch)
 ),

 TP_fast_assign(
 __entry->synch = synch;
 ),

 TP_printk("synch=%p", __entry->synch)
);
68 
/*
 * irq_event - class for IRQ-related tracepoints; records the IRQ number.
 */
DECLARE_EVENT_CLASS(irq_event,
 TP_PROTO(unsigned int irq),
 TP_ARGS(irq),

 TP_STRUCT__entry(
 __field(unsigned int, irq)
 ),

 TP_fast_assign(
 __entry->irq = irq;
 ),

 TP_printk("irq=%u", __entry->irq)
);
83 
/*
 * clock_event - class for clock tick tracepoints. Identical payload to
 * irq_event; kept separate so the output is labeled "clock_irq=".
 */
DECLARE_EVENT_CLASS(clock_event,
 TP_PROTO(unsigned int irq),
 TP_ARGS(irq),

 TP_STRUCT__entry(
 __field(unsigned int, irq)
 ),

 TP_fast_assign(
 __entry->irq = irq;
 ),

 TP_printk("clock_irq=%u", __entry->irq)
);
98 
/*
 * thread_migrate - class for thread CPU-migration tracepoints; records
 * the thread, its name, and the destination CPU number.
 */
DECLARE_EVENT_CLASS(thread_migrate,
 TP_PROTO(struct xnthread *thread, unsigned int cpu),
 TP_ARGS(thread, cpu),

 TP_STRUCT__entry(
 __field(struct xnthread *, thread)
 __string(name, thread->name)
 __field(unsigned int, cpu)
 ),

 TP_fast_assign(
 __entry->thread = thread;
 __assign_str(name, thread->name);
 __entry->cpu = cpu;
 ),

 TP_printk("thread=%p(%s) cpu=%u",
 __entry->thread, __get_str(name), __entry->cpu)
);
118 
/*
 * timer_event - class for timer tracepoints; records only the timer
 * descriptor address.
 */
DECLARE_EVENT_CLASS(timer_event,
 TP_PROTO(struct xntimer *timer),
 TP_ARGS(timer),

 TP_STRUCT__entry(
 __field(struct xntimer *, timer)
 ),

 TP_fast_assign(
 __entry->timer = timer;
 ),

 TP_printk("timer=%p", __entry->timer)
);
133 
/*
 * cobalt_schedule - fired on a (local) rescheduling request; records the
 * scheduler status word only, not the sched pointer itself.
 */
TRACE_EVENT(cobalt_schedule,
 TP_PROTO(struct xnsched *sched),
 TP_ARGS(sched),

 TP_STRUCT__entry(
 __field(unsigned long, status)
 ),

 TP_fast_assign(
 __entry->status = sched->status;
 ),

 TP_printk("status=0x%lx", __entry->status)
);
148 
/*
 * cobalt_schedule_remote - same payload as cobalt_schedule, emitted for
 * rescheduling requests targeting a remote CPU's scheduler.
 */
TRACE_EVENT(cobalt_schedule_remote,
 TP_PROTO(struct xnsched *sched),
 TP_ARGS(sched),

 TP_STRUCT__entry(
 __field(unsigned long, status)
 ),

 TP_fast_assign(
 __entry->status = sched->status;
 ),

 TP_printk("status=0x%lx", __entry->status)
);
163 
/*
 * cobalt_switch_context - fired on a Cobalt context switch; records the
 * outgoing and incoming thread descriptors and their names.
 */
TRACE_EVENT(cobalt_switch_context,
 TP_PROTO(struct xnthread *prev, struct xnthread *next),
 TP_ARGS(prev, next),

 TP_STRUCT__entry(
 __field(struct xnthread *, prev)
 __field(struct xnthread *, next)
 __string(prev_name, prev->name)
 __string(next_name, next->name)
 ),

 TP_fast_assign(
 __entry->prev = prev;
 __entry->next = next;
 __assign_str(prev_name, prev->name);
 __assign_str(next_name, next->name);
 ),

 TP_printk("prev=%p(%s) next=%p(%s)",
 __entry->prev, __get_str(prev_name),
 __entry->next, __get_str(next_name))
);
186 
/*
 * cobalt_thread_init - fired at thread initialization; records the
 * thread, its name, the init-attribute flags, the scheduling class name
 * and the current priority (cprio).
 */
TRACE_EVENT(cobalt_thread_init,
 TP_PROTO(struct xnthread *thread,
 const struct xnthread_init_attr *attr,
 struct xnsched_class *sched_class),
 TP_ARGS(thread, attr, sched_class),

 TP_STRUCT__entry(
 __field(struct xnthread *, thread)
 __string(thread_name, thread->name)
 __string(class_name, sched_class->name)
 __field(unsigned long, flags)
 __field(int, cprio)
 ),

 TP_fast_assign(
 __entry->thread = thread;
 __assign_str(thread_name, thread->name);
 __entry->flags = attr->flags;
 __assign_str(class_name, sched_class->name);
 __entry->cprio = thread->cprio;
 ),

 TP_printk("thread=%p(%s) flags=0x%lx class=%s prio=%d",
 __entry->thread, __get_str(thread_name), __entry->flags,
 __get_str(class_name), __entry->cprio)
);
213 
214 TRACE_EVENT(cobalt_thread_suspend,
215  TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout,
216  xntmode_t timeout_mode, struct xnsynch *wchan),
217  TP_ARGS(thread, mask, timeout, timeout_mode, wchan),
218 
219  TP_STRUCT__entry(
220  __field(struct xnthread *, thread)
221  __field(unsigned long, mask)
222  __field(xnticks_t, timeout)
223  __field(xntmode_t, timeout_mode)
224  __field(struct xnsynch *, wchan)
225  ),
226 
227  TP_fast_assign(
228  __entry->thread = thread;
229  __entry->mask = mask;
230  __entry->timeout = timeout;
231  __entry->timeout_mode = timeout_mode;
232  __entry->wchan = wchan;
233  ),
234 
235  TP_printk("thread=%p mask=%lu timeout=%Lu timeout_mode=%d wchan=%p",
236  __entry->thread, __entry->mask,
237  __entry->timeout, __entry->timeout_mode, __entry->wchan)
238 );
239 
/*
 * cobalt_thread_resume - fired when a thread is resumed; records the
 * thread and the bitmask of conditions being cleared (printed in hex).
 */
TRACE_EVENT(cobalt_thread_resume,
 TP_PROTO(struct xnthread *thread, unsigned long mask),
 TP_ARGS(thread, mask),

 TP_STRUCT__entry(
 __field(struct xnthread *, thread)
 __field(unsigned long, mask)
 ),

 TP_fast_assign(
 __entry->thread = thread;
 __entry->mask = mask;
 ),

 TP_printk("thread=%p mask=0x%lx",
 __entry->thread, __entry->mask)
);
257 
/*
 * cobalt_thread_fault - fired when a thread takes a CPU fault/trap;
 * records the thread, its name, the faulting instruction pointer and
 * the architecture trap type, both extracted from the I-pipe trap data.
 */
TRACE_EVENT(cobalt_thread_fault,
 TP_PROTO(struct xnthread *thread, struct ipipe_trap_data *td),
 TP_ARGS(thread, td),

 TP_STRUCT__entry(
 __field(struct xnthread *, thread)
 __string(name, thread->name)
 __field(void *, ip)
 __field(unsigned int, type)
 ),

 TP_fast_assign(
 __entry->thread = thread;
 __assign_str(name, thread->name);
 __entry->ip = (void *)xnarch_fault_pc(td);
 __entry->type = xnarch_fault_trap(td);
 ),

 TP_printk("thread=%p(%s) ip=%p type=%x",
 __entry->thread, __get_str(name), __entry->ip,
 __entry->type)
);
280 
/* Thread lifecycle tracepoints, all instances of thread_event. */

DEFINE_EVENT(thread_event, cobalt_thread_start,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_cancel,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_join,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_unblock,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_wait_period,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_missed_period,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_set_mode,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);

/* CPU migration tracepoints (thread_migrate class). */

DEFINE_EVENT(thread_migrate, cobalt_thread_migrate,
 TP_PROTO(struct xnthread *thread, unsigned int cpu),
 TP_ARGS(thread, cpu)
);

DEFINE_EVENT(thread_migrate, cobalt_thread_migrate_passive,
 TP_PROTO(struct xnthread *thread, unsigned int cpu),
 TP_ARGS(thread, cpu)
);

/* Shadow (dual-kernel) domain transition and watchdog tracepoints. */

DEFINE_EVENT(thread_event, cobalt_shadow_gohard,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_watchdog_signal,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_shadow_hardened,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_shadow_gorelax,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_shadow_relaxed,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_shadow_entry,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);
355 
/*
 * cobalt_shadow_map - fired when a thread is mapped to a shadow context;
 * records the thread, its name, and its base priority.
 */
TRACE_EVENT(cobalt_shadow_map,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread),

 TP_STRUCT__entry(
 __field(struct xnthread *, thread)
 __string(name, thread->name)
 __field(int, prio)
 ),

 TP_fast_assign(
 __entry->thread = thread;
 __assign_str(name, thread->name);
 __entry->prio = xnthread_base_priority(thread);
 ),

 TP_printk("thread=%p(%s) prio=%d",
 __entry->thread, __get_str(name), __entry->prio)
);
375 
/* Counterpart of cobalt_shadow_map; plain thread_event payload. */
DEFINE_EVENT(thread_event, cobalt_shadow_unmap,
 TP_PROTO(struct xnthread *thread),
 TP_ARGS(thread)
);
380 
/*
 * cobalt_lostage_request - fired when work is posted to the low-priority
 * (Linux) stage on behalf of a task; records a request-type label plus
 * the target task's pid and comm.
 */
TRACE_EVENT(cobalt_lostage_request,
 TP_PROTO(const char *type, struct task_struct *task),
 TP_ARGS(type, task),

 TP_STRUCT__entry(
 __field(pid_t, pid)
 __array(char, comm, TASK_COMM_LEN)
 __field(const char *, type)
 ),

 TP_fast_assign(
 /* NOTE(review): only the pointer is stored, not the string contents;
  * assumes 'type' points to a string literal that remains valid when
  * the trace buffer is read — confirm at the call sites. */
 __entry->type = type;
 __entry->pid = task->pid;
 memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
 ),

 TP_printk("request=%s pid=%d comm=%s",
 __entry->type, __entry->pid, __entry->comm)
);
400 
/*
 * cobalt_lostage_wakeup - fired when a task is woken from the
 * low-priority stage; records the task's pid and comm.
 */
TRACE_EVENT(cobalt_lostage_wakeup,
 TP_PROTO(struct task_struct *task),
 TP_ARGS(task),

 TP_STRUCT__entry(
 __field(pid_t, pid)
 __array(char, comm, TASK_COMM_LEN)
 ),

 TP_fast_assign(
 __entry->pid = task->pid;
 memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
 ),

 TP_printk("pid=%d comm=%s",
 __entry->pid, __entry->comm)
);
418 
/*
 * cobalt_lostage_signal - fired when a signal is delivered to a task via
 * the low-priority stage; records pid, comm, and the signal number.
 */
TRACE_EVENT(cobalt_lostage_signal,
 TP_PROTO(struct task_struct *task, int sig),
 TP_ARGS(task, sig),

 TP_STRUCT__entry(
 __field(pid_t, pid)
 __array(char, comm, TASK_COMM_LEN)
 __field(int, sig)
 ),

 TP_fast_assign(
 __entry->pid = task->pid;
 __entry->sig = sig;
 memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
 ),

 TP_printk("pid=%d comm=%s sig=%d",
 __entry->pid, __entry->comm, __entry->sig)
);
438 
/* IRQ handling tracepoints (irq_event class). */

DEFINE_EVENT(irq_event, cobalt_irq_entry,
 TP_PROTO(unsigned int irq),
 TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_exit,
 TP_PROTO(unsigned int irq),
 TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_attach,
 TP_PROTO(unsigned int irq),
 TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_detach,
 TP_PROTO(unsigned int irq),
 TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_enable,
 TP_PROTO(unsigned int irq),
 TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_disable,
 TP_PROTO(unsigned int irq),
 TP_ARGS(irq)
);

/* Clock tick entry/exit tracepoints (clock_event class). */

DEFINE_EVENT(clock_event, cobalt_clock_entry,
 TP_PROTO(unsigned int irq),
 TP_ARGS(irq)
);

DEFINE_EVENT(clock_event, cobalt_clock_exit,
 TP_PROTO(unsigned int irq),
 TP_ARGS(irq)
);

/* Timer stop/expiry tracepoints (timer_event class). */

DEFINE_EVENT(timer_event, cobalt_timer_stop,
 TP_PROTO(struct xntimer *timer),
 TP_ARGS(timer)
);

DEFINE_EVENT(timer_event, cobalt_timer_expire,
 TP_PROTO(struct xntimer *timer),
 TP_ARGS(timer)
);
488 
/*
 * cobalt_print_timer_mode - render an xntmode_t as "rel"/"abs"/"rt"
 * in trace output via __print_symbolic.
 */
#define cobalt_print_timer_mode(mode) \
 __print_symbolic(mode, \
 { XN_RELATIVE, "rel" }, \
 { XN_ABSOLUTE, "abs" }, \
 { XN_REALTIME, "rt" })
494 
/*
 * cobalt_timer_start - fired when a timer is armed; records the timer,
 * its name (only available with CONFIG_XENO_OPT_STATS), the expiry
 * value, the reload interval, and the timeout mode.
 */
TRACE_EVENT(cobalt_timer_start,
 TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval,
 xntmode_t mode),
 TP_ARGS(timer, value, interval, mode),

 TP_STRUCT__entry(
 __field(struct xntimer *, timer)
#ifdef CONFIG_XENO_OPT_STATS
 __string(name, timer->name)
#endif
 __field(xnticks_t, value)
 __field(xnticks_t, interval)
 __field(xntmode_t, mode)
 ),

 TP_fast_assign(
 __entry->timer = timer;
#ifdef CONFIG_XENO_OPT_STATS
 __assign_str(name, timer->name);
#endif
 __entry->value = value;
 __entry->interval = interval;
 __entry->mode = mode;
 ),

 /* Without stats support timers are unnamed: print "(anon)" instead. */
 TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s",
 __entry->timer,
#ifdef CONFIG_XENO_OPT_STATS
 __get_str(name),
#else
 "(anon)",
#endif
 __entry->value, __entry->interval,
 cobalt_print_timer_mode(__entry->mode))
);
530 
531 #ifdef CONFIG_SMP
532 
/*
 * cobalt_timer_migrate - SMP only: fired when a timer is moved to
 * another CPU; records the timer and the destination CPU number.
 */
TRACE_EVENT(cobalt_timer_migrate,
 TP_PROTO(struct xntimer *timer, unsigned int cpu),
 TP_ARGS(timer, cpu),

 TP_STRUCT__entry(
 __field(struct xntimer *, timer)
 __field(unsigned int, cpu)
 ),

 TP_fast_assign(
 __entry->timer = timer;
 __entry->cpu = cpu;
 ),

 TP_printk("timer=%p cpu=%u",
 __entry->timer, __entry->cpu)
);
550 
551 #endif /* CONFIG_SMP */
552 
/* Synchronization-object tracepoints: waiter side (synch_wait_event)
 * and owner/post side (synch_post_event). */

DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon,
 TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
 TP_ARGS(synch, thread)
);

DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire,
 TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
 TP_ARGS(synch, thread)
);

DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire,
 TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
 TP_ARGS(synch, thread)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_release,
 TP_PROTO(struct xnsynch *synch),
 TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup,
 TP_PROTO(struct xnsynch *synch),
 TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many,
 TP_PROTO(struct xnsynch *synch),
 TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_flush,
 TP_PROTO(struct xnsynch *synch),
 TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_forget,
 TP_PROTO(struct xnsynch *synch),
 TP_ARGS(synch)
);
592 
593 #endif /* _TRACE_COBALT_CORE_H */
594 
595 /* This part must be outside protection */
596 #include <trace/define_trace.h>
/*
 * (Extraction residue — Doxygen cross-references preserved as a note:
 *  struct xnsched: "Scheduling information structure", sched.h:58;
 *  member 'unsigned long status', sched.h:60.)
 */