#undef TRACE_SYSTEM
#define TRACE_SYSTEM cobalt-core

#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_COBALT_CORE_H

#include <linux/tracepoint.h>
DECLARE_EVENT_CLASS(thread_event,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(pid_t, pid)
		__field(unsigned long, state)
		__field(unsigned long, info)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->state = thread->state;
		__entry->info = thread->info;
		__entry->pid = xnthread_host_pid(thread);
	),

	TP_printk("thread=%p(%s) pid=%d state=0x%lx info=0x%lx",
		  __entry->thread, __get_str(name), __entry->pid,
		  __entry->state, __entry->info)
);
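/*
 * Usage sketch (illustrative, not taken from the upstream header): each
 * DEFINE_EVENT(thread_event, <name>, ...) instance further down reuses
 * this class layout and generates a trace_<name>() helper, so the core
 * emits the event with a single call such as:
 *
 *	trace_cobalt_thread_start(thread);
 *
 * which records the thread pointer, name, host pid, state and info bits
 * declared above.
 */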
DECLARE_EVENT_CLASS(synch_wait_event,
	TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
	TP_ARGS(synch, thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(struct xnsynch *, synch)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->synch = synch;
	),

	TP_printk("synch=%p thread=%p(%s)",
		  __entry->synch, __entry->thread, __get_str(name))
);
DECLARE_EVENT_CLASS(synch_post_event,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch),

	TP_STRUCT__entry(
		__field(struct xnsynch *, synch)
	),

	TP_fast_assign(
		__entry->synch = synch;
	),

	TP_printk("synch=%p", __entry->synch)
);
DECLARE_EVENT_CLASS(irq_event,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(unsigned int, irq)
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("irq=%u", __entry->irq)
);
DECLARE_EVENT_CLASS(clock_event,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(unsigned int, irq)
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("clock_irq=%u", __entry->irq)
);
DECLARE_EVENT_CLASS(thread_migrate,
	TP_PROTO(struct xnthread *thread, unsigned int cpu),
	TP_ARGS(thread, cpu),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->cpu = cpu;
	),

	TP_printk("thread=%p(%s) cpu=%u",
		  __entry->thread, __get_str(name), __entry->cpu)
);
DECLARE_EVENT_CLASS(timer_event,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
	),

	TP_fast_assign(
		__entry->timer = timer;
	),

	TP_printk("timer=%p", __entry->timer)
);
TRACE_EVENT(cobalt_schedule,
	TP_PROTO(struct xnsched *sched),
	TP_ARGS(sched),

	TP_STRUCT__entry(
		__field(unsigned long, status)
	),

	TP_fast_assign(
		__entry->status = sched->status;
	),

	TP_printk("status=0x%lx", __entry->status)
);
TRACE_EVENT(cobalt_schedule_remote,
	TP_PROTO(struct xnsched *sched),
	TP_ARGS(sched),

	TP_STRUCT__entry(
		__field(unsigned long, status)
	),

	TP_fast_assign(
		__entry->status = sched->status;
	),

	TP_printk("status=0x%lx", __entry->status)
);
TRACE_EVENT(cobalt_switch_context,
	TP_PROTO(struct xnthread *prev, struct xnthread *next),
	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__field(struct xnthread *, prev)
		__field(struct xnthread *, next)
		__string(prev_name, prev->name)
		__string(next_name, next->name)
	),

	TP_fast_assign(
		__entry->prev = prev;
		__entry->next = next;
		__assign_str(prev_name, prev->name);
		__assign_str(next_name, next->name);
	),

	TP_printk("prev=%p(%s) next=%p(%s)",
		  __entry->prev, __get_str(prev_name),
		  __entry->next, __get_str(next_name))
);
TRACE_EVENT(cobalt_thread_init,
	TP_PROTO(struct xnthread *thread,
		 const struct xnthread_init_attr *attr,
		 struct xnsched_class *sched_class),
	TP_ARGS(thread, attr, sched_class),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(thread_name, thread->name)
		__string(class_name, sched_class->name)
		__field(unsigned long, flags)
		__field(int, cprio)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(thread_name, thread->name);
		__entry->flags = attr->flags;
		__assign_str(class_name, sched_class->name);
		__entry->cprio = thread->cprio;
	),

	TP_printk("thread=%p(%s) flags=0x%lx class=%s prio=%d",
		  __entry->thread, __get_str(thread_name), __entry->flags,
		  __get_str(class_name), __entry->cprio)
);
TRACE_EVENT(cobalt_thread_suspend,
	TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout,
		 xntmode_t timeout_mode, struct xnsynch *wchan),
	TP_ARGS(thread, mask, timeout, timeout_mode, wchan),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(unsigned long, mask)
		__field(xnticks_t, timeout)
		__field(xntmode_t, timeout_mode)
		__field(struct xnsynch *, wchan)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->mask = mask;
		__entry->timeout = timeout;
		__entry->timeout_mode = timeout_mode;
		__entry->wchan = wchan;
	),

	TP_printk("thread=%p mask=%lu timeout=%Lu timeout_mode=%d wchan=%p",
		  __entry->thread, __entry->mask,
		  __entry->timeout, __entry->timeout_mode, __entry->wchan)
);
TRACE_EVENT(cobalt_thread_resume,
	TP_PROTO(struct xnthread *thread, unsigned long mask),
	TP_ARGS(thread, mask),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(unsigned long, mask)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->mask = mask;
	),

	TP_printk("thread=%p mask=0x%lx",
		  __entry->thread, __entry->mask)
);
TRACE_EVENT(cobalt_thread_fault,
	TP_PROTO(struct xnthread *thread, struct ipipe_trap_data *td),
	TP_ARGS(thread, td),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(void *, ip)
		__field(unsigned int, type)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->ip = (void *)xnarch_fault_pc(td);
		__entry->type = xnarch_fault_trap(td);
	),

	TP_printk("thread=%p(%s) ip=%p type=%x",
		  __entry->thread, __get_str(name), __entry->ip,
		  __entry->type)
);
DEFINE_EVENT(thread_event, cobalt_thread_start,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_cancel,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_join,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_unblock,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_wait_period,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_missed_period,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_set_mode,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_migrate, cobalt_thread_migrate,
	TP_PROTO(struct xnthread *thread, unsigned int cpu),
	TP_ARGS(thread, cpu)
);

DEFINE_EVENT(thread_migrate, cobalt_thread_migrate_passive,
	TP_PROTO(struct xnthread *thread, unsigned int cpu),
	TP_ARGS(thread, cpu)
);

DEFINE_EVENT(thread_event, cobalt_shadow_gohard,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_watchdog_signal,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_shadow_hardened,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_shadow_gorelax,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_shadow_relaxed,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_shadow_entry,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
TRACE_EVENT(cobalt_shadow_map,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(int, prio)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->prio = xnthread_base_priority(thread);
	),

	TP_printk("thread=%p(%s) prio=%d",
		  __entry->thread, __get_str(name), __entry->prio)
);
DEFINE_EVENT(thread_event, cobalt_shadow_unmap,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
TRACE_EVENT(cobalt_lostage_request,
	TP_PROTO(const char *type, struct task_struct *task),
	TP_ARGS(type, task),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
		__field(const char *, type)
	),

	TP_fast_assign(
		__entry->type = type;
		__entry->pid = task->pid;
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("request=%s pid=%d comm=%s",
		  __entry->type, __entry->pid, __entry->comm)
);
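/*
 * Note (added for illustration): __array(char, comm, TASK_COMM_LEN)
 * copies the task name into the trace record at emit time, so the
 * entry remains readable even if the task has exited by the time the
 * trace buffer is consumed.
 */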
TRACE_EVENT(cobalt_lostage_wakeup,
	TP_PROTO(struct task_struct *task),
	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
	),

	TP_fast_assign(
		__entry->pid = task->pid;
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("pid=%d comm=%s",
		  __entry->pid, __entry->comm)
);
TRACE_EVENT(cobalt_lostage_signal,
	TP_PROTO(struct task_struct *task, int sig),
	TP_ARGS(task, sig),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
		__field(int, sig)
	),

	TP_fast_assign(
		__entry->pid = task->pid;
		__entry->sig = sig;
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("pid=%d comm=%s sig=%d",
		  __entry->pid, __entry->comm, __entry->sig)
);
DEFINE_EVENT(irq_event, cobalt_irq_entry,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_exit,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_attach,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_detach,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_enable,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_disable,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(clock_event, cobalt_clock_entry,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(clock_event, cobalt_clock_exit,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(timer_event, cobalt_timer_stop,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer)
);

DEFINE_EVENT(timer_event, cobalt_timer_expire,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer)
);
#define cobalt_print_timer_mode(mode)			\
	__print_symbolic(mode,				\
			 { XN_RELATIVE, "rel" },	\
			 { XN_ABSOLUTE, "abs" },	\
			 { XN_REALTIME, "rt" })
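/*
 * Note (added for illustration): __print_symbolic() resolves the numeric
 * mode when the trace buffer is formatted, so a cobalt_timer_start
 * record reads e.g. "mode=rel" for XN_RELATIVE instead of the raw enum
 * value.
 */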
TRACE_EVENT(cobalt_timer_start,
	TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval,
		 xntmode_t mode),
	TP_ARGS(timer, value, interval, mode),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
#ifdef CONFIG_XENO_OPT_STATS
		__string(name, timer->name)
#endif
		__field(xnticks_t, value)
		__field(xnticks_t, interval)
		__field(xntmode_t, mode)
	),

	TP_fast_assign(
		__entry->timer = timer;
#ifdef CONFIG_XENO_OPT_STATS
		__assign_str(name, timer->name);
#endif
		__entry->value = value;
		__entry->interval = interval;
		__entry->mode = mode;
	),

	TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s",
		  __entry->timer,
#ifdef CONFIG_XENO_OPT_STATS
		  __get_str(name),
#else
		  "(anon)",
#endif
		  __entry->value, __entry->interval,
		  cobalt_print_timer_mode(__entry->mode))
);
TRACE_EVENT(cobalt_timer_migrate,
	TP_PROTO(struct xntimer *timer, unsigned int cpu),
	TP_ARGS(timer, cpu),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->timer = timer;
		__entry->cpu = cpu;
	),

	TP_printk("timer=%p cpu=%u",
		  __entry->timer, __entry->cpu)
);
DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon,
	TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
	TP_ARGS(synch, thread)
);

DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire,
	TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
	TP_ARGS(synch, thread)
);

DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire,
	TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
	TP_ARGS(synch, thread)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_release,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_flush,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_forget,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

#endif /* _TRACE_COBALT_CORE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
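/*
 * Usage sketch (illustrative; the include path below is an assumption,
 * not taken from this header): <trace/define_trace.h> expands the
 * definitions above into the actual tracepoint code.  Exactly one
 * compilation unit must instantiate the events by defining
 * CREATE_TRACE_POINTS before pulling in this header, e.g.:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/cobalt-core.h>
 *
 * All other users include the header normally and just call the
 * generated trace_*() helpers.
 */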