1 // SPDX-License-Identifier: GPL-2.0
2
3 #ifndef _LINUX_KERNEL_TRACE_H
4 #define _LINUX_KERNEL_TRACE_H
5
6 #include <linux/fs.h>
7 #include <linux/atomic.h>
8 #include <linux/sched.h>
9 #include <linux/clocksource.h>
10 #include <linux/ring_buffer.h>
11 #include <linux/mmiotrace.h>
12 #include <linux/tracepoint.h>
13 #include <linux/ftrace.h>
14 #include <linux/trace.h>
15 #include <linux/hw_breakpoint.h>
16 #include <linux/trace_seq.h>
17 #include <linux/trace_events.h>
18 #include <linux/compiler.h>
19 #include <linux/glob.h>
20 #include <linux/irq_work.h>
21 #include <linux/workqueue.h>
22 #include <linux/ctype.h>
23 #include <linux/once_lite.h>
24
25 #include "pid_list.h"
26
27 #ifdef CONFIG_FTRACE_SYSCALLS
28 #include <asm/unistd.h> /* For NR_syscalls */
29 #include <asm/syscall.h> /* some archs define it here */
30 #endif
31
32 #define TRACE_MODE_WRITE 0640
33 #define TRACE_MODE_READ 0440
34
35 enum trace_type {
36 __TRACE_FIRST_TYPE = 0,
37
38 TRACE_FN,
39 TRACE_CTX,
40 TRACE_WAKE,
41 TRACE_STACK,
42 TRACE_PRINT,
43 TRACE_BPRINT,
44 TRACE_MMIO_RW,
45 TRACE_MMIO_MAP,
46 TRACE_BRANCH,
47 TRACE_GRAPH_RET,
48 TRACE_GRAPH_ENT,
49 TRACE_USER_STACK,
50 TRACE_BLK,
51 TRACE_BPUTS,
52 TRACE_HWLAT,
53 TRACE_OSNOISE,
54 TRACE_TIMERLAT,
55 TRACE_RAW_DATA,
56 TRACE_FUNC_REPEATS,
57
58 __TRACE_LAST_TYPE,
59 };
60
61
62 #undef __field
63 #define __field(type, item) type item;
64
65 #undef __field_fn
66 #define __field_fn(type, item) type item;
67
68 #undef __field_struct
69 #define __field_struct(type, item) __field(type, item)
70
71 #undef __field_desc
72 #define __field_desc(type, container, item)
73
74 #undef __field_packed
75 #define __field_packed(type, container, item)
76
77 #undef __array
78 #define __array(type, item, size) type item[size];
79
80 #undef __array_desc
81 #define __array_desc(type, container, item, size)
82
83 #undef __dynamic_array
84 #define __dynamic_array(type, item) type item[];
85
86 #undef __rel_dynamic_array
87 #define __rel_dynamic_array(type, item) type item[];
88
89 #undef F_STRUCT
90 #define F_STRUCT(args...) args
91
92 #undef FTRACE_ENTRY
93 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
94 struct struct_name { \
95 struct trace_entry ent; \
96 tstruct \
97 }
98
99 #undef FTRACE_ENTRY_DUP
100 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
101
102 #undef FTRACE_ENTRY_REG
103 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \
104 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
105
106 #undef FTRACE_ENTRY_PACKED
107 #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \
108 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
109
110 #include "trace_entries.h"
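/*
 * Illustrative sketch (not a definition from this header): with the
 * macros above, an entry described in trace_entries.h such as
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field_fn(	unsigned long,	ip		)
 *			__field_fn(	unsigned long,	parent_ip	)
 *		),
 *		...
 *	);
 *
 * expands into a plain C structure:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */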
111
112 /* Use this for memory failure errors */
113 #define MEM_FAIL(condition, fmt, ...) \
114 DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
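/*
 * Example (sketch of typical usage, not mandated by this header):
 *
 *	buf->data = alloc_percpu(struct trace_array_cpu);
 *	if (MEM_FAIL(!buf->data, "Failed to allocate per cpu data\n"))
 *		return -ENOMEM;
 *
 * The error is printed only once, even if the condition keeps triggering.
 */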
115
116 #define HIST_STACKTRACE_DEPTH 16
117 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
118 #define HIST_STACKTRACE_SKIP 5
119
120 /*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
123 */
124 struct syscall_trace_enter {
125 struct trace_entry ent;
126 int nr;
127 unsigned long args[];
128 };
129
130 struct syscall_trace_exit {
131 struct trace_entry ent;
132 int nr;
133 long ret;
134 };
135
136 struct kprobe_trace_entry_head {
137 struct trace_entry ent;
138 unsigned long ip;
139 };
140
141 struct eprobe_trace_entry_head {
142 struct trace_entry ent;
143 };
144
145 struct kretprobe_trace_entry_head {
146 struct trace_entry ent;
147 unsigned long func;
148 unsigned long ret_ip;
149 };
150
151 #define TRACE_BUF_SIZE 1024
152
153 struct trace_array;
154
155 /*
156 * The CPU trace array - it consists of thousands of trace entries
157 * plus some other descriptor data: (for example which task started
158 * the trace, etc.)
159 */
160 struct trace_array_cpu {
161 atomic_t disabled;
162 void *buffer_page; /* ring buffer spare */
163
164 unsigned long entries;
165 unsigned long saved_latency;
166 unsigned long critical_start;
167 unsigned long critical_end;
168 unsigned long critical_sequence;
169 unsigned long nice;
170 unsigned long policy;
171 unsigned long rt_priority;
172 unsigned long skipped_entries;
173 u64 preempt_timestamp;
174 pid_t pid;
175 kuid_t uid;
176 char comm[TASK_COMM_LEN];
177
178 #ifdef CONFIG_FUNCTION_TRACER
179 int ftrace_ignore_pid;
180 #endif
181 bool ignore_pid;
182 };
183
184 struct tracer;
185 struct trace_option_dentry;
186
187 struct array_buffer {
188 struct trace_array *tr;
189 struct trace_buffer *buffer;
190 struct trace_array_cpu __percpu *data;
191 u64 time_start;
192 int cpu;
193 };
194
195 #define TRACE_FLAGS_MAX_SIZE 32
196
197 struct trace_options {
198 struct tracer *tracer;
199 struct trace_option_dentry *topts;
200 };
201
202 struct trace_pid_list *trace_pid_list_alloc(void);
203 void trace_pid_list_free(struct trace_pid_list *pid_list);
204 bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid);
205 int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid);
206 int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid);
207 int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid);
208 int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
209 unsigned int *next);
210
211 enum {
212 TRACE_PIDS = BIT(0),
213 TRACE_NO_PIDS = BIT(1),
214 };
215
static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
				    struct trace_pid_list *no_pid_list)
{
	/* Return true if the pid list in type has pids */
	return ((type & TRACE_PIDS) && pid_list) ||
		((type & TRACE_NO_PIDS) && no_pid_list);
}

static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
					 struct trace_pid_list *no_pid_list)
{
	/*
	 * Turning off what is in @type, return true if the "other"
	 * pid list still has pids in it.
	 */
	return (!(type & TRACE_PIDS) && pid_list) ||
		(!(type & TRACE_NO_PIDS) && no_pid_list);
}
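/*
 * Sketch of typical use: when one of the pid filter files is cleared,
 * the scheduler probes only stay registered if the "other" list is
 * still populated (unregister_my_sched_switch_probe() is hypothetical):
 *
 *	if (!still_need_pid_events(type, pid_list, no_pid_list))
 *		unregister_my_sched_switch_probe(tr);
 */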
234
235 typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
236
237 /**
238 * struct cond_snapshot - conditional snapshot data and callback
239 *
240 * The cond_snapshot structure encapsulates a callback function and
241 * data associated with the snapshot for a given tracing instance.
242 *
243 * When a snapshot is taken conditionally, by invoking
244 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
245 * passed in turn to the cond_snapshot.update() function. That data
246 * can be compared by the update() implementation with the cond_data
247 * contained within the struct cond_snapshot instance associated with
248 * the trace_array. Because the tr->max_lock is held throughout the
249 * update() call, the update() function can directly retrieve the
250 * cond_snapshot and cond_data associated with the per-instance
251 * snapshot associated with the trace_array.
252 *
253 * The cond_snapshot.update() implementation can save data to be
254 * associated with the snapshot if it decides to, and returns 'true'
255 * in that case, or it returns 'false' if the conditional snapshot
256 * shouldn't be taken.
257 *
258 * The cond_snapshot instance is created and associated with the
259 * user-defined cond_data by tracing_cond_snapshot_enable().
260 * Likewise, the cond_snapshot instance is destroyed and is no longer
261 * associated with the trace instance by
262 * tracing_cond_snapshot_disable().
263 *
264 * The method below is required.
265 *
266 * @update: When a conditional snapshot is invoked, the update()
267 * callback function is invoked with the tr->max_lock held. The
268 * update() implementation signals whether or not to actually
269 * take the snapshot, by returning 'true' if so, 'false' if no
270 * snapshot should be taken. Because the max_lock is held for
271 * the duration of update(), the implementation is safe to
 * directly retrieve and save any implementation data it needs
273 * to in association with the snapshot.
274 */
275 struct cond_snapshot {
276 void *cond_data;
277 cond_update_fn_t update;
278 };
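/*
 * Example (sketch): a minimal cond_snapshot update() callback. The
 * structure and function names below are hypothetical; the callback is
 * passed, together with its cond_data, to the cond-snapshot enable
 * function described above.
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_cond *c = cond_data;
 *
 *		return c->seen_value > c->threshold;
 *	}
 */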
279
280 /*
281 * struct trace_func_repeats - used to keep track of the consecutive
282 * (on the same CPU) calls of a single function.
283 */
284 struct trace_func_repeats {
285 unsigned long ip;
286 unsigned long parent_ip;
287 unsigned long count;
288 u64 ts_last_call;
289 };
290
291 /*
292 * The trace array - an array of per-CPU trace arrays. This is the
293 * highest level data structure that individual tracers deal with.
294 * They have on/off state as well:
295 */
296 struct trace_array {
297 struct list_head list;
298 char *name;
299 struct array_buffer array_buffer;
300 #ifdef CONFIG_TRACER_MAX_TRACE
301 /*
302 * The max_buffer is used to snapshot the trace when a maximum
303 * latency is reached, or when the user initiates a snapshot.
304 * Some tracers will use this to store a maximum trace while
305 * it continues examining live traces.
306 *
307 * The buffers for the max_buffer are set up the same as the array_buffer
308 * When a snapshot is taken, the buffer of the max_buffer is swapped
309 * with the buffer of the array_buffer and the buffers are reset for
310 * the array_buffer so the tracing can continue.
311 */
312 struct array_buffer max_buffer;
313 bool allocated_snapshot;
314 #endif
315 #ifdef CONFIG_TRACER_MAX_TRACE
316 unsigned long max_latency;
317 #ifdef CONFIG_FSNOTIFY
318 struct dentry *d_max_latency;
319 struct work_struct fsnotify_work;
320 struct irq_work fsnotify_irqwork;
321 #endif
322 #endif
323 struct trace_pid_list __rcu *filtered_pids;
324 struct trace_pid_list __rcu *filtered_no_pids;
325 /*
326 * max_lock is used to protect the swapping of buffers
327 * when taking a max snapshot. The buffers themselves are
328 * protected by per_cpu spinlocks. But the action of the swap
329 * needs its own lock.
330 *
 * This is defined as an arch_spinlock_t in order to help
332 * with performance when lockdep debugging is enabled.
333 *
334 * It is also used in other places outside the update_max_tr
335 * so it needs to be defined outside of the
336 * CONFIG_TRACER_MAX_TRACE.
337 */
338 arch_spinlock_t max_lock;
339 int buffer_disabled;
340 #ifdef CONFIG_FTRACE_SYSCALLS
341 int sys_refcount_enter;
342 int sys_refcount_exit;
343 struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
344 struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
345 #endif
346 int stop_count;
347 int clock_id;
348 int nr_topts;
349 bool clear_trace;
350 int buffer_percent;
351 unsigned int n_err_log_entries;
352 struct tracer *current_trace;
353 unsigned int trace_flags;
354 unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
355 unsigned int flags;
356 raw_spinlock_t start_lock;
357 struct list_head err_log;
358 struct dentry *dir;
359 struct dentry *options;
360 struct dentry *percpu_dir;
361 struct dentry *event_dir;
362 struct trace_options *topts;
363 struct list_head systems;
364 struct list_head events;
365 struct trace_event_file *trace_marker_file;
366 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
367 int ref;
368 int trace_ref;
369 #ifdef CONFIG_FUNCTION_TRACER
370 struct ftrace_ops *ops;
371 struct trace_pid_list __rcu *function_pids;
372 struct trace_pid_list __rcu *function_no_pids;
373 #ifdef CONFIG_DYNAMIC_FTRACE
374 /* All of these are protected by the ftrace_lock */
375 struct list_head func_probes;
376 struct list_head mod_trace;
377 struct list_head mod_notrace;
378 #endif
379 /* function tracing enabled */
380 int function_enabled;
381 #endif
382 int no_filter_buffering_ref;
383 struct list_head hist_vars;
384 #ifdef CONFIG_TRACER_SNAPSHOT
385 struct cond_snapshot *cond_snapshot;
386 #endif
387 struct trace_func_repeats __percpu *last_func_repeats;
388 };
389
390 enum {
391 TRACE_ARRAY_FL_GLOBAL = (1 << 0)
392 };
393
394 extern struct list_head ftrace_trace_arrays;
395
396 extern struct mutex trace_types_lock;
397
398 extern int trace_array_get(struct trace_array *tr);
399 extern int tracing_check_open_get_tr(struct trace_array *tr);
400 extern struct trace_array *trace_array_find(const char *instance);
401 extern struct trace_array *trace_array_find_get(const char *instance);
402
403 extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
404 extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
405 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
406
407 extern bool trace_clock_in_ns(struct trace_array *tr);
408
409 /*
410 * The global tracer (top) should be the first trace array added,
411 * but we check the flag anyway.
412 */
static inline struct trace_array *top_trace_array(void)
414 {
415 struct trace_array *tr;
416
417 if (list_empty(&ftrace_trace_arrays))
418 return NULL;
419
420 tr = list_entry(ftrace_trace_arrays.prev,
421 typeof(*tr), list);
422 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
423 return tr;
424 }
425
426 #define FTRACE_CMP_TYPE(var, type) \
427 __builtin_types_compatible_p(typeof(var), type *)
428
429 #undef IF_ASSIGN
430 #define IF_ASSIGN(var, entry, etype, id) \
431 if (FTRACE_CMP_TYPE(var, etype)) { \
432 var = (typeof(var))(entry); \
433 WARN_ON(id != 0 && (entry)->type != id); \
434 break; \
435 }
436
437 /* Will cause compile errors if type is not found. */
438 extern void __ftrace_bad_type(void);
439
440 /*
441 * The trace_assign_type is a verifier that the entry type is
442 * the same as the type being assigned. To add new types simply
443 * add a line with the following format:
444 *
445 * IF_ASSIGN(var, ent, type, id);
446 *
447 * Where "type" is the trace type that includes the trace_entry
448 * as the "ent" item. And "id" is the trace identifier that is
449 * used in the trace_type enum.
450 *
451 * If the type can have more than one id, then use zero.
452 */
453 #define trace_assign_type(var, ent) \
454 do { \
455 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
456 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
457 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
458 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
459 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
460 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
461 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
462 IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
463 IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
464 IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
465 IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
466 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
467 TRACE_MMIO_RW); \
468 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
469 TRACE_MMIO_MAP); \
470 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
471 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
472 TRACE_GRAPH_ENT); \
473 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
474 TRACE_GRAPH_RET); \
475 IF_ASSIGN(var, ent, struct func_repeats_entry, \
476 TRACE_FUNC_REPEATS); \
477 __ftrace_bad_type(); \
478 } while (0)
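/*
 * Example (sketch) of how an output routine consumes this; the assigned
 * pointer is verified against the entry's type field at runtime:
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%ps <-- %ps\n",
 *			 (void *)field->ip, (void *)field->parent_ip);
 */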
479
480 /*
481 * An option specific to a tracer. This is a boolean value.
482 * The bit is the bit index that sets its value on the
483 * flags value in struct tracer_flags.
484 */
485 struct tracer_opt {
486 const char *name; /* Will appear on the trace_options file */
487 u32 bit; /* Mask assigned in val field in tracer_flags */
488 };
489
490 /*
491 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
493 */
494 struct tracer_flags {
495 u32 val;
496 struct tracer_opt *opts;
497 struct tracer *trace;
498 };
499
/* Makes it easier to define a tracer opt */
501 #define TRACER_OPT(s, b) .name = #s, .bit = b
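/*
 * Example (sketch): a tracer's private options, with names made up for
 * illustration. The empty entry terminates the array.
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my-verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 */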
502
503
504 struct trace_option_dentry {
505 struct tracer_opt *opt;
506 struct tracer_flags *flags;
507 struct trace_array *tr;
508 struct dentry *entry;
509 };
510
511 /**
512 * struct tracer - a specific tracer and its callbacks to interact with tracefs
513 * @name: the name chosen to select it on the available_tracers file
514 * @init: called when one switches to this tracer (echo name > current_tracer)
515 * @reset: called when one switches to another tracer
516 * @start: called when tracing is unpaused (echo 1 > tracing_on)
517 * @stop: called when tracing is paused (echo 0 > tracing_on)
518 * @update_thresh: called when tracing_thresh is updated
519 * @open: called when the trace file is opened
520 * @pipe_open: called when the trace_pipe file is opened
521 * @close: called when the trace file is released
522 * @pipe_close: called when the trace_pipe file is released
523 * @read: override the default read callback on trace_pipe
524 * @splice_read: override the default splice_read callback on trace_pipe
525 * @selftest: selftest to run on boot (see trace_selftest.c)
526 * @print_headers: override the first lines that describe your columns
527 * @print_line: callback that prints a trace
528 * @set_flag: signals one of your private flags changed (trace_options file)
529 * @flags: your private flags
530 */
531 struct tracer {
532 const char *name;
533 int (*init)(struct trace_array *tr);
534 void (*reset)(struct trace_array *tr);
535 void (*start)(struct trace_array *tr);
536 void (*stop)(struct trace_array *tr);
537 int (*update_thresh)(struct trace_array *tr);
538 void (*open)(struct trace_iterator *iter);
539 void (*pipe_open)(struct trace_iterator *iter);
540 void (*close)(struct trace_iterator *iter);
541 void (*pipe_close)(struct trace_iterator *iter);
542 ssize_t (*read)(struct trace_iterator *iter,
543 struct file *filp, char __user *ubuf,
544 size_t cnt, loff_t *ppos);
545 ssize_t (*splice_read)(struct trace_iterator *iter,
546 struct file *filp,
547 loff_t *ppos,
548 struct pipe_inode_info *pipe,
549 size_t len,
550 unsigned int flags);
551 #ifdef CONFIG_FTRACE_STARTUP_TEST
552 int (*selftest)(struct tracer *trace,
553 struct trace_array *tr);
554 #endif
555 void (*print_header)(struct seq_file *m);
556 enum print_line_t (*print_line)(struct trace_iterator *iter);
557 /* If you handled the flag setting, return 0 */
558 int (*set_flag)(struct trace_array *tr,
559 u32 old_flags, u32 bit, int set);
560 /* Return 0 if OK with change, else return non-zero */
561 int (*flag_changed)(struct trace_array *tr,
562 u32 mask, int set);
563 struct tracer *next;
564 struct tracer_flags *flags;
565 int enabled;
566 bool print_max;
567 bool allow_instances;
568 #ifdef CONFIG_TRACER_MAX_TRACE
569 bool use_max_tr;
570 #endif
571 /* True if tracer cannot be enabled in kernel param */
572 bool noboot;
573 };
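/*
 * Example (sketch): the minimal shape of a tracer and its registration;
 * "mytrace" and its callbacks are purely illustrative.
 *
 *	static struct tracer mytrace __tracer_data = {
 *		.name		= "mytrace",
 *		.init		= mytrace_init,
 *		.reset		= mytrace_reset,
 *		.allow_instances = true,
 *	};
 *
 * registered from an __init function with:
 *
 *	return register_tracer(&mytrace);
 */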
574
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}
580
581 int tracer_init(struct tracer *t, struct trace_array *tr);
582 int tracing_is_enabled(void);
583 void tracing_reset_online_cpus(struct array_buffer *buf);
584 void tracing_reset_current(int cpu);
585 void tracing_reset_all_online_cpus(void);
586 void tracing_reset_all_online_cpus_unlocked(void);
587 int tracing_open_generic(struct inode *inode, struct file *filp);
588 int tracing_open_generic_tr(struct inode *inode, struct file *filp);
589 bool tracing_is_disabled(void);
590 bool tracer_tracing_is_on(struct trace_array *tr);
591 void tracer_tracing_on(struct trace_array *tr);
592 void tracer_tracing_off(struct trace_array *tr);
593 struct dentry *trace_create_file(const char *name,
594 umode_t mode,
595 struct dentry *parent,
596 void *data,
597 const struct file_operations *fops);
598
599 int tracing_init_dentry(void);
600
601 struct ring_buffer_event;
602
603 struct ring_buffer_event *
604 trace_buffer_lock_reserve(struct trace_buffer *buffer,
605 int type,
606 unsigned long len,
607 unsigned int trace_ctx);
608
609 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
610 struct trace_array_cpu *data);
611
612 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
613 int *ent_cpu, u64 *ent_ts);
614
615 void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
616 struct ring_buffer_event *event);
617
618 bool trace_is_tracepoint_string(const char *str);
619 const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
620 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
621 va_list ap) __printf(2, 0);
622
623 int trace_empty(struct trace_iterator *iter);
624
625 void *trace_find_next_entry_inc(struct trace_iterator *iter);
626
627 void trace_init_global_iter(struct trace_iterator *iter);
628
629 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
630
631 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
632 unsigned long trace_total_entries(struct trace_array *tr);
633
634 void trace_function(struct trace_array *tr,
635 unsigned long ip,
636 unsigned long parent_ip,
637 unsigned int trace_ctx);
638 void trace_graph_function(struct trace_array *tr,
639 unsigned long ip,
640 unsigned long parent_ip,
641 unsigned int trace_ctx);
642 void trace_latency_header(struct seq_file *m);
643 void trace_default_header(struct seq_file *m);
644 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
645
646 void trace_graph_return(struct ftrace_graph_ret *trace);
647 int trace_graph_entry(struct ftrace_graph_ent *trace);
648 void set_graph_array(struct trace_array *tr);
649
650 void tracing_start_cmdline_record(void);
651 void tracing_stop_cmdline_record(void);
652 void tracing_start_tgid_record(void);
653 void tracing_stop_tgid_record(void);
654
655 int register_tracer(struct tracer *type);
656 int is_tracing_stopped(void);
657
658 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
659
660 extern cpumask_var_t __read_mostly tracing_buffer_mask;
661
662 #define for_each_tracing_cpu(cpu) \
663 for_each_cpu(cpu, tracing_buffer_mask)
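/*
 * Example (sketch):
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_iter_reset(iter, cpu);
 */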
664
665 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
666
667 extern unsigned long tracing_thresh;
668
669 /* PID filtering */
670
671 extern int pid_max;
672
673 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
674 pid_t search_pid);
675 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
676 struct trace_pid_list *filtered_no_pids,
677 struct task_struct *task);
678 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
679 struct task_struct *self,
680 struct task_struct *task);
681 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
682 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
683 int trace_pid_show(struct seq_file *m, void *v);
684 void trace_free_pid_list(struct trace_pid_list *pid_list);
685 int trace_pid_write(struct trace_pid_list *filtered_pids,
686 struct trace_pid_list **new_pid_list,
687 const char __user *ubuf, size_t cnt);
688
689 #ifdef CONFIG_TRACER_MAX_TRACE
690 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
691 void *cond_data);
692 void update_max_tr_single(struct trace_array *tr,
693 struct task_struct *tsk, int cpu);
694
695 #ifdef CONFIG_FSNOTIFY
696 #define LATENCY_FS_NOTIFY
697 #endif
698 #endif /* CONFIG_TRACER_MAX_TRACE */
699
700 #ifdef LATENCY_FS_NOTIFY
701 void latency_fsnotify(struct trace_array *tr);
702 #else
static inline void latency_fsnotify(struct trace_array *tr) { }
704 #endif
705
706 #ifdef CONFIG_STACKTRACE
707 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
708 #else
static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
				 int skip)
{
}
713 #endif /* CONFIG_STACKTRACE */
714
715 void trace_last_func_repeats(struct trace_array *tr,
716 struct trace_func_repeats *last_info,
717 unsigned int trace_ctx);
718
719 extern u64 ftrace_now(int cpu);
720
721 extern void trace_find_cmdline(int pid, char comm[]);
722 extern int trace_find_tgid(int pid);
723 extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
724
725 #ifdef CONFIG_DYNAMIC_FTRACE
726 extern unsigned long ftrace_update_tot_cnt;
727 extern unsigned long ftrace_number_of_pages;
728 extern unsigned long ftrace_number_of_groups;
729 void ftrace_init_trace_array(struct trace_array *tr);
730 #else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
732 #endif
733 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
734 extern int DYN_FTRACE_TEST_NAME(void);
735 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
736 extern int DYN_FTRACE_TEST_NAME2(void);
737
738 extern bool ring_buffer_expanded;
739 extern bool tracing_selftest_disabled;
740
741 #ifdef CONFIG_FTRACE_STARTUP_TEST
742 extern void __init disable_tracing_selftest(const char *reason);
743
744 extern int trace_selftest_startup_function(struct tracer *trace,
745 struct trace_array *tr);
746 extern int trace_selftest_startup_function_graph(struct tracer *trace,
747 struct trace_array *tr);
748 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
749 struct trace_array *tr);
750 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
751 struct trace_array *tr);
752 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
753 struct trace_array *tr);
754 extern int trace_selftest_startup_wakeup(struct tracer *trace,
755 struct trace_array *tr);
756 extern int trace_selftest_startup_nop(struct tracer *trace,
757 struct trace_array *tr);
758 extern int trace_selftest_startup_branch(struct tracer *trace,
759 struct trace_array *tr);
760 /*
761 * Tracer data references selftest functions that only occur
762 * on boot up. These can be __init functions. Thus, when selftests
763 * are enabled, then the tracers need to reference __init functions.
764 */
765 #define __tracer_data __refdata
766 #else
static inline void __init disable_tracing_selftest(const char *reason)
{
}
770 /* Tracers are seldom changed. Optimize when selftests are disabled. */
771 #define __tracer_data __read_mostly
772 #endif /* CONFIG_FTRACE_STARTUP_TEST */
773
774 extern void *head_page(struct trace_array_cpu *data);
775 extern unsigned long long ns2usecs(u64 nsec);
776 extern int
777 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
778 extern int
779 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
780 extern int
781 trace_array_vprintk(struct trace_array *tr,
782 unsigned long ip, const char *fmt, va_list args);
783 int trace_array_printk_buf(struct trace_buffer *buffer,
784 unsigned long ip, const char *fmt, ...);
785 void trace_printk_seq(struct trace_seq *s);
786 enum print_line_t print_trace_line(struct trace_iterator *iter);
787
788 extern char trace_find_mark(unsigned long long duration);
789
790 struct ftrace_hash;
791
792 struct ftrace_mod_load {
793 struct list_head list;
794 char *func;
795 char *module;
796 int enable;
797 };
798
799 enum {
800 FTRACE_HASH_FL_MOD = (1 << 0),
801 };
802
803 struct ftrace_hash {
804 unsigned long size_bits;
805 struct hlist_head *buckets;
806 unsigned long count;
807 unsigned long flags;
808 struct rcu_head rcu;
809 };
810
811 struct ftrace_func_entry *
812 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
813
static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}
818
819 /* Standard output formatting function used for function return traces */
820 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
821
822 /* Flag options */
823 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
824 #define TRACE_GRAPH_PRINT_CPU 0x2
825 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
826 #define TRACE_GRAPH_PRINT_PROC 0x8
827 #define TRACE_GRAPH_PRINT_DURATION 0x10
828 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
829 #define TRACE_GRAPH_PRINT_REL_TIME 0x40
830 #define TRACE_GRAPH_PRINT_IRQS 0x80
831 #define TRACE_GRAPH_PRINT_TAIL 0x100
832 #define TRACE_GRAPH_SLEEP_TIME 0x200
833 #define TRACE_GRAPH_GRAPH_TIME 0x400
834 #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
835 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
836
837 extern void ftrace_graph_sleep_time_control(bool enable);
838
839 #ifdef CONFIG_FUNCTION_PROFILER
840 extern void ftrace_graph_graph_time_control(bool enable);
841 #else
static inline void ftrace_graph_graph_time_control(bool enable) { }
843 #endif
844
845 extern enum print_line_t
846 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
847 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
848 extern void
849 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
850 extern void graph_trace_open(struct trace_iterator *iter);
851 extern void graph_trace_close(struct trace_iterator *iter);
852 extern int __trace_graph_entry(struct trace_array *tr,
853 struct ftrace_graph_ent *trace,
854 unsigned int trace_ctx);
855 extern void __trace_graph_return(struct trace_array *tr,
856 struct ftrace_graph_ret *trace,
857 unsigned int trace_ctx);
858
859 #ifdef CONFIG_DYNAMIC_FTRACE
860 extern struct ftrace_hash __rcu *ftrace_graph_hash;
861 extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
862
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
864 {
865 unsigned long addr = trace->func;
866 int ret = 0;
867 struct ftrace_hash *hash;
868
869 preempt_disable_notrace();
870
871 /*
872 * Have to open code "rcu_dereference_sched()" because the
873 * function graph tracer can be called when RCU is not
874 * "watching".
875 * Protected with schedule_on_each_cpu(ftrace_sync)
876 */
877 hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
878
879 if (ftrace_hash_empty(hash)) {
880 ret = 1;
881 goto out;
882 }
883
884 if (ftrace_lookup_ip(hash, addr)) {
885
886 /*
887 * This needs to be cleared on the return functions
888 * when the depth is zero.
889 */
890 trace_recursion_set(TRACE_GRAPH_BIT);
891 trace_recursion_set_depth(trace->depth);
892
893 /*
894 * If no irqs are to be traced, but a set_graph_function
895 * is set, and called by an interrupt handler, we still
896 * want to trace it.
897 */
898 if (in_hardirq())
899 trace_recursion_set(TRACE_IRQ_BIT);
900 else
901 trace_recursion_clear(TRACE_IRQ_BIT);
902 ret = 1;
903 }
904
905 out:
906 preempt_enable_notrace();
907 return ret;
908 }
909
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
911 {
912 if (trace_recursion_test(TRACE_GRAPH_BIT) &&
913 trace->depth == trace_recursion_depth())
914 trace_recursion_clear(TRACE_GRAPH_BIT);
915 }
916
static inline int ftrace_graph_notrace_addr(unsigned long addr)
918 {
919 int ret = 0;
920 struct ftrace_hash *notrace_hash;
921
922 preempt_disable_notrace();
923
924 /*
925 * Have to open code "rcu_dereference_sched()" because the
926 * function graph tracer can be called when RCU is not
927 * "watching".
928 * Protected with schedule_on_each_cpu(ftrace_sync)
929 */
930 notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
931 !preemptible());
932
933 if (ftrace_lookup_ip(notrace_hash, addr))
934 ret = 1;
935
936 preempt_enable_notrace();
937 return ret;
938 }
939 #else
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{ }
951 #endif /* CONFIG_DYNAMIC_FTRACE */
952
953 extern unsigned int fgraph_max_depth;
954
static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* Trace it when it is nested in an enabled function, or is itself enabled. */
	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
		 ftrace_graph_addr(trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}
963
964 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
970 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
971
972 extern struct list_head ftrace_pids;
973
974 #ifdef CONFIG_FUNCTION_TRACER
975
976 #define FTRACE_PID_IGNORE -1
977 #define FTRACE_PID_TRACE -2
978
979 struct ftrace_func_command {
980 struct list_head list;
981 char *name;
982 int (*func)(struct trace_array *tr,
983 struct ftrace_hash *hash,
984 char *func, char *cmd,
985 char *params, int enable);
986 };
987 extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
			FTRACE_PID_IGNORE;
}
993 extern int ftrace_is_dead(void);
994 int ftrace_create_function_files(struct trace_array *tr,
995 struct dentry *parent);
996 void ftrace_destroy_function_files(struct trace_array *tr);
997 int ftrace_allocate_ftrace_ops(struct trace_array *tr);
998 void ftrace_free_ftrace_ops(struct trace_array *tr);
999 void ftrace_init_global_array_ops(struct trace_array *tr);
1000 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
1001 void ftrace_reset_array_ops(struct trace_array *tr);
1002 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
1003 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
1004 struct dentry *d_tracer);
1005 void ftrace_clear_pids(struct trace_array *tr);
1006 int init_function_trace(void);
1007 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
1008 #else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	return 0;
}
static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
1036 #endif /* CONFIG_FUNCTION_TRACER */
1037
1038 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1039
1040 struct ftrace_probe_ops {
1041 void (*func)(unsigned long ip,
1042 unsigned long parent_ip,
1043 struct trace_array *tr,
1044 struct ftrace_probe_ops *ops,
1045 void *data);
1046 int (*init)(struct ftrace_probe_ops *ops,
1047 struct trace_array *tr,
1048 unsigned long ip, void *init_data,
1049 void **data);
1050 void (*free)(struct ftrace_probe_ops *ops,
1051 struct trace_array *tr,
1052 unsigned long ip, void *data);
1053 int (*print)(struct seq_file *m,
1054 unsigned long ip,
1055 struct ftrace_probe_ops *ops,
1056 void *data);
1057 };
1058
1059 struct ftrace_func_mapper;
1060 typedef int (*ftrace_mapper_func)(void *data);
1061
1062 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1063 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1064 unsigned long ip);
1065 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1066 unsigned long ip, void *data);
1067 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1068 unsigned long ip);
1069 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1070 ftrace_mapper_func free_func);
1071
1072 extern int
1073 register_ftrace_function_probe(char *glob, struct trace_array *tr,
1074 struct ftrace_probe_ops *ops, void *data);
1075 extern int
1076 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1077 struct ftrace_probe_ops *ops);
1078 extern void clear_ftrace_function_probes(struct trace_array *tr);
1079
1080 int register_ftrace_command(struct ftrace_func_command *cmd);
1081 int unregister_ftrace_command(struct ftrace_func_command *cmd);
1082
1083 void ftrace_create_filter_files(struct ftrace_ops *ops,
1084 struct dentry *parent);
1085 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1086
1087 extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
1088 int len, int reset);
1089 extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
1090 int len, int reset);
1091 #else
1092 struct ftrace_func_command;
1093
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}
1105
1106 /*
1107 * The ops parameter passed in is usually undefined.
1108 * This must be a macro.
1109 */
1110 #define ftrace_create_filter_files(ops, parent) do { } while (0)
1111 #define ftrace_destroy_filter_files(ops) do { } while (0)
1112 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
1113
1114 bool ftrace_event_is_function(struct trace_event_call *call);
1115
1116 /*
 * struct trace_parser - serves for reading the user input separated by spaces
1118 * @cont: set if the input is not complete - no final space char was found
1119 * @buffer: holds the parsed user input
1120 * @idx: user input length
1121 * @size: buffer size
1122 */
1123 struct trace_parser {
1124 bool cont;
1125 char *buffer;
1126 unsigned idx;
1127 unsigned size;
1128 };
1129
static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}
1145
1146 extern int trace_parser_get_init(struct trace_parser *parser, int size);
1147 extern void trace_parser_put(struct trace_parser *parser);
1148 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1149 size_t cnt, loff_t *ppos);
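/*
 * Sketch of the usual read() side pattern for struct trace_parser (the
 * consume_token() helper is hypothetical):
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		ret = consume_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 */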
1150
1151 /*
1152 * Only create function graph options if function graph is configured.
1153 */
1154 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1155 # define FGRAPH_FLAGS \
1156 C(DISPLAY_GRAPH, "display-graph"),
1157 #else
1158 # define FGRAPH_FLAGS
1159 #endif
1160
1161 #ifdef CONFIG_BRANCH_TRACER
1162 # define BRANCH_FLAGS \
1163 C(BRANCH, "branch"),
1164 #else
1165 # define BRANCH_FLAGS
1166 #endif
1167
1168 #ifdef CONFIG_FUNCTION_TRACER
1169 # define FUNCTION_FLAGS \
1170 C(FUNCTION, "function-trace"), \
1171 C(FUNC_FORK, "function-fork"),
1172 # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
1173 #else
1174 # define FUNCTION_FLAGS
1175 # define FUNCTION_DEFAULT_FLAGS 0UL
1176 # define TRACE_ITER_FUNC_FORK 0UL
1177 #endif
1178
1179 #ifdef CONFIG_STACKTRACE
1180 # define STACK_FLAGS \
1181 C(STACKTRACE, "stacktrace"),
1182 #else
1183 # define STACK_FLAGS
1184 #endif
1185
1186 /*
1187 * trace_iterator_flags is an enumeration that defines bit
1188 * positions into trace_flags that controls the output.
1189 *
1190 * NOTE: These bits must match the trace_options array in
1191 * trace.c (this macro guarantees it).
1192 */
1193 #define TRACE_FLAGS \
1194 C(PRINT_PARENT, "print-parent"), \
1195 C(SYM_OFFSET, "sym-offset"), \
1196 C(SYM_ADDR, "sym-addr"), \
1197 C(VERBOSE, "verbose"), \
1198 C(RAW, "raw"), \
1199 C(HEX, "hex"), \
1200 C(BIN, "bin"), \
1201 C(BLOCK, "block"), \
1202 C(PRINTK, "trace_printk"), \
1203 C(ANNOTATE, "annotate"), \
1204 C(USERSTACKTRACE, "userstacktrace"), \
1205 C(SYM_USEROBJ, "sym-userobj"), \
1206 C(PRINTK_MSGONLY, "printk-msg-only"), \
1207 C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
1208 C(LATENCY_FMT, "latency-format"), \
1209 C(RECORD_CMD, "record-cmd"), \
1210 C(RECORD_TGID, "record-tgid"), \
1211 C(OVERWRITE, "overwrite"), \
1212 C(STOP_ON_FREE, "disable_on_free"), \
1213 C(IRQ_INFO, "irq-info"), \
1214 C(MARKERS, "markers"), \
1215 C(EVENT_FORK, "event-fork"), \
1216 C(PAUSE_ON_TRACE, "pause-on-trace"), \
1217 C(HASH_PTR, "hash-ptr"), /* Print hashed pointer */ \
1218 FUNCTION_FLAGS \
1219 FGRAPH_FLAGS \
1220 STACK_FLAGS \
1221 BRANCH_FLAGS
1222
1223 /*
1224 * By defining C, we can make TRACE_FLAGS a list of bit names
1225 * that will define the bits for the flag masks.
1226 */
1227 #undef C
1228 #define C(a, b) TRACE_ITER_##a##_BIT
1229
1230 enum trace_iterator_bits {
1231 TRACE_FLAGS
1232 /* Make sure we don't go more than we have bits for */
1233 TRACE_ITER_LAST_BIT
1234 };
1235
1236 /*
1237 * By redefining C, we can make TRACE_FLAGS a list of masks that
1238 * use the bits as defined above.
1239 */
1240 #undef C
1241 #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1242
1243 enum trace_iterator_flags { TRACE_FLAGS };
1244
1245 /*
1246 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1247 * control the output of kernel symbols.
1248 */
1249 #define TRACE_ITER_SYM_MASK \
1250 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
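/*
 * Example (sketch): the generated TRACE_ITER_* masks are tested against
 * a trace_array's trace_flags, e.g.:
 *
 *	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 *		trace_latency_header(m);
 */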
1251
1252 extern struct tracer nop_trace;
1253
1254 #ifdef CONFIG_BRANCH_TRACER
1255 extern int enable_branch_tracing(struct trace_array *tr);
1256 extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
1276 #endif /* CONFIG_BRANCH_TRACER */
1277
1278 /* set ring buffers to default size if not already done so */
1279 int tracing_update_buffers(void);
1280
1281 struct ftrace_event_field {
1282 struct list_head link;
1283 const char *name;
1284 const char *type;
1285 int filter_type;
1286 int offset;
1287 int size;
1288 int is_signed;
1289 int len;
1290 };
1291
1292 struct prog_entry;
1293
1294 struct event_filter {
1295 struct prog_entry __rcu *prog;
1296 char *filter_string;
1297 };
1298
1299 struct event_subsystem {
1300 struct list_head list;
1301 const char *name;
1302 struct event_filter *filter;
1303 int ref_count;
1304 };
1305
1306 struct trace_subsystem_dir {
1307 struct list_head list;
1308 struct event_subsystem *subsystem;
1309 struct trace_array *tr;
1310 struct dentry *entry;
1311 int ref_count;
1312 int nr_events;
1313 };
1314
1315 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1316 struct trace_buffer *buffer,
1317 struct ring_buffer_event *event);
1318
1319 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1320 struct trace_buffer *buffer,
1321 struct ring_buffer_event *event,
unsigned int trace_ctx,
1323 struct pt_regs *regs);
1324
static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct trace_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned int trace_ctx)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
}
1332
1333 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1334 DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1335 void trace_buffered_event_disable(void);
1336 void trace_buffered_event_enable(void);
1337
1338 void early_enable_events(struct trace_array *tr, char *buf, bool disable_first);
1339
static inline void
__trace_event_discard_commit(struct trace_buffer *buffer,
			     struct ring_buffer_event *event)
1343 {
1344 if (this_cpu_read(trace_buffered_event) == event) {
1345 /* Simply release the temp buffer and enable preemption */
1346 this_cpu_dec(trace_buffered_event_cnt);
1347 preempt_enable_notrace();
1348 return;
1349 }
1350 /* ring_buffer_discard_commit() enables preemption */
1351 ring_buffer_discard_commit(buffer, event);
1352 }
1353
1354 /*
1355 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
1357 * filtering against its fields, then they will be called as the
1358 * entry already holds the field information of the current event.
1359 *
1360 * It also checks if the event should be discarded or not.
1361 * It is to be discarded if the event is soft disabled and the
1362 * event was only recorded to process triggers, or if the event
1363 * filter is active and this event did not match the filters.
1364 *
1365 * Returns true if the event is discarded, false otherwise.
1366 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct trace_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
1373 {
1374 unsigned long eflags = file->flags;
1375
1376 if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1377 *tt = event_triggers_call(file, buffer, entry, event);
1378
1379 if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
1380 EVENT_FILE_FL_FILTERED |
1381 EVENT_FILE_FL_PID_FILTER))))
1382 return false;
1383
1384 if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
1385 goto discard;
1386
1387 if (file->flags & EVENT_FILE_FL_FILTERED &&
1388 !filter_match_preds(file->filter, entry))
1389 goto discard;
1390
1391 if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
1392 trace_event_ignore_this_pid(file))
1393 goto discard;
1394
1395 return false;
1396 discard:
1397 __trace_event_discard_commit(buffer, event);
1398 return true;
1399 }
1400
1401 /**
1402 * event_trigger_unlock_commit - handle triggers and finish event commit
1403 * @file: The file pointer associated with the event
1404 * @buffer: The ring buffer that the event is being written to
1405 * @event: The event meta data in the ring buffer
1406 * @entry: The event itself
1407 * @trace_ctx: The tracing context flags.
1408 *
1409 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * checks whether the event is soft disabled and should be discarded.
1412 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned int trace_ctx)
1418 {
1419 enum event_trigger_type tt = ETT_NONE;
1420
1421 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1422 trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
1423
1424 if (tt)
1425 event_triggers_post_call(file, tt);
1426 }
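/*
 * Sketch of the reserve/fill/commit pattern this helper is part of
 * (local variable names are illustrative; see the generated TRACE_EVENT
 * code for the real thing):
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, file, type,
 *						sizeof(*entry), trace_ctx);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	... fill in *entry ...
 *	event_trigger_unlock_commit(file, buffer, event, entry, trace_ctx);
 */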
1427
1428 #define FILTER_PRED_INVALID ((unsigned short)-1)
1429 #define FILTER_PRED_IS_RIGHT (1 << 15)
1430 #define FILTER_PRED_FOLD (1 << 15)
1431
1432 /*
1433 * The max preds is the size of unsigned short with
1434 * two flags at the MSBs. One bit is used for both the IS_RIGHT
1435 * and FOLD flags. The other is reserved.
1436 *
1437 * 2^14 preds is way more than enough.
1438 */
1439 #define MAX_FILTER_PRED 16384
1440
1441 struct filter_pred;
1442 struct regex;
1443
1444 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1445
1446 enum regex_type {
1447 MATCH_FULL = 0,
1448 MATCH_FRONT_ONLY,
1449 MATCH_MIDDLE_ONLY,
1450 MATCH_END_ONLY,
1451 MATCH_GLOB,
1452 MATCH_INDEX,
1453 };
1454
1455 struct regex {
1456 char pattern[MAX_FILTER_STR_VAL];
1457 int len;
1458 int field_len;
1459 regex_match_func match;
1460 };
1461
static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_RDYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING ||
	       field->filter_type == FILTER_COMM;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}
1475
1476 extern enum regex_type
1477 filter_parse_regex(char *buff, int len, char **search, int *not);
1478 extern void print_event_filter(struct trace_event_file *file,
1479 struct trace_seq *s);
1480 extern int apply_event_filter(struct trace_event_file *file,
1481 char *filter_string);
1482 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1483 char *filter_string);
1484 extern void print_subsystem_event_filter(struct event_subsystem *system,
1485 struct trace_seq *s);
1486 extern int filter_assign_type(const char *type);
1487 extern int create_event_filter(struct trace_array *tr,
1488 struct trace_event_call *call,
1489 char *filter_str, bool set_str,
1490 struct event_filter **filterp);
1491 extern void free_event_filter(struct event_filter *filter);
1492
1493 struct ftrace_event_field *
1494 trace_find_event_field(struct trace_event_call *call, char *name);
1495
1496 extern void trace_event_enable_cmd_record(bool enable);
1497 extern void trace_event_enable_tgid_record(bool enable);
1498
1499 extern int event_trace_init(void);
1500 extern int init_events(void);
1501 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1502 extern int event_trace_del_tracer(struct trace_array *tr);
1503 extern void __trace_early_add_events(struct trace_array *tr);
1504
1505 extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1506 const char *system,
1507 const char *event);
1508 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1509 const char *system,
1510 const char *event);
1511
static inline void *event_file_data(struct file *filp)
{
	return READ_ONCE(file_inode(filp)->i_private);
}
1516
1517 extern struct mutex event_mutex;
1518 extern struct list_head ftrace_events;
1519
1520 extern const struct file_operations event_trigger_fops;
1521 extern const struct file_operations event_hist_fops;
1522 extern const struct file_operations event_hist_debug_fops;
1523 extern const struct file_operations event_inject_fops;
1524
1525 #ifdef CONFIG_HIST_TRIGGERS
1526 extern int register_trigger_hist_cmd(void);
1527 extern int register_trigger_hist_enable_disable_cmds(void);
1528 #else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1531 #endif
1532
1533 extern int register_trigger_cmds(void);
1534 extern void clear_event_triggers(struct trace_array *tr);
1535
1536 enum {
1537 EVENT_TRIGGER_FL_PROBE = BIT(0),
1538 };
1539
1540 struct event_trigger_data {
1541 unsigned long count;
1542 int ref;
1543 int flags;
1544 struct event_trigger_ops *ops;
1545 struct event_command *cmd_ops;
1546 struct event_filter __rcu *filter;
1547 char *filter_str;
1548 void *private_data;
1549 bool paused;
1550 bool paused_tmp;
1551 struct list_head list;
1552 char *name;
1553 struct list_head named_list;
1554 struct event_trigger_data *named_data;
1555 };
1556
1557 /* Avoid typos */
1558 #define ENABLE_EVENT_STR "enable_event"
1559 #define DISABLE_EVENT_STR "disable_event"
1560 #define ENABLE_HIST_STR "enable_hist"
1561 #define DISABLE_HIST_STR "disable_hist"
1562
1563 struct enable_trigger_data {
1564 struct trace_event_file *file;
1565 bool enable;
1566 bool hist;
1567 };
1568
1569 extern int event_enable_trigger_print(struct seq_file *m,
1570 struct event_trigger_data *data);
1571 extern void event_enable_trigger_free(struct event_trigger_data *data);
1572 extern int event_enable_trigger_parse(struct event_command *cmd_ops,
1573 struct trace_event_file *file,
1574 char *glob, char *cmd,
1575 char *param_and_filter);
1576 extern int event_enable_register_trigger(char *glob,
1577 struct event_trigger_data *data,
1578 struct trace_event_file *file);
1579 extern void event_enable_unregister_trigger(char *glob,
1580 struct event_trigger_data *test,
1581 struct trace_event_file *file);
1582 extern void trigger_data_free(struct event_trigger_data *data);
1583 extern int event_trigger_init(struct event_trigger_data *data);
1584 extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1585 int trigger_enable);
1586 extern void update_cond_flag(struct trace_event_file *file);
1587 extern int set_trigger_filter(char *filter_str,
1588 struct event_trigger_data *trigger_data,
1589 struct trace_event_file *file);
1590 extern struct event_trigger_data *find_named_trigger(const char *name);
1591 extern bool is_named_trigger(struct event_trigger_data *test);
1592 extern int save_named_trigger(const char *name,
1593 struct event_trigger_data *data);
1594 extern void del_named_trigger(struct event_trigger_data *data);
1595 extern void pause_named_trigger(struct event_trigger_data *data);
1596 extern void unpause_named_trigger(struct event_trigger_data *data);
1597 extern void set_named_trigger_data(struct event_trigger_data *data,
1598 struct event_trigger_data *named_data);
1599 extern struct event_trigger_data *
1600 get_named_trigger_data(struct event_trigger_data *data);
1601 extern int register_event_command(struct event_command *cmd);
1602 extern int unregister_event_command(struct event_command *cmd);
1603 extern int register_trigger_hist_enable_disable_cmds(void);
1604 extern bool event_trigger_check_remove(const char *glob);
1605 extern bool event_trigger_empty_param(const char *param);
1606 extern int event_trigger_separate_filter(char *param_and_filter, char **param,
1607 char **filter, bool param_required);
1608 extern struct event_trigger_data *
1609 event_trigger_alloc(struct event_command *cmd_ops,
1610 char *cmd,
1611 char *param,
1612 void *private_data);
1613 extern int event_trigger_parse_num(char *trigger,
1614 struct event_trigger_data *trigger_data);
1615 extern int event_trigger_set_filter(struct event_command *cmd_ops,
1616 struct trace_event_file *file,
1617 char *param,
1618 struct event_trigger_data *trigger_data);
1619 extern void event_trigger_reset_filter(struct event_command *cmd_ops,
1620 struct event_trigger_data *trigger_data);
1621 extern int event_trigger_register(struct event_command *cmd_ops,
1622 struct trace_event_file *file,
1623 char *glob,
1624 struct event_trigger_data *trigger_data);
1625 extern void event_trigger_unregister(struct event_command *cmd_ops,
1626 struct trace_event_file *file,
1627 char *glob,
1628 struct event_trigger_data *trigger_data);
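/*
 * The declarations above are the building blocks normally used to
 * implement an event_command @parse() callback.  A rough, illustrative
 * sketch of the usual ordering follows; the function name
 * example_trigger_parse and the NULL private_data are hypothetical,
 * not part of this interface (see trace_events_trigger.c for the real
 * generic implementation):
 *
 *	static int example_trigger_parse(struct event_command *cmd_ops,
 *					 struct trace_event_file *file,
 *					 char *glob, char *cmd,
 *					 char *param_and_filter)
 *	{
 *		struct event_trigger_data *data;
 *		char *param, *filter;
 *		bool remove;
 *		int ret;
 *
 *		remove = event_trigger_check_remove(glob);
 *
 *		ret = event_trigger_separate_filter(param_and_filter,
 *						    &param, &filter, false);
 *		if (ret)
 *			return ret;
 *
 *		data = event_trigger_alloc(cmd_ops, cmd, param, NULL);
 *		if (!data)
 *			return -ENOMEM;
 *
 *		if (remove) {
 *			event_trigger_unregister(cmd_ops, file, glob, data);
 *			trigger_data_free(data);
 *			return 0;
 *		}
 *
 *		ret = event_trigger_parse_num(param, data);
 *		if (ret)
 *			goto out_free;
 *
 *		ret = event_trigger_set_filter(cmd_ops, file, filter, data);
 *		if (ret < 0)
 *			goto out_free;
 *
 *		ret = event_trigger_register(cmd_ops, file, glob, data);
 *		if (!ret)
 *			return 0;
 *
 *		event_trigger_reset_filter(cmd_ops, data);
 *	 out_free:
 *		trigger_data_free(data);
 *		return ret;
 *	}
 */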
1629
1630 /**
1631 * struct event_trigger_ops - callbacks for trace event triggers
1632 *
1633 * The methods in this structure provide per-event trigger hooks for
1634 * various trigger operations.
1635 *
1636 * The @init and @free methods are used during trigger setup and
1637 * teardown, typically called from an event_command's @parse()
1638 * function implementation.
1639 *
1640 * The @print method is used to print the trigger spec.
1641 *
1642 * The @trigger method is the function that actually implements the
1643 * trigger and is called in the context of the triggering event
1644 * whenever that event occurs.
1645 *
1646 * All the methods below, except for @init() and @free(), must be
1647 * implemented.
1648 *
1649 * @trigger: The trigger 'probe' function called when the triggering
1650 * event occurs. The data passed into this callback is the data
1651 * that was supplied to the event_command @reg() function that
1652 * registered the trigger (see struct event_command) along with
1653 * the trace record, rec.
1654 *
1655 * @init: An optional initialization function called for the trigger
1656 * when the trigger is registered (via the event_command reg()
1657 * function). This can be used to perform per-trigger
1658 * initialization such as incrementing a per-trigger reference
1659 * count, for instance. This is usually implemented by the
1660 * generic utility function @event_trigger_init() (see
1661 * trace_events_trigger.c).
1662 *
1663 * @free: An optional de-initialization function called for the
1664 * trigger when the trigger is unregistered (via the
1665 * event_command @reg() function). This can be used to perform
1666 * per-trigger de-initialization such as decrementing a
1667 * per-trigger reference count and freeing corresponding trigger
1668 * data, for instance. This is usually implemented by the
1669 * generic utility function @event_trigger_free() (see
1670 * trace_events_trigger.c).
1671 *
1672 * @print: The callback function invoked to have the trigger print
1673 * itself. This is usually implemented by a wrapper function
1674 * that calls the generic utility function @event_trigger_print()
1675 * (see trace_events_trigger.c).
1676 */
1677 struct event_trigger_ops {
1678 void (*trigger)(struct event_trigger_data *data,
1679 struct trace_buffer *buffer,
1680 void *rec,
1681 struct ring_buffer_event *rbe);
1682 int (*init)(struct event_trigger_data *data);
1683 void (*free)(struct event_trigger_data *data);
1684 int (*print)(struct seq_file *m,
1685 struct event_trigger_data *data);
1686 };
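/*
 * Illustrative only: a minimal set of trigger ops.  The example_*
 * symbols are hypothetical; real instances live in
 * trace_events_trigger.c.  Note that event_trigger_init(), declared
 * above, already matches the @init signature and is commonly reused.
 *
 *	static void example_trigger(struct event_trigger_data *data,
 *				    struct trace_buffer *buffer, void *rec,
 *				    struct ring_buffer_event *rbe)
 *	{
 *		if (!data->count)
 *			return;
 *		if (data->count != -1)
 *			(data->count)--;
 *		pr_info("example trigger hit\n");
 *	}
 *
 *	static struct event_trigger_ops example_trigger_ops = {
 *		.trigger	= example_trigger,
 *		.init		= event_trigger_init,
 *		.free		= example_trigger_free,
 *		.print		= example_trigger_print,
 *	};
 */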
1687
1688 /**
1689 * struct event_command - callbacks and data members for event commands
1690 *
1691 * Event commands are invoked by users by writing the command name
1692 * into the 'trigger' file associated with a trace event. The
1693 * parameters associated with a specific invocation of an event
1694 * command are used to create an event trigger instance, which is
1695 * added to the list of trigger instances associated with that trace
1696 * event. When the event is hit, the set of triggers associated with
1697 * that event is invoked.
1698 *
1699 * The data members in this structure provide per-event command data
1700 * for various event commands.
1701 *
1702 * All the data members below, except for @flags, must be set
1703 * for each event command.
1704 *
1705 * @name: The unique name that identifies the event command. This is
1706 * the name used when setting triggers via trigger files.
1707 *
1708 * @trigger_type: A unique id that identifies the event command
1709 * 'type'. This value has two purposes. The first is to ensure
1710 * that only one trigger of the same type can be set at a given
1711 * time for a particular event; e.g. it doesn't make sense to have
1712 * both a traceon and a traceoff trigger attached to a single event
1713 * at the same time, so traceon and traceoff share the same type
1714 * though they have different names. The @trigger_type value is
1715 * also used as a bit value for deferring the actual trigger
1716 * action until after the current event is finished. Some
1717 * commands need to do this if they themselves log to the trace
1718 * buffer (see the EVENT_CMD_FL_POST_TRIGGER flag below). @trigger_type
1719 * values are defined by adding new values to the trigger_type
1720 * enum in include/linux/trace_events.h.
1721 *
1722 * @flags: See the enum event_command_flags below.
1723 *
1724 * All the methods below, except for @set_filter() and @unreg_all(),
1725 * must be implemented.
1726 *
1727 * @parse: The callback function responsible for parsing and
1728 * registering the trigger written to the 'trigger' file by the
1729 * user. It allocates the trigger instance and registers it with
1730 * the appropriate trace event. It makes use of the other
1731 * event_command callback functions to orchestrate this, and is
1732 * usually implemented by the generic utility function
1733 * @event_trigger_callback() (see trace_events_trigger.c).
1734 *
1735 * @reg: Adds the trigger to the list of triggers associated with the
1736 * event, and enables the event trigger itself, after
1737 * initializing it (via the event_trigger_ops @init() function).
1738 * This is also where commands can use the @trigger_type value to
1739 * make the decision as to whether or not multiple instances of
1740 * the trigger should be allowed. This is usually implemented by
1741 * the generic utility function @register_trigger() (see
1742 * trace_events_trigger.c).
1743 *
1744 * @unreg: Removes the trigger from the list of triggers associated
1745 * with the event, and disables the event trigger itself, after
1746 * releasing it (via the event_trigger_ops @free() function).
1747 * This is usually implemented by the generic utility function
1748 * @unregister_trigger() (see trace_events_trigger.c).
1749 *
1750 * @unreg_all: An optional function called to remove all the triggers
1751 * from the list of triggers associated with the event. Called
1752 * when a trigger file is opened in truncate mode.
1753 *
1754 * @set_filter: An optional function called to parse and set a filter
1755 * for the trigger. If no @set_filter() method is set for the
1756 * event command, filters set by the user for the command will be
1757 * ignored. This is usually implemented by the generic utility
1758 * function @set_trigger_filter() (see trace_events_trigger.c).
1759 *
1760 * @get_trigger_ops: The callback function invoked to retrieve the
1761 * event_trigger_ops implementation associated with the command.
1762 * This callback function allows a single event_command to
1763 * support multiple trigger implementations via different sets of
1764 * event_trigger_ops, depending on the value of the @param
1765 * string.
1766 */
1767 struct event_command {
1768 struct list_head list;
1769 char *name;
1770 enum event_trigger_type trigger_type;
1771 int flags;
1772 int (*parse)(struct event_command *cmd_ops,
1773 struct trace_event_file *file,
1774 char *glob, char *cmd,
1775 char *param_and_filter);
1776 int (*reg)(char *glob,
1777 struct event_trigger_data *data,
1778 struct trace_event_file *file);
1779 void (*unreg)(char *glob,
1780 struct event_trigger_data *data,
1781 struct trace_event_file *file);
1782 void (*unreg_all)(struct trace_event_file *file);
1783 int (*set_filter)(char *filter_str,
1784 struct event_trigger_data *data,
1785 struct trace_event_file *file);
1786 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1787 };
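/*
 * Illustrative only: how a command is typically described and made
 * available.  The example_* names and ETT_EXAMPLE are hypothetical;
 * register_trigger()/unregister_trigger() are the generic helpers
 * referred to above, and set_trigger_filter() is declared in this
 * header.  Real commands (traceon, traceoff, snapshot, ...) live in
 * trace_events_trigger.c.
 *
 *	static struct event_command trigger_example_cmd = {
 *		.name			= "example",
 *		.trigger_type		= ETT_EXAMPLE,
 *		.flags			= EVENT_CMD_FL_NEEDS_REC,
 *		.parse			= example_trigger_parse,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.set_filter		= set_trigger_filter,
 *		.get_trigger_ops	= example_get_trigger_ops,
 *	};
 *
 *	__init int register_trigger_example_cmd(void)
 *	{
 *		return register_event_command(&trigger_example_cmd);
 *	}
 */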
1788
1789 /**
1790 * enum event_command_flags - flags for struct event_command
1791 *
1792 * @POST_TRIGGER: A flag that says whether or not this command needs
1793 * to have its action delayed until after the current event has
1794 * been closed. Some triggers need to avoid being invoked while
1795 * an event is currently in the process of being logged, since
1796 * the trigger may itself log data into the trace buffer. Thus
1797 * we make sure the current event is committed before invoking
1798 * those triggers. To do that, the trigger invocation is split
1799 * in two - the first part checks the filter using the current
1800 * trace record; if a command has the @post_trigger flag set, it
1801 * sets a bit for itself in the return value, otherwise it
1802 * directly invokes the trigger. Once all commands have been
1803 * either invoked or set their return flag, the current record is
1804 * either committed or discarded. At that point, if any commands
1805 * have deferred their triggers, those commands are finally
1806 * invoked following the close of the current event. In other
1807 * words, if the event_trigger_ops @trigger() probe implementation
1808 * itself logs to the trace buffer, this flag should be set,
1809 * otherwise it can be left unspecified.
1810 *
1811 * @NEEDS_REC: A flag that says whether or not this command needs
1812 * access to the trace record in order to perform its function,
1813 * regardless of whether or not it has a filter associated with
1814 * it (filters make a trigger require access to the trace record
1815 * but are not always present).
1816 */
1817 enum event_command_flags {
1818 EVENT_CMD_FL_POST_TRIGGER = 1,
1819 EVENT_CMD_FL_NEEDS_REC = 2,
1820 };
1821
1822 static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1823 {
1824 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1825 }
1826
1827 static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1828 {
1829 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1830 }
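/*
 * Illustrative only: roughly how the accessors above are consumed when
 * an event fires.  A command flagged EVENT_CMD_FL_POST_TRIGGER reports
 * its @trigger_type bit instead of running immediately, so the caller
 * can commit the current record first and run the deferred triggers
 * afterwards (the real logic lives in event_triggers_call() and
 * event_triggers_post_call(); filter handling is omitted here):
 *
 *	struct event_trigger_data *data;
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	list_for_each_entry_rcu(data, &file->triggers, list) {
 *		if (event_command_post_trigger(data->cmd_ops))
 *			tt |= data->cmd_ops->trigger_type;
 *		else
 *			data->ops->trigger(data, buffer, rec, rbe);
 *	}
 *	// ... commit or discard the record, then invoke the triggers
 *	// whose bits are set in tt ...
 */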
1831
1832 extern int trace_event_enable_disable(struct trace_event_file *file,
1833 int enable, int soft_disable);
1834 extern int tracing_alloc_snapshot(void);
1835 extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1836 extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1837
1838 extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1839 extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1840
1841 extern const char *__start___trace_bprintk_fmt[];
1842 extern const char *__stop___trace_bprintk_fmt[];
1843
1844 extern const char *__start___tracepoint_str[];
1845 extern const char *__stop___tracepoint_str[];
1846
1847 void trace_printk_control(bool enabled);
1848 void trace_printk_start_comm(void);
1849 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1850 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1851
1852 /* Used from boot time tracer */
1853 extern int trace_set_options(struct trace_array *tr, char *option);
1854 extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
1855 extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
1856 unsigned long size, int cpu_id);
1857 extern int tracing_set_cpumask(struct trace_array *tr,
1858 cpumask_var_t tracing_cpumask_new);
1859
1860
1861 #define MAX_EVENT_NAME_LEN 64
1862
1863 extern ssize_t trace_parse_run_command(struct file *file,
1864 const char __user *buffer, size_t count, loff_t *ppos,
1865 int (*createfn)(const char *));
1866
1867 extern unsigned int err_pos(char *cmd, const char *str);
1868 extern void tracing_log_err(struct trace_array *tr,
1869 const char *loc, const char *cmd,
1870 const char **errs, u8 type, u16 pos);
1871
1872 /*
1873 * Normal trace_printk() and friends allocate special buffers
1874 * to do the manipulation, as well as save the print formats
1875 * into sections to display. But the trace infrastructure wants
1876 * to use these without the added overhead, at the price of being
1877 * a bit slower (used mainly for warnings, where we don't care
1878 * about performance). The internal_trace_puts() macro exists for
1879 * that purpose.
1880 */
1881 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
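/*
 * For example, a rarely-hit warning path can simply do (illustrative):
 *
 *	internal_trace_puts("*** EXAMPLE WARNING ***\n");
 */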
1882
1883 #undef FTRACE_ENTRY
1884 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
1885 extern struct trace_event_call \
1886 __aligned(4) event_##call;
1887 #undef FTRACE_ENTRY_DUP
1888 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
1889 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1890 #undef FTRACE_ENTRY_PACKED
1891 #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
1892 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1893
1894 #include "trace_entries.h"
1895
1896 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1897 int perf_ftrace_event_register(struct trace_event_call *call,
1898 enum trace_reg type, void *data);
1899 #else
1900 #define perf_ftrace_event_register NULL
1901 #endif
1902
1903 #ifdef CONFIG_FTRACE_SYSCALLS
1904 void init_ftrace_syscalls(void);
1905 const char *get_syscall_name(int syscall);
1906 #else
1907 static inline void init_ftrace_syscalls(void) { }
1908 static inline const char *get_syscall_name(int syscall)
1909 {
1910 return NULL;
1911 }
1912 #endif
1913
1914 #ifdef CONFIG_EVENT_TRACING
1915 void trace_event_init(void);
1916 void trace_event_eval_update(struct trace_eval_map **map, int len);
1917 /* Used from boot time tracer */
1918 extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
1919 extern int trigger_process_regex(struct trace_event_file *file, char *buff);
1920 #else
1921 static inline void __init trace_event_init(void) { }
1922 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1923 #endif
1924
1925 #ifdef CONFIG_TRACER_SNAPSHOT
1926 void tracing_snapshot_instance(struct trace_array *tr);
1927 int tracing_alloc_snapshot_instance(struct trace_array *tr);
1928 #else
1929 static inline void tracing_snapshot_instance(struct trace_array *tr) { }
1930 static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
1931 {
1932 return 0;
1933 }
1934 #endif
1935
1936 #ifdef CONFIG_PREEMPT_TRACER
1937 void tracer_preempt_on(unsigned long a0, unsigned long a1);
1938 void tracer_preempt_off(unsigned long a0, unsigned long a1);
1939 #else
1940 static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
1941 static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
1942 #endif
1943 #ifdef CONFIG_IRQSOFF_TRACER
1944 void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
1945 void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
1946 #else
1947 static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
1948 static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
1949 #endif
1950
1951 /*
1952 * Reset the state of the trace_iterator so that it can read consumed data.
1953 * Normally, the trace_iterator is used for reading the data when it is not
1954 * consumed, and must retain state.
1955 */
1956 static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
1957 {
1958 memset_startat(iter, 0, seq);
1959 iter->pos = -1;
1960 }
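/*
 * A rough, illustrative consuming-read loop (error handling and
 * locking omitted); the iterator is reset before walking the freshly
 * consumed data:
 *
 *	trace_iterator_reset(iter);
 *	trace_seq_init(&iter->seq);
 *	while (trace_find_next_entry_inc(iter))
 *		print_trace_line(iter);
 */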
1961
1962 /* Check the name is good for event/group/fields */
1963 static inline bool __is_good_name(const char *name, bool hash_ok)
1964 {
1965 if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-'))
1966 return false;
1967 while (*++name != '\0') {
1968 if (!isalpha(*name) && !isdigit(*name) && *name != '_' &&
1969 (!hash_ok || *name != '-'))
1970 return false;
1971 }
1972 return true;
1973 }
1974
1975 /* Check the name is good for event/group/fields */
1976 static inline bool is_good_name(const char *name)
1977 {
1978 return __is_good_name(name, false);
1979 }
1980
1981 /* Check the name is good for system */
1982 static inline bool is_good_system_name(const char *name)
1983 {
1984 return __is_good_name(name, true);
1985 }
1986
1987 /* Convert certain expected symbols into '_' when generating event names */
1988 static inline void sanitize_event_name(char *name)
1989 {
1990 while (*name++ != '\0')
1991 if (*name == ':' || *name == '.')
1992 *name = '_';
1993 }
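/*
 * Illustrative only (buf and group are hypothetical): an automatically
 * generated event name, e.g. one derived from a symbol such as
 * "vfs_read.isra.0", is first passed through sanitize_event_name() so
 * that '.' and ':' become '_', and is then validated:
 *
 *	sanitize_event_name(buf);
 *	if (!is_good_name(buf) || !is_good_system_name(group))
 *		return -EINVAL;
 */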
1994
1995 /*
1996 * This is a generic way to read and write a u64 value from a file in tracefs.
1997 *
1998 * The value is stored in the variable pointed to by *val. The value needs
1999 * to be at least *min and at most *max. The write is protected by an
2000 * existing *lock.
2001 */
2002 struct trace_min_max_param {
2003 struct mutex *lock;
2004 u64 *val;
2005 u64 *min;
2006 u64 *max;
2007 };
2008
2009 #define U64_STR_SIZE 24 /* 20 digits max */
2010
2011 extern const struct file_operations trace_min_max_fops;
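/*
 * Illustrative only: exposing a clamped u64 knob in tracefs.  The
 * example_* names are hypothetical; osnoise/timerlat use this pattern
 * for their tunables:
 *
 *	static u64 example_val = 50;
 *	static u64 example_max = 1000;
 *	static DEFINE_MUTEX(example_lock);
 *
 *	static struct trace_min_max_param example_param = {
 *		.lock	= &example_lock,
 *		.val	= &example_val,
 *		.max	= &example_max,
 *		.min	= NULL,
 *	};
 *
 *	trace_create_file("example_us", TRACE_MODE_WRITE, parent,
 *			  &example_param, &trace_min_max_fops);
 */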
2012
2013 #ifdef CONFIG_RV
2014 extern int rv_init_interface(void);
2015 #else
2016 static inline int rv_init_interface(void)
2017 {
2018 return 0;
2019 }
2020 #endif
2021
2022 #endif /* _LINUX_KERNEL_TRACE_H */
2023