// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_synth - synthetic trace events
 *
 * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
 */
7
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 #include "trace_probe.h"
21 #include "trace_probe_kernel.h"
22
23 #include "trace_synth.h"
24
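/*
 * Error strings are kept with an X-macro: ERRORS lists each error as
 * C(name, message), and C() is redefined below, first to expand to the
 * SYNTH_ERR_* enum values and then to the message strings in err_text[],
 * so the enum and the string table stay in sync from a single list.
 */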
25 #undef ERRORS
26 #define ERRORS \
27 C(BAD_NAME, "Illegal name"), \
28 C(INVALID_CMD, "Command must be of the form: <name> field[;field] ..."),\
29 C(INVALID_DYN_CMD, "Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
30 C(EVENT_EXISTS, "Event already exists"), \
31 C(TOO_MANY_FIELDS, "Too many fields"), \
32 C(INCOMPLETE_TYPE, "Incomplete type"), \
33 C(INVALID_TYPE, "Invalid type"), \
34 C(INVALID_FIELD, "Invalid field"), \
35 C(INVALID_ARRAY_SPEC, "Invalid array specification"),
36
37 #undef C
38 #define C(a, b) SYNTH_ERR_##a
39
40 enum { ERRORS };
41
42 #undef C
43 #define C(a, b) b
44
45 static const char *err_text[] = { ERRORS };
46
47 static char *last_cmd;
48
static int errpos(const char *str)
50 {
51 if (!str || !last_cmd)
52 return 0;
53
54 return err_pos(last_cmd, str);
55 }
56
static void last_cmd_set(const char *str)
58 {
59 if (!str)
60 return;
61
62 kfree(last_cmd);
63
64 last_cmd = kstrdup(str, GFP_KERNEL);
65 }
66
static void synth_err(u8 err_type, u16 err_pos)
68 {
69 if (!last_cmd)
70 return;
71
72 tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
73 err_type, err_pos);
74 }
75
76 static int create_synth_event(const char *raw_command);
77 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
78 static int synth_event_release(struct dyn_event *ev);
79 static bool synth_event_is_busy(struct dyn_event *ev);
80 static bool synth_event_match(const char *system, const char *event,
81 int argc, const char **argv, struct dyn_event *ev);
82
83 static struct dyn_event_operations synth_event_ops = {
84 .create = create_synth_event,
85 .show = synth_event_show,
86 .is_busy = synth_event_is_busy,
87 .free = synth_event_release,
88 .match = synth_event_match,
89 };
90
static bool is_synth_event(struct dyn_event *ev)
92 {
93 return ev->ops == &synth_event_ops;
94 }
95
static struct synth_event *to_synth_event(struct dyn_event *ev)
97 {
98 return container_of(ev, struct synth_event, devent);
99 }
100
static bool synth_event_is_busy(struct dyn_event *ev)
102 {
103 struct synth_event *event = to_synth_event(ev);
104
105 return event->ref != 0;
106 }
107
static bool synth_event_match(const char *system, const char *event,
109 int argc, const char **argv, struct dyn_event *ev)
110 {
111 struct synth_event *sev = to_synth_event(ev);
112
113 return strcmp(sev->name, event) == 0 &&
114 (!system || strcmp(system, SYNTH_SYSTEM) == 0);
115 }
116
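/*
 * On the ring buffer, a synthetic event is a trace_entry followed by a
 * packed array of u64 slots: one slot per numeric field, a fixed
 * STR_VAR_LEN_MAX block for a static string field, and for dynamic
 * strings and stack traces a single u32 locator holding
 * (len << 16 | offset), where the offset is from the start of the record
 * and points into the variable-size data appended after the slots.
 */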
117 struct synth_trace_event {
118 struct trace_entry ent;
119 u64 fields[];
120 };
121
static int synth_event_define_fields(struct trace_event_call *call)
123 {
124 struct synth_trace_event trace;
125 int offset = offsetof(typeof(trace), fields);
126 struct synth_event *event = call->data;
127 unsigned int i, size, n_u64;
128 char *name, *type;
129 bool is_signed;
130 int ret = 0;
131
132 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
133 size = event->fields[i]->size;
134 is_signed = event->fields[i]->is_signed;
135 type = event->fields[i]->type;
136 name = event->fields[i]->name;
137 ret = trace_define_field(call, type, name, offset, size,
138 is_signed, FILTER_OTHER);
139 if (ret)
140 break;
141
142 event->fields[i]->offset = n_u64;
143
144 if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
145 offset += STR_VAR_LEN_MAX;
146 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
147 } else {
148 offset += sizeof(u64);
149 n_u64++;
150 }
151 }
152
153 event->n_u64 = n_u64;
154
155 return ret;
156 }
157
static bool synth_field_signed(char *type)
159 {
160 if (str_has_prefix(type, "u"))
161 return false;
162 if (strcmp(type, "gfp_t") == 0)
163 return false;
164
165 return true;
166 }
167
static int synth_field_is_string(char *type)
169 {
170 if (strstr(type, "char[") != NULL)
171 return true;
172
173 return false;
174 }
175
static int synth_field_is_stack(char *type)
177 {
178 if (strstr(type, "long[") != NULL)
179 return true;
180
181 return false;
182 }
183
static int synth_field_string_size(char *type)
185 {
186 char buf[4], *end, *start;
187 unsigned int len;
188 int size, err;
189
190 start = strstr(type, "char[");
191 if (start == NULL)
192 return -EINVAL;
193 start += sizeof("char[") - 1;
194
195 end = strchr(type, ']');
196 if (!end || end < start || type + strlen(type) > end + 1)
197 return -EINVAL;
198
199 len = end - start;
200 if (len > 3)
201 return -EINVAL;
202
203 if (len == 0)
204 return 0; /* variable-length string */
205
206 strncpy(buf, start, len);
207 buf[len] = '\0';
208
209 err = kstrtouint(buf, 0, &size);
210 if (err)
211 return err;
212
213 if (size > STR_VAR_LEN_MAX)
214 return -EINVAL;
215
216 return size;
217 }
218
static int synth_field_size(char *type)
220 {
221 int size = 0;
222
223 if (strcmp(type, "s64") == 0)
224 size = sizeof(s64);
225 else if (strcmp(type, "u64") == 0)
226 size = sizeof(u64);
227 else if (strcmp(type, "s32") == 0)
228 size = sizeof(s32);
229 else if (strcmp(type, "u32") == 0)
230 size = sizeof(u32);
231 else if (strcmp(type, "s16") == 0)
232 size = sizeof(s16);
233 else if (strcmp(type, "u16") == 0)
234 size = sizeof(u16);
235 else if (strcmp(type, "s8") == 0)
236 size = sizeof(s8);
237 else if (strcmp(type, "u8") == 0)
238 size = sizeof(u8);
239 else if (strcmp(type, "char") == 0)
240 size = sizeof(char);
241 else if (strcmp(type, "unsigned char") == 0)
242 size = sizeof(unsigned char);
243 else if (strcmp(type, "int") == 0)
244 size = sizeof(int);
245 else if (strcmp(type, "unsigned int") == 0)
246 size = sizeof(unsigned int);
247 else if (strcmp(type, "long") == 0)
248 size = sizeof(long);
249 else if (strcmp(type, "unsigned long") == 0)
250 size = sizeof(unsigned long);
251 else if (strcmp(type, "bool") == 0)
252 size = sizeof(bool);
253 else if (strcmp(type, "pid_t") == 0)
254 size = sizeof(pid_t);
255 else if (strcmp(type, "gfp_t") == 0)
256 size = sizeof(gfp_t);
257 else if (synth_field_is_string(type))
258 size = synth_field_string_size(type);
259 else if (synth_field_is_stack(type))
260 size = 0;
261
262 return size;
263 }
264
static const char *synth_field_fmt(char *type)
266 {
267 const char *fmt = "%llu";
268
269 if (strcmp(type, "s64") == 0)
270 fmt = "%lld";
271 else if (strcmp(type, "u64") == 0)
272 fmt = "%llu";
273 else if (strcmp(type, "s32") == 0)
274 fmt = "%d";
275 else if (strcmp(type, "u32") == 0)
276 fmt = "%u";
277 else if (strcmp(type, "s16") == 0)
278 fmt = "%d";
279 else if (strcmp(type, "u16") == 0)
280 fmt = "%u";
281 else if (strcmp(type, "s8") == 0)
282 fmt = "%d";
283 else if (strcmp(type, "u8") == 0)
284 fmt = "%u";
285 else if (strcmp(type, "char") == 0)
286 fmt = "%d";
287 else if (strcmp(type, "unsigned char") == 0)
288 fmt = "%u";
289 else if (strcmp(type, "int") == 0)
290 fmt = "%d";
291 else if (strcmp(type, "unsigned int") == 0)
292 fmt = "%u";
293 else if (strcmp(type, "long") == 0)
294 fmt = "%ld";
295 else if (strcmp(type, "unsigned long") == 0)
296 fmt = "%lu";
297 else if (strcmp(type, "bool") == 0)
298 fmt = "%d";
299 else if (strcmp(type, "pid_t") == 0)
300 fmt = "%d";
301 else if (strcmp(type, "gfp_t") == 0)
302 fmt = "%x";
303 else if (synth_field_is_string(type))
304 fmt = "%.*s";
305 else if (synth_field_is_stack(type))
306 fmt = "%s";
307
308 return fmt;
309 }
310
static void print_synth_event_num_val(struct trace_seq *s,
312 char *print_fmt, char *name,
313 int size, u64 val, char *space)
314 {
315 switch (size) {
316 case 1:
317 trace_seq_printf(s, print_fmt, name, (u8)val, space);
318 break;
319
320 case 2:
321 trace_seq_printf(s, print_fmt, name, (u16)val, space);
322 break;
323
324 case 4:
325 trace_seq_printf(s, print_fmt, name, (u32)val, space);
326 break;
327
328 default:
329 trace_seq_printf(s, print_fmt, name, val, space);
330 break;
331 }
332 }
333
static enum print_line_t print_synth_event(struct trace_iterator *iter,
335 int flags,
336 struct trace_event *event)
337 {
338 struct trace_array *tr = iter->tr;
339 struct trace_seq *s = &iter->seq;
340 struct synth_trace_event *entry;
341 struct synth_event *se;
342 unsigned int i, n_u64;
343 char print_fmt[32];
344 const char *fmt;
345
346 entry = (struct synth_trace_event *)iter->ent;
347 se = container_of(event, struct synth_event, call.event);
348
349 trace_seq_printf(s, "%s: ", se->name);
350
351 for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
352 if (trace_seq_has_overflowed(s))
353 goto end;
354
355 fmt = synth_field_fmt(se->fields[i]->type);
356
357 /* parameter types */
358 if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
359 trace_seq_printf(s, "%s ", fmt);
360
361 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
362
363 /* parameter values */
364 if (se->fields[i]->is_string) {
365 if (se->fields[i]->is_dynamic) {
366 u32 offset, data_offset;
367 char *str_field;
368
369 offset = (u32)entry->fields[n_u64];
370 data_offset = offset & 0xffff;
371
372 str_field = (char *)entry + data_offset;
373
374 trace_seq_printf(s, print_fmt, se->fields[i]->name,
375 STR_VAR_LEN_MAX,
376 str_field,
377 i == se->n_fields - 1 ? "" : " ");
378 n_u64++;
379 } else {
380 trace_seq_printf(s, print_fmt, se->fields[i]->name,
381 STR_VAR_LEN_MAX,
382 (char *)&entry->fields[n_u64],
383 i == se->n_fields - 1 ? "" : " ");
384 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
385 }
386 } else if (se->fields[i]->is_stack) {
387 u32 offset, data_offset, len;
388 unsigned long *p, *end;
389
390 offset = (u32)entry->fields[n_u64];
391 data_offset = offset & 0xffff;
392 len = offset >> 16;
393
394 p = (void *)entry + data_offset;
395 end = (void *)p + len - (sizeof(long) - 1);
396
397 trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
398
399 for (; *p && p < end; p++)
400 trace_seq_printf(s, "=> %pS\n", (void *)*p);
401 n_u64++;
402
403 } else {
404 struct trace_print_flags __flags[] = {
405 __def_gfpflag_names, {-1, NULL} };
406 char *space = (i == se->n_fields - 1 ? "" : " ");
407
408 print_synth_event_num_val(s, print_fmt,
409 se->fields[i]->name,
410 se->fields[i]->size,
411 entry->fields[n_u64],
412 space);
413
414 if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
415 trace_seq_puts(s, " (");
416 trace_print_flags_seq(s, "|",
417 entry->fields[n_u64],
418 __flags);
419 trace_seq_putc(s, ')');
420 }
421 n_u64++;
422 }
423 }
424 end:
425 trace_seq_putc(s, '\n');
426
427 return trace_handle_return(s);
428 }
429
430 static struct trace_event_functions synth_event_funcs = {
431 .trace = print_synth_event
432 };
433
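/*
 * Copy a string value into the reserved entry. For static string fields
 * the string is copied into the STR_VAR_LEN_MAX area inside fields[];
 * for dynamic string fields it is appended after the u64 slots and the
 * field's slot is set to the (len << 16 | offset) locator instead.
 */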
static unsigned int trace_string(struct synth_trace_event *entry,
435 struct synth_event *event,
436 char *str_val,
437 bool is_dynamic,
438 unsigned int data_size,
439 unsigned int *n_u64)
440 {
441 unsigned int len = 0;
442 char *str_field;
443 int ret;
444
445 if (is_dynamic) {
446 u32 data_offset;
447
448 data_offset = struct_size(entry, fields, event->n_u64);
449 data_offset += data_size;
450
451 len = fetch_store_strlen((unsigned long)str_val);
452
453 data_offset |= len << 16;
454 *(u32 *)&entry->fields[*n_u64] = data_offset;
455
456 ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
457
458 (*n_u64)++;
459 } else {
460 str_field = (char *)&entry->fields[*n_u64];
461
462 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
463 if ((unsigned long)str_val < TASK_SIZE)
464 ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
465 else
466 #endif
467 ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
468
469 if (ret < 0)
470 strcpy(str_field, FAULT_STRING);
471
472 (*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
473 }
474
475 return len;
476 }
477
static unsigned int trace_stack(struct synth_trace_event *entry,
479 struct synth_event *event,
480 long *stack,
481 unsigned int data_size,
482 unsigned int *n_u64)
483 {
484 unsigned int len;
485 u32 data_offset;
486 void *data_loc;
487
488 data_offset = struct_size(entry, fields, event->n_u64);
489 data_offset += data_size;
490
491 for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
492 if (!stack[len])
493 break;
494 }
495
496 /* Include the zero'd element if it fits */
497 if (len < HIST_STACKTRACE_DEPTH)
498 len++;
499
500 len *= sizeof(long);
501
502 /* Find the dynamic section to copy the stack into. */
503 data_loc = (void *)entry + data_offset;
504 memcpy(data_loc, stack, len);
505
506 /* Fill in the field that holds the offset/len combo */
507 data_offset |= len << 16;
508 *(u32 *)&entry->fields[*n_u64] = data_offset;
509
510 (*n_u64)++;
511
512 return len;
513 }
514
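/*
 * Probe attached to the synthetic event's tracepoint. It is invoked by
 * the histogram trigger code with the resolved variable reference
 * values; it sizes the record (including any dynamic strings or stack
 * traces), reserves ring buffer space and fills in the fields.
 */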
static notrace void trace_event_raw_event_synth(void *__data,
516 u64 *var_ref_vals,
517 unsigned int *var_ref_idx)
518 {
519 unsigned int i, n_u64, val_idx, len, data_size = 0;
520 struct trace_event_file *trace_file = __data;
521 struct synth_trace_event *entry;
522 struct trace_event_buffer fbuffer;
523 struct trace_buffer *buffer;
524 struct synth_event *event;
525 int fields_size = 0;
526
527 event = trace_file->event_call->data;
528
529 if (trace_trigger_soft_disabled(trace_file))
530 return;
531
532 fields_size = event->n_u64 * sizeof(u64);
533
534 for (i = 0; i < event->n_dynamic_fields; i++) {
535 unsigned int field_pos = event->dynamic_fields[i]->field_pos;
536 char *str_val;
537
538 val_idx = var_ref_idx[field_pos];
539 str_val = (char *)(long)var_ref_vals[val_idx];
540
541 if (event->dynamic_fields[i]->is_stack) {
542 len = *((unsigned long *)str_val);
543 len *= sizeof(unsigned long);
544 } else {
545 len = fetch_store_strlen((unsigned long)str_val);
546 }
547
548 fields_size += len;
549 }
550
551 /*
552 * Avoid ring buffer recursion detection, as this event
553 * is being performed within another event.
554 */
555 buffer = trace_file->tr->array_buffer.buffer;
556 ring_buffer_nest_start(buffer);
557
558 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
559 sizeof(*entry) + fields_size);
560 if (!entry)
561 goto out;
562
563 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
564 val_idx = var_ref_idx[i];
565 if (event->fields[i]->is_string) {
566 char *str_val = (char *)(long)var_ref_vals[val_idx];
567
568 len = trace_string(entry, event, str_val,
569 event->fields[i]->is_dynamic,
570 data_size, &n_u64);
571 data_size += len; /* only dynamic string increments */
572 } else if (event->fields[i]->is_stack) {
573 long *stack = (long *)(long)var_ref_vals[val_idx];
574
575 len = trace_stack(entry, event, stack,
576 data_size, &n_u64);
577 data_size += len;
578 } else {
579 struct synth_field *field = event->fields[i];
580 u64 val = var_ref_vals[val_idx];
581
582 switch (field->size) {
583 case 1:
584 *(u8 *)&entry->fields[n_u64] = (u8)val;
585 break;
586
587 case 2:
588 *(u16 *)&entry->fields[n_u64] = (u16)val;
589 break;
590
591 case 4:
592 *(u32 *)&entry->fields[n_u64] = (u32)val;
593 break;
594
595 default:
596 entry->fields[n_u64] = val;
597 break;
598 }
599 n_u64++;
600 }
601 }
602
603 trace_event_buffer_commit(&fbuffer);
604 out:
605 ring_buffer_nest_end(buffer);
606 }
607
static void free_synth_event_print_fmt(struct trace_event_call *call)
609 {
610 if (call) {
611 kfree(call->print_fmt);
612 call->print_fmt = NULL;
613 }
614 }
615
static int __set_synth_event_print_fmt(struct synth_event *event,
617 char *buf, int len)
618 {
619 const char *fmt;
620 int pos = 0;
621 int i;
622
623 /* When len=0, we just calculate the needed length */
624 #define LEN_OR_ZERO (len ? len - pos : 0)
625
626 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
627 for (i = 0; i < event->n_fields; i++) {
628 fmt = synth_field_fmt(event->fields[i]->type);
629 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
630 event->fields[i]->name, fmt,
631 i == event->n_fields - 1 ? "" : ", ");
632 }
633 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
634
635 for (i = 0; i < event->n_fields; i++) {
636 if (event->fields[i]->is_string &&
637 event->fields[i]->is_dynamic)
638 pos += snprintf(buf + pos, LEN_OR_ZERO,
639 ", __get_str(%s)", event->fields[i]->name);
640 else if (event->fields[i]->is_stack)
641 pos += snprintf(buf + pos, LEN_OR_ZERO,
642 ", __get_stacktrace(%s)", event->fields[i]->name);
643 else
644 pos += snprintf(buf + pos, LEN_OR_ZERO,
645 ", REC->%s", event->fields[i]->name);
646 }
647
648 #undef LEN_OR_ZERO
649
650 /* return the length of print_fmt */
651 return pos;
652 }
653
static int set_synth_event_print_fmt(struct trace_event_call *call)
655 {
656 struct synth_event *event = call->data;
657 char *print_fmt;
658 int len;
659
660 /* First: called with 0 length to calculate the needed length */
661 len = __set_synth_event_print_fmt(event, NULL, 0);
662
663 print_fmt = kmalloc(len + 1, GFP_KERNEL);
664 if (!print_fmt)
665 return -ENOMEM;
666
667 /* Second: actually write the @print_fmt */
668 __set_synth_event_print_fmt(event, print_fmt, len + 1);
669 call->print_fmt = print_fmt;
670
671 return 0;
672 }
673
static void free_synth_field(struct synth_field *field)
675 {
676 kfree(field->type);
677 kfree(field->name);
678 kfree(field);
679 }
680
static int check_field_version(const char *prefix, const char *field_type,
682 const char *field_name)
683 {
684 /*
685 * For backward compatibility, the old synthetic event command
686 * format did not require semicolons, and in order to not
687 * break user space, that old format must still work. If a new
688 * feature is added, then the format that uses the new feature
689 * will be required to have semicolons, as nothing that uses
690 * the old format would be using the new, yet to be created,
691 * feature. When a new feature is added, this will detect it,
692 * and return a number greater than 1, and require the format
693 * to use semicolons.
694 */
695 return 1;
696 }
697
static struct synth_field *parse_synth_field(int argc, char **argv,
699 int *consumed, int *field_version)
700 {
701 const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
702 struct synth_field *field;
703 int len, ret = -ENOMEM;
704 struct seq_buf s;
705 ssize_t size;
706
707 if (!strcmp(field_type, "unsigned")) {
708 if (argc < 3) {
709 synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
710 return ERR_PTR(-EINVAL);
711 }
712 prefix = "unsigned ";
713 field_type = argv[1];
714 field_name = argv[2];
715 *consumed += 3;
716 } else {
717 field_name = argv[1];
718 *consumed += 2;
719 }
720
721 if (!field_name) {
722 synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
723 return ERR_PTR(-EINVAL);
724 }
725
726 *field_version = check_field_version(prefix, field_type, field_name);
727
728 field = kzalloc(sizeof(*field), GFP_KERNEL);
729 if (!field)
730 return ERR_PTR(-ENOMEM);
731
732 len = strlen(field_name);
733 array = strchr(field_name, '[');
734 if (array)
735 len -= strlen(array);
736
737 field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
738 if (!field->name)
739 goto free;
740
741 if (!is_good_name(field->name)) {
742 synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
743 ret = -EINVAL;
744 goto free;
745 }
746
747 len = strlen(field_type) + 1;
748
749 if (array)
750 len += strlen(array);
751
752 if (prefix)
753 len += strlen(prefix);
754
755 field->type = kzalloc(len, GFP_KERNEL);
756 if (!field->type)
757 goto free;
758
759 seq_buf_init(&s, field->type, len);
760 if (prefix)
761 seq_buf_puts(&s, prefix);
762 seq_buf_puts(&s, field_type);
763 if (array)
764 seq_buf_puts(&s, array);
765 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
766 goto free;
767
768 s.buffer[s.len] = '\0';
769
770 size = synth_field_size(field->type);
771 if (size < 0) {
772 if (array)
773 synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
774 else
775 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
776 ret = -EINVAL;
777 goto free;
778 } else if (size == 0) {
779 if (synth_field_is_string(field->type) ||
780 synth_field_is_stack(field->type)) {
781 char *type;
782
783 len = sizeof("__data_loc ") + strlen(field->type) + 1;
784 type = kzalloc(len, GFP_KERNEL);
785 if (!type)
786 goto free;
787
788 seq_buf_init(&s, type, len);
789 seq_buf_puts(&s, "__data_loc ");
790 seq_buf_puts(&s, field->type);
791
792 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
793 goto free;
794 s.buffer[s.len] = '\0';
795
796 kfree(field->type);
797 field->type = type;
798
799 field->is_dynamic = true;
800 size = sizeof(u64);
801 } else {
802 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
803 ret = -EINVAL;
804 goto free;
805 }
806 }
807 field->size = size;
808
809 if (synth_field_is_string(field->type))
810 field->is_string = true;
811 else if (synth_field_is_stack(field->type))
812 field->is_stack = true;
813
814 field->is_signed = synth_field_signed(field->type);
815 out:
816 return field;
817 free:
818 free_synth_field(field);
819 field = ERR_PTR(ret);
820 goto out;
821 }
822
static void free_synth_tracepoint(struct tracepoint *tp)
824 {
825 if (!tp)
826 return;
827
828 kfree(tp->name);
829 kfree(tp);
830 }
831
static struct tracepoint *alloc_synth_tracepoint(char *name)
833 {
834 struct tracepoint *tp;
835
836 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
837 if (!tp)
838 return ERR_PTR(-ENOMEM);
839
840 tp->name = kstrdup(name, GFP_KERNEL);
841 if (!tp->name) {
842 kfree(tp);
843 return ERR_PTR(-ENOMEM);
844 }
845
846 return tp;
847 }
848
struct synth_event *find_synth_event(const char *name)
850 {
851 struct dyn_event *pos;
852 struct synth_event *event;
853
854 for_each_dyn_event(pos) {
855 if (!is_synth_event(pos))
856 continue;
857 event = to_synth_event(pos);
858 if (strcmp(event->name, name) == 0)
859 return event;
860 }
861
862 return NULL;
863 }
864
865 static struct trace_event_fields synth_event_fields_array[] = {
866 { .type = TRACE_FUNCTION_TYPE,
867 .define_fields = synth_event_define_fields },
868 {}
869 };
870
static int register_synth_event(struct synth_event *event)
872 {
873 struct trace_event_call *call = &event->call;
874 int ret = 0;
875
876 event->call.class = &event->class;
877 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
878 if (!event->class.system) {
879 ret = -ENOMEM;
880 goto out;
881 }
882
883 event->tp = alloc_synth_tracepoint(event->name);
884 if (IS_ERR(event->tp)) {
885 ret = PTR_ERR(event->tp);
886 event->tp = NULL;
887 goto out;
888 }
889
890 INIT_LIST_HEAD(&call->class->fields);
891 call->event.funcs = &synth_event_funcs;
892 call->class->fields_array = synth_event_fields_array;
893
894 ret = register_trace_event(&call->event);
895 if (!ret) {
896 ret = -ENODEV;
897 goto out;
898 }
899 call->flags = TRACE_EVENT_FL_TRACEPOINT;
900 call->class->reg = trace_event_reg;
901 call->class->probe = trace_event_raw_event_synth;
902 call->data = event;
903 call->tp = event->tp;
904
905 ret = trace_add_event_call(call);
906 if (ret) {
907 pr_warn("Failed to register synthetic event: %s\n",
908 trace_event_name(call));
909 goto err;
910 }
911
912 ret = set_synth_event_print_fmt(call);
913 /* unregister_trace_event() will be called inside */
914 if (ret < 0)
915 trace_remove_event_call(call);
916 out:
917 return ret;
918 err:
919 unregister_trace_event(&call->event);
920 goto out;
921 }
922
static int unregister_synth_event(struct synth_event *event)
924 {
925 struct trace_event_call *call = &event->call;
926 int ret;
927
928 ret = trace_remove_event_call(call);
929
930 return ret;
931 }
932
static void free_synth_event(struct synth_event *event)
934 {
935 unsigned int i;
936
937 if (!event)
938 return;
939
940 for (i = 0; i < event->n_fields; i++)
941 free_synth_field(event->fields[i]);
942
943 kfree(event->fields);
944 kfree(event->dynamic_fields);
945 kfree(event->name);
946 kfree(event->class.system);
947 free_synth_tracepoint(event->tp);
948 free_synth_event_print_fmt(&event->call);
949 kfree(event);
950 }
951
static struct synth_event *alloc_synth_event(const char *name, int n_fields,
953 struct synth_field **fields)
954 {
955 unsigned int i, j, n_dynamic_fields = 0;
956 struct synth_event *event;
957
958 event = kzalloc(sizeof(*event), GFP_KERNEL);
959 if (!event) {
960 event = ERR_PTR(-ENOMEM);
961 goto out;
962 }
963
964 event->name = kstrdup(name, GFP_KERNEL);
965 if (!event->name) {
966 kfree(event);
967 event = ERR_PTR(-ENOMEM);
968 goto out;
969 }
970
971 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
972 if (!event->fields) {
973 free_synth_event(event);
974 event = ERR_PTR(-ENOMEM);
975 goto out;
976 }
977
978 for (i = 0; i < n_fields; i++)
979 if (fields[i]->is_dynamic)
980 n_dynamic_fields++;
981
982 if (n_dynamic_fields) {
983 event->dynamic_fields = kcalloc(n_dynamic_fields,
984 sizeof(*event->dynamic_fields),
985 GFP_KERNEL);
986 if (!event->dynamic_fields) {
987 free_synth_event(event);
988 event = ERR_PTR(-ENOMEM);
989 goto out;
990 }
991 }
992
993 dyn_event_init(&event->devent, &synth_event_ops);
994
995 for (i = 0, j = 0; i < n_fields; i++) {
996 fields[i]->field_pos = i;
997 event->fields[i] = fields[i];
998
999 if (fields[i]->is_dynamic)
1000 event->dynamic_fields[j++] = fields[i];
1001 }
1002 event->n_dynamic_fields = j;
1003 event->n_fields = n_fields;
1004 out:
1005 return event;
1006 }
1007
static int synth_event_check_arg_fn(void *data)
1009 {
1010 struct dynevent_arg_pair *arg_pair = data;
1011 int size;
1012
1013 size = synth_field_size((char *)arg_pair->lhs);
1014 if (size == 0) {
1015 if (strstr((char *)arg_pair->lhs, "["))
1016 return 0;
1017 }
1018
1019 return size ? 0 : -EINVAL;
1020 }
1021
1022 /**
1023 * synth_event_add_field - Add a new field to a synthetic event cmd
1024 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1025 * @type: The type of the new field to add
1026 * @name: The name of the new field to add
1027 *
1028 * Add a new field to a synthetic event cmd object. Field ordering is in
1029 * the same order the fields are added.
1030 *
1031 * See synth_field_size() for available types. If field_name contains
1032 * [n] the field is considered to be an array.
1033 *
1034 * Return: 0 if successful, error otherwise.
1035 */
int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
1037 const char *name)
1038 {
1039 struct dynevent_arg_pair arg_pair;
1040 int ret;
1041
1042 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1043 return -EINVAL;
1044
1045 if (!type || !name)
1046 return -EINVAL;
1047
1048 dynevent_arg_pair_init(&arg_pair, 0, ';');
1049
1050 arg_pair.lhs = type;
1051 arg_pair.rhs = name;
1052
1053 ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
1054 if (ret)
1055 return ret;
1056
1057 if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1058 ret = -EINVAL;
1059
1060 return ret;
1061 }
1062 EXPORT_SYMBOL_GPL(synth_event_add_field);
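
/*
 * Example use (an illustrative sketch, not part of this file; the field
 * names are hypothetical). After @cmd has been set up with
 * synth_event_cmd_init() and synth_event_gen_cmd_start(), individual
 * fields can be appended like so:
 *
 *	ret = synth_event_add_field(&cmd, "u64", "lat_ns");
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "pid_t", "pid");
 */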
1063
1064 /**
1065 * synth_event_add_field_str - Add a new field to a synthetic event cmd
1066 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1067 * @type_name: The type and name of the new field to add, as a single string
1068 *
1069 * Add a new field to a synthetic event cmd object, as a single
1070 * string. The @type_name string is expected to be of the form 'type
 * name', to which a ';' will be appended. No sanity checking is done -
1072 * what's passed in is assumed to already be well-formed. Field
1073 * ordering is in the same order the fields are added.
1074 *
1075 * See synth_field_size() for available types. If field_name contains
1076 * [n] the field is considered to be an array.
1077 *
1078 * Return: 0 if successful, error otherwise.
1079 */
int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
1081 {
1082 struct dynevent_arg arg;
1083 int ret;
1084
1085 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1086 return -EINVAL;
1087
1088 if (!type_name)
1089 return -EINVAL;
1090
1091 dynevent_arg_init(&arg, ';');
1092
1093 arg.str = type_name;
1094
1095 ret = dynevent_arg_add(cmd, &arg, NULL);
1096 if (ret)
1097 return ret;
1098
1099 if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1100 ret = -EINVAL;
1101
1102 return ret;
1103 }
1104 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
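
/*
 * Example use (illustrative sketch, hypothetical field names): the same
 * fields as above can be added as single preformatted strings:
 *
 *	ret = synth_event_add_field_str(&cmd, "u64 lat_ns");
 *	if (!ret)
 *		ret = synth_event_add_field_str(&cmd, "pid_t pid");
 */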
1105
1106 /**
1107 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1108 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1109 * @fields: An array of type/name field descriptions
1110 * @n_fields: The number of field descriptions contained in the fields array
1111 *
1112 * Add a new set of fields to a synthetic event cmd object. The event
1113 * fields that will be defined for the event should be passed in as an
1114 * array of struct synth_field_desc, and the number of elements in the
1115 * array passed in as n_fields. Field ordering will retain the
1116 * ordering given in the fields array.
1117 *
1118 * See synth_field_size() for available types. If field_name contains
1119 * [n] the field is considered to be an array.
1120 *
1121 * Return: 0 if successful, error otherwise.
1122 */
int synth_event_add_fields(struct dynevent_cmd *cmd,
1124 struct synth_field_desc *fields,
1125 unsigned int n_fields)
1126 {
1127 unsigned int i;
1128 int ret = 0;
1129
1130 for (i = 0; i < n_fields; i++) {
1131 if (fields[i].type == NULL || fields[i].name == NULL) {
1132 ret = -EINVAL;
1133 break;
1134 }
1135
1136 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1137 if (ret)
1138 break;
1139 }
1140
1141 return ret;
1142 }
1143 EXPORT_SYMBOL_GPL(synth_event_add_fields);
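
/*
 * Example use (illustrative sketch, the field descriptions are
 * hypothetical):
 *
 *	static struct synth_field_desc fields[] = {
 *		{ .type = "u64",	.name = "lat_ns" },
 *		{ .type = "pid_t",	.name = "pid" },
 *	};
 *
 *	ret = synth_event_add_fields(&cmd, fields, ARRAY_SIZE(fields));
 */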
1144
1145 /**
1146 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1147 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1148 * @name: The name of the synthetic event
1149 * @mod: The module creating the event, NULL if not created from a module
1150 * @args: Variable number of arg (pairs), one pair for each field
1151 *
1152 * NOTE: Users normally won't want to call this function directly, but
1153 * rather use the synth_event_gen_cmd_start() wrapper, which
1154 * automatically adds a NULL to the end of the arg list. If this
1155 * function is used directly, make sure the last arg in the variable
1156 * arg list is NULL.
1157 *
1158 * Generate a synthetic event command to be executed by
1159 * synth_event_gen_cmd_end(). This function can be used to generate
1160 * the complete command or only the first part of it; in the latter
1161 * case, synth_event_add_field(), synth_event_add_field_str(), or
1162 * synth_event_add_fields() can be used to add more fields following
1163 * this.
1164 *
 * There should be an even number of variable args, each pair consisting
1166 * of a type followed by a field name.
1167 *
1168 * See synth_field_size() for available types. If field_name contains
1169 * [n] the field is considered to be an array.
1170 *
1171 * Return: 0 if successful, error otherwise.
1172 */
int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1174 struct module *mod, ...)
1175 {
1176 struct dynevent_arg arg;
1177 va_list args;
1178 int ret;
1179
1180 cmd->event_name = name;
1181 cmd->private_data = mod;
1182
1183 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1184 return -EINVAL;
1185
1186 dynevent_arg_init(&arg, 0);
1187 arg.str = name;
1188 ret = dynevent_arg_add(cmd, &arg, NULL);
1189 if (ret)
1190 return ret;
1191
1192 va_start(args, mod);
1193 for (;;) {
1194 const char *type, *name;
1195
1196 type = va_arg(args, const char *);
1197 if (!type)
1198 break;
1199 name = va_arg(args, const char *);
1200 if (!name)
1201 break;
1202
1203 if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1204 ret = -EINVAL;
1205 break;
1206 }
1207
1208 ret = synth_event_add_field(cmd, type, name);
1209 if (ret)
1210 break;
1211 }
1212 va_end(args);
1213
1214 return ret;
1215 }
1216 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
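
/*
 * Example use (illustrative sketch; the event and fields are
 * hypothetical): generating a complete command in one call via the
 * synth_event_gen_cmd_start() wrapper, then executing it:
 *
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = synth_event_gen_cmd_start(&cmd, "wakeup_lat", THIS_MODULE,
 *					"pid_t", "pid",
 *					"u64", "lat_ns");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */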
1217
1218 /**
1219 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1220 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1221 * @name: The name of the synthetic event
1222 * @fields: An array of type/name field descriptions
1223 * @n_fields: The number of field descriptions contained in the fields array
1224 *
1225 * Generate a synthetic event command to be executed by
1226 * synth_event_gen_cmd_end(). This function can be used to generate
1227 * the complete command or only the first part of it; in the latter
1228 * case, synth_event_add_field(), synth_event_add_field_str(), or
1229 * synth_event_add_fields() can be used to add more fields following
1230 * this.
1231 *
1232 * The event fields that will be defined for the event should be
1233 * passed in as an array of struct synth_field_desc, and the number of
1234 * elements in the array passed in as n_fields. Field ordering will
1235 * retain the ordering given in the fields array.
1236 *
1237 * See synth_field_size() for available types. If field_name contains
1238 * [n] the field is considered to be an array.
1239 *
1240 * Return: 0 if successful, error otherwise.
1241 */
int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1243 struct module *mod,
1244 struct synth_field_desc *fields,
1245 unsigned int n_fields)
1246 {
1247 struct dynevent_arg arg;
1248 unsigned int i;
1249 int ret = 0;
1250
1251 cmd->event_name = name;
1252 cmd->private_data = mod;
1253
1254 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1255 return -EINVAL;
1256
1257 if (n_fields > SYNTH_FIELDS_MAX)
1258 return -EINVAL;
1259
1260 dynevent_arg_init(&arg, 0);
1261 arg.str = name;
1262 ret = dynevent_arg_add(cmd, &arg, NULL);
1263 if (ret)
1264 return ret;
1265
1266 for (i = 0; i < n_fields; i++) {
1267 if (fields[i].type == NULL || fields[i].name == NULL)
1268 return -EINVAL;
1269
1270 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1271 if (ret)
1272 break;
1273 }
1274
1275 return ret;
1276 }
1277 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
1278
static int __create_synth_event(const char *name, const char *raw_fields)
1280 {
1281 char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1282 struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1283 int consumed, cmd_version = 1, n_fields_this_loop;
1284 int i, argc, n_fields = 0, ret = 0;
1285 struct synth_event *event = NULL;
1286
1287 /*
1288 * Argument syntax:
1289 * - Add synthetic event: <event_name> field[;field] ...
1290 * - Remove synthetic event: !<event_name> field[;field] ...
1291 * where 'field' = type field_name
1292 */
1293
1294 if (name[0] == '\0') {
1295 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1296 return -EINVAL;
1297 }
1298
1299 if (!is_good_name(name)) {
1300 synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1301 return -EINVAL;
1302 }
1303
1304 mutex_lock(&event_mutex);
1305
1306 event = find_synth_event(name);
1307 if (event) {
1308 synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1309 ret = -EEXIST;
1310 goto err;
1311 }
1312
1313 tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1314 if (!tmp_fields) {
1315 ret = -ENOMEM;
1316 goto err;
1317 }
1318
1319 while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1320 argv = argv_split(GFP_KERNEL, field_str, &argc);
1321 if (!argv) {
1322 ret = -ENOMEM;
1323 goto err;
1324 }
1325
1326 if (!argc) {
1327 argv_free(argv);
1328 continue;
1329 }
1330
1331 n_fields_this_loop = 0;
1332 consumed = 0;
1333 while (argc > consumed) {
1334 int field_version;
1335
1336 field = parse_synth_field(argc - consumed,
1337 argv + consumed, &consumed,
1338 &field_version);
1339 if (IS_ERR(field)) {
1340 ret = PTR_ERR(field);
1341 goto err_free_arg;
1342 }
1343
1344 /*
1345 * Track the highest version of any field we
1346 * found in the command.
1347 */
1348 if (field_version > cmd_version)
1349 cmd_version = field_version;
1350
1351 /*
1352 * Now sort out what is and isn't valid for
1353 * each supported version.
1354 *
1355 * If we see more than 1 field per loop, it
1356 * means we have multiple fields between
1357 * semicolons, and that's something we no
1358 * longer support in a version 2 or greater
1359 * command.
1360 */
1361 if (cmd_version > 1 && n_fields_this_loop >= 1) {
1362 synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1363 ret = -EINVAL;
1364 goto err_free_arg;
1365 }
1366
1367 if (n_fields == SYNTH_FIELDS_MAX) {
1368 synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1369 ret = -EINVAL;
1370 goto err_free_arg;
1371 }
1372 fields[n_fields++] = field;
1373
1374 n_fields_this_loop++;
1375 }
1376 argv_free(argv);
1377
1378 if (consumed < argc) {
1379 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1380 ret = -EINVAL;
1381 goto err;
1382 }
1383
1384 }
1385
1386 if (n_fields == 0) {
1387 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1388 ret = -EINVAL;
1389 goto err;
1390 }
1391
1392 event = alloc_synth_event(name, n_fields, fields);
1393 if (IS_ERR(event)) {
1394 ret = PTR_ERR(event);
1395 event = NULL;
1396 goto err;
1397 }
1398 ret = register_synth_event(event);
1399 if (!ret)
1400 dyn_event_add(&event->devent, &event->call);
1401 else
1402 free_synth_event(event);
1403 out:
1404 mutex_unlock(&event_mutex);
1405
1406 kfree(saved_fields);
1407
1408 return ret;
1409 err_free_arg:
1410 argv_free(argv);
1411 err:
1412 for (i = 0; i < n_fields; i++)
1413 free_synth_field(fields[i]);
1414
1415 goto out;
1416 }
1417
1418 /**
1419 * synth_event_create - Create a new synthetic event
1420 * @name: The name of the new synthetic event
1421 * @fields: An array of type/name field descriptions
1422 * @n_fields: The number of field descriptions contained in the fields array
1423 * @mod: The module creating the event, NULL if not created from a module
1424 *
1425 * Create a new synthetic event with the given name under the
1426 * trace/events/synthetic/ directory. The event fields that will be
1427 * defined for the event should be passed in as an array of struct
 * synth_field_desc, and the number of elements in the array passed in as
1429 * n_fields. Field ordering will retain the ordering given in the
1430 * fields array.
1431 *
1432 * If the new synthetic event is being created from a module, the mod
1433 * param must be non-NULL. This will ensure that the trace buffer
1434 * won't contain unreadable events.
1435 *
1436 * The new synth event should be deleted using synth_event_delete()
1437 * function. The new synthetic event can be generated from modules or
1438 * other kernel code using trace_synth_event() and related functions.
1439 *
1440 * Return: 0 if successful, error otherwise.
1441 */
int synth_event_create(const char *name, struct synth_field_desc *fields,
1443 unsigned int n_fields, struct module *mod)
1444 {
1445 struct dynevent_cmd cmd;
1446 char *buf;
1447 int ret;
1448
1449 buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1450 if (!buf)
1451 return -ENOMEM;
1452
1453 synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1454
1455 ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1456 fields, n_fields);
1457 if (ret)
1458 goto out;
1459
1460 ret = synth_event_gen_cmd_end(&cmd);
1461 out:
1462 kfree(buf);
1463
1464 return ret;
1465 }
1466 EXPORT_SYMBOL_GPL(synth_event_create);
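
/*
 * Example use (illustrative sketch; the event name and fields are
 * hypothetical):
 *
 *	static struct synth_field_desc wakeup_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "char[16]",	.name = "comm" },
 *		{ .type = "u64",	.name = "lat_ns" },
 *	};
 *
 *	ret = synth_event_create("wakeup_lat", wakeup_fields,
 *				 ARRAY_SIZE(wakeup_fields), THIS_MODULE);
 */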
1467
static int destroy_synth_event(struct synth_event *se)
1469 {
1470 int ret;
1471
1472 if (se->ref)
1473 return -EBUSY;
1474
1475 if (trace_event_dyn_busy(&se->call))
1476 return -EBUSY;
1477
1478 ret = unregister_synth_event(se);
1479 if (!ret) {
1480 dyn_event_remove(&se->devent);
1481 free_synth_event(se);
1482 }
1483
1484 return ret;
1485 }
1486
1487 /**
1488 * synth_event_delete - Delete a synthetic event
 * @event_name: The name of the synthetic event to delete
1490 *
1491 * Delete a synthetic event that was created with synth_event_create().
1492 *
1493 * Return: 0 if successful, error otherwise.
1494 */
int synth_event_delete(const char *event_name)
1496 {
1497 struct synth_event *se = NULL;
1498 struct module *mod = NULL;
1499 int ret = -ENOENT;
1500
1501 mutex_lock(&event_mutex);
1502 se = find_synth_event(event_name);
1503 if (se) {
1504 mod = se->mod;
1505 ret = destroy_synth_event(se);
1506 }
1507 mutex_unlock(&event_mutex);
1508
1509 if (mod) {
1510 /*
1511 * It is safest to reset the ring buffer if the module
1512 * being unloaded registered any events that were
1513 * used. The only worry is if a new module gets
1514 * loaded, and takes on the same id as the events of
1515 * this module. When printing out the buffer, traced
1516 * events left over from this module may be passed to
1517 * the new module events and unexpected results may
1518 * occur.
1519 */
1520 tracing_reset_all_online_cpus();
1521 }
1522
1523 return ret;
1524 }
1525 EXPORT_SYMBOL_GPL(synth_event_delete);
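
/*
 * Example (illustrative): the hypothetical event created above would be
 * removed with
 *
 *	ret = synth_event_delete("wakeup_lat");
 */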
1526
static int check_command(const char *raw_command)
1528 {
1529 char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1530 int argc, ret = 0;
1531
1532 cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1533 if (!cmd)
1534 return -ENOMEM;
1535
1536 name_and_field = strsep(&cmd, ";");
1537 if (!name_and_field) {
1538 ret = -EINVAL;
1539 goto free;
1540 }
1541
1542 if (name_and_field[0] == '!')
1543 goto free;
1544
1545 argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1546 if (!argv) {
1547 ret = -ENOMEM;
1548 goto free;
1549 }
1550 argv_free(argv);
1551
1552 if (argc < 3)
1553 ret = -EINVAL;
1554 free:
1555 kfree(saved_cmd);
1556
1557 return ret;
1558 }
1559
static int create_or_delete_synth_event(const char *raw_command)
1561 {
1562 char *name = NULL, *fields, *p;
1563 int ret = 0;
1564
1565 raw_command = skip_spaces(raw_command);
1566 if (raw_command[0] == '\0')
1567 return ret;
1568
1569 last_cmd_set(raw_command);
1570
1571 ret = check_command(raw_command);
1572 if (ret) {
1573 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1574 return ret;
1575 }
1576
1577 p = strpbrk(raw_command, " \t");
1578 if (!p && raw_command[0] != '!') {
1579 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1580 ret = -EINVAL;
1581 goto free;
1582 }
1583
1584 name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1585 if (!name)
1586 return -ENOMEM;
1587
1588 if (name[0] == '!') {
1589 ret = synth_event_delete(name + 1);
1590 goto free;
1591 }
1592
1593 fields = skip_spaces(p);
1594
1595 ret = __create_synth_event(name, fields);
1596 free:
1597 kfree(name);
1598
1599 return ret;
1600 }
1601
static int synth_event_run_command(struct dynevent_cmd *cmd)
1603 {
1604 struct synth_event *se;
1605 int ret;
1606
1607 ret = create_or_delete_synth_event(cmd->seq.buffer);
1608 if (ret)
1609 return ret;
1610
1611 se = find_synth_event(cmd->event_name);
1612 if (WARN_ON(!se))
1613 return -ENOENT;
1614
1615 se->mod = cmd->private_data;
1616
1617 return ret;
1618 }
1619
1620 /**
1621 * synth_event_cmd_init - Initialize a synthetic event command object
1622 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1623 * @buf: A pointer to the buffer used to build the command
1624 * @maxlen: The length of the buffer passed in @buf
1625 *
1626 * Initialize a synthetic event command object. Use this before
 * calling any of the other dynevent_cmd functions.
1628 */
void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1630 {
1631 dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1632 synth_event_run_command);
1633 }
1634 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1635
1636 static inline int
__synth_event_trace_init(struct trace_event_file *file,
1638 struct synth_event_trace_state *trace_state)
1639 {
1640 int ret = 0;
1641
1642 memset(trace_state, '\0', sizeof(*trace_state));
1643
1644 /*
1645 * Normal event tracing doesn't get called at all unless the
1646 * ENABLED bit is set (which attaches the probe thus allowing
1647 * this code to be called, etc). Because this is called
1648 * directly by the user, we don't have that but we still need
1649 * to honor not logging when disabled. For the iterated
1650 * trace case, we save the enabled state upon start and just
1651 * ignore the following data calls.
1652 */
1653 if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1654 trace_trigger_soft_disabled(file)) {
1655 trace_state->disabled = true;
1656 ret = -ENOENT;
1657 goto out;
1658 }
1659
1660 trace_state->event = file->event_call->data;
1661 out:
1662 return ret;
1663 }
1664
1665 static inline int
__synth_event_trace_start(struct trace_event_file *file,
1667 struct synth_event_trace_state *trace_state,
1668 int dynamic_fields_size)
1669 {
1670 int entry_size, fields_size = 0;
1671 int ret = 0;
1672
1673 fields_size = trace_state->event->n_u64 * sizeof(u64);
1674 fields_size += dynamic_fields_size;
1675
1676 /*
1677 * Avoid ring buffer recursion detection, as this event
1678 * is being performed within another event.
1679 */
1680 trace_state->buffer = file->tr->array_buffer.buffer;
1681 ring_buffer_nest_start(trace_state->buffer);
1682
1683 entry_size = sizeof(*trace_state->entry) + fields_size;
1684 trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1685 file,
1686 entry_size);
1687 if (!trace_state->entry) {
1688 ring_buffer_nest_end(trace_state->buffer);
1689 ret = -EINVAL;
1690 }
1691
1692 return ret;
1693 }
1694
1695 static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
1697 {
1698 trace_event_buffer_commit(&trace_state->fbuffer);
1699
1700 ring_buffer_nest_end(trace_state->buffer);
1701 }
1702
1703 /**
1704 * synth_event_trace - Trace a synthetic event
1705 * @file: The trace_event_file representing the synthetic event
 * @n_vals: The number of values passed in the variable argument list
1707 * @args: Variable number of args containing the event values
1708 *
1709 * Trace a synthetic event using the values passed in the variable
1710 * argument list.
1711 *
 * The argument list should be a list of 'n_vals' u64 values. The number
 * of vals must match the number of fields in the synthetic event, and
1714 * must be in the same order as the synthetic event fields.
1715 *
1716 * All vals should be cast to u64, and string vals are just pointers
1717 * to strings, cast to u64. Strings will be copied into space
1718 * reserved in the event for the string, using these pointers.
1719 *
1720 * Return: 0 on success, err otherwise.
1721 */
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1723 {
1724 unsigned int i, n_u64, len, data_size = 0;
1725 struct synth_event_trace_state state;
1726 va_list args;
1727 int ret;
1728
1729 ret = __synth_event_trace_init(file, &state);
1730 if (ret) {
1731 if (ret == -ENOENT)
1732 ret = 0; /* just disabled, not really an error */
1733 return ret;
1734 }
1735
1736 if (state.event->n_dynamic_fields) {
1737 va_start(args, n_vals);
1738
1739 for (i = 0; i < state.event->n_fields; i++) {
1740 u64 val = va_arg(args, u64);
1741
1742 if (state.event->fields[i]->is_string &&
1743 state.event->fields[i]->is_dynamic) {
1744 char *str_val = (char *)(long)val;
1745
1746 data_size += strlen(str_val) + 1;
1747 }
1748 }
1749
1750 va_end(args);
1751 }
1752
1753 ret = __synth_event_trace_start(file, &state, data_size);
1754 if (ret)
1755 return ret;
1756
1757 if (n_vals != state.event->n_fields) {
1758 ret = -EINVAL;
1759 goto out;
1760 }
1761
1762 data_size = 0;
1763
1764 va_start(args, n_vals);
1765 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1766 u64 val;
1767
1768 val = va_arg(args, u64);
1769
1770 if (state.event->fields[i]->is_string) {
1771 char *str_val = (char *)(long)val;
1772
1773 len = trace_string(state.entry, state.event, str_val,
1774 state.event->fields[i]->is_dynamic,
1775 data_size, &n_u64);
1776 data_size += len; /* only dynamic string increments */
1777 } else {
1778 struct synth_field *field = state.event->fields[i];
1779
1780 switch (field->size) {
1781 case 1:
1782 *(u8 *)&state.entry->fields[n_u64] = (u8)val;
1783 break;
1784
1785 case 2:
1786 *(u16 *)&state.entry->fields[n_u64] = (u16)val;
1787 break;
1788
1789 case 4:
1790 *(u32 *)&state.entry->fields[n_u64] = (u32)val;
1791 break;
1792
1793 default:
1794 state.entry->fields[n_u64] = val;
1795 break;
1796 }
1797 n_u64++;
1798 }
1799 }
1800 va_end(args);
1801 out:
1802 __synth_event_trace_end(&state);
1803
1804 return ret;
1805 }
1806 EXPORT_SYMBOL_GPL(synth_event_trace);
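
/*
 * Example use (illustrative sketch, assuming the hypothetical
 * "wakeup_lat" event above and @file looked up with
 * trace_get_event_file()):
 *
 *	ret = synth_event_trace(file, 3,
 *				(u64)pid,
 *				(u64)(long)comm,
 *				(u64)lat_ns);
 */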
1807
1808 /**
1809 * synth_event_trace_array - Trace a synthetic event from an array
1810 * @file: The trace_event_file representing the synthetic event
1811 * @vals: Array of values
1812 * @n_vals: The number of values in vals
1813 *
1814 * Trace a synthetic event using the values passed in as 'vals'.
1815 *
1816 * The 'vals' array is just an array of 'n_vals' u64. The number of
 * vals must match the number of fields in the synthetic event, and
1818 * must be in the same order as the synthetic event fields.
1819 *
1820 * All vals should be cast to u64, and string vals are just pointers
1821 * to strings, cast to u64. Strings will be copied into space
1822 * reserved in the event for the string, using these pointers.
1823 *
1824 * Return: 0 on success, err otherwise.
1825 */
int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1827 unsigned int n_vals)
1828 {
1829 unsigned int i, n_u64, field_pos, len, data_size = 0;
1830 struct synth_event_trace_state state;
1831 char *str_val;
1832 int ret;
1833
1834 ret = __synth_event_trace_init(file, &state);
1835 if (ret) {
1836 if (ret == -ENOENT)
1837 ret = 0; /* just disabled, not really an error */
1838 return ret;
1839 }
1840
1841 if (state.event->n_dynamic_fields) {
1842 for (i = 0; i < state.event->n_dynamic_fields; i++) {
1843 field_pos = state.event->dynamic_fields[i]->field_pos;
1844 str_val = (char *)(long)vals[field_pos];
1845 len = strlen(str_val) + 1;
1846 data_size += len;
1847 }
1848 }
1849
1850 ret = __synth_event_trace_start(file, &state, data_size);
1851 if (ret)
1852 return ret;
1853
1854 if (n_vals != state.event->n_fields) {
1855 ret = -EINVAL;
1856 goto out;
1857 }
1858
1859 data_size = 0;
1860
1861 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1862 if (state.event->fields[i]->is_string) {
1863 char *str_val = (char *)(long)vals[i];
1864
1865 len = trace_string(state.entry, state.event, str_val,
1866 state.event->fields[i]->is_dynamic,
1867 data_size, &n_u64);
1868 data_size += len; /* only dynamic string increments */
1869 } else {
1870 struct synth_field *field = state.event->fields[i];
1871 u64 val = vals[i];
1872
1873 switch (field->size) {
1874 case 1:
1875 *(u8 *)&state.entry->fields[n_u64] = (u8)val;
1876 break;
1877
1878 case 2:
1879 *(u16 *)&state.entry->fields[n_u64] = (u16)val;
1880 break;
1881
1882 case 4:
1883 *(u32 *)&state.entry->fields[n_u64] = (u32)val;
1884 break;
1885
1886 default:
1887 state.entry->fields[n_u64] = val;
1888 break;
1889 }
1890 n_u64++;
1891 }
1892 }
1893 out:
1894 __synth_event_trace_end(&state);
1895
1896 return ret;
1897 }
1898 EXPORT_SYMBOL_GPL(synth_event_trace_array);
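
/*
 * Example use (illustrative sketch for the same hypothetical event):
 *
 *	u64 vals[3];
 *
 *	vals[0] = (u64)pid;
 *	vals[1] = (u64)(long)comm;
 *	vals[2] = (u64)lat_ns;
 *	ret = synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
 */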
1899
1900 /**
1901 * synth_event_trace_start - Start piecewise synthetic event trace
1902 * @file: The trace_event_file representing the synthetic event
1903 * @trace_state: A pointer to object tracking the piecewise trace state
1904 *
1905 * Start the trace of a synthetic event field-by-field rather than all
1906 * at once.
1907 *
1908 * This function 'opens' an event trace, which means space is reserved
1909 * for the event in the trace buffer, after which the event's
1910 * individual field values can be set through either
1911 * synth_event_add_next_val() or synth_event_add_val().
1912 *
1913 * A pointer to a trace_state object is passed in, which will keep
1914 * track of the current event trace state until the event trace is
1915 * closed (and the event finally traced) using
1916 * synth_event_trace_end().
1917 *
1918 * Note that synth_event_trace_end() must be called after all values
1919 * have been added for each event trace, regardless of whether adding
1920 * all field values succeeded or not.
1921 *
1922 * Note also that for a given event trace, all fields must be added
1923 * using either synth_event_add_next_val() or synth_event_add_val()
1924 * but not both together or interleaved.
1925 *
1926 * Return: 0 on success, err otherwise.
1927 */
int synth_event_trace_start(struct trace_event_file *file,
1929 struct synth_event_trace_state *trace_state)
1930 {
1931 int ret;
1932
1933 if (!trace_state)
1934 return -EINVAL;
1935
1936 ret = __synth_event_trace_init(file, trace_state);
1937 if (ret) {
1938 if (ret == -ENOENT)
1939 ret = 0; /* just disabled, not really an error */
1940 return ret;
1941 }
1942
1943 if (trace_state->event->n_dynamic_fields)
1944 return -ENOTSUPP;
1945
1946 ret = __synth_event_trace_start(file, trace_state, 0);
1947
1948 return ret;
1949 }
1950 EXPORT_SYMBOL_GPL(synth_event_trace_start);
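
/*
 * Example use (illustrative sketch) of a piecewise trace with the
 * in-order variant, again for the hypothetical "wakeup_lat" event:
 *
 *	struct synth_event_trace_state state;
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (!ret) {
 *		ret = synth_event_add_next_val((u64)pid, &state);
 *		if (!ret)
 *			ret = synth_event_add_next_val((u64)(long)comm, &state);
 *		if (!ret)
 *			ret = synth_event_add_next_val((u64)lat_ns, &state);
 *		synth_event_trace_end(&state);
 *	}
 */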
1951
static int __synth_event_add_val(const char *field_name, u64 val,
1953 struct synth_event_trace_state *trace_state)
1954 {
1955 struct synth_field *field = NULL;
1956 struct synth_trace_event *entry;
1957 struct synth_event *event;
1958 int i, ret = 0;
1959
1960 if (!trace_state) {
1961 ret = -EINVAL;
1962 goto out;
1963 }
1964
1965 /* can't mix add_next_synth_val() with add_synth_val() */
1966 if (field_name) {
1967 if (trace_state->add_next) {
1968 ret = -EINVAL;
1969 goto out;
1970 }
1971 trace_state->add_name = true;
1972 } else {
1973 if (trace_state->add_name) {
1974 ret = -EINVAL;
1975 goto out;
1976 }
1977 trace_state->add_next = true;
1978 }
1979
1980 if (trace_state->disabled)
1981 goto out;
1982
1983 event = trace_state->event;
1984 if (trace_state->add_name) {
1985 for (i = 0; i < event->n_fields; i++) {
1986 field = event->fields[i];
1987 if (strcmp(field->name, field_name) == 0)
1988 break;
1989 }
1990 if (!field) {
1991 ret = -EINVAL;
1992 goto out;
1993 }
1994 } else {
1995 if (trace_state->cur_field >= event->n_fields) {
1996 ret = -EINVAL;
1997 goto out;
1998 }
1999 field = event->fields[trace_state->cur_field++];
2000 }
2001
2002 entry = trace_state->entry;
2003 if (field->is_string) {
2004 char *str_val = (char *)(long)val;
2005 char *str_field;
2006
2007 if (field->is_dynamic) { /* the piecewise interface can't do dynamic strings */
2008 ret = -EINVAL;
2009 goto out;
2010 }
2011
2012 if (!str_val) {
2013 ret = -EINVAL;
2014 goto out;
2015 }
2016
2017 str_field = (char *)&entry->fields[field->offset];
2018 strscpy(str_field, str_val, STR_VAR_LEN_MAX);
2019 } else {
2020 switch (field->size) {
2021 case 1:
2022 *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
2023 break;
2024
2025 case 2:
2026 *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
2027 break;
2028
2029 case 4:
2030 *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
2031 break;
2032
2033 default:
2034 trace_state->entry->fields[field->offset] = val;
2035 break;
2036 }
2037 }
2038 out:
2039 return ret;
2040 }
2041
2042 /**
2043 * synth_event_add_next_val - Add the next field's value to an open synth trace
2044 * @val: The value to set the next field to
2045 * @trace_state: A pointer to object tracking the piecewise trace state
2046 *
2047 * Set the value of the next field in an event that's been opened by
2048 * synth_event_trace_start().
2049 *
2050 * The val param should be the value cast to u64. If the value points
2051 * to a string, the val param should be a char * cast to u64.
2052 *
2053 * This function assumes all the fields in an event are to be set one
2054 * after another - successive calls to this function are made, one for
2055 * each field, in the order of the fields in the event, until all
2056 * fields have been set. If you'd rather set each field individually
2057 * without regard to ordering, synth_event_add_val() can be used
2058 * instead.
2059 *
2060 * Note however that synth_event_add_next_val() and
2061 * synth_event_add_val() can't be intermixed for a given event trace -
2062 * one or the other but not both can be used at the same time.
2063 *
2064 * Note also that synth_event_trace_end() must be called after all
2065 * values have been added for each event trace, regardless of whether
2066 * adding all field values succeeded or not.
2067 *
2068 * Return: 0 on success, err otherwise.
2069 */
2070 int synth_event_add_next_val(u64 val,
2071 struct synth_event_trace_state *trace_state)
2072 {
2073 return __synth_event_add_val(NULL, val, trace_state);
2074 }
2075 EXPORT_SYMBOL_GPL(synth_event_add_next_val);
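/*
 * Illustrative sketch of the in-order piecewise flow (not part of this
 * file).  The event fields ("u64 id; u64 lat") are hypothetical and
 * 'file' is assumed to have been obtained beforehand.  Note that
 * synth_event_trace_end() is called even if adding a value fails:
 *
 *	struct synth_event_trace_state trace_state;
 *	int ret;
 *
 *	ret = synth_event_trace_start(file, &trace_state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_next_val(777, &trace_state);		(field 'id')
 *	if (ret)
 *		goto out;
 *	ret = synth_event_add_next_val(1000000, &trace_state);		(field 'lat')
 * out:
 *	synth_event_trace_end(&trace_state);
 *	return ret;
 */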
2076
2077 /**
2078 * synth_event_add_val - Add a named field's value to an open synth trace
2079 * @field_name: The name of the synthetic event field value to set
2080 * @val: The value to set the named field to
2081 * @trace_state: A pointer to object tracking the piecewise trace state
2082 *
2083 * Set the value of the named field in an event that's been opened by
2084 * synth_event_trace_start().
2085 *
2086 * The val param should be the value cast to u64. If the value points
2087 * to a string, the val param should be a char * cast to u64.
2088 *
2089 * This function looks up the field name, and if found, sets the field
2090 * to the specified value. This lookup makes this function more
2091 * expensive than synth_event_add_next_val(), so use that or the
2092 * non-piecewise synth_event_trace() instead if efficiency is more
2093 * important.
2094 *
2095 * Note however that synth_event_add_next_val() and
2096 * synth_event_add_val() can't be intermixed for a given event trace -
2097 * one or the other but not both can be used at the same time.
2098 *
2099 * Note also that synth_event_trace_end() must be called after all
2100 * values have been added for each event trace, regardless of whether
2101 * adding all field values succeeded or not.
2102 *
2103 * Return: 0 on success, err otherwise.
2104 */
2105 int synth_event_add_val(const char *field_name, u64 val,
2106 struct synth_event_trace_state *trace_state)
2107 {
2108 return __synth_event_add_val(field_name, val, trace_state);
2109 }
2110 EXPORT_SYMBOL_GPL(synth_event_add_val);
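/*
 * Illustrative sketch of the by-name variant (not part of this file);
 * field names are hypothetical and may be set in any order, but can't be
 * mixed with synth_event_add_next_val() in the same trace:
 *
 *	ret = synth_event_add_val("lat", 1000000, &trace_state);
 *	if (!ret)
 *		ret = synth_event_add_val("id", 777, &trace_state);
 *	synth_event_trace_end(&trace_state);
 */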
2111
2112 /**
2113 * synth_event_trace_end - End piecewise synthetic event trace
2114 * @trace_state: A pointer to object tracking the piecewise trace state
2115 *
2116 * End the trace of a synthetic event opened by
2117 * synth_event_trace_start().
2118 *
2119 * This function 'closes' an event trace, which means it commits the
2120 * reserved event and cleans up other loose ends.
2121 *
2122 * A pointer to a trace_state object is passed in, which will keep
2123 * track of the current event trace state opened with
2124 * synth_event_trace_start().
2125 *
2126 * Note that this function must be called after all values have been
2127 * added for each event trace, regardless of whether adding all field
2128 * values succeeded or not.
2129 *
2130 * Return: 0 on success, err otherwise.
2131 */
2132 int synth_event_trace_end(struct synth_event_trace_state *trace_state)
2133 {
2134 if (!trace_state)
2135 return -EINVAL;
2136
2137 __synth_event_trace_end(trace_state);
2138
2139 return 0;
2140 }
2141 EXPORT_SYMBOL_GPL(synth_event_trace_end);
2142
2143 static int create_synth_event(const char *raw_command)
2144 {
2145 char *fields, *p;
2146 const char *name;
2147 int len, ret = 0;
2148
2149 raw_command = skip_spaces(raw_command);
2150 if (raw_command[0] == '\0')
2151 return ret;
2152
2153 last_cmd_set(raw_command);
2154
2155 name = raw_command;
2156
2157 /* Don't try to process if not our system */
2158 if (name[0] != 's' || name[1] != ':')
2159 return -ECANCELED;
2160 name += 2;
2161
2162 p = strpbrk(raw_command, " \t");
2163 if (!p) {
2164 synth_err(SYNTH_ERR_INVALID_CMD, 0);
2165 return -EINVAL;
2166 }
2167
2168 fields = skip_spaces(p);
2169
2170 /* This interface accepts a group name prefix */
2171 if (strchr(name, '/')) {
2172 len = str_has_prefix(name, SYNTH_SYSTEM "/");
2173 if (len == 0) {
2174 synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
2175 return -EINVAL;
2176 }
2177 name += len;
2178 }
2179
2180 len = name - raw_command;
2181
2182 ret = check_command(raw_command + len);
2183 if (ret) {
2184 synth_err(SYNTH_ERR_INVALID_CMD, 0);
2185 return ret;
2186 }
2187
2188 name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
2189 if (!name)
2190 return -ENOMEM;
2191
2192 ret = __create_synth_event(name, fields);
2193
2194 kfree(name);
2195
2196 return ret;
2197 }
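/*
 * For reference, a sketch of the command format parsed above, as written
 * to the tracefs 'dynamic_events' file (the event name and field list are
 * hypothetical; the "synthetic/" group prefix is optional):
 *
 *	s:synthetic/myevent u64 lat; pid_t pid
 *	s:myevent u64 lat; pid_t pid
 */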
2198
2199 static int synth_event_release(struct dyn_event *ev)
2200 {
2201 struct synth_event *event = to_synth_event(ev);
2202 int ret;
2203
2204 if (event->ref)
2205 return -EBUSY;
2206
2207 if (trace_event_dyn_busy(&event->call))
2208 return -EBUSY;
2209
2210 ret = unregister_synth_event(event);
2211 if (ret)
2212 return ret;
2213
2214 dyn_event_remove(ev);
2215 free_synth_event(event);
2216 return 0;
2217 }
2218
2219 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
2220 {
2221 struct synth_field *field;
2222 unsigned int i;
2223 char *type, *t;
2224
2225 seq_printf(m, "%s\t", event->name);
2226
2227 for (i = 0; i < event->n_fields; i++) {
2228 field = event->fields[i];
2229
2230 type = field->type;
2231 t = strstr(type, "__data_loc");
2232 if (t) { /* __data_loc belongs in format but not event desc */
2233 t += sizeof("__data_loc");
2234 type = t;
2235 }
2236
2237 /* parameter values */
2238 seq_printf(m, "%s %s%s", type, field->name,
2239 i == event->n_fields - 1 ? "" : "; ");
2240 }
2241
2242 seq_putc(m, '\n');
2243
2244 return 0;
2245 }
2246
2247 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
2248 {
2249 struct synth_event *event = to_synth_event(ev);
2250
2251 seq_printf(m, "s:%s/", event->class.system);
2252
2253 return __synth_event_show(m, event);
2254 }
2255
2256 static int synth_events_seq_show(struct seq_file *m, void *v)
2257 {
2258 struct dyn_event *ev = v;
2259
2260 if (!is_synth_event(ev))
2261 return 0;
2262
2263 return __synth_event_show(m, to_synth_event(ev));
2264 }
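/*
 * For reference (hypothetical event): an event defined as
 * "myevent u64 lat; pid_t pid" is listed as
 *
 *	myevent	u64 lat; pid_t pid
 *
 * in the 'synthetic_events' file (synth_events_seq_show() above), and as
 *
 *	s:synthetic/myevent	u64 lat; pid_t pid
 *
 * in 'dynamic_events' (synth_event_show()).
 */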
2265
2266 static const struct seq_operations synth_events_seq_op = {
2267 .start = dyn_event_seq_start,
2268 .next = dyn_event_seq_next,
2269 .stop = dyn_event_seq_stop,
2270 .show = synth_events_seq_show,
2271 };
2272
2273 static int synth_events_open(struct inode *inode, struct file *file)
2274 {
2275 int ret;
2276
2277 ret = security_locked_down(LOCKDOWN_TRACEFS);
2278 if (ret)
2279 return ret;
2280
2281 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2282 ret = dyn_events_release_all(&synth_event_ops);
2283 if (ret < 0)
2284 return ret;
2285 }
2286
2287 return seq_open(file, &synth_events_seq_op);
2288 }
2289
2290 static ssize_t synth_events_write(struct file *file,
2291 const char __user *buffer,
2292 size_t count, loff_t *ppos)
2293 {
2294 return trace_parse_run_command(file, buffer, count, ppos,
2295 create_or_delete_synth_event);
2296 }
2297
2298 static const struct file_operations synth_events_fops = {
2299 .open = synth_events_open,
2300 .write = synth_events_write,
2301 .read = seq_read,
2302 .llseek = seq_lseek,
2303 .release = seq_release,
2304 };
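/*
 * Sketch of how the 'synthetic_events' file is driven from user space
 * (the event name and fields are hypothetical).  Appending a command
 * creates an event, "!<name>" deletes it, and opening the file for
 * writing with O_TRUNC attempts to remove all synthetic events:
 *
 *	myevent u64 lat; pid_t pid	(create)
 *	!myevent			(delete)
 */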
2305
2306 /*
2307 * Register the synthetic event dynevent ops at core_initcall. This allows
2308 * the kernel to set up dynamic events in postcore_initcall without tracefs.
2309 */
2310 static __init int trace_events_synth_init_early(void)
2311 {
2312 int err = 0;
2313
2314 err = dyn_event_register(&synth_event_ops);
2315 if (err)
2316 pr_warn("Could not register synth_event_ops\n");
2317
2318 return err;
2319 }
2320 core_initcall(trace_events_synth_init_early);
2321
2322 static __init int trace_events_synth_init(void)
2323 {
2324 struct dentry *entry = NULL;
2325 int err = 0;
2326 err = tracing_init_dentry();
2327 if (err)
2328 goto err;
2329
2330 entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
2331 NULL, NULL, &synth_events_fops);
2332 if (!entry) {
2333 err = -ENODEV;
2334 goto err;
2335 }
2336
2337 return err;
2338 err:
2339 pr_warn("Could not create tracefs 'synthetic_events' entry\n");
2340
2341 return err;
2342 }
2343
2344 fs_initcall(trace_events_synth_init);
2345