1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bpf-loader.c
4  *
5  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
6  * Copyright (C) 2015 Huawei Inc.
7  */
8 
9 #include <linux/bpf.h>
10 #include <bpf/libbpf.h>
11 #include <bpf/bpf.h>
12 #include <linux/filter.h>
13 #include <linux/err.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/zalloc.h>
17 #include <errno.h>
18 #include <stdlib.h>
19 #include "debug.h"
20 #include "evlist.h"
21 #include "bpf-loader.h"
22 #include "bpf-prologue.h"
23 #include "probe-event.h"
24 #include "probe-finder.h" // for MAX_PROBES
25 #include "parse-events.h"
26 #include "strfilter.h"
27 #include "util.h"
28 #include "llvm-utils.h"
29 #include "c++/clang-c.h"
30 #include "util/hashmap.h"
31 #include "asm/bug.h"
32 
33 #include <internal/xyarray.h>
34 
35 #ifndef HAVE_LIBBPF_BPF_PROGRAM__SET_INSNS
36 int bpf_program__set_insns(struct bpf_program *prog __maybe_unused,
37 			   struct bpf_insn *new_insns __maybe_unused, size_t new_insn_cnt __maybe_unused)
38 {
39 	pr_err("%s: not supported, update libbpf\n", __func__);
40 	return -ENOTSUP;
41 }
42 
43 int libbpf_register_prog_handler(const char *sec __maybe_unused,
44                                  enum bpf_prog_type prog_type __maybe_unused,
45                                  enum bpf_attach_type exp_attach_type __maybe_unused,
46                                  const struct libbpf_prog_handler_opts *opts __maybe_unused)
47 {
48 	pr_err("%s: not supported, update libbpf\n", __func__);
49 	return -ENOTSUP;
50 }
51 #endif
52 
53 /* temporarily disable libbpf deprecation warnings */
54 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
55 
56 static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
57 			      const char *fmt, va_list args)
58 {
59 	return veprintf(1, verbose, pr_fmt(fmt), args);
60 }
61 
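/*
 * Per-program private data, kept in bpf_program_hash keyed by the
 * struct bpf_program pointer: the parsed probe spec (pev), the
 * tracepoint name split into sys_name/evt_name when is_tp is set, and
 * the prologue bookkeeping (insns_buf, type_mapping, prologue_fds)
 * used when probe arguments require a generated prologue.
 */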
62 struct bpf_prog_priv {
63 	bool is_tp;
64 	char *sys_name;
65 	char *evt_name;
66 	struct perf_probe_event pev;
67 	bool need_prologue;
68 	struct bpf_insn *insns_buf;
69 	int nr_types;
70 	int *type_mapping;
71 	int *prologue_fds;
72 };
73 
74 struct bpf_perf_object {
75 	struct list_head list;
76 	struct bpf_object *obj;
77 };
78 
79 struct bpf_preproc_result {
80 	struct bpf_insn *new_insn_ptr;
81 	int new_insn_cnt;
82 };
83 
84 static LIST_HEAD(bpf_objects_list);
85 static struct hashmap *bpf_program_hash;
86 static struct hashmap *bpf_map_hash;
87 
88 static struct bpf_perf_object *
89 bpf_perf_object__next(struct bpf_perf_object *prev)
90 {
91 	if (!prev) {
92 		if (list_empty(&bpf_objects_list))
93 			return NULL;
94 
95 		return list_first_entry(&bpf_objects_list, struct bpf_perf_object, list);
96 	}
97 	if (list_is_last(&prev->list, &bpf_objects_list))
98 		return NULL;
99 
100 	return list_next_entry(prev, list);
101 }
102 
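/*
 * Iterate over all tracked perf BPF objects. The next element is
 * fetched into @tmp before the body runs, so the current element may
 * be removed (e.g. via bpf_perf_object__close()) while iterating, as
 * bpf__clear() does below.
 */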
103 #define bpf_perf_object__for_each(perf_obj, tmp)	\
104 	for ((perf_obj) = bpf_perf_object__next(NULL),	\
105 	     (tmp) = bpf_perf_object__next(perf_obj);	\
106 	     (perf_obj) != NULL;			\
107 	     (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
108 
109 static bool libbpf_initialized;
110 static int libbpf_sec_handler;
111 
112 static int bpf_perf_object__add(struct bpf_object *obj)
113 {
114 	struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj));
115 
116 	if (perf_obj) {
117 		INIT_LIST_HEAD(&perf_obj->list);
118 		perf_obj->obj = obj;
119 		list_add_tail(&perf_obj->list, &bpf_objects_list);
120 	}
121 	return perf_obj ? 0 : -ENOMEM;
122 }
123 
124 static void *program_priv(const struct bpf_program *prog)
125 {
126 	void *priv;
127 
128 	if (IS_ERR_OR_NULL(bpf_program_hash))
129 		return NULL;
130 	if (!hashmap__find(bpf_program_hash, prog, &priv))
131 		return NULL;
132 	return priv;
133 }
134 
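/*
 * Stand-in instructions prepended by libbpf_prog_prepare_load_fn()
 * when a program needs a prologue. bpf_object__load_prologue() later
 * skips these slots and generates the real argument-fetching prologue
 * in their place; zeroing r2-r5 here is presumably what keeps the
 * intermediate, prologue-less program acceptable to the verifier.
 */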
135 static struct bpf_insn prologue_init_insn[] = {
136 	BPF_MOV64_IMM(BPF_REG_2, 0),
137 	BPF_MOV64_IMM(BPF_REG_3, 0),
138 	BPF_MOV64_IMM(BPF_REG_4, 0),
139 	BPF_MOV64_IMM(BPF_REG_5, 0),
140 };
141 
142 static int libbpf_prog_prepare_load_fn(struct bpf_program *prog,
143 				       struct bpf_prog_load_opts *opts __maybe_unused,
144 				       long cookie __maybe_unused)
145 {
146 	size_t init_size_cnt = ARRAY_SIZE(prologue_init_insn);
147 	size_t orig_insn_cnt, insn_cnt, init_size, orig_size;
148 	struct bpf_prog_priv *priv = program_priv(prog);
149 	const struct bpf_insn *orig_insn;
150 	struct bpf_insn *insn;
151 
152 	if (IS_ERR_OR_NULL(priv)) {
153 		pr_debug("bpf: failed to get private field\n");
154 		return -BPF_LOADER_ERRNO__INTERNAL;
155 	}
156 
157 	if (!priv->need_prologue)
158 		return 0;
159 
160 	/* prepend initialization code to program instructions */
161 	orig_insn = bpf_program__insns(prog);
162 	orig_insn_cnt = bpf_program__insn_cnt(prog);
163 	init_size = init_size_cnt * sizeof(*insn);
164 	orig_size = orig_insn_cnt * sizeof(*insn);
165 
166 	insn_cnt = orig_insn_cnt + init_size_cnt;
167 	insn = malloc(insn_cnt * sizeof(*insn));
168 	if (!insn)
169 		return -ENOMEM;
170 
171 	memcpy(insn, prologue_init_insn, init_size);
172 	memcpy((char *) insn + init_size, orig_insn, orig_size);
173 	bpf_program__set_insns(prog, insn, insn_cnt);
174 	return 0;
175 }
176 
177 static int libbpf_init(void)
178 {
179 	LIBBPF_OPTS(libbpf_prog_handler_opts, handler_opts,
180 		.prog_prepare_load_fn = libbpf_prog_prepare_load_fn,
181 	);
182 
183 	if (libbpf_initialized)
184 		return 0;
185 
186 	libbpf_set_print(libbpf_perf_print);
187 	libbpf_sec_handler = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_KPROBE,
188 							  0, &handler_opts);
189 	if (libbpf_sec_handler < 0) {
190 		pr_debug("bpf: failed to register libbpf section handler: %d\n",
191 			 libbpf_sec_handler);
192 		return -BPF_LOADER_ERRNO__INTERNAL;
193 	}
194 	libbpf_initialized = true;
195 	return 0;
196 }
197 
198 struct bpf_object *
199 bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
200 {
201 	LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
202 	struct bpf_object *obj;
203 	int err;
204 
205 	err = libbpf_init();
206 	if (err)
207 		return ERR_PTR(err);
208 
209 	obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
210 	if (IS_ERR_OR_NULL(obj)) {
211 		pr_debug("bpf: failed to load buffer\n");
212 		return ERR_PTR(-EINVAL);
213 	}
214 
215 	if (bpf_perf_object__add(obj)) {
216 		bpf_object__close(obj);
217 		return ERR_PTR(-ENOMEM);
218 	}
219 
220 	return obj;
221 }
222 
223 static void bpf_perf_object__close(struct bpf_perf_object *perf_obj)
224 {
225 	list_del(&perf_obj->list);
226 	bpf_object__close(perf_obj->obj);
227 	free(perf_obj);
228 }
229 
230 struct bpf_object *bpf__prepare_load(const char *filename, bool source)
231 {
232 	LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
233 	struct bpf_object *obj;
234 	int err;
235 
236 	err = libbpf_init();
237 	if (err)
238 		return ERR_PTR(err);
239 
240 	if (source) {
241 		void *obj_buf;
242 		size_t obj_buf_sz;
243 
244 		perf_clang__init();
245 		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
246 		perf_clang__cleanup();
247 		if (err) {
248 			pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
249 			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
250 			if (err)
251 				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
252 		} else
253 			pr_debug("bpf: successful builtin compilation\n");
254 		obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
255 
256 		if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
257 			llvm__dump_obj(filename, obj_buf, obj_buf_sz);
258 
259 		free(obj_buf);
260 	} else {
261 		obj = bpf_object__open(filename);
262 	}
263 
264 	if (IS_ERR_OR_NULL(obj)) {
265 		pr_debug("bpf: failed to load %s\n", filename);
266 		return obj;
267 	}
268 
269 	if (bpf_perf_object__add(obj)) {
270 		bpf_object__close(obj);
271 		return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
272 	}
273 
274 	return obj;
275 }
276 
277 static void close_prologue_programs(struct bpf_prog_priv *priv)
278 {
279 	struct perf_probe_event *pev;
280 	int i, fd;
281 
282 	if (!priv->need_prologue)
283 		return;
284 	pev = &priv->pev;
285 	for (i = 0; i < pev->ntevs; i++) {
286 		fd = priv->prologue_fds[i];
287 		if (fd != -1)
288 			close(fd);
289 	}
290 }
291 
292 static void
293 clear_prog_priv(const struct bpf_program *prog __maybe_unused,
294 		void *_priv)
295 {
296 	struct bpf_prog_priv *priv = _priv;
297 
298 	close_prologue_programs(priv);
299 	cleanup_perf_probe_events(&priv->pev, 1);
300 	zfree(&priv->insns_buf);
301 	zfree(&priv->prologue_fds);
302 	zfree(&priv->type_mapping);
303 	zfree(&priv->sys_name);
304 	zfree(&priv->evt_name);
305 	free(priv);
306 }
307 
308 static void bpf_program_hash_free(void)
309 {
310 	struct hashmap_entry *cur;
311 	size_t bkt;
312 
313 	if (IS_ERR_OR_NULL(bpf_program_hash))
314 		return;
315 
316 	hashmap__for_each_entry(bpf_program_hash, cur, bkt)
317 		clear_prog_priv(cur->pkey, cur->pvalue);
318 
319 	hashmap__free(bpf_program_hash);
320 	bpf_program_hash = NULL;
321 }
322 
323 static void bpf_map_hash_free(void);
324 
325 void bpf__clear(void)
326 {
327 	struct bpf_perf_object *perf_obj, *tmp;
328 
329 	bpf_perf_object__for_each(perf_obj, tmp) {
330 		bpf__unprobe(perf_obj->obj);
331 		bpf_perf_object__close(perf_obj);
332 	}
333 
334 	bpf_program_hash_free();
335 	bpf_map_hash_free();
336 }
337 
338 static size_t ptr_hash(const long __key, void *ctx __maybe_unused)
339 {
340 	return __key;
341 }
342 
343 static bool ptr_equal(long key1, long key2, void *ctx __maybe_unused)
344 {
345 	return key1 == key2;
346 }
347 
348 static int program_set_priv(struct bpf_program *prog, void *priv)
349 {
350 	void *old_priv;
351 
352 	/*
353 	 * Should not happen, we warn about it in the
354 	 * caller function - config_bpf_program
355 	 */
356 	if (IS_ERR(bpf_program_hash))
357 		return PTR_ERR(bpf_program_hash);
358 
359 	if (!bpf_program_hash) {
360 		bpf_program_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
361 		if (IS_ERR(bpf_program_hash))
362 			return PTR_ERR(bpf_program_hash);
363 	}
364 
365 	old_priv = program_priv(prog);
366 	if (old_priv) {
367 		clear_prog_priv(prog, old_priv);
368 		return hashmap__set(bpf_program_hash, prog, priv, NULL, NULL);
369 	}
370 	return hashmap__add(bpf_program_hash, prog, priv);
371 }
372 
373 static int
374 prog_config__exec(const char *value, struct perf_probe_event *pev)
375 {
376 	pev->uprobes = true;
377 	pev->target = strdup(value);
378 	if (!pev->target)
379 		return -ENOMEM;
380 	return 0;
381 }
382 
383 static int
384 prog_config__module(const char *value, struct perf_probe_event *pev)
385 {
386 	pev->uprobes = false;
387 	pev->target = strdup(value);
388 	if (!pev->target)
389 		return -ENOMEM;
390 	return 0;
391 }
392 
393 static int
394 prog_config__bool(const char *value, bool *pbool, bool invert)
395 {
396 	int err;
397 	bool bool_value;
398 
399 	if (!pbool)
400 		return -EINVAL;
401 
402 	err = strtobool(value, &bool_value);
403 	if (err)
404 		return err;
405 
406 	*pbool = invert ? !bool_value : bool_value;
407 	return 0;
408 }
409 
410 static int
411 prog_config__inlines(const char *value,
412 		     struct perf_probe_event *pev __maybe_unused)
413 {
414 	return prog_config__bool(value, &probe_conf.no_inlines, true);
415 }
416 
417 static int
418 prog_config__force(const char *value,
419 		   struct perf_probe_event *pev __maybe_unused)
420 {
421 	return prog_config__bool(value, &probe_conf.force_add, false);
422 }
423 
424 static struct {
425 	const char *key;
426 	const char *usage;
427 	const char *desc;
428 	int (*func)(const char *, struct perf_probe_event *);
429 } bpf_prog_config_terms[] = {
430 	{
431 		.key	= "exec",
432 		.usage	= "exec=<full path of file>",
433 		.desc	= "Set uprobe target",
434 		.func	= prog_config__exec,
435 	},
436 	{
437 		.key	= "module",
438 		.usage	= "module=<module name>    ",
439 		.desc	= "Set kprobe module",
440 		.func	= prog_config__module,
441 	},
442 	{
443 		.key	= "inlines",
444 		.usage	= "inlines=[yes|no]        ",
445 		.desc	= "Probe at inline symbol",
446 		.func	= prog_config__inlines,
447 	},
448 	{
449 		.key	= "force",
450 		.usage	= "force=[yes|no]          ",
451 		.desc	= "Forcibly add events with existing name",
452 		.func	= prog_config__force,
453 	},
454 };
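/*
 * These terms are parsed from the ELF section name of a scriptlet
 * program, ahead of the probe definition itself. As an illustration
 * only (hypothetical scriptlet, not from this tree), a section like
 *
 *	SEC("exec=/usr/lib64/libc.so.6;force=yes;func=malloc size")
 *
 * would be split so that "exec=..." and "force=yes" are consumed here
 * and the remaining "func=malloc size" is handed to
 * parse_perf_probe_command() as a uprobe definition.
 */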
455 
456 static int
457 do_prog_config(const char *key, const char *value,
458 	       struct perf_probe_event *pev)
459 {
460 	unsigned int i;
461 
462 	pr_debug("config bpf program: %s=%s\n", key, value);
463 	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
464 		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
465 			return bpf_prog_config_terms[i].func(value, pev);
466 
467 	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
468 		 key, value);
469 
470 	pr_debug("\nHint: Valid options are:\n");
471 	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
472 		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
473 			 bpf_prog_config_terms[i].desc);
474 	pr_debug("\n");
475 
476 	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
477 }
478 
479 static const char *
480 parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
481 {
482 	char *text = strdup(config_str);
483 	char *sep, *line;
484 	const char *main_str = NULL;
485 	int err = 0;
486 
487 	if (!text) {
488 		pr_debug("Not enough memory: dup config_str failed\n");
489 		return ERR_PTR(-ENOMEM);
490 	}
491 
492 	line = text;
493 	while ((sep = strchr(line, ';'))) {
494 		char *equ;
495 
496 		*sep = '\0';
497 		equ = strchr(line, '=');
498 		if (!equ) {
499 			pr_warning("WARNING: invalid config in BPF object: %s\n",
500 				   line);
501 			pr_warning("\tShould be 'key=value'.\n");
502 			goto nextline;
503 		}
504 		*equ = '\0';
505 
506 		err = do_prog_config(line, equ + 1, pev);
507 		if (err)
508 			break;
509 nextline:
510 		line = sep + 1;
511 	}
512 
513 	if (!err)
514 		main_str = config_str + (line - text);
515 	free(text);
516 
517 	return err ? ERR_PTR(err) : main_str;
518 }
519 
520 static int
521 parse_prog_config(const char *config_str, const char **p_main_str,
522 		  bool *is_tp, struct perf_probe_event *pev)
523 {
524 	int err;
525 	const char *main_str = parse_prog_config_kvpair(config_str, pev);
526 
527 	if (IS_ERR(main_str))
528 		return PTR_ERR(main_str);
529 
530 	*p_main_str = main_str;
531 	if (!strchr(main_str, '=')) {
532 		/* Is a tracepoint event? */
533 		const char *s = strchr(main_str, ':');
534 
535 		if (!s) {
536 			pr_debug("bpf: '%s' is not a valid tracepoint\n",
537 				 config_str);
538 			return -BPF_LOADER_ERRNO__CONFIG;
539 		}
540 
541 		*is_tp = true;
542 		return 0;
543 	}
544 
545 	*is_tp = false;
546 	err = parse_perf_probe_command(main_str, pev);
547 	if (err < 0) {
548 		pr_debug("bpf: '%s' is not a valid config string\n",
549 			 config_str);
550 		/* parse failed, no need to clear pev. */
551 		return -BPF_LOADER_ERRNO__CONFIG;
552 	}
553 	return 0;
554 }
555 
556 static int
557 config_bpf_program(struct bpf_program *prog)
558 {
559 	struct perf_probe_event *pev = NULL;
560 	struct bpf_prog_priv *priv = NULL;
561 	const char *config_str, *main_str;
562 	bool is_tp = false;
563 	int err;
564 
565 	/* Initialize per-program probing setting */
566 	probe_conf.no_inlines = false;
567 	probe_conf.force_add = false;
568 
569 	priv = calloc(sizeof(*priv), 1);
570 	if (!priv) {
571 		pr_debug("bpf: failed to alloc priv\n");
572 		return -ENOMEM;
573 	}
574 	pev = &priv->pev;
575 
576 	config_str = bpf_program__section_name(prog);
577 	pr_debug("bpf: config program '%s'\n", config_str);
578 	err = parse_prog_config(config_str, &main_str, &is_tp, pev);
579 	if (err)
580 		goto errout;
581 
582 	if (is_tp) {
583 		char *s = strchr(main_str, ':');
584 
585 		priv->is_tp = true;
586 		priv->sys_name = strndup(main_str, s - main_str);
587 		priv->evt_name = strdup(s + 1);
588 		goto set_priv;
589 	}
590 
591 	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
592 		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
593 			 config_str, PERF_BPF_PROBE_GROUP);
594 		err = -BPF_LOADER_ERRNO__GROUP;
595 		goto errout;
596 	} else if (!pev->group)
597 		pev->group = strdup(PERF_BPF_PROBE_GROUP);
598 
599 	if (!pev->group) {
600 		pr_debug("bpf: strdup failed\n");
601 		err = -ENOMEM;
602 		goto errout;
603 	}
604 
605 	if (!pev->event) {
606 		pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
607 			 config_str);
608 		err = -BPF_LOADER_ERRNO__EVENTNAME;
609 		goto errout;
610 	}
611 	pr_debug("bpf: config '%s' is ok\n", config_str);
612 
613 set_priv:
614 	err = program_set_priv(prog, priv);
615 	if (err) {
616 		pr_debug("Failed to set priv for program '%s'\n", config_str);
617 		goto errout;
618 	}
619 
620 	return 0;
621 
622 errout:
623 	if (pev)
624 		clear_perf_probe_event(pev);
625 	free(priv);
626 	return err;
627 }
628 
629 static int bpf__prepare_probe(void)
630 {
631 	static int err = 0;
632 	static bool initialized = false;
633 
634 	/*
635 	 * Make err static, so if init failed the first time, bpf__prepare_probe()
636 	 * fails each time without calling init_probe_symbol_maps multiple
637 	 * times.
638 	 */
639 	if (initialized)
640 		return err;
641 
642 	initialized = true;
643 	err = init_probe_symbol_maps(false);
644 	if (err < 0)
645 		pr_debug("Failed to init_probe_symbol_maps\n");
646 	probe_conf.max_probes = MAX_PROBES;
647 	return err;
648 }
649 
650 static int
651 preproc_gen_prologue(struct bpf_program *prog, int n,
652 		     const struct bpf_insn *orig_insns, int orig_insns_cnt,
653 		     struct bpf_preproc_result *res)
654 {
655 	struct bpf_prog_priv *priv = program_priv(prog);
656 	struct probe_trace_event *tev;
657 	struct perf_probe_event *pev;
658 	struct bpf_insn *buf;
659 	size_t prologue_cnt = 0;
660 	int i, err;
661 
662 	if (IS_ERR_OR_NULL(priv) || priv->is_tp)
663 		goto errout;
664 
665 	pev = &priv->pev;
666 
667 	if (n < 0 || n >= priv->nr_types)
668 		goto errout;
669 
670 	/* Find a tev that belongs to that type */
671 	for (i = 0; i < pev->ntevs; i++) {
672 		if (priv->type_mapping[i] == n)
673 			break;
674 	}
675 
676 	if (i >= pev->ntevs) {
677 		pr_debug("Internal error: prologue type %d not found\n", n);
678 		return -BPF_LOADER_ERRNO__PROLOGUE;
679 	}
680 
681 	tev = &pev->tevs[i];
682 
683 	buf = priv->insns_buf;
684 	err = bpf__gen_prologue(tev->args, tev->nargs,
685 				buf, &prologue_cnt,
686 				BPF_MAXINSNS - orig_insns_cnt);
687 	if (err) {
688 		const char *title;
689 
690 		title = bpf_program__section_name(prog);
691 		pr_debug("Failed to generate prologue for program %s\n",
692 			 title);
693 		return err;
694 	}
695 
696 	memcpy(&buf[prologue_cnt], orig_insns,
697 	       sizeof(struct bpf_insn) * orig_insns_cnt);
698 
699 	res->new_insn_ptr = buf;
700 	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
701 	return 0;
702 
703 errout:
704 	pr_debug("Internal error in preproc_gen_prologue\n");
705 	return -BPF_LOADER_ERRNO__PROLOGUE;
706 }
707 
708 /*
709  * compare_tev_args is reflexive, transitive and antisymmetric.
710  * I can prove it, but this margin is too narrow to contain the proof.
711  */
712 static int compare_tev_args(const void *ptev1, const void *ptev2)
713 {
714 	int i, ret;
715 	const struct probe_trace_event *tev1 =
716 		*(const struct probe_trace_event **)ptev1;
717 	const struct probe_trace_event *tev2 =
718 		*(const struct probe_trace_event **)ptev2;
719 
720 	ret = tev2->nargs - tev1->nargs;
721 	if (ret)
722 		return ret;
723 
724 	for (i = 0; i < tev1->nargs; i++) {
725 		struct probe_trace_arg *arg1, *arg2;
726 		struct probe_trace_arg_ref *ref1, *ref2;
727 
728 		arg1 = &tev1->args[i];
729 		arg2 = &tev2->args[i];
730 
731 		ret = strcmp(arg1->value, arg2->value);
732 		if (ret)
733 			return ret;
734 
735 		ref1 = arg1->ref;
736 		ref2 = arg2->ref;
737 
738 		while (ref1 && ref2) {
739 			ret = ref2->offset - ref1->offset;
740 			if (ret)
741 				return ret;
742 
743 			ref1 = ref1->next;
744 			ref2 = ref2->next;
745 		}
746 
747 		if (ref1 || ref2)
748 			return ref2 ? 1 : -1;
749 	}
750 
751 	return 0;
752 }
753 
754 /*
755  * Assign a type number to each tev in a pev.
756  * mapping is an array with the same number of slots as tevs in that pev.
757  * nr_types will be set to the number of types.
758  */
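/*
 * Worked example (hypothetical): if three tevs have argument lists
 * that compare as A, A, B, the qsort() below groups the first two, so
 * mapping becomes {0, 0, 1} and *nr_types is set to 2; only two
 * distinct prologues need to be generated.
 */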
759 static int map_prologue(struct perf_probe_event *pev, int *mapping,
760 			int *nr_types)
761 {
762 	int i, type = 0;
763 	struct probe_trace_event **ptevs;
764 
765 	size_t array_sz = sizeof(*ptevs) * pev->ntevs;
766 
767 	ptevs = malloc(array_sz);
768 	if (!ptevs) {
769 		pr_debug("Not enough memory: alloc ptevs failed\n");
770 		return -ENOMEM;
771 	}
772 
773 	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
774 	for (i = 0; i < pev->ntevs; i++)
775 		ptevs[i] = &pev->tevs[i];
776 
777 	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
778 	      compare_tev_args);
779 
780 	for (i = 0; i < pev->ntevs; i++) {
781 		int n;
782 
783 		n = ptevs[i] - pev->tevs;
784 		if (i == 0) {
785 			mapping[n] = type;
786 			pr_debug("mapping[%d]=%d\n", n, type);
787 			continue;
788 		}
789 
790 		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
791 			mapping[n] = type;
792 		else
793 			mapping[n] = ++type;
794 
795 		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
796 	}
797 	free(ptevs);
798 	*nr_types = type + 1;
799 
800 	return 0;
801 }
802 
803 static int hook_load_preprocessor(struct bpf_program *prog)
804 {
805 	struct bpf_prog_priv *priv = program_priv(prog);
806 	struct perf_probe_event *pev;
807 	bool need_prologue = false;
808 	int i;
809 
810 	if (IS_ERR_OR_NULL(priv)) {
811 		pr_debug("Internal error when hooking preprocessor\n");
812 		return -BPF_LOADER_ERRNO__INTERNAL;
813 	}
814 
815 	if (priv->is_tp) {
816 		priv->need_prologue = false;
817 		return 0;
818 	}
819 
820 	pev = &priv->pev;
821 	for (i = 0; i < pev->ntevs; i++) {
822 		struct probe_trace_event *tev = &pev->tevs[i];
823 
824 		if (tev->nargs > 0) {
825 			need_prologue = true;
826 			break;
827 		}
828 	}
829 
830 	/*
831 	 * Since none of the tevs have arguments, we don't need to
832 	 * generate a prologue.
833 	 */
834 	if (!need_prologue) {
835 		priv->need_prologue = false;
836 		return 0;
837 	}
838 
839 	priv->need_prologue = true;
840 	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
841 	if (!priv->insns_buf) {
842 		pr_debug("Not enough memory: alloc insns_buf failed\n");
843 		return -ENOMEM;
844 	}
845 
846 	priv->prologue_fds = malloc(sizeof(int) * pev->ntevs);
847 	if (!priv->prologue_fds) {
848 		pr_debug("Not enough memory: alloc prologue fds failed\n");
849 		return -ENOMEM;
850 	}
851 	memset(priv->prologue_fds, -1, sizeof(int) * pev->ntevs);
852 
853 	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
854 	if (!priv->type_mapping) {
855 		pr_debug("Not enough memory: alloc type_mapping failed\n");
856 		return -ENOMEM;
857 	}
858 	memset(priv->type_mapping, -1,
859 	       sizeof(int) * pev->ntevs);
860 
861 	return map_prologue(pev, priv->type_mapping, &priv->nr_types);
862 }
863 
864 int bpf__probe(struct bpf_object *obj)
865 {
866 	int err = 0;
867 	struct bpf_program *prog;
868 	struct bpf_prog_priv *priv;
869 	struct perf_probe_event *pev;
870 
871 	err = bpf__prepare_probe();
872 	if (err) {
873 		pr_debug("bpf__prepare_probe failed\n");
874 		return err;
875 	}
876 
877 	bpf_object__for_each_program(prog, obj) {
878 		err = config_bpf_program(prog);
879 		if (err)
880 			goto out;
881 
882 		priv = program_priv(prog);
883 		if (IS_ERR_OR_NULL(priv)) {
884 			if (!priv)
885 				err = -BPF_LOADER_ERRNO__INTERNAL;
886 			else
887 				err = PTR_ERR(priv);
888 			goto out;
889 		}
890 
891 		if (priv->is_tp) {
892 			bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
893 			continue;
894 		}
895 
896 		bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
897 		pev = &priv->pev;
898 
899 		err = convert_perf_probe_events(pev, 1);
900 		if (err < 0) {
901 			pr_debug("bpf_probe: failed to convert perf probe events\n");
902 			goto out;
903 		}
904 
905 		err = apply_perf_probe_events(pev, 1);
906 		if (err < 0) {
907 			pr_debug("bpf_probe: failed to apply perf probe events\n");
908 			goto out;
909 		}
910 
911 		/*
912 		 * After probing, let's consider the prologue, which
913 		 * adds an argument fetcher to BPF programs.
914 		 *
915 		 * hook_load_preprocessor() hooks a pre-processor
916 		 * onto the bpf_program, letting it generate the
917 		 * prologue dynamically during loading.
918 		 */
919 		err = hook_load_preprocessor(prog);
920 		if (err)
921 			goto out;
922 	}
923 out:
924 	return err < 0 ? err : 0;
925 }
926 
927 #define EVENTS_WRITE_BUFSIZE  4096
928 int bpf__unprobe(struct bpf_object *obj)
929 {
930 	int err, ret = 0;
931 	struct bpf_program *prog;
932 
933 	bpf_object__for_each_program(prog, obj) {
934 		struct bpf_prog_priv *priv = program_priv(prog);
935 		int i;
936 
937 		if (IS_ERR_OR_NULL(priv) || priv->is_tp)
938 			continue;
939 
940 		for (i = 0; i < priv->pev.ntevs; i++) {
941 			struct probe_trace_event *tev = &priv->pev.tevs[i];
942 			char name_buf[EVENTS_WRITE_BUFSIZE];
943 			struct strfilter *delfilter;
944 
945 			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
946 				 "%s:%s", tev->group, tev->event);
947 			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';
948 
949 			delfilter = strfilter__new(name_buf, NULL);
950 			if (!delfilter) {
951 				pr_debug("Failed to create filter for unprobing\n");
952 				ret = -ENOMEM;
953 				continue;
954 			}
955 
956 			err = del_perf_probe_events(delfilter);
957 			strfilter__delete(delfilter);
958 			if (err) {
959 				pr_debug("Failed to delete %s\n", name_buf);
960 				ret = err;
961 				continue;
962 			}
963 		}
964 	}
965 	return ret;
966 }
967 
968 static int bpf_object__load_prologue(struct bpf_object *obj)
969 {
970 	int init_cnt = ARRAY_SIZE(prologue_init_insn);
971 	const struct bpf_insn *orig_insns;
972 	struct bpf_preproc_result res;
973 	struct perf_probe_event *pev;
974 	struct bpf_program *prog;
975 	int orig_insns_cnt;
976 
977 	bpf_object__for_each_program(prog, obj) {
978 		struct bpf_prog_priv *priv = program_priv(prog);
979 		int err, i, fd;
980 
981 		if (IS_ERR_OR_NULL(priv)) {
982 			pr_debug("bpf: failed to get private field\n");
983 			return -BPF_LOADER_ERRNO__INTERNAL;
984 		}
985 
986 		if (!priv->need_prologue)
987 			continue;
988 
989 		/*
990 		 * For each program that needs a prologue we do the following:
991 		 *
992 		 * - take its current instructions and use them
993 		 *   to generate the new code with prologue
994 		 * - load new instructions with bpf_prog_load
995 		 *   and keep the fd in prologue_fds
996 		 * - the new fd will be used in bpf__foreach_event
997 		 *   to connect this program with a perf evsel
998 		 */
999 		orig_insns = bpf_program__insns(prog);
1000 		orig_insns_cnt = bpf_program__insn_cnt(prog);
1001 
1002 		pev = &priv->pev;
1003 		for (i = 0; i < pev->ntevs; i++) {
1004 			/*
1005 			 * Skip the artificial prologue_init_insn instructions
1006 			 * (init_cnt), so the prologue can be generated in
1007 			 * their place.
1008 			 */
1009 			err = preproc_gen_prologue(prog, i,
1010 						   orig_insns + init_cnt,
1011 						   orig_insns_cnt - init_cnt,
1012 						   &res);
1013 			if (err)
1014 				return err;
1015 
1016 			fd = bpf_prog_load(bpf_program__get_type(prog),
1017 					   bpf_program__name(prog), "GPL",
1018 					   res.new_insn_ptr,
1019 					   res.new_insn_cnt, NULL);
1020 			if (fd < 0) {
1021 				char bf[128];
1022 
1023 				libbpf_strerror(-errno, bf, sizeof(bf));
1024 				pr_debug("bpf: load objects with prologue failed: err=%d: (%s)\n",
1025 					 -errno, bf);
1026 				return -errno;
1027 			}
1028 			priv->prologue_fds[i] = fd;
1029 		}
1030 		/*
1031 		 * We no longer need the original program,
1032 		 * so we can unload it.
1033 		 */
1034 		bpf_program__unload(prog);
1035 	}
1036 	return 0;
1037 }
1038 
1039 int bpf__load(struct bpf_object *obj)
1040 {
1041 	int err;
1042 
1043 	err = bpf_object__load(obj);
1044 	if (err) {
1045 		char bf[128];
1046 		libbpf_strerror(err, bf, sizeof(bf));
1047 		pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
1048 		return err;
1049 	}
1050 	return bpf_object__load_prologue(obj);
1051 }
1052 
1053 int bpf__foreach_event(struct bpf_object *obj,
1054 		       bpf_prog_iter_callback_t func,
1055 		       void *arg)
1056 {
1057 	struct bpf_program *prog;
1058 	int err;
1059 
1060 	bpf_object__for_each_program(prog, obj) {
1061 		struct bpf_prog_priv *priv = program_priv(prog);
1062 		struct probe_trace_event *tev;
1063 		struct perf_probe_event *pev;
1064 		int i, fd;
1065 
1066 		if (IS_ERR_OR_NULL(priv)) {
1067 			pr_debug("bpf: failed to get private field\n");
1068 			return -BPF_LOADER_ERRNO__INTERNAL;
1069 		}
1070 
1071 		if (priv->is_tp) {
1072 			fd = bpf_program__fd(prog);
1073 			err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
1074 			if (err) {
1075 				pr_debug("bpf: tracepoint callback failed, stop iterating\n");
1076 				return err;
1077 			}
1078 			continue;
1079 		}
1080 
1081 		pev = &priv->pev;
1082 		for (i = 0; i < pev->ntevs; i++) {
1083 			tev = &pev->tevs[i];
1084 
1085 			if (priv->need_prologue)
1086 				fd = priv->prologue_fds[i];
1087 			else
1088 				fd = bpf_program__fd(prog);
1089 
1090 			if (fd < 0) {
1091 				pr_debug("bpf: failed to get file descriptor\n");
1092 				return fd;
1093 			}
1094 
1095 			err = (*func)(tev->group, tev->event, fd, obj, arg);
1096 			if (err) {
1097 				pr_debug("bpf: callback failed, stop iterating\n");
1098 				return err;
1099 			}
1100 		}
1101 	}
1102 	return 0;
1103 }
1104 
1105 enum bpf_map_op_type {
1106 	BPF_MAP_OP_SET_VALUE,
1107 	BPF_MAP_OP_SET_EVSEL,
1108 };
1109 
1110 enum bpf_map_key_type {
1111 	BPF_MAP_KEY_ALL,
1112 	BPF_MAP_KEY_RANGES,
1113 };
1114 
1115 struct bpf_map_op {
1116 	struct list_head list;
1117 	enum bpf_map_op_type op_type;
1118 	enum bpf_map_key_type key_type;
1119 	union {
1120 		struct parse_events_array array;
1121 	} k;
1122 	union {
1123 		u64 value;
1124 		struct evsel *evsel;
1125 	} v;
1126 };
1127 
1128 struct bpf_map_priv {
1129 	struct list_head ops_list;
1130 };
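/*
 * Map configuration requests are queued as a list of ops per map
 * (attached via bpf_map_hash) and only written to the kernel map in
 * bpf__apply_obj_config(), once the object is loaded and map fds
 * exist. An op either stores a plain value (BPF_MAP_OP_SET_VALUE) or
 * wires an evsel's perf event fd into the map (BPF_MAP_OP_SET_EVSEL),
 * for every key or for the index ranges given in the term.
 */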
1131 
1132 static void
1133 bpf_map_op__delete(struct bpf_map_op *op)
1134 {
1135 	if (!list_empty(&op->list))
1136 		list_del_init(&op->list);
1137 	if (op->key_type == BPF_MAP_KEY_RANGES)
1138 		parse_events__clear_array(&op->k.array);
1139 	free(op);
1140 }
1141 
1142 static void
1143 bpf_map_priv__purge(struct bpf_map_priv *priv)
1144 {
1145 	struct bpf_map_op *pos, *n;
1146 
1147 	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
1148 		list_del_init(&pos->list);
1149 		bpf_map_op__delete(pos);
1150 	}
1151 }
1152 
1153 static void
1154 bpf_map_priv__clear(const struct bpf_map *map __maybe_unused,
1155 		    void *_priv)
1156 {
1157 	struct bpf_map_priv *priv = _priv;
1158 
1159 	bpf_map_priv__purge(priv);
1160 	free(priv);
1161 }
1162 
1163 static void *map_priv(const struct bpf_map *map)
1164 {
1165 	void *priv;
1166 
1167 	if (IS_ERR_OR_NULL(bpf_map_hash))
1168 		return NULL;
1169 	if (!hashmap__find(bpf_map_hash, map, &priv))
1170 		return NULL;
1171 	return priv;
1172 }
1173 
1174 static void bpf_map_hash_free(void)
1175 {
1176 	struct hashmap_entry *cur;
1177 	size_t bkt;
1178 
1179 	if (IS_ERR_OR_NULL(bpf_map_hash))
1180 		return;
1181 
1182 	hashmap__for_each_entry(bpf_map_hash, cur, bkt)
1183 		bpf_map_priv__clear(cur->pkey, cur->pvalue);
1184 
1185 	hashmap__free(bpf_map_hash);
1186 	bpf_map_hash = NULL;
1187 }
1188 
1189 static int map_set_priv(struct bpf_map *map, void *priv)
1190 {
1191 	void *old_priv;
1192 
1193 	if (WARN_ON_ONCE(IS_ERR(bpf_map_hash)))
1194 		return PTR_ERR(bpf_program_hash);
1195 
1196 	if (!bpf_map_hash) {
1197 		bpf_map_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
1198 		if (IS_ERR(bpf_map_hash))
1199 			return PTR_ERR(bpf_map_hash);
1200 	}
1201 
1202 	old_priv = map_priv(map);
1203 	if (old_priv) {
1204 		bpf_map_priv__clear(map, old_priv);
1205 		return hashmap__set(bpf_map_hash, map, priv, NULL, NULL);
1206 	}
1207 	return hashmap__add(bpf_map_hash, map, priv);
1208 }
1209 
1210 static int
1211 bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
1212 {
1213 	op->key_type = BPF_MAP_KEY_ALL;
1214 	if (!term)
1215 		return 0;
1216 
1217 	if (term->array.nr_ranges) {
1218 		size_t memsz = term->array.nr_ranges *
1219 				sizeof(op->k.array.ranges[0]);
1220 
1221 		op->k.array.ranges = memdup(term->array.ranges, memsz);
1222 		if (!op->k.array.ranges) {
1223 			pr_debug("Not enough memory to alloc indices for map\n");
1224 			return -ENOMEM;
1225 		}
1226 		op->key_type = BPF_MAP_KEY_RANGES;
1227 		op->k.array.nr_ranges = term->array.nr_ranges;
1228 	}
1229 	return 0;
1230 }
1231 
1232 static struct bpf_map_op *
1233 bpf_map_op__new(struct parse_events_term *term)
1234 {
1235 	struct bpf_map_op *op;
1236 	int err;
1237 
1238 	op = zalloc(sizeof(*op));
1239 	if (!op) {
1240 		pr_debug("Failed to alloc bpf_map_op\n");
1241 		return ERR_PTR(-ENOMEM);
1242 	}
1243 	INIT_LIST_HEAD(&op->list);
1244 
1245 	err = bpf_map_op_setkey(op, term);
1246 	if (err) {
1247 		free(op);
1248 		return ERR_PTR(err);
1249 	}
1250 	return op;
1251 }
1252 
1253 static struct bpf_map_op *
1254 bpf_map_op__clone(struct bpf_map_op *op)
1255 {
1256 	struct bpf_map_op *newop;
1257 
1258 	newop = memdup(op, sizeof(*op));
1259 	if (!newop) {
1260 		pr_debug("Failed to alloc bpf_map_op\n");
1261 		return NULL;
1262 	}
1263 
1264 	INIT_LIST_HEAD(&newop->list);
1265 	if (op->key_type == BPF_MAP_KEY_RANGES) {
1266 		size_t memsz = op->k.array.nr_ranges *
1267 			       sizeof(op->k.array.ranges[0]);
1268 
1269 		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
1270 		if (!newop->k.array.ranges) {
1271 			pr_debug("Failed to alloc indices for map\n");
1272 			free(newop);
1273 			return NULL;
1274 		}
1275 	}
1276 
1277 	return newop;
1278 }
1279 
1280 static struct bpf_map_priv *
1281 bpf_map_priv__clone(struct bpf_map_priv *priv)
1282 {
1283 	struct bpf_map_priv *newpriv;
1284 	struct bpf_map_op *pos, *newop;
1285 
1286 	newpriv = zalloc(sizeof(*newpriv));
1287 	if (!newpriv) {
1288 		pr_debug("Not enough memory to alloc map private\n");
1289 		return NULL;
1290 	}
1291 	INIT_LIST_HEAD(&newpriv->ops_list);
1292 
1293 	list_for_each_entry(pos, &priv->ops_list, list) {
1294 		newop = bpf_map_op__clone(pos);
1295 		if (!newop) {
1296 			bpf_map_priv__purge(newpriv);
1297 			return NULL;
1298 		}
1299 		list_add_tail(&newop->list, &newpriv->ops_list);
1300 	}
1301 
1302 	return newpriv;
1303 }
1304 
1305 static int
1306 bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
1307 {
1308 	const char *map_name = bpf_map__name(map);
1309 	struct bpf_map_priv *priv = map_priv(map);
1310 
1311 	if (IS_ERR(priv)) {
1312 		pr_debug("Failed to get private from map %s\n", map_name);
1313 		return PTR_ERR(priv);
1314 	}
1315 
1316 	if (!priv) {
1317 		priv = zalloc(sizeof(*priv));
1318 		if (!priv) {
1319 			pr_debug("Not enough memory to alloc map private\n");
1320 			return -ENOMEM;
1321 		}
1322 		INIT_LIST_HEAD(&priv->ops_list);
1323 
1324 		if (map_set_priv(map, priv)) {
1325 			free(priv);
1326 			return -BPF_LOADER_ERRNO__INTERNAL;
1327 		}
1328 	}
1329 
1330 	list_add_tail(&op->list, &priv->ops_list);
1331 	return 0;
1332 }
1333 
1334 static struct bpf_map_op *
1335 bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
1336 {
1337 	struct bpf_map_op *op;
1338 	int err;
1339 
1340 	op = bpf_map_op__new(term);
1341 	if (IS_ERR(op))
1342 		return op;
1343 
1344 	err = bpf_map__add_op(map, op);
1345 	if (err) {
1346 		bpf_map_op__delete(op);
1347 		return ERR_PTR(err);
1348 	}
1349 	return op;
1350 }
1351 
1352 static int
1353 __bpf_map__config_value(struct bpf_map *map,
1354 			struct parse_events_term *term)
1355 {
1356 	struct bpf_map_op *op;
1357 	const char *map_name = bpf_map__name(map);
1358 
1359 	if (!map) {
1360 		pr_debug("Map '%s' is invalid\n", map_name);
1361 		return -BPF_LOADER_ERRNO__INTERNAL;
1362 	}
1363 
1364 	if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
1365 		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
1366 			 map_name);
1367 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1368 	}
1369 	if (bpf_map__key_size(map) < sizeof(unsigned int)) {
1370 		pr_debug("Map %s has incorrect key size\n", map_name);
1371 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
1372 	}
1373 	switch (bpf_map__value_size(map)) {
1374 	case 1:
1375 	case 2:
1376 	case 4:
1377 	case 8:
1378 		break;
1379 	default:
1380 		pr_debug("Map %s has incorrect value size\n", map_name);
1381 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1382 	}
1383 
1384 	op = bpf_map__add_newop(map, term);
1385 	if (IS_ERR(op))
1386 		return PTR_ERR(op);
1387 	op->op_type = BPF_MAP_OP_SET_VALUE;
1388 	op->v.value = term->val.num;
1389 	return 0;
1390 }
1391 
1392 static int
1393 bpf_map__config_value(struct bpf_map *map,
1394 		      struct parse_events_term *term,
1395 		      struct evlist *evlist __maybe_unused)
1396 {
1397 	if (!term->err_val) {
1398 		pr_debug("Config value not set\n");
1399 		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1400 	}
1401 
1402 	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
1403 		pr_debug("ERROR: wrong value type for 'value'\n");
1404 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1405 	}
1406 
1407 	return __bpf_map__config_value(map, term);
1408 }
1409 
1410 static int
1411 __bpf_map__config_event(struct bpf_map *map,
1412 			struct parse_events_term *term,
1413 			struct evlist *evlist)
1414 {
1415 	struct bpf_map_op *op;
1416 	const char *map_name = bpf_map__name(map);
1417 	struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);
1418 
1419 	if (!evsel) {
1420 		pr_debug("Event (for '%s') '%s' doesn't exist\n",
1421 			 map_name, term->val.str);
1422 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
1423 	}
1424 
1425 	if (!map) {
1426 		pr_debug("Map '%s' is invalid\n", map_name);
1427 		return PTR_ERR(map);
1428 	}
1429 
1430 	/*
1431 	 * No need to check key_size and value_size:
1432 	 * the kernel has already checked them.
1433 	 */
1434 	if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
1435 		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
1436 			 map_name);
1437 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1438 	}
1439 
1440 	op = bpf_map__add_newop(map, term);
1441 	if (IS_ERR(op))
1442 		return PTR_ERR(op);
1443 	op->op_type = BPF_MAP_OP_SET_EVSEL;
1444 	op->v.evsel = evsel;
1445 	return 0;
1446 }
1447 
1448 static int
1449 bpf_map__config_event(struct bpf_map *map,
1450 		      struct parse_events_term *term,
1451 		      struct evlist *evlist)
1452 {
1453 	if (!term->err_val) {
1454 		pr_debug("Config value not set\n");
1455 		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1456 	}
1457 
1458 	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
1459 		pr_debug("ERROR: wrong value type for 'event'\n");
1460 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1461 	}
1462 
1463 	return __bpf_map__config_event(map, term, evlist);
1464 }
1465 
1466 struct bpf_obj_config__map_func {
1467 	const char *config_opt;
1468 	int (*config_func)(struct bpf_map *, struct parse_events_term *,
1469 			   struct evlist *);
1470 };
1471 
1472 struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
1473 	{"value", bpf_map__config_value},
1474 	{"event", bpf_map__config_event},
1475 };
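/*
 * These handlers back the "map:<mapname>.<opt>" object config terms.
 * For illustration only (hypothetical map and event names), a term
 * whose config string is "map:flip_table.value=1" would queue a
 * SET_VALUE op, and "map:channel.event=bpf-output" a SET_EVSEL op,
 * on the respective maps.
 */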
1476 
1477 static int
1478 config_map_indices_range_check(struct parse_events_term *term,
1479 			       struct bpf_map *map,
1480 			       const char *map_name)
1481 {
1482 	struct parse_events_array *array = &term->array;
1483 	unsigned int i;
1484 
1485 	if (!array->nr_ranges)
1486 		return 0;
1487 	if (!array->ranges) {
1488 		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
1489 			 map_name, (int)array->nr_ranges);
1490 		return -BPF_LOADER_ERRNO__INTERNAL;
1491 	}
1492 
1493 	if (!map) {
1494 		pr_debug("Map '%s' is invalid\n", map_name);
1495 		return -BPF_LOADER_ERRNO__INTERNAL;
1496 	}
1497 
1498 	for (i = 0; i < array->nr_ranges; i++) {
1499 		unsigned int start = array->ranges[i].start;
1500 		size_t length = array->ranges[i].length;
1501 		unsigned int idx = start + length - 1;
1502 
1503 		if (idx >= bpf_map__max_entries(map)) {
1504 			pr_debug("ERROR: index %d too large\n", idx);
1505 			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
1506 		}
1507 	}
1508 	return 0;
1509 }
1510 
1511 static int
1512 bpf__obj_config_map(struct bpf_object *obj,
1513 		    struct parse_events_term *term,
1514 		    struct evlist *evlist,
1515 		    int *key_scan_pos)
1516 {
1517 	/* key is "map:<mapname>.<config opt>" */
1518 	char *map_name = strdup(term->config + sizeof("map:") - 1);
1519 	struct bpf_map *map;
1520 	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1521 	char *map_opt;
1522 	size_t i;
1523 
1524 	if (!map_name)
1525 		return -ENOMEM;
1526 
1527 	map_opt = strchr(map_name, '.');
1528 	if (!map_opt) {
1529 		pr_debug("ERROR: Invalid map config: %s\n", map_name);
1530 		goto out;
1531 	}
1532 
1533 	*map_opt++ = '\0';
1534 	if (*map_opt == '\0') {
1535 		pr_debug("ERROR: Invalid map option: %s\n", term->config);
1536 		goto out;
1537 	}
1538 
1539 	map = bpf_object__find_map_by_name(obj, map_name);
1540 	if (!map) {
1541 		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
1542 		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
1543 		goto out;
1544 	}
1545 
1546 	*key_scan_pos += strlen(map_opt);
1547 	err = config_map_indices_range_check(term, map, map_name);
1548 	if (err)
1549 		goto out;
1550 	*key_scan_pos -= strlen(map_opt);
1551 
1552 	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
1553 		struct bpf_obj_config__map_func *func =
1554 				&bpf_obj_config__map_funcs[i];
1555 
1556 		if (strcmp(map_opt, func->config_opt) == 0) {
1557 			err = func->config_func(map, term, evlist);
1558 			goto out;
1559 		}
1560 	}
1561 
1562 	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
1563 	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
1564 out:
1565 	if (!err)
1566 		*key_scan_pos += strlen(map_opt);
1567 
1568 	free(map_name);
1569 	return err;
1570 }
1571 
1572 int bpf__config_obj(struct bpf_object *obj,
1573 		    struct parse_events_term *term,
1574 		    struct evlist *evlist,
1575 		    int *error_pos)
1576 {
1577 	int key_scan_pos = 0;
1578 	int err;
1579 
1580 	if (!obj || !term || !term->config)
1581 		return -EINVAL;
1582 
1583 	if (strstarts(term->config, "map:")) {
1584 		key_scan_pos = sizeof("map:") - 1;
1585 		err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
1586 		goto out;
1587 	}
1588 	err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1589 out:
1590 	if (error_pos)
1591 		*error_pos = key_scan_pos;
1592 	return err;
1593 
1594 }
1595 
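/*
 * Callback used by bpf_map_config_foreach_key(): invoked once per key
 * selected by an op, with @pkey pointing at the (unsigned int) array
 * index and @arg passed through from the caller.
 */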
1596 typedef int (*map_config_func_t)(const char *name, int map_fd,
1597 				 const struct bpf_map *map,
1598 				 struct bpf_map_op *op,
1599 				 void *pkey, void *arg);
1600 
1601 static int
1602 foreach_key_array_all(map_config_func_t func,
1603 		      void *arg, const char *name,
1604 		      int map_fd, const struct bpf_map *map,
1605 		      struct bpf_map_op *op)
1606 {
1607 	unsigned int i;
1608 	int err;
1609 
1610 	for (i = 0; i < bpf_map__max_entries(map); i++) {
1611 		err = func(name, map_fd, map, op, &i, arg);
1612 		if (err) {
1613 			pr_debug("ERROR: failed to insert value to %s[%u]\n",
1614 				 name, i);
1615 			return err;
1616 		}
1617 	}
1618 	return 0;
1619 }
1620 
1621 static int
1622 foreach_key_array_ranges(map_config_func_t func, void *arg,
1623 			 const char *name, int map_fd,
1624 			 const struct bpf_map *map,
1625 			 struct bpf_map_op *op)
1626 {
1627 	unsigned int i, j;
1628 	int err;
1629 
1630 	for (i = 0; i < op->k.array.nr_ranges; i++) {
1631 		unsigned int start = op->k.array.ranges[i].start;
1632 		size_t length = op->k.array.ranges[i].length;
1633 
1634 		for (j = 0; j < length; j++) {
1635 			unsigned int idx = start + j;
1636 
1637 			err = func(name, map_fd, map, op, &idx, arg);
1638 			if (err) {
1639 				pr_debug("ERROR: failed to insert value to %s[%u]\n",
1640 					 name, idx);
1641 				return err;
1642 			}
1643 		}
1644 	}
1645 	return 0;
1646 }
1647 
1648 static int
1649 bpf_map_config_foreach_key(struct bpf_map *map,
1650 			   map_config_func_t func,
1651 			   void *arg)
1652 {
1653 	int err, map_fd, type;
1654 	struct bpf_map_op *op;
1655 	const char *name = bpf_map__name(map);
1656 	struct bpf_map_priv *priv = map_priv(map);
1657 
1658 	if (IS_ERR(priv)) {
1659 		pr_debug("ERROR: failed to get private from map %s\n", name);
1660 		return -BPF_LOADER_ERRNO__INTERNAL;
1661 	}
1662 	if (!priv || list_empty(&priv->ops_list)) {
1663 		pr_debug("INFO: nothing to config for map %s\n", name);
1664 		return 0;
1665 	}
1666 
1667 	if (!map) {
1668 		pr_debug("Map '%s' is invalid\n", name);
1669 		return -BPF_LOADER_ERRNO__INTERNAL;
1670 	}
1671 	map_fd = bpf_map__fd(map);
1672 	if (map_fd < 0) {
1673 		pr_debug("ERROR: failed to get fd from map %s\n", name);
1674 		return map_fd;
1675 	}
1676 
1677 	type = bpf_map__type(map);
1678 	list_for_each_entry(op, &priv->ops_list, list) {
1679 		switch (type) {
1680 		case BPF_MAP_TYPE_ARRAY:
1681 		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1682 			switch (op->key_type) {
1683 			case BPF_MAP_KEY_ALL:
1684 				err = foreach_key_array_all(func, arg, name,
1685 							    map_fd, map, op);
1686 				break;
1687 			case BPF_MAP_KEY_RANGES:
1688 				err = foreach_key_array_ranges(func, arg, name,
1689 							       map_fd, map, op);
1690 				break;
1691 			default:
1692 				pr_debug("ERROR: keytype for map '%s' invalid\n",
1693 					 name);
1694 				return -BPF_LOADER_ERRNO__INTERNAL;
1695 			}
1696 			if (err)
1697 				return err;
1698 			break;
1699 		default:
1700 			pr_debug("ERROR: type of '%s' incorrect\n", name);
1701 			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1702 		}
1703 	}
1704 
1705 	return 0;
1706 }
1707 
1708 static int
1709 apply_config_value_for_key(int map_fd, void *pkey,
1710 			   size_t val_size, u64 val)
1711 {
1712 	int err = 0;
1713 
1714 	switch (val_size) {
1715 	case 1: {
1716 		u8 _val = (u8)(val);
1717 		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1718 		break;
1719 	}
1720 	case 2: {
1721 		u16 _val = (u16)(val);
1722 		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1723 		break;
1724 	}
1725 	case 4: {
1726 		u32 _val = (u32)(val);
1727 		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1728 		break;
1729 	}
1730 	case 8: {
1731 		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
1732 		break;
1733 	}
1734 	default:
1735 		pr_debug("ERROR: invalid value size\n");
1736 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1737 	}
1738 	if (err && errno)
1739 		err = -errno;
1740 	return err;
1741 }
1742 
1743 static int
1744 apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
1745 			   struct evsel *evsel)
1746 {
1747 	struct xyarray *xy = evsel->core.fd;
1748 	struct perf_event_attr *attr;
1749 	unsigned int key, events;
1750 	bool check_pass = false;
1751 	int *evt_fd;
1752 	int err;
1753 
1754 	if (!xy) {
1755 		pr_debug("ERROR: evsel not ready for map %s\n", name);
1756 		return -BPF_LOADER_ERRNO__INTERNAL;
1757 	}
1758 
1759 	if (xy->row_size / xy->entry_size != 1) {
1760 		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
1761 			 name);
1762 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
1763 	}
1764 
1765 	attr = &evsel->core.attr;
1766 	if (attr->inherit) {
1767 		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
1768 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
1769 	}
1770 
1771 	if (evsel__is_bpf_output(evsel))
1772 		check_pass = true;
1773 	if (attr->type == PERF_TYPE_RAW)
1774 		check_pass = true;
1775 	if (attr->type == PERF_TYPE_HARDWARE)
1776 		check_pass = true;
1777 	if (!check_pass) {
1778 		pr_debug("ERROR: Event type is wrong for map %s\n", name);
1779 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
1780 	}
1781 
1782 	events = xy->entries / (xy->row_size / xy->entry_size);
1783 	key = *((unsigned int *)pkey);
1784 	if (key >= events) {
1785 		pr_debug("ERROR: there is no event %d for map %s\n",
1786 			 key, name);
1787 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
1788 	}
1789 	evt_fd = xyarray__entry(xy, key, 0);
1790 	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
1791 	if (err && errno)
1792 		err = -errno;
1793 	return err;
1794 }
1795 
1796 static int
1797 apply_obj_config_map_for_key(const char *name, int map_fd,
1798 			     const struct bpf_map *map,
1799 			     struct bpf_map_op *op,
1800 			     void *pkey, void *arg __maybe_unused)
1801 {
1802 	int err;
1803 
1804 	switch (op->op_type) {
1805 	case BPF_MAP_OP_SET_VALUE:
1806 		err = apply_config_value_for_key(map_fd, pkey,
1807 						 bpf_map__value_size(map),
1808 						 op->v.value);
1809 		break;
1810 	case BPF_MAP_OP_SET_EVSEL:
1811 		err = apply_config_evsel_for_key(name, map_fd, pkey,
1812 						 op->v.evsel);
1813 		break;
1814 	default:
1815 		pr_debug("ERROR: unknown value type for '%s'\n", name);
1816 		err = -BPF_LOADER_ERRNO__INTERNAL;
1817 	}
1818 	return err;
1819 }
1820 
1821 static int
1822 apply_obj_config_map(struct bpf_map *map)
1823 {
1824 	return bpf_map_config_foreach_key(map,
1825 					  apply_obj_config_map_for_key,
1826 					  NULL);
1827 }
1828 
1829 static int
1830 apply_obj_config_object(struct bpf_object *obj)
1831 {
1832 	struct bpf_map *map;
1833 	int err;
1834 
1835 	bpf_object__for_each_map(map, obj) {
1836 		err = apply_obj_config_map(map);
1837 		if (err)
1838 			return err;
1839 	}
1840 	return 0;
1841 }
1842 
1843 int bpf__apply_obj_config(void)
1844 {
1845 	struct bpf_perf_object *perf_obj, *tmp;
1846 	int err;
1847 
1848 	bpf_perf_object__for_each(perf_obj, tmp) {
1849 		err = apply_obj_config_object(perf_obj->obj);
1850 		if (err)
1851 			return err;
1852 	}
1853 
1854 	return 0;
1855 }
1856 
1857 #define bpf__perf_for_each_map(map, pobj, tmp)			\
1858 	bpf_perf_object__for_each(pobj, tmp)			\
1859 		bpf_object__for_each_map(map, pobj->obj)
1860 
1861 #define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name)	\
1862 	bpf__perf_for_each_map(map, pobj, pobjtmp)		\
1863 		if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0))
1864 
1865 struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
1866 {
1867 	struct bpf_map_priv *tmpl_priv = NULL;
1868 	struct bpf_perf_object *perf_obj, *tmp;
1869 	struct evsel *evsel = NULL;
1870 	struct bpf_map *map;
1871 	int err;
1872 	bool need_init = false;
1873 
1874 	bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
1875 		struct bpf_map_priv *priv = map_priv(map);
1876 
1877 		if (IS_ERR(priv))
1878 			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
1879 
1880 		/*
1881 		 * No need to check map type: type should have been
1882 		 * verified by the kernel.
1883 		 */
1884 		if (!need_init && !priv)
1885 			need_init = !priv;
1886 		if (!tmpl_priv && priv)
1887 			tmpl_priv = priv;
1888 	}
1889 
1890 	if (!need_init)
1891 		return NULL;
1892 
1893 	if (!tmpl_priv) {
1894 		char *event_definition = NULL;
1895 
1896 		if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
1897 			return ERR_PTR(-ENOMEM);
1898 
1899 		err = parse_event(evlist, event_definition);
1900 		free(event_definition);
1901 
1902 		if (err) {
1903 			pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
1904 			return ERR_PTR(-err);
1905 		}
1906 
1907 		evsel = evlist__last(evlist);
1908 	}
1909 
1910 	bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
1911 		struct bpf_map_priv *priv = map_priv(map);
1912 
1913 		if (IS_ERR(priv))
1914 			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
1915 		if (priv)
1916 			continue;
1917 
1918 		if (tmpl_priv) {
1919 			priv = bpf_map_priv__clone(tmpl_priv);
1920 			if (!priv)
1921 				return ERR_PTR(-ENOMEM);
1922 
1923 			err = map_set_priv(map, priv);
1924 			if (err) {
1925 				bpf_map_priv__clear(map, priv);
1926 				return ERR_PTR(err);
1927 			}
1928 		} else if (evsel) {
1929 			struct bpf_map_op *op;
1930 
1931 			op = bpf_map__add_newop(map, NULL);
1932 			if (IS_ERR(op))
1933 				return ERR_CAST(op);
1934 			op->op_type = BPF_MAP_OP_SET_EVSEL;
1935 			op->v.evsel = evsel;
1936 		}
1937 	}
1938 
1939 	return evsel;
1940 }
1941 
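/*
 * Wrapper around bpf__setup_output_event() for the conventional
 * "__bpf_stdout__" map name; returns 0 on success or the negative
 * error code of the failed setup.
 */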
int bpf__setup_stdout(struct evlist *evlist)
{
	struct evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
	return PTR_ERR_OR_ZERO(evsel);
}

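/*
 * Map BPF_LOADER_ERRNO__* codes onto indexes of the message table
 * below, e.g. ERRCODE_OFFSET(CONFIG) is the slot for
 * BPF_LOADER_ERRNO__CONFIG.
 */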
#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};

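/*
 * Render @err (sign ignored) into @buf: libbpf error codes are passed
 * to libbpf_strerror(), codes in the BPF loader range use the table
 * above, and everything else falls back to str_error_r().  Returns 0
 * for recognised loader errors, the libbpf_strerror() result for
 * libbpf errors and -1 otherwise.
 */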
static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}

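/*
 * Helpers for the bpf__strerror_*() functions below.  Together they
 * open a switch on the (made positive) error code, print a dedicated
 * message for each bpf__strerror_entry() and fall back to the generic
 * bpf_loader_strerror() text in the default case, e.g.:
 *
 *	bpf__strerror_head(err, buf, size);
 *	bpf__strerror_entry(EACCES, "You need to be root");
 *	bpf__strerror_end(buf, size);
 */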
#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';

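/*
 * Prefix the message with the object (or source) file that failed to
 * load, then append the detailed reason from bpf_loader_strerror().
 */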
int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
			 filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}

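/*
 * Describe probe setup failures, adding hints for the common causes:
 * duplicate probe points, missing privileges, kptr_restrict and
 * unresolvable probing points.
 */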
int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exist. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}

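/*
 * Describe program load failures.  LIBBPF_ERRNO__KVER gets a
 * dedicated message comparing the object's 'version' section with the
 * running kernel version.
 */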
int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}

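/*
 * Describe object/map configuration errors; only the map type
 * mismatch currently has a dedicated hint, everything else uses the
 * generic text.
 */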
int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}

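/*
 * Describe bpf__apply_obj_config() failures: restrictions on putting
 * events into maps (multi-thread tracing, inherited events,
 * unsupported event types).
 */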
int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}

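/*
 * No dedicated messages for output event setup failures yet; fall
 * back to the generic bpf_loader_strerror() text.
 */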
int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
				     int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}