Linux Perf
bpf-loader.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bpf-loader.c
4  *
5  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
6  * Copyright (C) 2015 Huawei Inc.
7  */
8 
9 #include <linux/bpf.h>
10 #include <bpf/libbpf.h>
11 #include <bpf/bpf.h>
12 #include <linux/err.h>
13 #include <linux/kernel.h>
14 #include <linux/string.h>
15 #include <errno.h>
16 #include "perf.h"
17 #include "debug.h"
18 #include "bpf-loader.h"
19 #include "bpf-prologue.h"
20 #include "probe-event.h"
21 #include "probe-finder.h" // for MAX_PROBES
22 #include "parse-events.h"
23 #include "strfilter.h"
24 #include "llvm-utils.h"
25 #include "c++/clang-c.h"
26 
27 #define DEFINE_PRINT_FN(name, level) \
28 static int libbpf_##name(const char *fmt, ...) \
29 { \
30  va_list args; \
31  int ret; \
32  \
33  va_start(args, fmt); \
34  ret = veprintf(level, verbose, pr_fmt(fmt), args);\
35  va_end(args); \
36  return ret; \
37 }
38 
39 DEFINE_PRINT_FN(warning, 1)
40 DEFINE_PRINT_FN(info, 1)
41 DEFINE_PRINT_FN(debug, 1)
42 
43 struct bpf_prog_priv {
44  bool is_tp;
45  char *sys_name;
46  char *evt_name;
47  struct perf_probe_event pev;
48  bool need_prologue;
49  struct bpf_insn *insns_buf;
50  int nr_types;
51  int *type_mapping;
52 };
53 
54 static bool libbpf_initialized;
55 
56 struct bpf_object *
57 bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
58 {
59  struct bpf_object *obj;
60 
61  if (!libbpf_initialized) {
62  libbpf_set_print(libbpf_warning,
63  libbpf_info,
64  libbpf_debug);
65  libbpf_initialized = true;
66  }
67 
68  obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
69  if (IS_ERR_OR_NULL(obj)) {
70  pr_debug("bpf: failed to load buffer\n");
71  return ERR_PTR(-EINVAL);
72  }
73 
74  return obj;
75 }
76 
77 struct bpf_object *bpf__prepare_load(const char *filename, bool source)
78 {
79  struct bpf_object *obj;
80 
81  if (!libbpf_initialized) {
82  libbpf_set_print(libbpf_warning,
83  libbpf_info,
84  libbpf_debug);
85  libbpf_initialized = true;
86  }
87 
88  if (source) {
89  int err;
90  void *obj_buf;
91  size_t obj_buf_sz;
92 
93  perf_clang__init();
94  err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
95  perf_clang__cleanup();
96  if (err) {
97  pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
98  err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
99  if (err)
100  return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
101  } else
102  pr_debug("bpf: successfull builtin compilation\n");
103  obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
104 
105  if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
106  llvm__dump_obj(filename, obj_buf, obj_buf_sz);
107 
108  free(obj_buf);
109  } else
110  obj = bpf_object__open(filename);
111 
112  if (IS_ERR_OR_NULL(obj)) {
113  pr_debug("bpf: failed to load %s\n", filename);
114  return obj;
115  }
116 
117  return obj;
118 }
119 
120 void bpf__clear(void)
121 {
122  struct bpf_object *obj, *tmp;
123 
124  bpf_object__for_each_safe(obj, tmp) {
125  bpf__unprobe(obj);
126  bpf_object__close(obj);
127  }
128 }
129 
130 static void
131 clear_prog_priv(struct bpf_program *prog __maybe_unused,
132  void *_priv)
133 {
134  struct bpf_prog_priv *priv = _priv;
135 
136  cleanup_perf_probe_events(&priv->pev, 1);
137  zfree(&priv->insns_buf);
138  zfree(&priv->type_mapping);
139  zfree(&priv->sys_name);
140  zfree(&priv->evt_name);
141  free(priv);
142 }
143 
144 static int
145 prog_config__exec(const char *value, struct perf_probe_event *pev)
146 {
147  pev->uprobes = true;
148  pev->target = strdup(value);
149  if (!pev->target)
150  return -ENOMEM;
151  return 0;
152 }
153 
154 static int
155 prog_config__module(const char *value, struct perf_probe_event *pev)
156 {
157  pev->uprobes = false;
158  pev->target = strdup(value);
159  if (!pev->target)
160  return -ENOMEM;
161  return 0;
162 }
163 
164 static int
165 prog_config__bool(const char *value, bool *pbool, bool invert)
166 {
167  int err;
168  bool bool_value;
169 
170  if (!pbool)
171  return -EINVAL;
172 
173  err = strtobool(value, &bool_value);
174  if (err)
175  return err;
176 
177  *pbool = invert ? !bool_value : bool_value;
178  return 0;
179 }
180 
181 static int
182 prog_config__inlines(const char *value,
183  struct perf_probe_event *pev __maybe_unused)
184 {
185  return prog_config__bool(value, &probe_conf.no_inlines, true);
186 }
187 
188 static int
189 prog_config__force(const char *value,
190  struct perf_probe_event *pev __maybe_unused)
191 {
192  return prog_config__bool(value, &probe_conf.force_add, false);
193 }
194 
195 static struct {
196  const char *key;
197  const char *usage;
198  const char *desc;
199  int (*func)(const char *, struct perf_probe_event *);
200 } bpf_prog_config_terms[] = {
201  {
202  .key = "exec",
203  .usage = "exec=<full path of file>",
204  .desc = "Set uprobe target",
205  .func = prog_config__exec,
206  },
207  {
208  .key = "module",
209  .usage = "module=<module name> ",
210  .desc = "Set kprobe module",
211  .func = prog_config__module,
212  },
213  {
214  .key = "inlines",
215  .usage = "inlines=[yes|no] ",
216  .desc = "Probe at inline symbol",
217  .func = prog_config__inlines,
218  },
219  {
220  .key = "force",
221  .usage = "force=[yes|no] ",
222  .desc = "Forcibly add events with existing name",
223  .func = prog_config__force,
224  },
225 };
226 
227 static int
228 do_prog_config(const char *key, const char *value,
229  struct perf_probe_event *pev)
230 {
231  unsigned int i;
232 
233  pr_debug("config bpf program: %s=%s\n", key, value);
234  for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
235  if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
236  return bpf_prog_config_terms[i].func(value, pev);
237 
238  pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
239  key, value);
240 
241  pr_debug("\nHint: Valid options are:\n");
242  for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
243  pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
244  bpf_prog_config_terms[i].desc);
245  pr_debug("\n");
246 
247  return -BPF_LOADER_ERRNO__PROGCONF_TERM;
248 }
249 
250 static const char *
251 parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
252 {
253  char *text = strdup(config_str);
254  char *sep, *line;
255  const char *main_str = NULL;
256  int err = 0;
257 
258  if (!text) {
259  pr_debug("Not enough memory: dup config_str failed\n");
260  return ERR_PTR(-ENOMEM);
261  }
262 
263  line = text;
264  while ((sep = strchr(line, ';'))) {
265  char *equ;
266 
267  *sep = '\0';
268  equ = strchr(line, '=');
269  if (!equ) {
270  pr_warning("WARNING: invalid config in BPF object: %s\n",
271  line);
272  pr_warning("\tShould be 'key=value'.\n");
273  goto nextline;
274  }
275  *equ = '\0';
276 
277  err = do_prog_config(line, equ + 1, pev);
278  if (err)
279  break;
280 nextline:
281  line = sep + 1;
282  }
283 
284  if (!err)
285  main_str = config_str + (line - text);
286  free(text);
287 
288  return err ? ERR_PTR(err) : main_str;
289 }
290 
291 static int
292 parse_prog_config(const char *config_str, const char **p_main_str,
293  bool *is_tp, struct perf_probe_event *pev)
294 {
295  int err;
296  const char *main_str = parse_prog_config_kvpair(config_str, pev);
297 
298  if (IS_ERR(main_str))
299  return PTR_ERR(main_str);
300 
301  *p_main_str = main_str;
302  if (!strchr(main_str, '=')) {
303  /* Is a tracepoint event? */
304  const char *s = strchr(main_str, ':');
305 
306  if (!s) {
307  pr_debug("bpf: '%s' is not a valid tracepoint\n",
308  config_str);
309  return -BPF_LOADER_ERRNO__CONFIG;
310  }
311 
312  *is_tp = true;
313  return 0;
314  }
315 
316  *is_tp = false;
317  err = parse_perf_probe_command(main_str, pev);
318  if (err < 0) {
319  pr_debug("bpf: '%s' is not a valid config string\n",
320  config_str);
321  /* parse failed, no need to clear pev. */
322  return -BPF_LOADER_ERRNO__CONFIG;
323  }
324  return 0;
325 }
326 
327 static int
328 config_bpf_program(struct bpf_program *prog)
329 {
330  struct perf_probe_event *pev = NULL;
331  struct bpf_prog_priv *priv = NULL;
332  const char *config_str, *main_str;
333  bool is_tp = false;
334  int err;
335 
336  /* Initialize per-program probing setting */
337  probe_conf.no_inlines = false;
338  probe_conf.force_add = false;
339 
340  config_str = bpf_program__title(prog, false);
341  if (IS_ERR(config_str)) {
342  pr_debug("bpf: unable to get title for program\n");
343  return PTR_ERR(config_str);
344  }
345 
346  priv = calloc(sizeof(*priv), 1);
347  if (!priv) {
348  pr_debug("bpf: failed to alloc priv\n");
349  return -ENOMEM;
350  }
351  pev = &priv->pev;
352 
353  pr_debug("bpf: config program '%s'\n", config_str);
354  err = parse_prog_config(config_str, &main_str, &is_tp, pev);
355  if (err)
356  goto errout;
357 
358  if (is_tp) {
359  char *s = strchr(main_str, ':');
360 
361  priv->is_tp = true;
362  priv->sys_name = strndup(main_str, s - main_str);
363  priv->evt_name = strdup(s + 1);
364  goto set_priv;
365  }
366 
367  if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
368  pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
369  config_str, PERF_BPF_PROBE_GROUP);
370  err = -BPF_LOADER_ERRNO__GROUP;
371  goto errout;
372  } else if (!pev->group)
373  pev->group = strdup(PERF_BPF_PROBE_GROUP);
374 
375  if (!pev->group) {
376  pr_debug("bpf: strdup failed\n");
377  err = -ENOMEM;
378  goto errout;
379  }
380 
381  if (!pev->event) {
382  pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
383  config_str);
384  err = -BPF_LOADER_ERRNO__EVENTNAME;
385  goto errout;
386  }
387  pr_debug("bpf: config '%s' is ok\n", config_str);
388 
389 set_priv:
390  err = bpf_program__set_priv(prog, priv, clear_prog_priv);
391  if (err) {
392  pr_debug("Failed to set priv for program '%s'\n", config_str);
393  goto errout;
394  }
395 
396  return 0;
397 
398 errout:
399  if (pev)
400  clear_perf_probe_event(pev);
401  free(priv);
402  return err;
403 }
404 
405 static int bpf__prepare_probe(void)
406 {
407  static int err = 0;
408  static bool initialized = false;
409 
410  /*
411  * Make err static, so if init fails the first time, bpf__prepare_probe()
412  * fails each subsequent time without calling init_probe_symbol_maps()
413  * multiple times.
414  */
415  if (initialized)
416  return err;
417 
418  initialized = true;
419  err = init_probe_symbol_maps(false);
420  if (err < 0)
421  pr_debug("Failed to init_probe_symbol_maps\n");
422  probe_conf.max_probes = MAX_PROBES;
423  return err;
424 }
425 
426 static int
427 preproc_gen_prologue(struct bpf_program *prog, int n,
428  struct bpf_insn *orig_insns, int orig_insns_cnt,
429  struct bpf_prog_prep_result *res)
430 {
431  struct bpf_prog_priv *priv = bpf_program__priv(prog);
432  struct probe_trace_event *tev;
433  struct perf_probe_event *pev;
434  struct bpf_insn *buf;
435  size_t prologue_cnt = 0;
436  int i, err;
437 
438  if (IS_ERR(priv) || !priv || priv->is_tp)
439  goto errout;
440 
441  pev = &priv->pev;
442 
443  if (n < 0 || n >= priv->nr_types)
444  goto errout;
445 
446  /* Find a tev that belongs to that type */
447  for (i = 0; i < pev->ntevs; i++) {
448  if (priv->type_mapping[i] == n)
449  break;
450  }
451 
452  if (i >= pev->ntevs) {
453  pr_debug("Internal error: prologue type %d not found\n", n);
454  return -BPF_LOADER_ERRNO__PROLOGUE;
455  }
456 
457  tev = &pev->tevs[i];
458 
459  buf = priv->insns_buf;
460  err = bpf__gen_prologue(tev->args, tev->nargs,
461  buf, &prologue_cnt,
462  BPF_MAXINSNS - orig_insns_cnt);
463  if (err) {
464  const char *title;
465 
466  title = bpf_program__title(prog, false);
467  if (!title)
468  title = "[unknown]";
469 
470  pr_debug("Failed to generate prologue for program %s\n",
471  title);
472  return err;
473  }
474 
475  memcpy(&buf[prologue_cnt], orig_insns,
476  sizeof(struct bpf_insn) * orig_insns_cnt);
477 
478  res->new_insn_ptr = buf;
479  res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
480  res->pfd = NULL;
481  return 0;
482 
483 errout:
484  pr_debug("Internal error in preproc_gen_prologue\n");
485  return -BPF_LOADER_ERRNO__PROLOGUE;
486 }
487 
488 /*
489  * compare_tev_args is reflexive, transitive and antisymmetric.
490  * I can prove it, but this margin is too narrow to contain the proof.
491  */
492 static int compare_tev_args(const void *ptev1, const void *ptev2)
493 {
494  int i, ret;
495  const struct probe_trace_event *tev1 =
496  *(const struct probe_trace_event **)ptev1;
497  const struct probe_trace_event *tev2 =
498  *(const struct probe_trace_event **)ptev2;
499 
500  ret = tev2->nargs - tev1->nargs;
501  if (ret)
502  return ret;
503 
504  for (i = 0; i < tev1->nargs; i++) {
505  struct probe_trace_arg *arg1, *arg2;
506  struct probe_trace_arg_ref *ref1, *ref2;
507 
508  arg1 = &tev1->args[i];
509  arg2 = &tev2->args[i];
510 
511  ret = strcmp(arg1->value, arg2->value);
512  if (ret)
513  return ret;
514 
515  ref1 = arg1->ref;
516  ref2 = arg2->ref;
517 
518  while (ref1 && ref2) {
519  ret = ref2->offset - ref1->offset;
520  if (ret)
521  return ret;
522 
523  ref1 = ref1->next;
524  ref2 = ref2->next;
525  }
526 
527  if (ref1 || ref2)
528  return ref2 ? 1 : -1;
529  }
530 
531  return 0;
532 }
533 
534 /*
535  * Assign a type number to each tev in a pev.
536  * mapping is an array with the same number of slots as tevs in that pev.
537  * nr_types will be set to the number of types.
538  */
539 static int map_prologue(struct perf_probe_event *pev, int *mapping,
540  int *nr_types)
541 {
542  int i, type = 0;
543  struct probe_trace_event **ptevs;
544 
545  size_t array_sz = sizeof(*ptevs) * pev->ntevs;
546 
547  ptevs = malloc(array_sz);
548  if (!ptevs) {
549  pr_debug("Not enough memory: alloc ptevs failed\n");
550  return -ENOMEM;
551  }
552 
553  pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
554  for (i = 0; i < pev->ntevs; i++)
555  ptevs[i] = &pev->tevs[i];
556 
557  qsort(ptevs, pev->ntevs, sizeof(*ptevs),
558  compare_tev_args);
559 
560  for (i = 0; i < pev->ntevs; i++) {
561  int n;
562 
563  n = ptevs[i] - pev->tevs;
564  if (i == 0) {
565  mapping[n] = type;
566  pr_debug("mapping[%d]=%d\n", n, type);
567  continue;
568  }
569 
570  if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
571  mapping[n] = type;
572  else
573  mapping[n] = ++type;
574 
575  pr_debug("mapping[%d]=%d\n", n, mapping[n]);
576  }
577  free(ptevs);
578  *nr_types = type + 1;
579 
580  return 0;
581 }
582 
583 static int hook_load_preprocessor(struct bpf_program *prog)
584 {
585  struct bpf_prog_priv *priv = bpf_program__priv(prog);
586  struct perf_probe_event *pev;
587  bool need_prologue = false;
588  int err, i;
589 
590  if (IS_ERR(priv) || !priv) {
591  pr_debug("Internal error when hook preprocessor\n");
592  return -BPF_LOADER_ERRNO__INTERNAL;
593  }
594 
595  if (priv->is_tp) {
596  priv->need_prologue = false;
597  return 0;
598  }
599 
600  pev = &priv->pev;
601  for (i = 0; i < pev->ntevs; i++) {
602  struct probe_trace_event *tev = &pev->tevs[i];
603 
604  if (tev->nargs > 0) {
605  need_prologue = true;
606  break;
607  }
608  }
609 
610  /*
611  * Since none of the tevs have arguments, we don't need to generate
612  * a prologue.
613  */
614  if (!need_prologue) {
615  priv->need_prologue = false;
616  return 0;
617  }
618 
619  priv->need_prologue = true;
620  priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
621  if (!priv->insns_buf) {
622  pr_debug("Not enough memory: alloc insns_buf failed\n");
623  return -ENOMEM;
624  }
625 
626  priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
627  if (!priv->type_mapping) {
628  pr_debug("Not enough memory: alloc type_mapping failed\n");
629  return -ENOMEM;
630  }
631  memset(priv->type_mapping, -1,
632  sizeof(int) * pev->ntevs);
633 
634  err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
635  if (err)
636  return err;
637 
638  err = bpf_program__set_prep(prog, priv->nr_types,
639  preproc_gen_prologue);
640  return err;
641 }
642 
643 int bpf__probe(struct bpf_object *obj)
644 {
645  int err = 0;
646  struct bpf_program *prog;
647  struct bpf_prog_priv *priv;
648  struct perf_probe_event *pev;
649 
650  err = bpf__prepare_probe();
651  if (err) {
652  pr_debug("bpf__prepare_probe failed\n");
653  return err;
654  }
655 
656  bpf_object__for_each_program(prog, obj) {
657  err = config_bpf_program(prog);
658  if (err)
659  goto out;
660 
661  priv = bpf_program__priv(prog);
662  if (IS_ERR(priv) || !priv) {
663  err = PTR_ERR(priv);
664  goto out;
665  }
666 
667  if (priv->is_tp) {
668  bpf_program__set_tracepoint(prog);
669  continue;
670  }
671 
672  bpf_program__set_kprobe(prog);
673  pev = &priv->pev;
674 
675  err = convert_perf_probe_events(pev, 1);
676  if (err < 0) {
677  pr_debug("bpf_probe: failed to convert perf probe events\n");
678  goto out;
679  }
680 
681  err = apply_perf_probe_events(pev, 1);
682  if (err < 0) {
683  pr_debug("bpf_probe: failed to apply perf probe events\n");
684  goto out;
685  }
686 
687  /*
688  * After probing, let's consider the prologue, which
689  * adds an argument fetcher to BPF programs.
690  *
691  * hook_load_preprocessor() hooks a pre-processor
692  * to the bpf_program, letting it generate the prologue
693  * dynamically during loading.
694  */
695  err = hook_load_preprocessor(prog);
696  if (err)
697  goto out;
698  }
699 out:
700  return err < 0 ? err : 0;
701 }
702 
703 #define EVENTS_WRITE_BUFSIZE 4096
704 int bpf__unprobe(struct bpf_object *obj)
705 {
706  int err, ret = 0;
707  struct bpf_program *prog;
708 
709  bpf_object__for_each_program(prog, obj) {
710  struct bpf_prog_priv *priv = bpf_program__priv(prog);
711  int i;
712 
713  if (IS_ERR(priv) || !priv || priv->is_tp)
714  continue;
715 
716  for (i = 0; i < priv->pev.ntevs; i++) {
717  struct probe_trace_event *tev = &priv->pev.tevs[i];
718  char name_buf[EVENTS_WRITE_BUFSIZE];
719  struct strfilter *delfilter;
720 
721  snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
722  "%s:%s", tev->group, tev->event);
723  name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';
724 
725  delfilter = strfilter__new(name_buf, NULL);
726  if (!delfilter) {
727  pr_debug("Failed to create filter for unprobing\n");
728  ret = -ENOMEM;
729  continue;
730  }
731 
732  err = del_perf_probe_events(delfilter);
733  strfilter__delete(delfilter);
734  if (err) {
735  pr_debug("Failed to delete %s\n", name_buf);
736  ret = err;
737  continue;
738  }
739  }
740  }
741  return ret;
742 }
743 
744 int bpf__load(struct bpf_object *obj)
745 {
746  int err;
747 
748  err = bpf_object__load(obj);
749  if (err) {
750  pr_debug("bpf: load objects failed\n");
751  return err;
752  }
753  return 0;
754 }
755 
756 int bpf__foreach_event(struct bpf_object *obj,
757  bpf_prog_iter_callback_t func,
758  void *arg)
759 {
760  struct bpf_program *prog;
761  int err;
762 
763  bpf_object__for_each_program(prog, obj) {
764  struct bpf_prog_priv *priv = bpf_program__priv(prog);
765  struct probe_trace_event *tev;
766  struct perf_probe_event *pev;
767  int i, fd;
768 
769  if (IS_ERR(priv) || !priv) {
770  pr_debug("bpf: failed to get private field\n");
771  return -BPF_LOADER_ERRNO__INTERNAL;
772  }
773 
774  if (priv->is_tp) {
775  fd = bpf_program__fd(prog);
776  err = (*func)(priv->sys_name, priv->evt_name, fd, arg);
777  if (err) {
778  pr_debug("bpf: tracepoint call back failed, stop iterate\n");
779  return err;
780  }
781  continue;
782  }
783 
784  pev = &priv->pev;
785  for (i = 0; i < pev->ntevs; i++) {
786  tev = &pev->tevs[i];
787 
788  if (priv->need_prologue) {
789  int type = priv->type_mapping[i];
790 
791  fd = bpf_program__nth_fd(prog, type);
792  } else {
793  fd = bpf_program__fd(prog);
794  }
795 
796  if (fd < 0) {
797  pr_debug("bpf: failed to get file descriptor\n");
798  return fd;
799  }
800 
801  err = (*func)(tev->group, tev->event, fd, arg);
802  if (err) {
803  pr_debug("bpf: call back failed, stop iterate\n");
804  return err;
805  }
806  }
807  }
808  return 0;
809 }
810 
811 enum bpf_map_op_type {
812  BPF_MAP_OP_SET_VALUE,
813  BPF_MAP_OP_SET_EVSEL,
814 };
815 
816 enum bpf_map_key_type {
817  BPF_MAP_KEY_ALL,
818  BPF_MAP_KEY_RANGES,
819 };
820 
821 struct bpf_map_op {
822  struct list_head list;
823  enum bpf_map_op_type op_type;
824  enum bpf_map_key_type key_type;
825  union {
826  struct parse_events_array array;
827  } k;
828  union {
829  u64 value;
830  struct perf_evsel *evsel;
831  } v;
832 };
833 
834 struct bpf_map_priv {
835  struct list_head ops_list;
836 };
837 
838 static void
839 bpf_map_op__delete(struct bpf_map_op *op)
840 {
841  if (!list_empty(&op->list))
842  list_del(&op->list);
843  if (op->key_type == BPF_MAP_KEY_RANGES)
844  parse_events__clear_array(&op->k.array);
845  free(op);
846 }
847 
848 static void
849 bpf_map_priv__purge(struct bpf_map_priv *priv)
850 {
851  struct bpf_map_op *pos, *n;
852 
853  list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
854  list_del_init(&pos->list);
855  bpf_map_op__delete(pos);
856  }
857 }
858 
859 static void
860 bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
861  void *_priv)
862 {
863  struct bpf_map_priv *priv = _priv;
864 
865  bpf_map_priv__purge(priv);
866  free(priv);
867 }
868 
869 static int
870 bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
871 {
872  op->key_type = BPF_MAP_KEY_ALL;
873  if (!term)
874  return 0;
875 
876  if (term->array.nr_ranges) {
877  size_t memsz = term->array.nr_ranges *
878  sizeof(op->k.array.ranges[0]);
879 
880  op->k.array.ranges = memdup(term->array.ranges, memsz);
881  if (!op->k.array.ranges) {
882  pr_debug("Not enough memory to alloc indices for map\n");
883  return -ENOMEM;
884  }
885  op->key_type = BPF_MAP_KEY_RANGES;
886  op->k.array.nr_ranges = term->array.nr_ranges;
887  }
888  return 0;
889 }
890 
891 static struct bpf_map_op *
892 bpf_map_op__new(struct parse_events_term *term)
893 {
894  struct bpf_map_op *op;
895  int err;
896 
897  op = zalloc(sizeof(*op));
898  if (!op) {
899  pr_debug("Failed to alloc bpf_map_op\n");
900  return ERR_PTR(-ENOMEM);
901  }
902  INIT_LIST_HEAD(&op->list);
903 
904  err = bpf_map_op_setkey(op, term);
905  if (err) {
906  free(op);
907  return ERR_PTR(err);
908  }
909  return op;
910 }
911 
912 static struct bpf_map_op *
913 bpf_map_op__clone(struct bpf_map_op *op)
914 {
915  struct bpf_map_op *newop;
916 
917  newop = memdup(op, sizeof(*op));
918  if (!newop) {
919  pr_debug("Failed to alloc bpf_map_op\n");
920  return NULL;
921  }
922 
923  INIT_LIST_HEAD(&newop->list);
924  if (op->key_type == BPF_MAP_KEY_RANGES) {
925  size_t memsz = op->k.array.nr_ranges *
926  sizeof(op->k.array.ranges[0]);
927 
928  newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
929  if (!newop->k.array.ranges) {
930  pr_debug("Failed to alloc indices for map\n");
931  free(newop);
932  return NULL;
933  }
934  }
935 
936  return newop;
937 }
938 
939 static struct bpf_map_priv *
940 bpf_map_priv__clone(struct bpf_map_priv *priv)
941 {
942  struct bpf_map_priv *newpriv;
943  struct bpf_map_op *pos, *newop;
944 
945  newpriv = zalloc(sizeof(*newpriv));
946  if (!newpriv) {
947  pr_debug("Not enough memory to alloc map private\n");
948  return NULL;
949  }
950  INIT_LIST_HEAD(&newpriv->ops_list);
951 
952  list_for_each_entry(pos, &priv->ops_list, list) {
953  newop = bpf_map_op__clone(pos);
954  if (!newop) {
955  bpf_map_priv__purge(newpriv);
956  return NULL;
957  }
958  list_add_tail(&newop->list, &newpriv->ops_list);
959  }
960 
961  return newpriv;
962 }
963 
964 static int
965 bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
966 {
967  const char *map_name = bpf_map__name(map);
968  struct bpf_map_priv *priv = bpf_map__priv(map);
969 
970  if (IS_ERR(priv)) {
971  pr_debug("Failed to get private from map %s\n", map_name);
972  return PTR_ERR(priv);
973  }
974 
975  if (!priv) {
976  priv = zalloc(sizeof(*priv));
977  if (!priv) {
978  pr_debug("Not enough memory to alloc map private\n");
979  return -ENOMEM;
980  }
981  INIT_LIST_HEAD(&priv->ops_list);
982 
983  if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
984  free(priv);
985  return -BPF_LOADER_ERRNO__INTERNAL;
986  }
987  }
988 
989  list_add_tail(&op->list, &priv->ops_list);
990  return 0;
991 }
992 
993 static struct bpf_map_op *
994 bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
995 {
996  struct bpf_map_op *op;
997  int err;
998 
999  op = bpf_map_op__new(term);
1000  if (IS_ERR(op))
1001  return op;
1002 
1003  err = bpf_map__add_op(map, op);
1004  if (err) {
1005  bpf_map_op__delete(op);
1006  return ERR_PTR(err);
1007  }
1008  return op;
1009 }
1010 
1011 static int
1012 __bpf_map__config_value(struct bpf_map *map,
1013  struct parse_events_term *term)
1014 {
1015  struct bpf_map_op *op;
1016  const char *map_name = bpf_map__name(map);
1017  const struct bpf_map_def *def = bpf_map__def(map);
1018 
1019  if (IS_ERR(def)) {
1020  pr_debug("Unable to get map definition from '%s'\n",
1021  map_name);
1022  return -BPF_LOADER_ERRNO__INTERNAL;
1023  }
1024 
1025  if (def->type != BPF_MAP_TYPE_ARRAY) {
1026  pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
1027  map_name);
1028  return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1029  }
1030  if (def->key_size < sizeof(unsigned int)) {
1031  pr_debug("Map %s has incorrect key size\n", map_name);
1032  return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
1033  }
1034  switch (def->value_size) {
1035  case 1:
1036  case 2:
1037  case 4:
1038  case 8:
1039  break;
1040  default:
1041  pr_debug("Map %s has incorrect value size\n", map_name);
1042  return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1043  }
1044 
1045  op = bpf_map__add_newop(map, term);
1046  if (IS_ERR(op))
1047  return PTR_ERR(op);
1048  op->op_type = BPF_MAP_OP_SET_VALUE;
1049  op->v.value = term->val.num;
1050  return 0;
1051 }
1052 
1053 static int
1054 bpf_map__config_value(struct bpf_map *map,
1055  struct parse_events_term *term,
1056  struct perf_evlist *evlist __maybe_unused)
1057 {
1058  if (!term->err_val) {
1059  pr_debug("Config value not set\n");
1060  return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1061  }
1062 
1063  if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
1064  pr_debug("ERROR: wrong value type for 'value'\n");
1065  return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1066  }
1067 
1068  return __bpf_map__config_value(map, term);
1069 }
1070 
1071 static int
1072 __bpf_map__config_event(struct bpf_map *map,
1073  struct parse_events_term *term,
1074  struct perf_evlist *evlist)
1075 {
1076  struct perf_evsel *evsel;
1077  const struct bpf_map_def *def;
1078  struct bpf_map_op *op;
1079  const char *map_name = bpf_map__name(map);
1080 
1081  evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
1082  if (!evsel) {
1083  pr_debug("Event (for '%s') '%s' doesn't exist\n",
1084  map_name, term->val.str);
1085  return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
1086  }
1087 
1088  def = bpf_map__def(map);
1089  if (IS_ERR(def)) {
1090  pr_debug("Unable to get map definition from '%s'\n",
1091  map_name);
1092  return PTR_ERR(def);
1093  }
1094 
1095  /*
1096  * No need to check key_size and value_size:
1097  * kernel has already checked them.
1098  */
1099  if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
1100  pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
1101  map_name);
1102  return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1103  }
1104 
1105  op = bpf_map__add_newop(map, term);
1106  if (IS_ERR(op))
1107  return PTR_ERR(op);
1108  op->op_type = BPF_MAP_OP_SET_EVSEL;
1109  op->v.evsel = evsel;
1110  return 0;
1111 }
1112 
1113 static int
1114 bpf_map__config_event(struct bpf_map *map,
1115  struct parse_events_term *term,
1116  struct perf_evlist *evlist)
1117 {
1118  if (!term->err_val) {
1119  pr_debug("Config value not set\n");
1120  return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1121  }
1122 
1123  if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
1124  pr_debug("ERROR: wrong value type for 'event'\n");
1125  return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1126  }
1127 
1128  return __bpf_map__config_event(map, term, evlist);
1129 }
1130 
1131 struct bpf_obj_config__map_func {
1132  const char *config_opt;
1133  int (*config_func)(struct bpf_map *, struct parse_events_term *,
1134  struct perf_evlist *);
1135 };
1136 
1138  {"value", bpf_map__config_value},
1139  {"event", bpf_map__config_event},
1140 };
1141 
1142 static int
1143 config_map_indices_range_check(struct parse_events_term *term,
1144  struct bpf_map *map,
1145  const char *map_name)
1146 {
1147  struct parse_events_array *array = &term->array;
1148  const struct bpf_map_def *def;
1149  unsigned int i;
1150 
1151  if (!array->nr_ranges)
1152  return 0;
1153  if (!array->ranges) {
1154  pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
1155  map_name, (int)array->nr_ranges);
1156  return -BPF_LOADER_ERRNO__INTERNAL;
1157  }
1158 
1159  def = bpf_map__def(map);
1160  if (IS_ERR(def)) {
1161  pr_debug("ERROR: Unable to get map definition from '%s'\n",
1162  map_name);
1163  return -BPF_LOADER_ERRNO__INTERNAL;
1164  }
1165 
1166  for (i = 0; i < array->nr_ranges; i++) {
1167  unsigned int start = array->ranges[i].start;
1168  size_t length = array->ranges[i].length;
1169  unsigned int idx = start + length - 1;
1170 
1171  if (idx >= def->max_entries) {
1172  pr_debug("ERROR: index %d too large\n", idx);
1173  return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
1174  }
1175  }
1176  return 0;
1177 }
1178 
1179 static int
1180 bpf__obj_config_map(struct bpf_object *obj,
1181  struct parse_events_term *term,
1182  struct perf_evlist *evlist,
1183  int *key_scan_pos)
1184 {
1185  /* key is "map:<mapname>.<config opt>" */
1186  char *map_name = strdup(term->config + sizeof("map:") - 1);
1187  struct bpf_map *map;
1188  int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1189  char *map_opt;
1190  size_t i;
1191 
1192  if (!map_name)
1193  return -ENOMEM;
1194 
1195  map_opt = strchr(map_name, '.');
1196  if (!map_opt) {
1197  pr_debug("ERROR: Invalid map config: %s\n", map_name);
1198  goto out;
1199  }
1200 
1201  *map_opt++ = '\0';
1202  if (*map_opt == '\0') {
1203  pr_debug("ERROR: Invalid map option: %s\n", term->config);
1204  goto out;
1205  }
1206 
1207  map = bpf_object__find_map_by_name(obj, map_name);
1208  if (!map) {
1209  pr_debug("ERROR: Map %s doesn't exist\n", map_name);
1210  err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
1211  goto out;
1212  }
1213 
1214  *key_scan_pos += strlen(map_opt);
1215  err = config_map_indices_range_check(term, map, map_name);
1216  if (err)
1217  goto out;
1218  *key_scan_pos -= strlen(map_opt);
1219 
1220  for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
1221  struct bpf_obj_config__map_func *func =
1222  &bpf_obj_config__map_funcs[i];
1223 
1224  if (strcmp(map_opt, func->config_opt) == 0) {
1225  err = func->config_func(map, term, evlist);
1226  goto out;
1227  }
1228  }
1229 
1230  pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
1231  err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
1232 out:
1233  free(map_name);
1234  if (!err)
1235  *key_scan_pos += strlen(map_opt);
1236  return err;
1237 }
1238 
1239 int bpf__config_obj(struct bpf_object *obj,
1240  struct parse_events_term *term,
1241  struct perf_evlist *evlist,
1242  int *error_pos)
1243 {
1244  int key_scan_pos = 0;
1245  int err;
1246 
1247  if (!obj || !term || !term->config)
1248  return -EINVAL;
1249 
1250  if (strstarts(term->config, "map:")) {
1251  key_scan_pos = sizeof("map:") - 1;
1252  err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
1253  goto out;
1254  }
1255  err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1256 out:
1257  if (error_pos)
1258  *error_pos = key_scan_pos;
1259  return err;
1260 
1261 }
1262 
1263 typedef int (*map_config_func_t)(const char *name, int map_fd,
1264  const struct bpf_map_def *pdef,
1265  struct bpf_map_op *op,
1266  void *pkey, void *arg);
1267 
1268 static int
1269 foreach_key_array_all(map_config_func_t func,
1270  void *arg, const char *name,
1271  int map_fd, const struct bpf_map_def *pdef,
1272  struct bpf_map_op *op)
1273 {
1274  unsigned int i;
1275  int err;
1276 
1277  for (i = 0; i < pdef->max_entries; i++) {
1278  err = func(name, map_fd, pdef, op, &i, arg);
1279  if (err) {
1280  pr_debug("ERROR: failed to insert value to %s[%u]\n",
1281  name, i);
1282  return err;
1283  }
1284  }
1285  return 0;
1286 }
1287 
1288 static int
1289 foreach_key_array_ranges(map_config_func_t func, void *arg,
1290  const char *name, int map_fd,
1291  const struct bpf_map_def *pdef,
1292  struct bpf_map_op *op)
1293 {
1294  unsigned int i, j;
1295  int err;
1296 
1297  for (i = 0; i < op->k.array.nr_ranges; i++) {
1298  unsigned int start = op->k.array.ranges[i].start;
1299  size_t length = op->k.array.ranges[i].length;
1300 
1301  for (j = 0; j < length; j++) {
1302  unsigned int idx = start + j;
1303 
1304  err = func(name, map_fd, pdef, op, &idx, arg);
1305  if (err) {
1306  pr_debug("ERROR: failed to insert value to %s[%u]\n",
1307  name, idx);
1308  return err;
1309  }
1310  }
1311  }
1312  return 0;
1313 }
1314 
1315 static int
1316 bpf_map_config_foreach_key(struct bpf_map *map,
1317  map_config_func_t func,
1318  void *arg)
1319 {
1320  int err, map_fd;
1321  struct bpf_map_op *op;
1322  const struct bpf_map_def *def;
1323  const char *name = bpf_map__name(map);
1324  struct bpf_map_priv *priv = bpf_map__priv(map);
1325 
1326  if (IS_ERR(priv)) {
1327  pr_debug("ERROR: failed to get private from map %s\n", name);
1328  return -BPF_LOADER_ERRNO__INTERNAL;
1329  }
1330  if (!priv || list_empty(&priv->ops_list)) {
1331  pr_debug("INFO: nothing to config for map %s\n", name);
1332  return 0;
1333  }
1334 
1335  def = bpf_map__def(map);
1336  if (IS_ERR(def)) {
1337  pr_debug("ERROR: failed to get definition from map %s\n", name);
1338  return -BPF_LOADER_ERRNO__INTERNAL;
1339  }
1340  map_fd = bpf_map__fd(map);
1341  if (map_fd < 0) {
1342  pr_debug("ERROR: failed to get fd from map %s\n", name);
1343  return map_fd;
1344  }
1345 
1346  list_for_each_entry(op, &priv->ops_list, list) {
1347  switch (def->type) {
1348  case BPF_MAP_TYPE_ARRAY:
1349  case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1350  switch (op->key_type) {
1351  case BPF_MAP_KEY_ALL:
1352  err = foreach_key_array_all(func, arg, name,
1353  map_fd, def, op);
1354  break;
1355  case BPF_MAP_KEY_RANGES:
1356  err = foreach_key_array_ranges(func, arg, name,
1357  map_fd, def,
1358  op);
1359  break;
1360  default:
1361  pr_debug("ERROR: keytype for map '%s' invalid\n",
1362  name);
1363  return -BPF_LOADER_ERRNO__INTERNAL;
1364  }
1365  if (err)
1366  return err;
1367  break;
1368  default:
1369  pr_debug("ERROR: type of '%s' incorrect\n", name);
1370  return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1371  }
1372  }
1373 
1374  return 0;
1375 }
1376 
1377 static int
1378 apply_config_value_for_key(int map_fd, void *pkey,
1379  size_t val_size, u64 val)
1380 {
1381  int err = 0;
1382 
1383  switch (val_size) {
1384  case 1: {
1385  u8 _val = (u8)(val);
1386  err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1387  break;
1388  }
1389  case 2: {
1390  u16 _val = (u16)(val);
1391  err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1392  break;
1393  }
1394  case 4: {
1395  u32 _val = (u32)(val);
1396  err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1397  break;
1398  }
1399  case 8: {
1400  err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
1401  break;
1402  }
1403  default:
1404  pr_debug("ERROR: invalid value size\n");
1405  return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1406  }
1407  if (err && errno)
1408  err = -errno;
1409  return err;
1410 }
1411 
1412 static int
1413 apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
1414  struct perf_evsel *evsel)
1415 {
1416  struct xyarray *xy = evsel->fd;
1417  struct perf_event_attr *attr;
1418  unsigned int key, events;
1419  bool check_pass = false;
1420  int *evt_fd;
1421  int err;
1422 
1423  if (!xy) {
1424  pr_debug("ERROR: evsel not ready for map %s\n", name);
1425  return -BPF_LOADER_ERRNO__INTERNAL;
1426  }
1427 
1428  if (xy->row_size / xy->entry_size != 1) {
1429  pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
1430  name);
1431  return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
1432  }
1433 
1434  attr = &evsel->attr;
1435  if (attr->inherit) {
1436  pr_debug("ERROR: Can't put inherit event into map %s\n", name);
1437  return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
1438  }
1439 
1440  if (perf_evsel__is_bpf_output(evsel))
1441  check_pass = true;
1442  if (attr->type == PERF_TYPE_RAW)
1443  check_pass = true;
1444  if (attr->type == PERF_TYPE_HARDWARE)
1445  check_pass = true;
1446  if (!check_pass) {
1447  pr_debug("ERROR: Event type is wrong for map %s\n", name);
1448  return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
1449  }
1450 
1451  events = xy->entries / (xy->row_size / xy->entry_size);
1452  key = *((unsigned int *)pkey);
1453  if (key >= events) {
1454  pr_debug("ERROR: there is no event %d for map %s\n",
1455  key, name);
1456  return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
1457  }
1458  evt_fd = xyarray__entry(xy, key, 0);
1459  err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
1460  if (err && errno)
1461  err = -errno;
1462  return err;
1463 }
1464 
1465 static int
1466 apply_obj_config_map_for_key(const char *name, int map_fd,
1467  const struct bpf_map_def *pdef,
1468  struct bpf_map_op *op,
1469  void *pkey, void *arg __maybe_unused)
1470 {
1471  int err;
1472 
1473  switch (op->op_type) {
1474  case BPF_MAP_OP_SET_VALUE:
1475  err = apply_config_value_for_key(map_fd, pkey,
1476  pdef->value_size,
1477  op->v.value);
1478  break;
1479  case BPF_MAP_OP_SET_EVSEL:
1480  err = apply_config_evsel_for_key(name, map_fd, pkey,
1481  op->v.evsel);
1482  break;
1483  default:
1484  pr_debug("ERROR: unknown value type for '%s'\n", name);
1485  err = -BPF_LOADER_ERRNO__INTERNAL;
1486  }
1487  return err;
1488 }
1489 
1490 static int
1491 apply_obj_config_map(struct bpf_map *map)
1492 {
1493  return bpf_map_config_foreach_key(map,
1494  apply_obj_config_map_for_key,
1495  NULL);
1496 }
1497 
1498 static int
1499 apply_obj_config_object(struct bpf_object *obj)
1500 {
1501  struct bpf_map *map;
1502  int err;
1503 
1504  bpf_map__for_each(map, obj) {
1505  err = apply_obj_config_map(map);
1506  if (err)
1507  return err;
1508  }
1509  return 0;
1510 }
1511 
1512 int bpf__apply_obj_config(void)
1513 {
1514  struct bpf_object *obj, *tmp;
1515  int err;
1516 
1517  bpf_object__for_each_safe(obj, tmp) {
1518  err = apply_obj_config_object(obj);
1519  if (err)
1520  return err;
1521  }
1522 
1523  return 0;
1524 }
1525 
1526 #define bpf__for_each_map(pos, obj, objtmp) \
1527  bpf_object__for_each_safe(obj, objtmp) \
1528  bpf_map__for_each(pos, obj)
1529 
1530 #define bpf__for_each_stdout_map(pos, obj, objtmp) \
1531  bpf__for_each_map(pos, obj, objtmp) \
1532  if (bpf_map__name(pos) && \
1533  (strcmp("__bpf_stdout__", \
1534  bpf_map__name(pos)) == 0))
1535 
1536 int bpf__setup_stdout(struct perf_evlist *evlist)
1537 {
1538  struct bpf_map_priv *tmpl_priv = NULL;
1539  struct bpf_object *obj, *tmp;
1540  struct perf_evsel *evsel = NULL;
1541  struct bpf_map *map;
1542  int err;
1543  bool need_init = false;
1544 
1545  bpf__for_each_stdout_map(map, obj, tmp) {
1546  struct bpf_map_priv *priv = bpf_map__priv(map);
1547 
1548  if (IS_ERR(priv))
1549  return -BPF_LOADER_ERRNO__INTERNAL;
1550 
1551  /*
1552  * No need to check map type: type should have been
1553  * verified by kernel.
1554  */
1555  if (!need_init && !priv)
1556  need_init = !priv;
1557  if (!tmpl_priv && priv)
1558  tmpl_priv = priv;
1559  }
1560 
1561  if (!need_init)
1562  return 0;
1563 
1564  if (!tmpl_priv) {
1565  err = parse_events(evlist, "bpf-output/no-inherit=1,name=__bpf_stdout__/",
1566  NULL);
1567  if (err) {
1568  pr_debug("ERROR: failed to create bpf-output event\n");
1569  return -err;
1570  }
1571 
1572  evsel = perf_evlist__last(evlist);
1573  }
1574 
1575  bpf__for_each_stdout_map(map, obj, tmp) {
1576  struct bpf_map_priv *priv = bpf_map__priv(map);
1577 
1578  if (IS_ERR(priv))
1579  return -BPF_LOADER_ERRNO__INTERNAL;
1580  if (priv)
1581  continue;
1582 
1583  if (tmpl_priv) {
1584  priv = bpf_map_priv__clone(tmpl_priv);
1585  if (!priv)
1586  return -ENOMEM;
1587 
1588  err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
1589  if (err) {
1590  bpf_map_priv__clear(map, priv);
1591  return err;
1592  }
1593  } else if (evsel) {
1594  struct bpf_map_op *op;
1595 
1596  op = bpf_map__add_newop(map, NULL);
1597  if (IS_ERR(op))
1598  return PTR_ERR(op);
1599  op->op_type = BPF_MAP_OP_SET_EVSEL;
1600  op->v.evsel = evsel;
1601  }
1602  }
1603 
1604  return 0;
1605 }
1606 
1607 #define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START)
1608 #define ERRCODE_OFFSET(c) ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
1609 #define NR_ERRNO (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
1610 
1611 static const char *bpf_loader_strerror_table[NR_ERRNO] = {
1612  [ERRCODE_OFFSET(CONFIG)] = "Invalid config string",
1613  [ERRCODE_OFFSET(GROUP)] = "Invalid group name",
1614  [ERRCODE_OFFSET(EVENTNAME)] = "No event name found in config string",
1615  [ERRCODE_OFFSET(INTERNAL)] = "BPF loader internal error",
1616  [ERRCODE_OFFSET(COMPILE)] = "Error when compiling BPF scriptlet",
1617  [ERRCODE_OFFSET(PROGCONF_TERM)] = "Invalid program config term in config string",
1618  [ERRCODE_OFFSET(PROLOGUE)] = "Failed to generate prologue",
1619  [ERRCODE_OFFSET(PROLOGUE2BIG)] = "Prologue too big for program",
1620  [ERRCODE_OFFSET(PROLOGUEOOB)] = "Offset out of bound for prologue",
1621  [ERRCODE_OFFSET(OBJCONF_OPT)] = "Invalid object config option",
1622  [ERRCODE_OFFSET(OBJCONF_CONF)] = "Config value not set (missing '=')",
1623  [ERRCODE_OFFSET(OBJCONF_MAP_OPT)] = "Invalid object map config option",
1624  [ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)] = "Target map doesn't exist",
1625  [ERRCODE_OFFSET(OBJCONF_MAP_VALUE)] = "Incorrect value type for map",
1626  [ERRCODE_OFFSET(OBJCONF_MAP_TYPE)] = "Incorrect map type",
1627  [ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)] = "Incorrect map key size",
1628  [ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
1629  [ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)] = "Event not found for map setting",
1630  [ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)] = "Invalid map size for event setting",
1631  [ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)] = "Event dimension too large",
1632  [ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)] = "Doesn't support inherit event",
1633  [ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)] = "Wrong event type for map",
1634  [ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)] = "Index too large",
1635 };
1636 
1637 static int
1638 bpf_loader_strerror(int err, char *buf, size_t size)
1639 {
1640  char sbuf[STRERR_BUFSIZE];
1641  const char *msg;
1642 
1643  if (!buf || !size)
1644  return -1;
1645 
1646  err = err > 0 ? err : -err;
1647 
1648  if (err >= __LIBBPF_ERRNO__START)
1649  return libbpf_strerror(err, buf, size);
1650 
1651  if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
1652  msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
1653  snprintf(buf, size, "%s", msg);
1654  buf[size - 1] = '\0';
1655  return 0;
1656  }
1657 
1658  if (err >= __BPF_LOADER_ERRNO__END)
1659  snprintf(buf, size, "Unknown bpf loader error %d", err);
1660  else
1661  snprintf(buf, size, "%s",
1662  str_error_r(err, sbuf, sizeof(sbuf)));
1663 
1664  buf[size - 1] = '\0';
1665  return -1;
1666 }
1667 
1668 #define bpf__strerror_head(err, buf, size) \
1669  char sbuf[STRERR_BUFSIZE], *emsg;\
1670  if (!size)\
1671  return 0;\
1672  if (err < 0)\
1673  err = -err;\
1674  bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
1675  emsg = sbuf;\
1676  switch (err) {\
1677  default:\
1678  scnprintf(buf, size, "%s", emsg);\
1679  break;
1680 
1681 #define bpf__strerror_entry(val, fmt...)\
1682  case val: {\
1683  scnprintf(buf, size, fmt);\
1684  break;\
1685  }
1686 
1687 #define bpf__strerror_end(buf, size)\
1688  }\
1689  buf[size - 1] = '\0';
1690 
1691 int bpf__strerror_prepare_load(const char *filename, bool source,
1692  int err, char *buf, size_t size)
1693 {
1694  size_t n;
1695  int ret;
1696 
1697  n = snprintf(buf, size, "Failed to load %s%s: ",
1698  filename, source ? " from source" : "");
1699  if (n >= size) {
1700  buf[size - 1] = '\0';
1701  return 0;
1702  }
1703  buf += n;
1704  size -= n;
1705 
1706  ret = bpf_loader_strerror(err, buf, size);
1707  buf[size - 1] = '\0';
1708  return ret;
1709 }
1710 
1711 int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
1712  int err, char *buf, size_t size)
1713 {
1714  bpf__strerror_head(err, buf, size);
1715  case BPF_LOADER_ERRNO__PROGCONF_TERM: {
1716  scnprintf(buf, size, "%s (add -v to see detail)", emsg);
1717  break;
1718  }
1719  bpf__strerror_entry(EEXIST, "Probe point exist. Try 'perf probe -d \"*\"' and set 'force=yes'");
1720  bpf__strerror_entry(EACCES, "You need to be root");
1721  bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
1722  bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
1723  bpf__strerror_end(buf, size);
1724  return 0;
1725 }
1726 
1727 int bpf__strerror_load(struct bpf_object *obj,
1728  int err, char *buf, size_t size)
1729 {
1730  bpf__strerror_head(err, buf, size);
1731  case LIBBPF_ERRNO__KVER: {
1732  unsigned int obj_kver = bpf_object__kversion(obj);
1733  unsigned int real_kver;
1734 
1735  if (fetch_kernel_version(&real_kver, NULL, 0)) {
1736  scnprintf(buf, size, "Unable to fetch kernel version");
1737  break;
1738  }
1739 
1740  if (obj_kver != real_kver) {
1741  scnprintf(buf, size,
1742  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
1743  KVER_PARAM(obj_kver),
1744  KVER_PARAM(real_kver));
1745  break;
1746  }
1747 
1748  scnprintf(buf, size, "Failed to load program for unknown reason");
1749  break;
1750  }
1751  bpf__strerror_end(buf, size);
1752  return 0;
1753 }
1754 
1755 int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
1756  struct parse_events_term *term __maybe_unused,
1757  struct perf_evlist *evlist __maybe_unused,
1758  int *error_pos __maybe_unused, int err,
1759  char *buf, size_t size)
1760 {
1761  bpf__strerror_head(err, buf, size);
1763  "Can't use this config term with this map type");
1764  bpf__strerror_end(buf, size);
1765  return 0;
1766 }
1767 
1768 int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
1769 {
1770  bpf__strerror_head(err, buf, size);
1772  "Cannot set event to BPF map in multi-thread tracing");
1774  "%s (Hint: use -i to turn off inherit)", emsg);
1776  "Can only put raw, hardware and BPF output event into a BPF map");
1777  bpf__strerror_end(buf, size);
1778  return 0;
1779 }
1780 
1781 int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
1782  int err, char *buf, size_t size)
1783 {
1784  bpf__strerror_head(err, buf, size);
1785  bpf__strerror_end(buf, size);
1786  return 0;
1787 }
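
Usage note (not part of bpf-loader.c): the bpf__* entry points above are normally driven by perf's record and event-parsing code. Below is a minimal, hedged sketch of the typical call order, for orientation only. Error handling is simplified, the map-configuration entry points (bpf__config_obj(), bpf__apply_obj_config(), bpf__setup_stdout()) are omitted, and print_event() / load_bpf_file() are hypothetical helpers, not functions from this file.

/* Hypothetical callback: prints each probe/tracepoint event exported
 * by the object together with its BPF program file descriptor. */
static int print_event(const char *group, const char *event, int fd, void *arg __maybe_unused)
{
	pr_debug("bpf: %s:%s -> fd %d\n", group, event, fd);
	return 0;
}

/* Hypothetical driver showing the call order used by perf. */
static int load_bpf_file(const char *filename)
{
	struct bpf_object *obj;
	int err;

	/* Open a pre-compiled .o; pass source=true for a .c scriptlet. */
	obj = bpf__prepare_load(filename, false);
	if (IS_ERR_OR_NULL(obj))
		return obj ? PTR_ERR(obj) : -EINVAL;

	err = bpf__probe(obj);			/* create kprobes/uprobes/tracepoints */
	if (!err)
		err = bpf__load(obj);		/* load programs into the kernel */
	if (!err)
		err = bpf__foreach_event(obj, print_event, NULL);
	if (err)
		bpf__unprobe(obj);		/* roll back probe points on failure */
	return err;
}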