Linux Perf
machine.c
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"

#include "sane_ctype.h"
#include <symbol/kallsyms.h>
#include <linux/mman.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}
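
/*
 * Note: machine__threads() (in machine.h) picks one of the buckets
 * initialized above, roughly &machine->threads[(unsigned int)tid %
 * THREADS__TABLE_SIZE], so each bucket keeps its own rb tree, lock,
 * counter and dead list.
 */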

static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

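/*
 * Typical lifetime, as a rough usage sketch (not part of this file):
 *
 *	struct machine *m = machine__new_host();
 *	if (m) {
 *		...feed events to m...
 *		machine__delete(m);
 *	}
 */
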
struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		down_write(&threads->lock);
		nd = rb_first(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}

void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}

	return;
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads->entries);

		/*
		 * We have to initialize map_groups separately after the rb
		 * tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_map_groups to find the thread leader and that
		 * would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads->last_match = th;
		++threads->nr;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}

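/*
 * A rough caller-side sketch (ours, not from the original file): every
 * successful lookup returns a reference that the caller must drop.
 *
 *	struct thread *th = machine__findnew_thread(machine, pid, tid);
 *	if (th) {
 *		...use th...
 *		thread__put(th);
 *	}
 */
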
struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}

struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		ret = fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}
	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name, u64 *start)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;
	return 0;
}

static int machine__create_extra_kernel_map(struct machine *machine,
					    struct dso *kernel,
					    struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;

	map = map__new2(xm->start, kernel);
	if (!map)
		return -1;

	map->end = xm->end;
	map->pgoff = xm->pgoff;

	kmap = map__kmap(map);

	kmap->kmaps = &machine->kmaps;
	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);

	map_groups__insert(&machine->kmaps, map);

	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
		  kmap->name, map->start, map->end);

	map__put(map);

	return 0;
}

static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}

/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000

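/*
 * Worked example (derived from the constants above, not from kallsyms):
 * the trampoline for CPU n sits at
 *	X86_64_CPU_ENTRY_AREA_PER_CPU + n * X86_64_CPU_ENTRY_AREA_SIZE
 *		+ X86_64_ENTRY_TRAMPOLINE
 * so CPU 0 maps at 0xfffffe0000006000 and CPU 1 at 0xfffffe0000032000.
 */
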
/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps;
	int nr_cpus_avail, cpu;
	bool found = false;
	struct map *map;
	u64 pgoff;

	/*
	 * In the vmlinux case, pgoff is a virtual address which must now be
	 * mapped to a vmlinux offset.
	 */
	for (map = maps__first(maps); map; map = map__next(map)) {
		struct kmap *kmap = __map__kmap(map);
		struct map *dest_map;

		if (!kmap || !is_entry_trampoline(kmap->name))
			continue;

		dest_map = map_groups__find(kmaps, map->pgoff);
		if (dest_map != map)
			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
		found = true;
	}
	if (found || machine->trampolines_mapped)
		return 0;

	pgoff = find_entry_trampoline(kernel);
	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end   = va + page_size,
			.pgoff = pgoff,
		};

		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	machine->trampolines_mapped = nr_cpus_avail;

	return 0;
}

int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
					     struct dso *kernel __maybe_unused)
{
	return 0;
}

static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	struct kmap *kmap;
	struct map *map;

	/* In case of renewal of the kernel map, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	machine->vmlinux_map = map__new2(0, kernel);
	if (machine->vmlinux_map == NULL)
		return -1;

	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
	map = machine__kernel_map(machine);
	kmap = map__kmap(map);
	if (!kmap)
		return -1;

	kmap->kmaps = &machine->kmaps;
	map_groups__insert(&machine->kmaps, map);

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	struct kmap *kmap;
	struct map *map = machine__kernel_map(machine);

	if (map == NULL)
		return;

	kmap = map__kmap(map);
	map_groups__remove(&machine->kmaps, map);
	if (kmap && kmap->ref_reloc_sym) {
		zfree((char **)&kmap->ref_reloc_sym->name);
		zfree(&kmap->ref_reloc_sym);
	}

	map__zput(machine->vmlinux_map);
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		map_groups__fixup_end(&machine->kmaps);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso);

	return ret;
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	char *long_name;
	struct map *map = map_groups__find_by_name(mg, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

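/*
 * The symtab_type++ above leans on the enum ordering in dso.h, where, as
 * far as we can tell, each *_KMODULE value is immediately followed by its
 * compressed *_KMODULE_COMP counterpart, e.g.:
 *
 *	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
 *	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
 *	DSO_BINARY_TYPE__GUEST_KMODULE,
 *	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
 */
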
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	machine->vmlinux_map->start = start;
	machine->vmlinux_map->end = end;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
		machine->vmlinux_map->end = ~0ULL;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 addr = 0;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &addr)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, addr)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/* we have a real start address now, so re-order the kmaps */
		map = machine__kernel_map(machine);

		map__get(map);
		map_groups__remove(&machine->kmaps, map);

		/* assume it's the last in the kmaps */
		machine__set_kernel_mmap(machine, addr, ~0ULL);

		map_groups__insert(&machine->kmaps, map);
		map__put(map);
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	/* update end address of the kernel map using adjacent module address */
	map = map__next(machine__kernel_map(machine));
	if (map)
		machine__set_kernel_mmap(machine, addr, map->start);
out_put:
	dso__put(kernel);
	return ret;
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
					     union perf_event *event)
{
	return machine__is(machine, "x86_64") &&
	       is_entry_trampoline(event->mmap.filename);
}

static int machine__process_extra_kernel_map(struct machine *machine,
					     union perf_event *event)
{
	struct map *kernel_map = machine__kernel_map(machine);
	struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
	struct extra_kernel_map xm = {
		.start = event->mmap.start,
		.end   = event->mmap.start + event->mmap.len,
		.pgoff = event->mmap.pgoff,
	};

	if (kernel == NULL)
		return -1;

	strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);

	return machine__create_extra_kernel_map(machine, kernel, &xm);
}

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * the correct cpumode to is_kernel_module, we should
			 * record the cpumode when we add this dso to the
			 * linked list.
			 *
			 * However we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto kernel_dsos
			 * list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap(machine, event->mmap.start,
					 event->mmap.start + event->mmap.len);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
							symbol_name,
							event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	} else if (perf_event__is_extra_kernel_mmap(machine, event)) {
		return machine__process_extra_kernel_map(machine, event);
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	u32 prot = 0;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
		prot = PROT_EXEC;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       0, 0, 0, 0, prot, 0,
		       event->mmap.filename,
		       thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads->last_match = NULL;

	BUG_ON(refcount_read(&th->refcnt) == 0);
	if (lock)
		down_write(&threads->lock);
	rb_erase_init(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);
	if (lock)
		up_write(&threads->lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = 0;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_symbol(thread, m, addr, &al);

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = phys_addr;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
{
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      sym, show_sym, show_addr, ip);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}

struct iterations {
	int nr_loop_iter;
	u64 cycles;
};

static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_symbol(thread, *cpumode, ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	srcline = callchain_srcline(al.map, al.sym, al.addr);
	return callchain_cursor_append(cursor, ip, al.map, al.sym,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter = nr;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
							l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}

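/*
 * Illustration (ours, not from the original source): with branch "from"
 * addresses A B C A B C D, the second A collides with the first in chash
 * and the A B C window repeats, so one A B C iteration is squashed, its
 * count/cycles recorded via save_iterations(), leaving A B C D.
 */
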
/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success, got LBR callchain information
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip, branch_from = 0;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		for (j = 0; j < mix_chain_nr; j++) {
			int err;
			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			} else {
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				}
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, NULL,
					       branch_from);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}

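/*
 * Layout sketch (ours): in ORDER_CALLEE mode the cursor above is fed, in
 * order, the kernel chain entries ips[0..i] (ips[i] being the
 * PERF_CONTEXT_USER marker), then the LBR top-of-stack "to" address, then
 * the lbr_nr "from" addresses, i.e. (i + 1) + 1 + lbr_nr == mix_chain_nr
 * entries in total.
 */
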
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;

	if (chain)
		chain_nr = chain->nr;

	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}
2237 
2238 static int append_inlines(struct callchain_cursor *cursor,
2239  struct map *map, struct symbol *sym, u64 ip)
2240 {
2241  struct inline_node *inline_node;
2242  struct inline_list *ilist;
2243  u64 addr;
2244  int ret = 1;
2245 
2246  if (!symbol_conf.inline_name || !map || !sym)
2247  return ret;
2248 
2249  addr = map__rip_2objdump(map, ip);
2250 
2251  inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
2252  if (!inline_node) {
2253  inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
2254  if (!inline_node)
2255  return ret;
2256  inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
2257  }
2258 
2259  list_for_each_entry(ilist, &inline_node->val, list) {
2260  ret = callchain_cursor_append(cursor, ip, map,
2261  ilist->symbol, false,
2262  NULL, 0, 0, 0, ilist->srcline);
2263 
2264  if (ret != 0)
2265  return ret;
2266  }
2267 
2268  return ret;
2269 }
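/*
 * Illustrative effect: if the sampled ip sits in bar(), which the
 * compiler inlined into foo(), the inline_node built above yields one
 * cursor entry per inlining level, so each inlined function appears as
 * its own frame instead of both collapsing into a single one.
 */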
2270 
2271 static int unwind_entry(struct unwind_entry *entry, void *arg)
2272 {
2273  struct callchain_cursor *cursor = arg;
2274  const char *srcline = NULL;
2275 
2276  if (symbol_conf.hide_unresolved && entry->sym == NULL)
2277  return 0;
2278 
2279  if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
2280  return 0;
2281 
2282  srcline = callchain_srcline(entry->map, entry->sym, entry->ip);
2283  return callchain_cursor_append(cursor, entry->ip,
2284  entry->map, entry->sym,
2285  false, NULL, 0, 0, 0, srcline);
2286 }
2287 
2288 static int thread__resolve_callchain_unwind(struct thread *thread,
2289  struct callchain_cursor *cursor,
2290  struct perf_evsel *evsel,
2291  struct perf_sample *sample,
2292  int max_stack)
2293 {
2294  /* Can we do dwarf post unwind? */
2295  if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2296  (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
2297  return 0;
2298 
2299  /* Bail out if nothing was captured. */
2300  if ((!sample->user_regs.regs) ||
2301  (!sample->user_stack.size))
2302  return 0;
2303 
2304  return unwind__get_entries(unwind_entry, cursor,
2305  thread, sample, max_stack);
2306 }
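/*
 * Minimal sketch, assuming the recording side sets up perf_event_attr
 * itself (PERF_REGS_MASK is the arch-specific register mask used by
 * the perf tools):
 *
 *	attr.sample_type |= PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
 *	attr.sample_regs_user = PERF_REGS_MASK;
 *	attr.sample_stack_user = 8192;
 *
 * Without both sample_type bits, or when the sample carries no user
 * registers or an empty user stack, the function above returns 0 and
 * no DWARF unwinding is attempted.
 */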
2307 
2308 int thread__resolve_callchain(struct thread *thread,
2309  struct callchain_cursor *cursor,
2310  struct perf_evsel *evsel,
2311  struct perf_sample *sample,
2312  struct symbol **parent,
2313  struct addr_location *root_al,
2314  int max_stack)
2315 {
2316  int ret = 0;
2317 
2318  callchain_cursor_reset(cursor);
2319 
2320  if (callchain_param.order == ORDER_CALLEE) {
2321  ret = thread__resolve_callchain_sample(thread, cursor,
2322  evsel, sample,
2323  parent, root_al,
2324  max_stack);
2325  if (ret)
2326  return ret;
2327  ret = thread__resolve_callchain_unwind(thread, cursor,
2328  evsel, sample,
2329  max_stack);
2330  } else {
2331  ret = thread__resolve_callchain_unwind(thread, cursor,
2332  evsel, sample,
2333  max_stack);
2334  if (ret)
2335  return ret;
2336  ret = thread__resolve_callchain_sample(thread, cursor,
2337  evsel, sample,
2338  parent, root_al,
2339  max_stack);
2340  }
2341 
2342  return ret;
2343 }
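/*
 * Note: callchain_param.order decides which source is resolved first.
 * ORDER_CALLEE appends the kernel-recorded callchain before the DWARF
 * post-unwind entries; ORDER_CALLER reverses that, so the cursor ends
 * up ordered consistently for either display direction.
 */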
2344 
2345 int machine__for_each_thread(struct machine *machine,
2346  int (*fn)(struct thread *thread, void *p),
2347  void *priv)
2348 {
2349  struct threads *threads;
2350  struct rb_node *nd;
2351  struct thread *thread;
2352  int rc = 0;
2353  int i;
2354 
2355  for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2356  threads = &machine->threads[i];
2357  for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
2358  thread = rb_entry(nd, struct thread, rb_node);
2359  rc = fn(thread, priv);
2360  if (rc != 0)
2361  return rc;
2362  }
2363 
2364  list_for_each_entry(thread, &threads->dead, node) {
2365  rc = fn(thread, priv);
2366  if (rc != 0)
2367  return rc;
2368  }
2369  }
2370  return rc;
2371 }
2372 
2373 int machines__for_each_thread(struct machines *machines,
2374  int (*fn)(struct thread *thread, void *p),
2375  void *priv)
2376 {
2377  struct rb_node *nd;
2378  int rc = 0;
2379 
2380  rc = machine__for_each_thread(&machines->host, fn, priv);
2381  if (rc != 0)
2382  return rc;
2383 
2384  for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
2385  struct machine *machine = rb_entry(nd, struct machine, rb_node);
2386 
2387  rc = machine__for_each_thread(machine, fn, priv);
2388  if (rc != 0)
2389  return rc;
2390  }
2391  return rc;
2392 }
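/*
 * Illustrative usage (callback name hypothetical): count every thread
 * known to a machine, including dead ones still on the list:
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	machine__for_each_thread(machine, count_thread, &nr);
 *
 * A non-zero return from the callback aborts the walk and is returned
 * to the caller unchanged.
 */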
2393 
2394 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
2395  struct target *target, struct thread_map *threads,
2396  perf_event__handler_t process, bool data_mmap,
2397  unsigned int proc_map_timeout,
2398  unsigned int nr_threads_synthesize)
2399 {
2400  if (target__has_task(target))
2401  return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
2402  else if (target__has_cpu(target))
2403  return perf_event__synthesize_threads(tool, process,
2404  machine, data_mmap,
2405  proc_map_timeout,
2406  nr_threads_synthesize);
2407  /* command specified */
2408  return 0;
2409 }
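/*
 * Sketch of the dispatch above (the flag mapping is the usual
 * perf-record behaviour, stated here as an assumption): a task target
 * such as "perf record -p PID" synthesizes events only for the threads
 * in @threads, a cpu/system-wide target such as "perf record -a" walks
 * all of /proc instead, and a plain command target needs no synthesis
 * because its threads are created after recording starts.
 */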
2410 
2411 pid_t machine__get_current_tid(struct machine *machine, int cpu)
2412 {
2413  if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
2414  return -1;
2415 
2416  return machine->current_tid[cpu];
2417 }
2418 
2419 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2420  pid_t tid)
2421 {
2422  struct thread *thread;
2423 
2424  if (cpu < 0)
2425  return -EINVAL;
2426 
2427  if (!machine->current_tid) {
2428  int i;
2429 
2430  machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
2431  if (!machine->current_tid)
2432  return -ENOMEM;
2433  for (i = 0; i < MAX_NR_CPUS; i++)
2434  machine->current_tid[i] = -1;
2435  }
2436 
2437  if (cpu >= MAX_NR_CPUS) {
2438  pr_err("Requested CPU %d too large. ", cpu);
2439  pr_err("Consider raising MAX_NR_CPUS\n");
2440  return -EINVAL;
2441  }
2442 
2443  machine->current_tid[cpu] = tid;
2444 
2445  thread = machine__findnew_thread(machine, pid, tid);
2446  if (!thread)
2447  return -ENOMEM;
2448 
2449  thread->cpu = cpu;
2450  thread__put(thread);
2451 
2452  return 0;
2453 }
2454 
2455 /*
2456  * Compares the raw arch string. N.B. see instead perf_env__arch() if a
2457  * normalized arch is needed.
2458  */
2459 bool machine__is(struct machine *machine, const char *arch)
2460 {
2461  return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
2462 }
2463 
2464 int machine__nr_cpus_avail(struct machine *machine)
2465 {
2466  return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
2467 }
2468 
2469 int machine__get_kernel_start(struct machine *machine)
2470 {
2471  struct map *map = machine__kernel_map(machine);
2472  int err = 0;
2473 
2474  /*
2475  * The only addresses above 2^63 are kernel addresses of a 64-bit
2476  * kernel. Note that addresses are unsigned so that on a 32-bit system
2477  * all addresses including kernel addresses are less than 2^32. In
2478  * that case (32-bit system), if the kernel mapping is unknown, all
2479  * addresses will be assumed to be in user space - see
2480  * machine__kernel_ip().
2481  */
2482  machine->kernel_start = 1ULL << 63;
2483  if (map) {
2484  err = map__load(map);
2485  /*
2486  * On x86_64, PTI entry trampolines are less than the
2487  * start of kernel text, but still above 2^63. So leave
2488  * kernel_start = 1ULL << 63 for x86_64.
2489  */
2490  if (!err && !machine__is(machine, "x86_64"))
2491  machine->kernel_start = map->start;
2492  }
2493  return err;
2494 }
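/*
 * Illustrative consequence for machine__kernel_ip(), which compares
 * addresses against machine->kernel_start: with kernel_start left at
 * 1ULL << 63 on x86_64, both kernel text at 0xffffffff81000000 and a
 * PTI entry trampoline at 0xfffffe0000001000 classify as kernel space,
 * while 0x00007f1234567890 classifies as user space.
 */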
2495 
2496 struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
2497 {
2498  return dsos__findnew(&machine->dsos, filename);
2499 }
2500 
2501 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
2502 {
2503  struct machine *machine = vmachine;
2504  struct map *map;
2505  struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
2506 
2507  if (sym == NULL)
2508  return NULL;
2509 
2510  *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
2511  *addrp = map->unmap_ip(map, sym->start);
2512  return sym->name;
2513 }
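/*
 * Illustrative usage (variable names hypothetical): resolve a raw
 * kernel address in place and report its symbol and module:
 *
 *	unsigned long long addr = ip;
 *	char *mod;
 *	char *name = machine__resolve_kernel_addr(machine, &addr, &mod);
 *
 *	if (name)	(addr now holds the symbol's start address)
 *		printf("%s+%#llx %s\n", name, ip - addr, mod ? mod : "");
 */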