Linux Perf
evlist.c
1 /*
2  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3  *
4  * Parts came from builtin-{top,stat,record}.c, see those files for further
5  * copyright notes.
6  *
7  * Released under the GPL v2. (and only v2, not any later version)
8  */
9 #include "util.h"
10 #include <api/fs/fs.h>
11 #include <errno.h>
12 #include <inttypes.h>
13 #include <poll.h>
14 #include "cpumap.h"
15 #include "thread_map.h"
16 #include "target.h"
17 #include "evlist.h"
18 #include "evsel.h"
19 #include "debug.h"
20 #include "units.h"
21 #include "asm/bug.h"
22 #include <signal.h>
23 #include <unistd.h>
24 
25 #include "parse-events.h"
26 #include <subcmd/parse-options.h>
27 
28 #include <fcntl.h>
29 #include <sys/ioctl.h>
30 #include <sys/mman.h>
31 
32 #include <linux/bitops.h>
33 #include <linux/hash.h>
34 #include <linux/log2.h>
35 #include <linux/err.h>
36 
37 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
38 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
39 
40 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
41  struct thread_map *threads)
42 {
43  int i;
44 
45  for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
46  INIT_HLIST_HEAD(&evlist->heads[i]);
47  INIT_LIST_HEAD(&evlist->entries);
48  perf_evlist__set_maps(evlist, cpus, threads);
49  fdarray__init(&evlist->pollfd, 64);
50  evlist->workload.pid = -1;
51  evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
52 }
53 
54 struct perf_evlist *perf_evlist__new(void)
55 {
56  struct perf_evlist *evlist = zalloc(sizeof(*evlist));
57 
58  if (evlist != NULL)
59  perf_evlist__init(evlist, NULL, NULL);
60 
61  return evlist;
62 }
63 
64 struct perf_evlist *perf_evlist__new_default(void)
65 {
66  struct perf_evlist *evlist = perf_evlist__new();
67 
68  if (evlist && perf_evlist__add_default(evlist)) {
69  perf_evlist__delete(evlist);
70  evlist = NULL;
71  }
72 
73  return evlist;
74 }
75 
76 struct perf_evlist *perf_evlist__new_dummy(void)
77 {
78  struct perf_evlist *evlist = perf_evlist__new();
79 
80  if (evlist && perf_evlist__add_dummy(evlist)) {
81  perf_evlist__delete(evlist);
82  evlist = NULL;
83  }
84 
85  return evlist;
86 }
87 
88 /**
89  * perf_evlist__set_id_pos - set the positions of event ids.
90  * @evlist: selected event list
91  *
92  * Events with compatible sample types all have the same id_pos
93  * and is_pos.  For convenience, put a copy on evlist.
94  */
95 void perf_evlist__set_id_pos(struct perf_evlist *evlist)
96 {
97  struct perf_evsel *first = perf_evlist__first(evlist);
98 
99  evlist->id_pos = first->id_pos;
100  evlist->is_pos = first->is_pos;
101 }
102 
103 static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
104 {
105  struct perf_evsel *evsel;
106 
107  evlist__for_each_entry(evlist, evsel)
108  perf_evsel__calc_id_pos(evsel);
109 
110  perf_evlist__set_id_pos(evlist);
111 }
112 
113 static void perf_evlist__purge(struct perf_evlist *evlist)
114 {
115  struct perf_evsel *pos, *n;
116 
117  evlist__for_each_entry_safe(evlist, n, pos) {
118  list_del_init(&pos->node);
119  pos->evlist = NULL;
120  perf_evsel__delete(pos);
121  }
122 
123  evlist->nr_entries = 0;
124 }
125 
126 void perf_evlist__exit(struct perf_evlist *evlist)
127 {
128  zfree(&evlist->mmap);
129  zfree(&evlist->overwrite_mmap);
130  fdarray__exit(&evlist->pollfd);
131 }
132 
133 void perf_evlist__delete(struct perf_evlist *evlist)
134 {
135  if (evlist == NULL)
136  return;
137 
138  perf_evlist__munmap(evlist);
139  perf_evlist__close(evlist);
140  cpu_map__put(evlist->cpus);
141  thread_map__put(evlist->threads);
142  evlist->cpus = NULL;
143  evlist->threads = NULL;
144  perf_evlist__purge(evlist);
145  perf_evlist__exit(evlist);
146  free(evlist);
147 }
148 
149 static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
150  struct perf_evsel *evsel)
151 {
152  /*
153  * We already have cpus for evsel (via PMU sysfs), so keep
154  * them, unless the user defined a target cpu list.
155  */
156  if (!evsel->own_cpus || evlist->has_user_cpus) {
157  cpu_map__put(evsel->cpus);
158  evsel->cpus = cpu_map__get(evlist->cpus);
159  } else if (evsel->cpus != evsel->own_cpus) {
160  cpu_map__put(evsel->cpus);
161  evsel->cpus = cpu_map__get(evsel->own_cpus);
162  }
163 
164  thread_map__put(evsel->threads);
165  evsel->threads = thread_map__get(evlist->threads);
166 }
167 
168 static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
169 {
170  struct perf_evsel *evsel;
171 
172  evlist__for_each_entry(evlist, evsel)
173  __perf_evlist__propagate_maps(evlist, evsel);
174 }
175 
176 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
177 {
178  entry->evlist = evlist;
179  list_add_tail(&entry->node, &evlist->entries);
180  entry->idx = evlist->nr_entries;
181  entry->tracking = !entry->idx;
182 
183  if (!evlist->nr_entries++)
184  perf_evlist__set_id_pos(evlist);
185 
186  __perf_evlist__propagate_maps(evlist, entry);
187 }
188 
189 void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
190 {
191  evsel->evlist = NULL;
192  list_del_init(&evsel->node);
193  evlist->nr_entries -= 1;
194 }
195 
196 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
197  struct list_head *list)
198 {
199  struct perf_evsel *evsel, *temp;
200 
201  __evlist__for_each_entry_safe(list, temp, evsel) {
202  list_del_init(&evsel->node);
203  perf_evlist__add(evlist, evsel);
204  }
205 }
206 
207 void __perf_evlist__set_leader(struct list_head *list)
208 {
209  struct perf_evsel *evsel, *leader;
210 
211  leader = list_entry(list->next, struct perf_evsel, node);
212  evsel = list_entry(list->prev, struct perf_evsel, node);
213 
214  leader->nr_members = evsel->idx - leader->idx + 1;
215 
216  __evlist__for_each_entry(list, evsel) {
217  evsel->leader = leader;
218  }
219 }
220 
221 void perf_evlist__set_leader(struct perf_evlist *evlist)
222 {
223  if (evlist->nr_entries) {
224  evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
225  __perf_evlist__set_leader(&evlist->entries);
226  }
227 }
228 
229 void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
230 {
231  attr->precise_ip = 3;
232 
233  while (attr->precise_ip != 0) {
234  int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
235  if (fd != -1) {
236  close(fd);
237  break;
238  }
239  --attr->precise_ip;
240  }
241 }
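/*
 * Editor's sketch (not part of the original evlist.c, illustrative only):
 * how the probe above is typically consumed -- ask for the most precise
 * cycles sampling the running kernel/PMU will accept, then open the event
 * for real.  The hypothetical example__ helper and its attr values are the
 * editor's, not perf's.
 */
static inline int example__open_max_precise_cycles(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};

	/* Walks attr.precise_ip down from 3 until sys_perf_event_open() succeeds. */
	perf_event_attr__set_max_precise_ip(&attr);

	return sys_perf_event_open(&attr, 0, -1, -1, 0);
}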
242 
243 int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
244 {
245  struct perf_evsel *evsel = perf_evsel__new_cycles(precise);
246 
247  if (evsel == NULL)
248  return -ENOMEM;
249 
250  perf_evlist__add(evlist, evsel);
251  return 0;
252 }
253 
254 int perf_evlist__add_dummy(struct perf_evlist *evlist)
255 {
256  struct perf_event_attr attr = {
257  .type = PERF_TYPE_SOFTWARE,
258  .config = PERF_COUNT_SW_DUMMY,
259  .size = sizeof(attr), /* to capture ABI version */
260  };
261  struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);
262 
263  if (evsel == NULL)
264  return -ENOMEM;
265 
266  perf_evlist__add(evlist, evsel);
267  return 0;
268 }
269 
270 static int perf_evlist__add_attrs(struct perf_evlist *evlist,
271  struct perf_event_attr *attrs, size_t nr_attrs)
272 {
273  struct perf_evsel *evsel, *n;
274  LIST_HEAD(head);
275  size_t i;
276 
277  for (i = 0; i < nr_attrs; i++) {
278  evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
279  if (evsel == NULL)
280  goto out_delete_partial_list;
281  list_add_tail(&evsel->node, &head);
282  }
283 
284  perf_evlist__splice_list_tail(evlist, &head);
285 
286  return 0;
287 
288 out_delete_partial_list:
289  __evlist__for_each_entry_safe(&head, n, evsel)
290  perf_evsel__delete(evsel);
291  return -1;
292 }
293 
294 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
295  struct perf_event_attr *attrs, size_t nr_attrs)
296 {
297  size_t i;
298 
299  for (i = 0; i < nr_attrs; i++)
300  event_attr_init(attrs + i);
301 
302  return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
303 }
304 
305 struct perf_evsel *
306 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
307 {
308  struct perf_evsel *evsel;
309 
310  evlist__for_each_entry(evlist, evsel) {
311  if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
312  (int)evsel->attr.config == id)
313  return evsel;
314  }
315 
316  return NULL;
317 }
318 
319 struct perf_evsel *
320 perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
321  const char *name)
322 {
323  struct perf_evsel *evsel;
324 
325  evlist__for_each_entry(evlist, evsel) {
326  if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
327  (strcmp(evsel->name, name) == 0))
328  return evsel;
329  }
330 
331  return NULL;
332 }
333 
334 int perf_evlist__add_newtp(struct perf_evlist *evlist,
335  const char *sys, const char *name, void *handler)
336 {
337  struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
338 
339  if (IS_ERR(evsel))
340  return -1;
341 
342  evsel->handler = handler;
343  perf_evlist__add(evlist, evsel);
344  return 0;
345 }
346 
347 static int perf_evlist__nr_threads(struct perf_evlist *evlist,
348  struct perf_evsel *evsel)
349 {
350  if (evsel->system_wide)
351  return 1;
352  else
353  return thread_map__nr(evlist->threads);
354 }
355 
356 void perf_evlist__disable(struct perf_evlist *evlist)
357 {
358  struct perf_evsel *pos;
359 
360  evlist__for_each_entry(evlist, pos) {
361  if (!perf_evsel__is_group_leader(pos) || !pos->fd)
362  continue;
363  perf_evsel__disable(pos);
364  }
365 
366  evlist->enabled = false;
367 }
368 
369 void perf_evlist__enable(struct perf_evlist *evlist)
370 {
371  struct perf_evsel *pos;
372 
373  evlist__for_each_entry(evlist, pos) {
374  if (!perf_evsel__is_group_leader(pos) || !pos->fd)
375  continue;
376  perf_evsel__enable(pos);
377  }
378 
379  evlist->enabled = true;
380 }
381 
382 void perf_evlist__toggle_enable(struct perf_evlist *evlist)
383 {
384  (evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
385 }
386 
387 static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
388  struct perf_evsel *evsel, int cpu)
389 {
390  int thread;
391  int nr_threads = perf_evlist__nr_threads(evlist, evsel);
392 
393  if (!evsel->fd)
394  return -EINVAL;
395 
396  for (thread = 0; thread < nr_threads; thread++) {
397  int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
398  if (err)
399  return err;
400  }
401  return 0;
402 }
403 
404 static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
405  struct perf_evsel *evsel,
406  int thread)
407 {
408  int cpu;
409  int nr_cpus = cpu_map__nr(evlist->cpus);
410 
411  if (!evsel->fd)
412  return -EINVAL;
413 
414  for (cpu = 0; cpu < nr_cpus; cpu++) {
415  int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
416  if (err)
417  return err;
418  }
419  return 0;
420 }
421 
422 int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
423  struct perf_evsel *evsel, int idx)
424 {
425  bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
426 
427  if (per_cpu_mmaps)
428  return perf_evlist__enable_event_cpu(evlist, evsel, idx);
429  else
430  return perf_evlist__enable_event_thread(evlist, evsel, idx);
431 }
432 
433 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
434 {
435  int nr_cpus = cpu_map__nr(evlist->cpus);
436  int nr_threads = thread_map__nr(evlist->threads);
437  int nfds = 0;
438  struct perf_evsel *evsel;
439 
440  evlist__for_each_entry(evlist, evsel) {
441  if (evsel->system_wide)
442  nfds += nr_cpus;
443  else
444  nfds += nr_cpus * nr_threads;
445  }
446 
447  if (fdarray__available_entries(&evlist->pollfd) < nfds &&
448  fdarray__grow(&evlist->pollfd, nfds) < 0)
449  return -ENOMEM;
450 
451  return 0;
452 }
453 
454 static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
455  struct perf_mmap *map, short revent)
456 {
457  int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
458  /*
459  * Save the map so that when we filter out POLLHUP'ed fds we can
460  * put the refcount of the associated evlist->mmap[] entry.
461  */
462  if (pos >= 0) {
463  evlist->pollfd.priv[pos].ptr = map;
464 
465  fcntl(fd, F_SETFL, O_NONBLOCK);
466  }
467 
468  return pos;
469 }
470 
471 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
472 {
473  return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
474 }
475 
476 static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
477  void *arg __maybe_unused)
478 {
479  struct perf_mmap *map = fda->priv[fd].ptr;
480 
481  if (map)
482  perf_mmap__put(map);
483 }
484 
485 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
486 {
487  return fdarray__filter(&evlist->pollfd, revents_and_mask,
488  perf_evlist__munmap_filtered, NULL);
489 }
490 
491 int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
492 {
493  return fdarray__poll(&evlist->pollfd, timeout);
494 }
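/*
 * Editor's sketch (not part of the original file, illustrative only): a
 * typical consumer loop pairing perf_evlist__poll() with
 * perf_evlist__filter_pollfd(), so that mmaps whose traced threads exited
 * (POLLHUP) get put via perf_evlist__munmap_filtered() above.  The
 * hypothetical example__ helper is the editor's.
 */
static inline void example__poll_until_done(struct perf_evlist *evlist)
{
	while (evlist->pollfd.nr > 0) {
		if (perf_evlist__poll(evlist, -1 /* block */) <= 0)
			continue;
		/* ... drain the evlist->mmap[] ring buffers here ... */
		perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP);
	}
}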
495 
496 static void perf_evlist__id_hash(struct perf_evlist *evlist,
497  struct perf_evsel *evsel,
498  int cpu, int thread, u64 id)
499 {
500  int hash;
501  struct perf_sample_id *sid = SID(evsel, cpu, thread);
502 
503  sid->id = id;
504  sid->evsel = evsel;
505  hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
506  hlist_add_head(&sid->node, &evlist->heads[hash]);
507 }
508 
509 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
510  int cpu, int thread, u64 id)
511 {
512  perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
513  evsel->id[evsel->ids++] = id;
514 }
515 
516 int perf_evlist__id_add_fd(struct perf_evlist *evlist,
517  struct perf_evsel *evsel,
518  int cpu, int thread, int fd)
519 {
520  u64 read_data[4] = { 0, };
521  int id_idx = 1; /* The first entry is the counter value */
522  u64 id;
523  int ret;
524 
525  ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
526  if (!ret)
527  goto add;
528 
529  if (errno != ENOTTY)
530  return -1;
531 
532  /* Legacy way to get the event id... All hail to old kernels! */
533 
534  /*
535  * This way does not work with group format read, so bail
536  * out in that case.
537  */
538  if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
539  return -1;
540 
541  if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
542  read(fd, &read_data, sizeof(read_data)) == -1)
543  return -1;
544 
545  if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
546  ++id_idx;
547  if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
548  ++id_idx;
549 
550  id = read_data[id_idx];
551 
552  add:
553  perf_evlist__id_add(evlist, evsel, cpu, thread, id);
554  return 0;
555 }
556 
557 static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
558  struct perf_evsel *evsel, int idx, int cpu,
559  int thread)
560 {
561  struct perf_sample_id *sid = SID(evsel, cpu, thread);
562  sid->idx = idx;
563  if (evlist->cpus && cpu >= 0)
564  sid->cpu = evlist->cpus->map[cpu];
565  else
566  sid->cpu = -1;
567  if (!evsel->system_wide && evlist->threads && thread >= 0)
568  sid->tid = thread_map__pid(evlist->threads, thread);
569  else
570  sid->tid = -1;
571 }
572 
573 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
574 {
575  struct hlist_head *head;
576  struct perf_sample_id *sid;
577  int hash;
578 
579  hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
580  head = &evlist->heads[hash];
581 
582  hlist_for_each_entry(sid, head, node)
583  if (sid->id == id)
584  return sid;
585 
586  return NULL;
587 }
588 
589 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
590 {
591  struct perf_sample_id *sid;
592 
593  if (evlist->nr_entries == 1 || !id)
594  return perf_evlist__first(evlist);
595 
596  sid = perf_evlist__id2sid(evlist, id);
597  if (sid)
598  return sid->evsel;
599 
600  if (!perf_evlist__sample_id_all(evlist))
601  return perf_evlist__first(evlist);
602 
603  return NULL;
604 }
605 
606 struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
607  u64 id)
608 {
609  struct perf_sample_id *sid;
610 
611  if (!id)
612  return NULL;
613 
614  sid = perf_evlist__id2sid(evlist, id);
615  if (sid)
616  return sid->evsel;
617 
618  return NULL;
619 }
620 
621 static int perf_evlist__event2id(struct perf_evlist *evlist,
622  union perf_event *event, u64 *id)
623 {
624  const u64 *array = event->sample.array;
625  ssize_t n;
626 
627  n = (event->header.size - sizeof(event->header)) >> 3;
628 
629  if (event->header.type == PERF_RECORD_SAMPLE) {
630  if (evlist->id_pos >= n)
631  return -1;
632  *id = array[evlist->id_pos];
633  } else {
634  if (evlist->is_pos > n)
635  return -1;
636  n -= evlist->is_pos;
637  *id = array[n];
638  }
639  return 0;
640 }
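/*
 * Editor's note (illustrative, not in the original file): in the helper
 * above, a PERF_RECORD_SAMPLE carries its identifier id_pos u64 slots from
 * the start of the record body, while every other record type gets it
 * appended by sample_id_all, is_pos slots back from the end -- hence the
 * "n -= evlist->is_pos" indexing.
 */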
641 
642 struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
643  union perf_event *event)
644 {
645  struct perf_evsel *first = perf_evlist__first(evlist);
646  struct hlist_head *head;
647  struct perf_sample_id *sid;
648  int hash;
649  u64 id;
650 
651  if (evlist->nr_entries == 1)
652  return first;
653 
654  if (!first->attr.sample_id_all &&
655  event->header.type != PERF_RECORD_SAMPLE)
656  return first;
657 
658  if (perf_evlist__event2id(evlist, event, &id))
659  return NULL;
660 
661  /* Synthesized events have an id of zero */
662  if (!id)
663  return first;
664 
665  hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
666  head = &evlist->heads[hash];
667 
668  hlist_for_each_entry(sid, head, node) {
669  if (sid->id == id)
670  return sid->evsel;
671  }
672  return NULL;
673 }
674 
675 static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
676 {
677  int i;
678 
679  if (!evlist->overwrite_mmap)
680  return 0;
681 
682  for (i = 0; i < evlist->nr_mmaps; i++) {
683  int fd = evlist->overwrite_mmap[i].fd;
684  int err;
685 
686  if (fd < 0)
687  continue;
688  err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
689  if (err)
690  return err;
691  }
692  return 0;
693 }
694 
695 static int perf_evlist__pause(struct perf_evlist *evlist)
696 {
697  return perf_evlist__set_paused(evlist, true);
698 }
699 
700 static int perf_evlist__resume(struct perf_evlist *evlist)
701 {
702  return perf_evlist__set_paused(evlist, false);
703 }
704 
705 static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
706 {
707  int i;
708 
709  if (evlist->mmap)
710  for (i = 0; i < evlist->nr_mmaps; i++)
711  perf_mmap__munmap(&evlist->mmap[i]);
712 
713  if (evlist->overwrite_mmap)
714  for (i = 0; i < evlist->nr_mmaps; i++)
715  perf_mmap__munmap(&evlist->overwrite_mmap[i]);
716 }
717 
718 void perf_evlist__munmap(struct perf_evlist *evlist)
719 {
720  perf_evlist__munmap_nofree(evlist);
721  zfree(&evlist->mmap);
722  zfree(&evlist->overwrite_mmap);
723 }
724 
725 static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist,
726  bool overwrite)
727 {
728  int i;
729  struct perf_mmap *map;
730 
731  evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
732  if (cpu_map__empty(evlist->cpus))
733  evlist->nr_mmaps = thread_map__nr(evlist->threads);
734  map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
735  if (!map)
736  return NULL;
737 
738  for (i = 0; i < evlist->nr_mmaps; i++) {
739  map[i].fd = -1;
740  map[i].overwrite = overwrite;
741  /*
742  * When the perf_mmap() call is made we grab one refcount, plus
743  * one extra to let perf_mmap__consume() get the last
744  * events after all real references (perf_mmap__get()) are
745  * dropped.
746  *
747  * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
748  * thus does perf_mmap__get() on it.
749  */
750  refcount_set(&map[i].refcnt, 0);
751  }
752  return map;
753 }
754 
755 static bool
756 perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
757  struct perf_evsel *evsel)
758 {
759  if (evsel->attr.write_backward)
760  return false;
761  return true;
762 }
763 
764 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
765  struct mmap_params *mp, int cpu_idx,
766  int thread, int *_output, int *_output_overwrite)
767 {
768  struct perf_evsel *evsel;
769  int revent;
770  int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);
771 
772  evlist__for_each_entry(evlist, evsel) {
773  struct perf_mmap *maps = evlist->mmap;
774  int *output = _output;
775  int fd;
776  int cpu;
777 
778  mp->prot = PROT_READ | PROT_WRITE;
779  if (evsel->attr.write_backward) {
780  output = _output_overwrite;
781  maps = evlist->overwrite_mmap;
782 
783  if (!maps) {
784  maps = perf_evlist__alloc_mmap(evlist, true);
785  if (!maps)
786  return -1;
787  evlist->overwrite_mmap = maps;
788  if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
789  perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
790  }
791  mp->prot &= ~PROT_WRITE;
792  }
793 
794  if (evsel->system_wide && thread)
795  continue;
796 
797  cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
798  if (cpu == -1)
799  continue;
800 
801  fd = FD(evsel, cpu, thread);
802 
803  if (*output == -1) {
804  *output = fd;
805 
806  if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
807  return -1;
808  } else {
809  if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
810  return -1;
811 
812  perf_mmap__get(&maps[idx]);
813  }
814 
815  revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
816 
817  /*
818  * The system_wide flag causes a selected event to always be
819  * opened without a pid. Consequently it will never get a
820  * POLLHUP, but it is used for tracking in combination with
821  * other events, so it should not need to be polled anyway.
822  * Therefore don't add it for polling.
823  */
824  if (!evsel->system_wide &&
825  __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
826  perf_mmap__put(&maps[idx]);
827  return -1;
828  }
829 
830  if (evsel->attr.read_format & PERF_FORMAT_ID) {
831  if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
832  fd) < 0)
833  return -1;
834  perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
835  thread);
836  }
837  }
838 
839  return 0;
840 }
841 
842 static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
843  struct mmap_params *mp)
844 {
845  int cpu, thread;
846  int nr_cpus = cpu_map__nr(evlist->cpus);
847  int nr_threads = thread_map__nr(evlist->threads);
848 
849  pr_debug2("perf event ring buffer mmapped per cpu\n");
850  for (cpu = 0; cpu < nr_cpus; cpu++) {
851  int output = -1;
852  int output_overwrite = -1;
853 
854  auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
855  true);
856 
857  for (thread = 0; thread < nr_threads; thread++) {
858  if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
859  thread, &output, &output_overwrite))
860  goto out_unmap;
861  }
862  }
863 
864  return 0;
865 
866 out_unmap:
867  perf_evlist__munmap_nofree(evlist);
868  return -1;
869 }
870 
871 static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
872  struct mmap_params *mp)
873 {
874  int thread;
875  int nr_threads = thread_map__nr(evlist->threads);
876 
877  pr_debug2("perf event ring buffer mmapped per thread\n");
878  for (thread = 0; thread < nr_threads; thread++) {
879  int output = -1;
880  int output_overwrite = -1;
881 
882  auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
883  false);
884 
885  if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
886  &output, &output_overwrite))
887  goto out_unmap;
888  }
889 
890  return 0;
891 
892 out_unmap:
893  perf_evlist__munmap_nofree(evlist);
894  return -1;
895 }
896 
897 unsigned long perf_event_mlock_kb_in_pages(void)
898 {
899  unsigned long pages;
900  int max;
901 
902  if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
903  /*
904  * Pick a value that was good once upon a time, i.e. things look
905  * strange since we can't read the sysctl value, but let's not
906  * die yet...
907  */
908  max = 512;
909  } else {
910  max -= (page_size / 1024);
911  }
912 
913  pages = (max * 1024) / page_size;
914  if (!is_power_of_2(pages))
915  pages = rounddown_pow_of_two(pages);
916 
917  return pages;
918 }
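/*
 * Editor's worked example (illustrative, assuming a 4 kB page size and the
 * common default kernel.perf_event_mlock_kb = 516): max = 516 - 4 = 512 kB,
 * so pages = 512 * 1024 / 4096 = 128, already a power of two and returned
 * as-is.
 */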
919 
920 size_t perf_evlist__mmap_size(unsigned long pages)
921 {
922  if (pages == UINT_MAX)
923  pages = perf_event_mlock_kb_in_pages();
924  else if (!is_power_of_2(pages))
925  return 0;
926 
927  return (pages + 1) * page_size;
928 }
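/*
 * Editor's worked example (illustrative): continuing with 128 data pages,
 * perf_evlist__mmap_size(UINT_MAX) = (128 + 1) * 4096 = 528384 bytes --
 * the "+ 1" being the control page the kernel prepends to the ring buffer.
 */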
929 
930 static long parse_pages_arg(const char *str, unsigned long min,
931  unsigned long max)
932 {
933  unsigned long pages, val;
934  static struct parse_tag tags[] = {
935  { .tag = 'B', .mult = 1 },
936  { .tag = 'K', .mult = 1 << 10 },
937  { .tag = 'M', .mult = 1 << 20 },
938  { .tag = 'G', .mult = 1 << 30 },
939  { .tag = 0 },
940  };
941 
942  if (str == NULL)
943  return -EINVAL;
944 
945  val = parse_tag_value(str, tags);
946  if (val != (unsigned long) -1) {
947  /* we got file size value */
948  pages = PERF_ALIGN(val, page_size) / page_size;
949  } else {
950  /* we got pages count value */
951  char *eptr;
952  pages = strtoul(str, &eptr, 10);
953  if (*eptr != '\0')
954  return -EINVAL;
955  }
956 
957  if (pages == 0 && min == 0) {
958  /* leave number of pages at 0 */
959  } else if (!is_power_of_2(pages)) {
960  char buf[100];
961 
962  /* round pages up to next power of 2 */
963  pages = roundup_pow_of_two(pages);
964  if (!pages)
965  return -EINVAL;
966 
967  unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
968  pr_info("rounding mmap pages size to %s (%lu pages)\n",
969  buf, pages);
970  }
971 
972  if (pages > max)
973  return -EINVAL;
974 
975  return pages;
976 }
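/*
 * Editor's examples for parse_pages_arg() (illustrative, 4 kB pages):
 *   "512K" -> 524288 bytes -> 128 pages (a power of two, accepted as-is)
 *   "100"  -> 100 pages -> rounded up to 128 with a pr_info() notice
 *   "0"    -> kept at 0, but only when the caller allows min == 0
 */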
977 
978 int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
979 {
980  unsigned long max = UINT_MAX;
981  long pages;
982 
983  if (max > SIZE_MAX / page_size)
984  max = SIZE_MAX / page_size;
985 
986  pages = parse_pages_arg(str, 1, max);
987  if (pages < 0) {
988  pr_err("Invalid argument for --mmap_pages/-m\n");
989  return -1;
990  }
991 
992  *mmap_pages = pages;
993  return 0;
994 }
995 
996 int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
997  int unset __maybe_unused)
998 {
999  return __perf_evlist__parse_mmap_pages(opt->value, str);
1000 }
1001 
1002 /**
1003  * perf_evlist__mmap_ex - Create mmaps to receive events.
1004  * @evlist: list of events
1005  * @pages: map length in pages
1006  * @auxtrace_pages - auxtrace map length in pages
1007  * @auxtrace_overwrite - overwrite older auxtrace data?
1008  *
1009  * If @overwrite is %false the user needs to signal event consumption using
1010  * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
1011  * automatically.
1012  *
1013  * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
1014  * consumption using auxtrace_mmap__write_tail().
1015  *
1016  * Return: %0 on success, negative error code otherwise.
1017  */
1018 
1019 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1020  unsigned int auxtrace_pages,
1021  bool auxtrace_overwrite)
1022 {
1023  struct perf_evsel *evsel;
1024  const struct cpu_map *cpus = evlist->cpus;
1025  const struct thread_map *threads = evlist->threads;
1026  /*
1027  * Delay setting mp.prot: set it right before calling perf_mmap__mmap,
1028  * since its value is decided by each evsel's write_backward flag.
1029  * Hence &mp must not be passed through a const pointer.
1030  */
1031  struct mmap_params mp;
1032 
1033  if (!evlist->mmap)
1034  evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
1035  if (!evlist->mmap)
1036  return -ENOMEM;
1037 
1038  if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
1039  return -ENOMEM;
1040 
1041  evlist->mmap_len = perf_evlist__mmap_size(pages);
1042  pr_debug("mmap size %zuB\n", evlist->mmap_len);
1043  mp.mask = evlist->mmap_len - page_size - 1;
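 	/*
 	 * Editor's note (illustrative, not in the original file): with 128
 	 * data pages and 4 kB pages, mmap_len = 129 * 4096 = 528384, so
 	 * mask = 528384 - 4096 - 1 = 0x7ffff, the ring-buffer wrap mask
 	 * over the 512 kB data area.
 	 */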
1044 
1045  auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
1046  auxtrace_pages, auxtrace_overwrite);
1047 
1048  evlist__for_each_entry(evlist, evsel) {
1049  if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
1050  evsel->sample_id == NULL &&
1051  perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
1052  return -ENOMEM;
1053  }
1054 
1055  if (cpu_map__empty(cpus))
1056  return perf_evlist__mmap_per_thread(evlist, &mp);
1057 
1058  return perf_evlist__mmap_per_cpu(evlist, &mp);
1059 }
1060 
1061 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
1062 {
1063  return perf_evlist__mmap_ex(evlist, pages, 0, false);
1064 }
1065 
1066 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
1067 {
1068  bool all_threads = (target->per_thread && target->system_wide);
1069  struct cpu_map *cpus;
1070  struct thread_map *threads;
1071 
1072  /*
1073  * If both '-a' and '--per-thread' are passed to perf record, perf
1074  * record overrides '--per-thread': target->per_thread = false and
1075  * target->system_wide = true.
1076  *
1077  * If only '--per-thread' is passed to perf record,
1078  * target->per_thread = true and target->system_wide = false.
1079  *
1080  * So for perf record, target->per_thread && target->system_wide is
1081  * always false and thread_map__new_str doesn't call
1082  * thread_map__new_all_cpus, which preserves perf record's
1083  * current behavior.
1084  *
1085  * perf stat, however, allows target->per_thread and
1086  * target->system_wide to both be true, meaning: collect system-wide
1087  * per-thread data. In that case thread_map__new_str calls
1088  * thread_map__new_all_cpus to enumerate all threads.
1089  */
1090  threads = thread_map__new_str(target->pid, target->tid, target->uid,
1091  all_threads);
1092 
1093  if (!threads)
1094  return -1;
1095 
1096  if (target__uses_dummy_map(target))
1097  cpus = cpu_map__dummy_new();
1098  else
1099  cpus = cpu_map__new(target->cpu_list);
1100 
1101  if (!cpus)
1102  goto out_delete_threads;
1103 
1104  evlist->has_user_cpus = !!target->cpu_list;
1105 
1106  perf_evlist__set_maps(evlist, cpus, threads);
1107 
1108  return 0;
1109 
1110 out_delete_threads:
1111  thread_map__put(threads);
1112  return -1;
1113 }
1114 
1115 void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1116  struct thread_map *threads)
1117 {
1118  /*
1119  * Allow for the possibility that one or another of the maps isn't being
1120  * changed i.e. don't put it. Note we are assuming the maps that are
1121  * being applied are brand new and evlist is taking ownership of the
1122  * original reference count of 1. If that is not the case it is up to
1123  * the caller to increase the reference count.
1124  */
1125  if (cpus != evlist->cpus) {
1126  cpu_map__put(evlist->cpus);
1127  evlist->cpus = cpu_map__get(cpus);
1128  }
1129 
1130  if (threads != evlist->threads) {
1131  thread_map__put(evlist->threads);
1132  evlist->threads = thread_map__get(threads);
1133  }
1134 
1135  perf_evlist__propagate_maps(evlist);
1136 }
1137 
1138 void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
1139  enum perf_event_sample_format bit)
1140 {
1141  struct perf_evsel *evsel;
1142 
1143  evlist__for_each_entry(evlist, evsel)
1144  __perf_evsel__set_sample_bit(evsel, bit);
1145 }
1146 
1147 void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
1148  enum perf_event_sample_format bit)
1149 {
1150  struct perf_evsel *evsel;
1151 
1152  evlist__for_each_entry(evlist, evsel)
1153  __perf_evsel__reset_sample_bit(evsel, bit);
1154 }
1155 
1156 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
1157 {
1158  struct perf_evsel *evsel;
1159  int err = 0;
1160 
1161  evlist__for_each_entry(evlist, evsel) {
1162  if (evsel->filter == NULL)
1163  continue;
1164 
1165  /*
1166  * Filters only work for tracepoint events, which don't have a cpu
1167  * limit, so the evlist and evsel cpu maps should always be the same.
1168  */
1169  err = perf_evsel__apply_filter(evsel, evsel->filter);
1170  if (err) {
1171  *err_evsel = evsel;
1172  break;
1173  }
1174  }
1175 
1176  return err;
1177 }
1178 
1179 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1180 {
1181  struct perf_evsel *evsel;
1182  int err = 0;
1183 
1184  evlist__for_each_entry(evlist, evsel) {
1185  if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1186  continue;
1187 
1188  err = perf_evsel__set_filter(evsel, filter);
1189  if (err)
1190  break;
1191  }
1192 
1193  return err;
1194 }
1195 
1196 int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
1197 {
1198  char *filter = NULL;
1199  int ret = -1;
1200  size_t i;
1201 
1202  for (i = 0; i < npids; ++i) {
1203  if (i == 0) {
1204  if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1205  return -1;
1206  } else {
1207  char *tmp;
1208 
1209  if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1210  goto out_free;
1211 
1212  free(filter);
1213  filter = tmp;
1214  }
1215  }
1216 
1217  ret = perf_evlist__set_filter(evlist, filter);
1218 out_free:
1219  free(filter);
1220  return ret;
1221 }
1222 
1223 int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1224 {
1225  return perf_evlist__set_filter_pids(evlist, 1, &pid);
1226 }
1227 
1228 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
1229 {
1230  struct perf_evsel *pos;
1231 
1232  if (evlist->nr_entries == 1)
1233  return true;
1234 
1235  if (evlist->id_pos < 0 || evlist->is_pos < 0)
1236  return false;
1237 
1238  evlist__for_each_entry(evlist, pos) {
1239  if (pos->id_pos != evlist->id_pos ||
1240  pos->is_pos != evlist->is_pos)
1241  return false;
1242  }
1243 
1244  return true;
1245 }
1246 
1247 u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1248 {
1249  struct perf_evsel *evsel;
1250 
1251  if (evlist->combined_sample_type)
1252  return evlist->combined_sample_type;
1253 
1254  evlist__for_each_entry(evlist, evsel)
1255  evlist->combined_sample_type |= evsel->attr.sample_type;
1256 
1257  return evlist->combined_sample_type;
1258 }
1259 
1260 u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1261 {
1262  evlist->combined_sample_type = 0;
1263  return __perf_evlist__combined_sample_type(evlist);
1264 }
1265 
1266 u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1267 {
1268  struct perf_evsel *evsel;
1269  u64 branch_type = 0;
1270 
1271  evlist__for_each_entry(evlist, evsel)
1272  branch_type |= evsel->attr.branch_sample_type;
1273  return branch_type;
1274 }
1275 
1276 bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1277 {
1278  struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1279  u64 read_format = first->attr.read_format;
1280  u64 sample_type = first->attr.sample_type;
1281 
1282  evlist__for_each_entry(evlist, pos) {
1283  if (read_format != pos->attr.read_format)
1284  return false;
1285  }
1286 
1287  /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1288  if ((sample_type & PERF_SAMPLE_READ) &&
1289  !(read_format & PERF_FORMAT_ID)) {
1290  return false;
1291  }
1292 
1293  return true;
1294 }
1295 
1296 u64 perf_evlist__read_format(struct perf_evlist *evlist)
1297 {
1298  struct perf_evsel *first = perf_evlist__first(evlist);
1299  return first->attr.read_format;
1300 }
1301 
1302 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
1303 {
1304  struct perf_evsel *first = perf_evlist__first(evlist);
1305  struct perf_sample *data;
1306  u64 sample_type;
1307  u16 size = 0;
1308 
1309  if (!first->attr.sample_id_all)
1310  goto out;
1311 
1312  sample_type = first->attr.sample_type;
1313 
1314  if (sample_type & PERF_SAMPLE_TID)
1315  size += sizeof(data->tid) * 2;
1316 
1317  if (sample_type & PERF_SAMPLE_TIME)
1318  size += sizeof(data->time);
1319 
1320  if (sample_type & PERF_SAMPLE_ID)
1321  size += sizeof(data->id);
1322 
1323  if (sample_type & PERF_SAMPLE_STREAM_ID)
1324  size += sizeof(data->stream_id);
1325 
1326  if (sample_type & PERF_SAMPLE_CPU)
1327  size += sizeof(data->cpu) * 2;
1328 
1329  if (sample_type & PERF_SAMPLE_IDENTIFIER)
1330  size += sizeof(data->id);
1331 out:
1332  return size;
1333 }
1334 
1335 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
1336 {
1337  struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1338 
1339  evlist__for_each_entry_continue(evlist, pos) {
1340  if (first->attr.sample_id_all != pos->attr.sample_id_all)
1341  return false;
1342  }
1343 
1344  return true;
1345 }
1346 
1347 bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
1348 {
1349  struct perf_evsel *first = perf_evlist__first(evlist);
1350  return first->attr.sample_id_all;
1351 }
1352 
1353 void perf_evlist__set_selected(struct perf_evlist *evlist,
1354  struct perf_evsel *evsel)
1355 {
1356  evlist->selected = evsel;
1357 }
1358 
1359 void perf_evlist__close(struct perf_evlist *evlist)
1360 {
1361  struct perf_evsel *evsel;
1362 
1363  evlist__for_each_entry_reverse(evlist, evsel)
1364  perf_evsel__close(evsel);
1365 }
1366 
1367 static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1368 {
1369  struct cpu_map *cpus;
1370  struct thread_map *threads;
1371  int err = -ENOMEM;
1372 
1373  /*
1374  * Try reading /sys/devices/system/cpu/online to get
1375  * an all cpus map.
1376  *
1377  * FIXME: -ENOMEM is the best we can do here, the cpu_map
1378  * code needs an overhaul to properly forward the
1379  * error, and we may not want to do that fallback to a
1380  * default cpu identity map :-\
1381  */
1382  cpus = cpu_map__new(NULL);
1383  if (!cpus)
1384  goto out;
1385 
1386  threads = thread_map__new_dummy();
1387  if (!threads)
1388  goto out_put;
1389 
1390  perf_evlist__set_maps(evlist, cpus, threads);
1391 out:
1392  return err;
1393 out_put:
1394  cpu_map__put(cpus);
1395  goto out;
1396 }
1397 
1398 int perf_evlist__open(struct perf_evlist *evlist)
1399 {
1400  struct perf_evsel *evsel;
1401  int err;
1402 
1403  /*
1404  * Default: one fd per CPU, all threads, aka systemwide
1405  * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
1406  */
1407  if (evlist->threads == NULL && evlist->cpus == NULL) {
1408  err = perf_evlist__create_syswide_maps(evlist);
1409  if (err < 0)
1410  goto out_err;
1411  }
1412 
1413  perf_evlist__update_id_pos(evlist);
1414 
1415  evlist__for_each_entry(evlist, evsel) {
1416  err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
1417  if (err < 0)
1418  goto out_err;
1419  }
1420 
1421  return 0;
1422 out_err:
1423  perf_evlist__close(evlist);
1424  errno = -err;
1425  return err;
1426 }
1427 
1428 int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
1429  const char *argv[], bool pipe_output,
1430  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
1431 {
1432  int child_ready_pipe[2], go_pipe[2];
1433  char bf;
1434 
1435  if (pipe(child_ready_pipe) < 0) {
1436  perror("failed to create 'ready' pipe");
1437  return -1;
1438  }
1439 
1440  if (pipe(go_pipe) < 0) {
1441  perror("failed to create 'go' pipe");
1442  goto out_close_ready_pipe;
1443  }
1444 
1445  evlist->workload.pid = fork();
1446  if (evlist->workload.pid < 0) {
1447  perror("failed to fork");
1448  goto out_close_pipes;
1449  }
1450 
1451  if (!evlist->workload.pid) {
1452  int ret;
1453 
1454  if (pipe_output)
1455  dup2(2, 1);
1456 
1457  signal(SIGTERM, SIG_DFL);
1458 
1459  close(child_ready_pipe[0]);
1460  close(go_pipe[1]);
1461  fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1462 
1463  /*
1464  * Tell the parent we're ready to go
1465  */
1466  close(child_ready_pipe[1]);
1467 
1468  /*
1469  * Wait until the parent tells us to go.
1470  */
1471  ret = read(go_pipe[0], &bf, 1);
1472  /*
1473  * The parent will ask for the execvp() to be performed by
1474  * writing exactly one byte, in workload.cork_fd, usually via
1475  * perf_evlist__start_workload().
1476  *
1477  * For cancelling the workload without actually running it,
1478  * the parent will just close workload.cork_fd, without writing
1479  * anything, i.e. read will return zero and we just exit()
1480  * here.
1481  */
1482  if (ret != 1) {
1483  if (ret == -1)
1484  perror("unable to read pipe");
1485  exit(ret);
1486  }
1487 
1488  execvp(argv[0], (char **)argv);
1489 
1490  if (exec_error) {
1491  union sigval val;
1492 
1493  val.sival_int = errno;
1494  if (sigqueue(getppid(), SIGUSR1, val))
1495  perror(argv[0]);
1496  } else
1497  perror(argv[0]);
1498  exit(-1);
1499  }
1500 
1501  if (exec_error) {
1502  struct sigaction act = {
1503  .sa_flags = SA_SIGINFO,
1504  .sa_sigaction = exec_error,
1505  };
1506  sigaction(SIGUSR1, &act, NULL);
1507  }
1508 
1509  if (target__none(target)) {
1510  if (evlist->threads == NULL) {
1511  fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
1512  __func__, __LINE__);
1513  goto out_close_pipes;
1514  }
1515  thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
1516  }
1517 
1518  close(child_ready_pipe[1]);
1519  close(go_pipe[0]);
1520  /*
1521  * wait for child to settle
1522  */
1523  if (read(child_ready_pipe[0], &bf, 1) == -1) {
1524  perror("unable to read pipe");
1525  goto out_close_pipes;
1526  }
1527 
1528  fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
1529  evlist->workload.cork_fd = go_pipe[1];
1530  close(child_ready_pipe[0]);
1531  return 0;
1532 
1533 out_close_pipes:
1534  close(go_pipe[0]);
1535  close(go_pipe[1]);
1536 out_close_ready_pipe:
1537  close(child_ready_pipe[0]);
1538  close(child_ready_pipe[1]);
1539  return -1;
1540 }
1541 
1542 int perf_evlist__start_workload(struct perf_evlist *evlist)
1543 {
1544  if (evlist->workload.cork_fd > 0) {
1545  char bf = 0;
1546  int ret;
1547  /*
1548  * Remove the cork, let it rip!
1549  */
1550  ret = write(evlist->workload.cork_fd, &bf, 1);
1551  if (ret < 0)
1552  perror("unable to write to pipe");
1553 
1554  close(evlist->workload.cork_fd);
1555  return ret;
1556  }
1557 
1558  return 0;
1559 }
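/*
 * Editor's sketch (not part of the original file, illustrative only): the
 * canonical calling sequence for the workload "cork" mechanism above, in
 * the style of builtin-record.c.  Error handling and the open/mmap details
 * are elided; the example__ helper is the editor's, not perf's.
 */
static inline int example__run_workload(struct perf_evlist *evlist,
					struct target *target,
					const char *argv[])
{
	/* fork()s; the child blocks in read() on the "go" pipe before execvp() */
	if (perf_evlist__prepare_workload(evlist, target, argv, false, NULL))
		return -1;

	/* ... perf_evlist__open(), perf_evlist__mmap(), enable events ... */

	/* writes one byte to workload.cork_fd: the child proceeds to execvp() */
	return perf_evlist__start_workload(evlist);
}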
1560 
1561 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
1562  struct perf_sample *sample)
1563 {
1564  struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1565 
1566  if (!evsel)
1567  return -EFAULT;
1568  return perf_evsel__parse_sample(evsel, event, sample);
1569 }
1570 
1571 int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist,
1572  union perf_event *event,
1573  u64 *timestamp)
1574 {
1575  struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1576 
1577  if (!evsel)
1578  return -EFAULT;
1579  return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
1580 }
1581 
1582 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1583 {
1584  struct perf_evsel *evsel;
1585  size_t printed = 0;
1586 
1587  evlist__for_each_entry(evlist, evsel) {
1588  printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1589  perf_evsel__name(evsel));
1590  }
1591 
1592  return printed + fprintf(fp, "\n");
1593 }
1594 
1595 int perf_evlist__strerror_open(struct perf_evlist *evlist,
1596  int err, char *buf, size_t size)
1597 {
1598  int printed, value;
1599  char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1600 
1601  switch (err) {
1602  case EACCES:
1603  case EPERM:
1604  printed = scnprintf(buf, size,
1605  "Error:\t%s.\n"
1606  "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1607 
1608  value = perf_event_paranoid();
1609 
1610  printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1611 
1612  if (value >= 2) {
1613  printed += scnprintf(buf + printed, size - printed,
1614  "For your workloads it needs to be <= 1\nHint:\t");
1615  }
1616  printed += scnprintf(buf + printed, size - printed,
1617  "For system wide tracing it needs to be set to -1.\n");
1618 
1619  printed += scnprintf(buf + printed, size - printed,
1620  "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1621  "Hint:\tThe current value is %d.", value);
1622  break;
1623  case EINVAL: {
1624  struct perf_evsel *first = perf_evlist__first(evlist);
1625  int max_freq;
1626 
1627  if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1628  goto out_default;
1629 
1630  if (first->attr.sample_freq < (u64)max_freq)
1631  goto out_default;
1632 
1633  printed = scnprintf(buf, size,
1634  "Error:\t%s.\n"
1635  "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1636  "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1637  emsg, max_freq, first->attr.sample_freq);
1638  break;
1639  }
1640  default:
1641 out_default:
1642  scnprintf(buf, size, "%s", emsg);
1643  break;
1644  }
1645 
1646  return 0;
1647 }
1648 
1649 int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1650 {
1651  char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1652  int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
1653 
1654  switch (err) {
1655  case EPERM:
1656  sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
1657  printed += scnprintf(buf + printed, size - printed,
1658  "Error:\t%s.\n"
1659  "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
1660  "Hint:\tTried using %zd kB.\n",
1661  emsg, pages_max_per_user, pages_attempted);
1662 
1663  if (pages_attempted >= pages_max_per_user) {
1664  printed += scnprintf(buf + printed, size - printed,
1665  "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1666  pages_max_per_user + pages_attempted);
1667  }
1668 
1669  printed += scnprintf(buf + printed, size - printed,
1670  "Hint:\tTry using a smaller -m/--mmap-pages value.");
1671  break;
1672  default:
1673  scnprintf(buf, size, "%s", emsg);
1674  break;
1675  }
1676 
1677  return 0;
1678 }
1679 
1680 void perf_evlist__to_front(struct perf_evlist *evlist,
1681  struct perf_evsel *move_evsel)
1682 {
1683  struct perf_evsel *evsel, *n;
1684  LIST_HEAD(move);
1685 
1686  if (move_evsel == perf_evlist__first(evlist))
1687  return;
1688 
1689  evlist__for_each_entry_safe(evlist, n, evsel) {
1690  if (evsel->leader == move_evsel->leader)
1691  list_move_tail(&evsel->node, &move);
1692  }
1693 
1694  list_splice(&move, &evlist->entries);
1695 }
1696 
1697 void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1698  struct perf_evsel *tracking_evsel)
1699 {
1700  struct perf_evsel *evsel;
1701 
1702  if (tracking_evsel->tracking)
1703  return;
1704 
1705  evlist__for_each_entry(evlist, evsel) {
1706  if (evsel != tracking_evsel)
1707  evsel->tracking = false;
1708  }
1709 
1710  tracking_evsel->tracking = true;
1711 }
1712 
1713 struct perf_evsel *
1714 perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1715  const char *str)
1716 {
1717  struct perf_evsel *evsel;
1718 
1719  evlist__for_each_entry(evlist, evsel) {
1720  if (!evsel->name)
1721  continue;
1722  if (strcmp(str, evsel->name) == 0)
1723  return evsel;
1724  }
1725 
1726  return NULL;
1727 }
1728 
1729 void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1730  enum bkw_mmap_state state)
1731 {
1732  enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
1733  enum action {
1734  NONE,
1735  PAUSE,
1736  RESUME,
1737  } action = NONE;
1738 
1739  if (!evlist->overwrite_mmap)
1740  return;
1741 
1742  switch (old_state) {
1743  case BKW_MMAP_NOTREADY: {
1744  if (state != BKW_MMAP_RUNNING)
1745  goto state_err;
1746  break;
1747  }
1748  case BKW_MMAP_RUNNING: {
1749  if (state != BKW_MMAP_DATA_PENDING)
1750  goto state_err;
1751  action = PAUSE;
1752  break;
1753  }
1754  case BKW_MMAP_DATA_PENDING: {
1755  if (state != BKW_MMAP_EMPTY)
1756  goto state_err;
1757  break;
1758  }
1759  case BKW_MMAP_EMPTY: {
1760  if (state != BKW_MMAP_RUNNING)
1761  goto state_err;
1762  action = RESUME;
1763  break;
1764  }
1765  default:
1766  WARN_ONCE(1, "Shouldn't get there\n");
1767  }
1768 
1769  evlist->bkw_mmap_state = state;
1770 
1771  switch (action) {
1772  case PAUSE:
1773  perf_evlist__pause(evlist);
1774  break;
1775  case RESUME:
1776  perf_evlist__resume(evlist);
1777  break;
1778  case NONE:
1779  default:
1780  break;
1781  }
1782 
1783 state_err:
1784  return;
1785 }
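/*
 * Editor's summary (illustrative, not in the original file) of the
 * transitions enforced above; any other request falls through to
 * state_err and is silently ignored:
 *
 *   NOTREADY     -> RUNNING                     (initial arm)
 *   RUNNING      -> DATA_PENDING  [pause ring]  (stop to read stable data)
 *   DATA_PENDING -> EMPTY                       (data consumed)
 *   EMPTY        -> RUNNING       [resume ring]
 */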
1786 
1787 bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
1788 {
1789  struct perf_evsel *evsel;
1790 
1791  evlist__for_each_entry(evlist, evsel) {
1792  if (!evsel->attr.exclude_kernel)
1793  return false;
1794  }
1795 
1796  return true;
1797 }
1798 
1799 /*
1800  * Events in the data file are not collected in groups, but we still want
1801  * the group display. Set the artificial group and set the leader's
1802  * forced_leader flag to notify the display code.
1803  */
1804 void perf_evlist__force_leader(struct perf_evlist *evlist)
1805 {
1806  if (!evlist->nr_groups) {
1807  struct perf_evsel *leader = perf_evlist__first(evlist);
1808 
1809  perf_evlist__set_leader(evlist);
1810  leader->forced_leader = true;
1811  }
1812 }