Linux Perf
thread-stack.c
/*
 * thread-stack.c: Synthesize a thread's stack using call / return events
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/rbtree.h>
#include <linux/list.h>
#include <errno.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "call-path.h"
#include "thread-stack.h"

#define STACK_GROWTH 2048

/**
 * struct thread_stack_entry - thread stack entry.
 * @ret_addr: return address
 * @timestamp: timestamp (if known)
 * @ref: external reference (e.g. db_id of sample)
 * @branch_count: the branch count when the entry was created
 * @cp: call path
 * @no_call: a 'call' was not seen
 */
struct thread_stack_entry {
        u64 ret_addr;
        u64 timestamp;
        u64 ref;
        u64 branch_count;
        struct call_path *cp;
        bool no_call;
};

/**
 * struct thread_stack - thread stack constructed from 'call' and 'return'
 *                       branch samples.
 * @stack: array that holds the stack
 * @cnt: number of entries in the stack
 * @sz: current maximum stack size
 * @trace_nr: current trace number
 * @branch_count: running branch count
 * @kernel_start: kernel start address
 * @last_time: last timestamp
 * @crp: call/return processor
 * @comm: current comm
 */
struct thread_stack {
        struct thread_stack_entry *stack;
        size_t cnt;
        size_t sz;
        u64 trace_nr;
        u64 branch_count;
        u64 kernel_start;
        u64 last_time;
        struct call_return_processor *crp;
        struct comm *comm;
};

static int thread_stack__grow(struct thread_stack *ts)
{
        struct thread_stack_entry *new_stack;
        size_t sz, new_sz;

        new_sz = ts->sz + STACK_GROWTH;
        sz = new_sz * sizeof(struct thread_stack_entry);

        new_stack = realloc(ts->stack, sz);
        if (!new_stack)
                return -ENOMEM;

        ts->stack = new_stack;
        ts->sz = new_sz;

        return 0;
}
91 
93  struct call_return_processor *crp)
94 {
95  struct thread_stack *ts;
96 
97  ts = zalloc(sizeof(struct thread_stack));
98  if (!ts)
99  return NULL;
100 
101  if (thread_stack__grow(ts)) {
102  free(ts);
103  return NULL;
104  }
105 
106  if (thread->mg && thread->mg->machine)
108  else
109  ts->kernel_start = 1ULL << 63;
110  ts->crp = crp;
111 
112  return ts;
113 }
114 
static int thread_stack__push(struct thread_stack *ts, u64 ret_addr)
{
        int err = 0;

        if (ts->cnt == ts->sz) {
                err = thread_stack__grow(ts);
                if (err) {
                        pr_warning("Out of memory: discarding thread stack\n");
                        ts->cnt = 0;
                }
        }

        ts->stack[ts->cnt++].ret_addr = ret_addr;

        return err;
}

static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
{
        size_t i;

        /*
         * In some cases there may be functions which are not seen to return.
         * For example when setjmp / longjmp has been used. Or the perf context
         * switch in the kernel which doesn't stop and start tracing in exactly
         * the same code path. When that happens the return address will be
         * further down the stack. If the return address is not found at all,
         * we assume the opposite (i.e. this is a return for a call that wasn't
         * seen for some reason) and leave the stack alone.
         */
        for (i = ts->cnt; i; ) {
                if (ts->stack[--i].ret_addr == ret_addr) {
                        ts->cnt = i;
                        return;
                }
        }
}
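
The search-down behaviour documented above is easy to see in isolation. Below is a minimal, self-contained sketch (not part of perf; simple_stack is a hypothetical stand-in for the private struct thread_stack) showing that a longjmp-style return pops every entry above the match, while an unmatched return leaves the stack alone:

#include <stddef.h>
#include <stdio.h>

struct simple_stack {
        unsigned long long ret_addr[8]; /* stand-in for the entry array */
        size_t cnt;
};

/* Same loop as thread_stack__pop() above */
static void simple_pop(struct simple_stack *s, unsigned long long ret_addr)
{
        size_t i;

        for (i = s->cnt; i; ) {
                if (s->ret_addr[--i] == ret_addr) {
                        s->cnt = i;
                        return;
                }
        }
}

int main(void)
{
        struct simple_stack s = { .ret_addr = {0x100, 0x200, 0x300}, .cnt = 3 };

        simple_pop(&s, 0x200);          /* pops 0x300, then matches 0x200 */
        printf("cnt = %zu\n", s.cnt);   /* prints: cnt = 1 */
        simple_pop(&s, 0x999);          /* no match: stack untouched */
        printf("cnt = %zu\n", s.cnt);   /* prints: cnt = 1 */
        return 0;
}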

static bool thread_stack__in_kernel(struct thread_stack *ts)
{
        if (!ts->cnt)
                return false;

        return ts->stack[ts->cnt - 1].cp->in_kernel;
}

static int thread_stack__call_return(struct thread *thread,
                                     struct thread_stack *ts, size_t idx,
                                     u64 timestamp, u64 ref, bool no_return)
{
        struct call_return_processor *crp = ts->crp;
        struct thread_stack_entry *tse;
        struct call_return cr = {
                .thread = thread,
                .comm = ts->comm,
                .db_id = 0,
        };

        tse = &ts->stack[idx];
        cr.cp = tse->cp;
        cr.call_time = tse->timestamp;
        cr.return_time = timestamp;
        cr.branch_count = ts->branch_count - tse->branch_count;
        cr.call_ref = tse->ref;
        cr.return_ref = ref;
        if (tse->no_call)
                cr.flags |= CALL_RETURN_NO_CALL;
        if (no_return)
                cr.flags |= CALL_RETURN_NO_RETURN;

        return crp->process(&cr, crp->data);
}

static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
{
        struct call_return_processor *crp = ts->crp;
        int err;

        if (!crp) {
                ts->cnt = 0;
                return 0;
        }

        while (ts->cnt) {
                err = thread_stack__call_return(thread, ts, --ts->cnt,
                                                ts->last_time, 0, true);
                if (err) {
                        pr_err("Error flushing thread stack!\n");
                        ts->cnt = 0;
                        return err;
                }
        }

        return 0;
}

int thread_stack__flush(struct thread *thread)
{
        if (thread->ts)
                return __thread_stack__flush(thread, thread->ts);

        return 0;
}

int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
                        u64 to_ip, u16 insn_len, u64 trace_nr)
{
        if (!thread)
                return -EINVAL;

        if (!thread->ts) {
                thread->ts = thread_stack__new(thread, NULL);
                if (!thread->ts) {
                        pr_warning("Out of memory: no thread stack\n");
                        return -ENOMEM;
                }
                thread->ts->trace_nr = trace_nr;
        }

        /*
         * When the trace is discontinuous, the trace_nr changes. In that case
         * the stack might be completely invalid. Better to report nothing than
         * to report something misleading, so flush the stack.
         */
        if (trace_nr != thread->ts->trace_nr) {
                if (thread->ts->trace_nr)
                        __thread_stack__flush(thread, thread->ts);
                thread->ts->trace_nr = trace_nr;
        }

        /* Stop here if thread_stack__process() is in use */
        if (thread->ts->crp)
                return 0;

        if (flags & PERF_IP_FLAG_CALL) {
                u64 ret_addr;

                if (!to_ip)
                        return 0;
                ret_addr = from_ip + insn_len;
                if (ret_addr == to_ip)
                        return 0; /* Zero-length calls are excluded */
                return thread_stack__push(thread->ts, ret_addr);
        } else if (flags & PERF_IP_FLAG_RETURN) {
                if (!from_ip)
                        return 0;
                thread_stack__pop(thread->ts, to_ip);
        }

        return 0;
}
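
In this mode (no call/return processor), thread_stack__event() is fed one decoded branch at a time by a trace decoder such as Intel PT. A minimal sketch of the call pattern, with illustrative addresses (feed_branches is a hypothetical helper, not a perf function):

/* Hypothetical helper: push one call and pop its matching return. */
static int feed_branches(struct thread *thread, u64 trace_nr)
{
        int err;

        /* CALL at 0x1000 (4-byte instruction) to 0x2000: pushes 0x1004 */
        err = thread_stack__event(thread, PERF_IP_FLAG_CALL,
                                  0x1000, 0x2000, 4, trace_nr);
        if (err)
                return err;

        /* RETURN branching back to 0x1004: pops the matching entry */
        return thread_stack__event(thread, PERF_IP_FLAG_RETURN,
                                   0x2000, 0x1004, 1, trace_nr);
}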

void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
{
        if (!thread || !thread->ts)
                return;

        if (trace_nr != thread->ts->trace_nr) {
                if (thread->ts->trace_nr)
                        __thread_stack__flush(thread, thread->ts);
                thread->ts->trace_nr = trace_nr;
        }
}

void thread_stack__free(struct thread *thread)
{
        if (thread->ts) {
                __thread_stack__flush(thread, thread->ts);
                zfree(&thread->ts->stack);
                zfree(&thread->ts);
        }
}

void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
                          size_t sz, u64 ip)
{
        size_t i;

        if (!thread || !thread->ts)
                chain->nr = 1;
        else
                chain->nr = min(sz, thread->ts->cnt + 1);

        chain->ips[0] = ip;

        for (i = 1; i < chain->nr; i++)
                chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
}
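
The chain passed in must be allocated by the caller: ip_callchain ends in a flexible array of u64 entries, so the allocation sizes the slots past the header. A sketch of that pattern (sample_stack and the 64-entry limit are assumptions, not perf code):

/* Hypothetical caller: synthesize a callchain of at most 64 entries. */
static struct ip_callchain *sample_stack(struct thread *thread, u64 ip)
{
        const size_t chain_sz = 64;
        struct ip_callchain *chain;

        chain = zalloc(sizeof(*chain) + chain_sz * sizeof(u64));
        if (!chain)
                return NULL;

        /* ips[0] = ip, ips[1..] = stacked return addresses, innermost first */
        thread_stack__sample(thread, chain, chain_sz, ip);
        return chain;
}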

struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, void *data),
                           void *data)
{
        struct call_return_processor *crp;

        crp = zalloc(sizeof(struct call_return_processor));
        if (!crp)
                return NULL;
        crp->cpr = call_path_root__new();
        if (!crp->cpr)
                goto out_free;
        crp->process = process;
        crp->data = data;
        return crp;

out_free:
        free(crp);
        return NULL;
}
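
The process callback is invoked once per completed call/return pair (built by thread_stack__call_return() above). A minimal consumer might look like the following sketch; print_cr is hypothetical, whereas in perf itself the callback lives in the db-export path:

/* Hypothetical callback: print each completed call/return pair. */
static int print_cr(struct call_return *cr, void *data)
{
        const char *name = cr->cp->sym ? cr->cp->sym->name : "[unknown]";

        (void)data;
        printf("%s: %llu branches between call and return\n", name,
               (unsigned long long)cr->branch_count);
        return 0;
}

It would then be registered with call_return_processor__new(print_cr, NULL).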

void call_return_processor__free(struct call_return_processor *crp)
{
        if (crp) {
                call_path_root__free(crp->cpr);
                free(crp);
        }
}

static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
                                 u64 timestamp, u64 ref, struct call_path *cp,
                                 bool no_call)
{
        struct thread_stack_entry *tse;
        int err;

        if (ts->cnt == ts->sz) {
                err = thread_stack__grow(ts);
                if (err)
                        return err;
        }

        tse = &ts->stack[ts->cnt++];
        tse->ret_addr = ret_addr;
        tse->timestamp = timestamp;
        tse->ref = ref;
        tse->branch_count = ts->branch_count;
        tse->cp = cp;
        tse->no_call = no_call;

        return 0;
}

static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
                                u64 ret_addr, u64 timestamp, u64 ref,
                                struct symbol *sym)
{
        int err;

        if (!ts->cnt)
                return 1;

        if (ts->cnt == 1) {
                struct thread_stack_entry *tse = &ts->stack[0];

                /* Special case: match the bottom entry by symbol */
                if (tse->cp->sym == sym)
                        return thread_stack__call_return(thread, ts, --ts->cnt,
                                                         timestamp, ref, false);
        }

        if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
                /* Return address matches the top of the stack */
                return thread_stack__call_return(thread, ts, --ts->cnt,
                                                 timestamp, ref, false);
        } else {
                /*
                 * Otherwise search down the stack; entries above a match are
                 * emitted as 'no return' before the match itself is popped.
                 */
                size_t i = ts->cnt - 1;

                while (i--) {
                        if (ts->stack[i].ret_addr != ret_addr)
                                continue;
                        i += 1;
                        while (ts->cnt > i) {
                                err = thread_stack__call_return(thread, ts,
                                                                --ts->cnt,
                                                                timestamp, ref,
                                                                true);
                                if (err)
                                        return err;
                        }
                        return thread_stack__call_return(thread, ts, --ts->cnt,
                                                         timestamp, ref, false);
                }
        }

        /* No match found: tell the caller, who may treat it as 'no call' */
        return 1;
}

static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts,
                                struct perf_sample *sample,
                                struct addr_location *from_al,
                                struct addr_location *to_al, u64 ref)
{
        struct call_path_root *cpr = ts->crp->cpr;
        struct call_path *cp;
        struct symbol *sym;
        u64 ip;

        if (sample->ip) {
                ip = sample->ip;
                sym = from_al->sym;
        } else if (sample->addr) {
                ip = sample->addr;
                sym = to_al->sym;
        } else {
                return 0;
        }

        cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
                                ts->kernel_start);
        if (!cp)
                return -ENOMEM;

        return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp,
                                     true);
}

static int thread_stack__no_call_return(struct thread *thread,
                                        struct thread_stack *ts,
                                        struct perf_sample *sample,
                                        struct addr_location *from_al,
                                        struct addr_location *to_al, u64 ref)
{
        struct call_path_root *cpr = ts->crp->cpr;
        struct call_path *cp, *parent;
        u64 ks = ts->kernel_start;
        int err;

        if (sample->ip >= ks && sample->addr < ks) {
                /* Return to userspace, so pop all kernel addresses */
                while (thread_stack__in_kernel(ts)) {
                        err = thread_stack__call_return(thread, ts, --ts->cnt,
                                                        sample->time, ref,
                                                        true);
                        if (err)
                                return err;
                }

                /* If the stack is empty, push the userspace address */
                if (!ts->cnt) {
                        cp = call_path__findnew(cpr, &cpr->call_path,
                                                to_al->sym, sample->addr,
                                                ts->kernel_start);
                        if (!cp)
                                return -ENOMEM;
                        return thread_stack__push_cp(ts, 0, sample->time, ref,
                                                     cp, true);
                }
        } else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
                /* Return to userspace, so pop all kernel addresses */
                while (thread_stack__in_kernel(ts)) {
                        err = thread_stack__call_return(thread, ts, --ts->cnt,
                                                        sample->time, ref,
                                                        true);
                        if (err)
                                return err;
                }
        }

        if (ts->cnt)
                parent = ts->stack[ts->cnt - 1].cp;
        else
                parent = &cpr->call_path;

        /* This 'return' had no 'call', so push and pop top of stack */
        cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
                                ts->kernel_start);
        if (!cp)
                return -ENOMEM;

        err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
                                    true);
        if (err)
                return err;

        return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
                                    to_al->sym);
}

static int thread_stack__trace_begin(struct thread *thread,
                                     struct thread_stack *ts, u64 timestamp,
                                     u64 ref)
{
        struct thread_stack_entry *tse;
        int err;

        if (!ts->cnt)
                return 0;

        /* Pop trace end */
        tse = &ts->stack[ts->cnt - 1];
        if (tse->cp->sym == NULL && tse->cp->ip == 0) {
                err = thread_stack__call_return(thread, ts, --ts->cnt,
                                                timestamp, ref, false);
                if (err)
                        return err;
        }

        return 0;
}

static int thread_stack__trace_end(struct thread_stack *ts,
                                   struct perf_sample *sample, u64 ref)
{
        struct call_path_root *cpr = ts->crp->cpr;
        struct call_path *cp;
        u64 ret_addr;

        /* No point having 'trace end' on the bottom of the stack */
        if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
                return 0;

        cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
                                ts->kernel_start);
        if (!cp)
                return -ENOMEM;

        ret_addr = sample->ip + sample->insn_len;

        return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
                                     false);
}

int thread_stack__process(struct thread *thread, struct comm *comm,
                          struct perf_sample *sample,
                          struct addr_location *from_al,
                          struct addr_location *to_al, u64 ref,
                          struct call_return_processor *crp)
{
        struct thread_stack *ts = thread->ts;
        int err = 0;

        if (ts) {
                if (!ts->crp) {
                        /* Supersede thread_stack__event() */
                        thread_stack__free(thread);
                        thread->ts = thread_stack__new(thread, crp);
                        if (!thread->ts)
                                return -ENOMEM;
                        ts = thread->ts;
                        ts->comm = comm;
                }
        } else {
                thread->ts = thread_stack__new(thread, crp);
                if (!thread->ts)
                        return -ENOMEM;
                ts = thread->ts;
                ts->comm = comm;
        }

        /* Flush stack on exec */
        if (ts->comm != comm && thread->pid_ == thread->tid) {
                err = __thread_stack__flush(thread, ts);
                if (err)
                        return err;
                ts->comm = comm;
        }

        /* If the stack is empty, put the current symbol on the stack */
        if (!ts->cnt) {
                err = thread_stack__bottom(thread, ts, sample, from_al, to_al,
                                           ref);
                if (err)
                        return err;
        }

        ts->branch_count += 1;
        ts->last_time = sample->time;

        if (sample->flags & PERF_IP_FLAG_CALL) {
                struct call_path_root *cpr = ts->crp->cpr;
                struct call_path *cp;
                u64 ret_addr;

                if (!sample->ip || !sample->addr)
                        return 0;

                ret_addr = sample->ip + sample->insn_len;
                if (ret_addr == sample->addr)
                        return 0; /* Zero-length calls are excluded */

                cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
                                        to_al->sym, sample->addr,
                                        ts->kernel_start);
                if (!cp)
                        return -ENOMEM;
                err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
                                            cp, false);
        } else if (sample->flags & PERF_IP_FLAG_RETURN) {
                if (!sample->ip || !sample->addr)
                        return 0;

                err = thread_stack__pop_cp(thread, ts, sample->addr,
                                           sample->time, ref, from_al->sym);
                if (err) {
                        if (err < 0)
                                return err;
                        err = thread_stack__no_call_return(thread, ts, sample,
                                                           from_al, to_al, ref);
                }
        } else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
                err = thread_stack__trace_begin(thread, ts, sample->time, ref);
        } else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
                err = thread_stack__trace_end(ts, sample, ref);
        }

        return err;
}
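
A sketch of the overall driving sequence in this mode (illustrative, with error handling trimmed; in perf itself this is driven from the session/db-export code, and print_cr, sample_db_id, from_al and to_al are assumptions resolved by the caller): create the processor once, feed every branch sample through thread_stack__process(), then flush and free at end of trace.

        /* Illustrative lifecycle, assuming print_cr from the earlier sketch */
        struct call_return_processor *crp;
        int err;

        crp = call_return_processor__new(print_cr, NULL);
        if (!crp)
                return -ENOMEM;

        /* ... for each decoded branch sample on 'thread' ... */
        err = thread_stack__process(thread, comm, sample, &from_al, &to_al,
                                    sample_db_id, crp);

        /* At end of trace: emit pending entries, then release everything */
        thread_stack__flush(thread);
        thread_stack__free(thread);
        call_return_processor__free(crp);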

size_t thread_stack__depth(struct thread *thread)
{
        if (!thread->ts)
                return 0;
        return thread->ts->cnt;
}