Support for the PMU's BTS features has been upstreamed in v2.6.32,
but we still have the old and disabled ptrace-BTS, as Linus noticed
not so long ago.

It's buggy: TIF_DEBUGCTLMSR is trampling all over that MSR without
regard for other uses (perf) and doesn't provide the flexibility
needed for perf either.

Its only users are ptrace-block-step and ptrace-bts; ptrace-bts was
never used, and ptrace-block-step can be implemented using a much
simpler approach.

So axe all 3000 lines of it. That includes the *locked_memory*()
APIs in mm/mlock.c as well.

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Roland McGrath <roland@redhat.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Markus Metzger <markus.t.metzger@intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
LKML-Reference: <20100325135413.938004390@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 7c5ecaf766
commit faa4602e47
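The "much simpler approach" the message alludes to is to give block-step
its own thread flag and touch only the one MSR bit it needs, instead of
letting TIF_DEBUGCTLMSR rewrite the whole register. A minimal sketch of
that idea follows, using the standard x86 MSR helpers and the
TIF_BLOCKSTEP flag; the function name switch_blockstep is mine, and this
is an illustration of the technique, not the literal replacement code:

#include <linux/sched.h>
#include <asm/msr.h>
#include <asm/msr-index.h>

/*
 * Toggle only the branch-trap flag (BTF) in DEBUGCTLMSR for the
 * incoming task, leaving every other bit (e.g. perf's BTS/LBR
 * control bits) alone. TIF_BLOCKSTEP is assumed to have been set
 * by the ptrace block-step request.
 */
static void switch_blockstep(struct task_struct *next)
{
        unsigned long debugctl;

        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        debugctl &= ~DEBUGCTLMSR_BTF;
        if (test_tsk_thread_flag(next, TIF_BLOCKSTEP))
                debugctl |= DEBUGCTLMSR_BTF;    /* trap on the next branch */
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}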
@@ -1,302 +0,0 @@
/*
 * Debug Store (DS) support
 *
 * This provides a low-level interface to the hardware's Debug Store
 * feature that is used for branch trace store (BTS) and
 * precise-event based sampling (PEBS).
 *
 * It manages:
 * - DS and BTS hardware configuration
 * - buffer overflow handling (to be done)
 * - buffer access
 *
 * It does not do:
 * - security checking (is the caller allowed to trace the task)
 * - buffer allocation (memory accounting)
 *
 *
 * Copyright (C) 2007-2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
 */

#ifndef _ASM_X86_DS_H
#define _ASM_X86_DS_H


#include <linux/types.h>
#include <linux/init.h>
#include <linux/err.h>


#ifdef CONFIG_X86_DS

struct task_struct;
struct ds_context;
struct ds_tracer;
struct bts_tracer;
struct pebs_tracer;

typedef void (*bts_ovfl_callback_t)(struct bts_tracer *);
typedef void (*pebs_ovfl_callback_t)(struct pebs_tracer *);


/*
 * A list of features plus corresponding macros to talk about them in
 * the ds_request function's flags parameter.
 *
 * We use the enum to index an array of corresponding control bits;
 * we use the macro to index a flags bit-vector.
 */
enum ds_feature {
        dsf_bts = 0,
        dsf_bts_kernel,
#define BTS_KERNEL (1 << dsf_bts_kernel)
        /* trace kernel-mode branches */

        dsf_bts_user,
#define BTS_USER (1 << dsf_bts_user)
        /* trace user-mode branches */

        dsf_bts_overflow,
        dsf_bts_max,
        dsf_pebs = dsf_bts_max,

        dsf_pebs_max,
        dsf_ctl_max = dsf_pebs_max,
        dsf_bts_timestamps = dsf_ctl_max,
#define BTS_TIMESTAMPS (1 << dsf_bts_timestamps)
        /* add timestamps into BTS trace */

#define BTS_USER_FLAGS (BTS_KERNEL | BTS_USER | BTS_TIMESTAMPS)
};


/*
 * Request BTS or PEBS
 *
 * Due to alignment constraints, the actual buffer may be slightly
 * smaller than the requested or provided buffer.
 *
 * Returns a pointer to a tracer structure on success, or
 * ERR_PTR(errcode) on failure.
 *
 * The interrupt threshold is independent from the overflow callback
 * to allow users to use their own overflow interrupt handling mechanism.
 *
 * The function might sleep.
 *
 * task: the task to request recording for
 * cpu: the cpu to request recording for
 * base: the base pointer for the (non-pageable) buffer;
 * size: the size of the provided buffer in bytes
 * ovfl: pointer to a function to be called on buffer overflow;
 *       NULL if cyclic buffer requested
 * th: the interrupt threshold in records from the end of the buffer;
 *     -1 if no interrupt threshold is requested.
 * flags: a bit-mask of the above flags
 */
extern struct bts_tracer *ds_request_bts_task(struct task_struct *task,
                                              void *base, size_t size,
                                              bts_ovfl_callback_t ovfl,
                                              size_t th, unsigned int flags);
extern struct bts_tracer *ds_request_bts_cpu(int cpu, void *base, size_t size,
                                             bts_ovfl_callback_t ovfl,
                                             size_t th, unsigned int flags);
extern struct pebs_tracer *ds_request_pebs_task(struct task_struct *task,
                                                void *base, size_t size,
                                                pebs_ovfl_callback_t ovfl,
                                                size_t th, unsigned int flags);
extern struct pebs_tracer *ds_request_pebs_cpu(int cpu,
                                               void *base, size_t size,
                                               pebs_ovfl_callback_t ovfl,
                                               size_t th, unsigned int flags);

/*
 * Release BTS or PEBS resources
 * Suspend and resume BTS or PEBS tracing
 *
 * Must be called with irq's enabled.
 *
 * tracer: the tracer handle returned from ds_request_~()
 */
extern void ds_release_bts(struct bts_tracer *tracer);
extern void ds_suspend_bts(struct bts_tracer *tracer);
extern void ds_resume_bts(struct bts_tracer *tracer);
extern void ds_release_pebs(struct pebs_tracer *tracer);
extern void ds_suspend_pebs(struct pebs_tracer *tracer);
extern void ds_resume_pebs(struct pebs_tracer *tracer);

/*
 * Release BTS or PEBS resources
 * Suspend and resume BTS or PEBS tracing
 *
 * Cpu tracers must call this on the traced cpu.
 * Task tracers must call ds_release_~_noirq() for themselves.
 *
 * May be called with irq's disabled.
 *
 * Returns 0 if successful;
 * -EPERM if the cpu tracer does not trace the current cpu.
 * -EPERM if the task tracer does not trace itself.
 *
 * tracer: the tracer handle returned from ds_request_~()
 */
extern int ds_release_bts_noirq(struct bts_tracer *tracer);
extern int ds_suspend_bts_noirq(struct bts_tracer *tracer);
extern int ds_resume_bts_noirq(struct bts_tracer *tracer);
extern int ds_release_pebs_noirq(struct pebs_tracer *tracer);
extern int ds_suspend_pebs_noirq(struct pebs_tracer *tracer);
extern int ds_resume_pebs_noirq(struct pebs_tracer *tracer);


/*
 * The raw DS buffer state as it is used for BTS and PEBS recording.
 *
 * This is the low-level, arch-dependent interface for working
 * directly on the raw trace data.
 */
struct ds_trace {
        /* the number of bts/pebs records */
        size_t n;
        /* the size of a bts/pebs record in bytes */
        size_t size;
        /* pointers into the raw buffer:
           - to the first entry */
        void *begin;
        /* - one beyond the last entry */
        void *end;
        /* - one beyond the newest entry */
        void *top;
        /* - the interrupt threshold */
        void *ith;
        /* flags given on ds_request() */
        unsigned int flags;
};

/*
 * An arch-independent view on branch trace data.
 */
enum bts_qualifier {
        bts_invalid,
#define BTS_INVALID bts_invalid

        bts_branch,
#define BTS_BRANCH bts_branch

        bts_task_arrives,
#define BTS_TASK_ARRIVES bts_task_arrives

        bts_task_departs,
#define BTS_TASK_DEPARTS bts_task_departs

        bts_qual_bit_size = 4,
        bts_qual_max = (1 << bts_qual_bit_size),
};

struct bts_struct {
        __u64 qualifier;
        union {
                /* BTS_BRANCH */
                struct {
                        __u64 from;
                        __u64 to;
                } lbr;
                /* BTS_TASK_ARRIVES or BTS_TASK_DEPARTS */
                struct {
                        __u64 clock;
                        pid_t pid;
                } event;
        } variant;
};


/*
 * The BTS state.
 *
 * This gives access to the raw DS state and adds functions to provide
 * an arch-independent view of the BTS data.
 */
struct bts_trace {
        struct ds_trace ds;

        int (*read)(struct bts_tracer *tracer, const void *at,
                    struct bts_struct *out);
        int (*write)(struct bts_tracer *tracer, const struct bts_struct *in);
};


/*
 * The PEBS state.
 *
 * This gives access to the raw DS state and the PEBS-specific counter
 * reset value.
 */
struct pebs_trace {
        struct ds_trace ds;

        /* the number of valid counters in the below array */
        unsigned int counters;

#define MAX_PEBS_COUNTERS 4
        /* the counter reset value */
        unsigned long long counter_reset[MAX_PEBS_COUNTERS];
};


/*
 * Read the BTS or PEBS trace.
 *
 * Returns a view on the trace collected for the parameter tracer.
 *
 * The view remains valid as long as the traced task is not running or
 * the tracer is suspended.
 * Writes into the trace buffer are not reflected.
 *
 * tracer: the tracer handle returned from ds_request_~()
 */
extern const struct bts_trace *ds_read_bts(struct bts_tracer *tracer);
extern const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer);


/*
 * Reset the write pointer of the BTS/PEBS buffer.
 *
 * Returns 0 on success; -Eerrno on error
 *
 * tracer: the tracer handle returned from ds_request_~()
 */
extern int ds_reset_bts(struct bts_tracer *tracer);
extern int ds_reset_pebs(struct pebs_tracer *tracer);

/*
 * Set the PEBS counter reset value.
 *
 * Returns 0 on success; -Eerrno on error
 *
 * tracer: the tracer handle returned from ds_request_pebs()
 * counter: the index of the counter
 * value: the new counter reset value
 */
extern int ds_set_pebs_reset(struct pebs_tracer *tracer,
                             unsigned int counter, u64 value);

/*
 * Initialization
 */
struct cpuinfo_x86;
extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);

/*
 * Context switch work
 */
extern void ds_switch_to(struct task_struct *prev, struct task_struct *next);

#else /* CONFIG_X86_DS */

struct cpuinfo_x86;
static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
static inline void ds_switch_to(struct task_struct *prev,
                                struct task_struct *next) {}

#endif /* CONFIG_X86_DS */
#endif /* _ASM_X86_DS_H */
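For reference, a client of the interface above would have looked roughly
like this: request kernel-mode BTS on a cpu, suspend tracing to get a
stable view, walk the records, and release. Field and function names
follow the declarations in the header just deleted; the helper name
dump_cpu_branches is mine, error handling is abbreviated, and this is a
sketch rather than code that ever lived in the tree:

#include <linux/kernel.h>
#include <linux/err.h>
#include <asm/ds.h>

static int dump_cpu_branches(int cpu, void *buf, size_t size)
{
        struct bts_tracer *tracer;
        const struct bts_trace *trace;
        const unsigned char *at;

        tracer = ds_request_bts_cpu(cpu, buf, size, NULL /* cyclic */,
                                    (size_t)-1 /* no threshold */,
                                    BTS_KERNEL);
        if (IS_ERR(tracer))
                return PTR_ERR(tracer);

        ds_suspend_bts(tracer);         /* stop tracing; view stays valid */
        trace = ds_read_bts(tracer);
        if (!trace || !trace->read)
                goto out;

        /* Walk the raw records from the oldest entry up to 'top'. */
        for (at = trace->ds.begin; (const void *)at < trace->ds.top;
             at += trace->ds.size) {
                struct bts_struct bts;

                if (trace->read(tracer, at, &bts) < 0)
                        continue;
                if (bts.qualifier == BTS_BRANCH)
                        printk(KERN_INFO "branch %llx -> %llx\n",
                               bts.variant.lbr.from, bts.variant.lbr.to);
        }

out:
        ds_release_bts(tracer);
        return 0;
}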
File diff suppressed because it is too large
@@ -1,408 +0,0 @@
/*
 * Debug Store support - selftest
 *
 *
 * Copyright (C) 2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2009
 */

#include "ds_selftest.h"

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/ds.h>


#define BUFFER_SIZE       521 /* Intentionally chosen odd size. */
#define SMALL_BUFFER_SIZE 24  /* A single bts entry. */

struct ds_selftest_bts_conf {
        struct bts_tracer *tracer;
        int error;
        int (*suspend)(struct bts_tracer *);
        int (*resume)(struct bts_tracer *);
};

static int ds_selftest_bts_consistency(const struct bts_trace *trace)
{
        int error = 0;

        if (!trace) {
                printk(KERN_CONT "failed to access trace...");
                /* Bail out. Other tests are pointless. */
                return -1;
        }

        if (!trace->read) {
                printk(KERN_CONT "bts read not available...");
                error = -1;
        }

        /* Do some sanity checks on the trace configuration. */
        if (!trace->ds.n) {
                printk(KERN_CONT "empty bts buffer...");
                error = -1;
        }
        if (!trace->ds.size) {
                printk(KERN_CONT "bad bts trace setup...");
                error = -1;
        }
        if (trace->ds.end !=
            (char *)trace->ds.begin + (trace->ds.n * trace->ds.size)) {
                printk(KERN_CONT "bad bts buffer setup...");
                error = -1;
        }
        /*
         * We allow top in [begin; end], since it's not clear when the
         * overflow adjustment happens: after the increment or before the
         * write.
         */
        if ((trace->ds.top < trace->ds.begin) ||
            (trace->ds.end < trace->ds.top)) {
                printk(KERN_CONT "bts top out of bounds...");
                error = -1;
        }

        return error;
}

static int ds_selftest_bts_read(struct bts_tracer *tracer,
                                const struct bts_trace *trace,
                                const void *from, const void *to)
{
        const unsigned char *at;

        /*
         * Check a few things which do not belong to this test.
         * They should be covered by other tests.
         */
        if (!trace)
                return -1;

        if (!trace->read)
                return -1;

        if (to < from)
                return -1;

        if (from < trace->ds.begin)
                return -1;

        if (trace->ds.end < to)
                return -1;

        if (!trace->ds.size)
                return -1;

        /* Now to the test itself. */
        for (at = from; (void *)at < to; at += trace->ds.size) {
                struct bts_struct bts;
                unsigned long index;
                int error;

                if (((void *)at - trace->ds.begin) % trace->ds.size) {
                        printk(KERN_CONT
                               "read from non-integer index...");
                        return -1;
                }
                index = ((void *)at - trace->ds.begin) / trace->ds.size;

                memset(&bts, 0, sizeof(bts));
                error = trace->read(tracer, at, &bts);
                if (error < 0) {
                        printk(KERN_CONT
                               "error reading bts trace at [%lu] (0x%p)...",
                               index, at);
                        return error;
                }

                switch (bts.qualifier) {
                case BTS_BRANCH:
                        break;
                default:
                        printk(KERN_CONT
                               "unexpected bts entry %llu at [%lu] (0x%p)...",
                               bts.qualifier, index, at);
                        return -1;
                }
        }

        return 0;
}

static void ds_selftest_bts_cpu(void *arg)
{
        struct ds_selftest_bts_conf *conf = arg;
        const struct bts_trace *trace;
        void *top;

        if (IS_ERR(conf->tracer)) {
                conf->error = PTR_ERR(conf->tracer);
                conf->tracer = NULL;

                printk(KERN_CONT
                       "initialization failed (err: %d)...", conf->error);
                return;
        }

        /* We should meanwhile have enough trace. */
        conf->error = conf->suspend(conf->tracer);
        if (conf->error < 0)
                return;

        /* Let's see if we can access the trace. */
        trace = ds_read_bts(conf->tracer);

        conf->error = ds_selftest_bts_consistency(trace);
        if (conf->error < 0)
                return;

        /* If everything went well, we should have a few trace entries. */
        if (trace->ds.top == trace->ds.begin) {
                /*
                 * It is possible but highly unlikely that we got a
                 * buffer overflow and end up at exactly the same
                 * position we started from.
                 * Let's issue a warning, but continue.
                 */
                printk(KERN_CONT "no trace/overflow...");
        }

        /* Let's try to read the trace we collected. */
        conf->error =
                ds_selftest_bts_read(conf->tracer, trace,
                                     trace->ds.begin, trace->ds.top);
        if (conf->error < 0)
                return;

        /*
         * Let's read the trace again.
         * Since we suspended tracing, we should get the same result.
         */
        top = trace->ds.top;

        trace = ds_read_bts(conf->tracer);
        conf->error = ds_selftest_bts_consistency(trace);
        if (conf->error < 0)
                return;

        if (top != trace->ds.top) {
                printk(KERN_CONT "suspend not working...");
                conf->error = -1;
                return;
        }

        /* Let's collect some more trace - see if resume is working. */
        conf->error = conf->resume(conf->tracer);
        if (conf->error < 0)
                return;

        conf->error = conf->suspend(conf->tracer);
        if (conf->error < 0)
                return;

        trace = ds_read_bts(conf->tracer);

        conf->error = ds_selftest_bts_consistency(trace);
        if (conf->error < 0)
                return;

        if (trace->ds.top == top) {
                /*
                 * It is possible but highly unlikely that we got a
                 * buffer overflow and end up at exactly the same
                 * position we started from.
                 * Let's issue a warning and check the full trace.
                 */
                printk(KERN_CONT
                       "no resume progress/overflow...");

                conf->error =
                        ds_selftest_bts_read(conf->tracer, trace,
                                             trace->ds.begin, trace->ds.end);
        } else if (trace->ds.top < top) {
                /*
                 * We had a buffer overflow - the entire buffer should
                 * contain trace records.
                 */
                conf->error =
                        ds_selftest_bts_read(conf->tracer, trace,
                                             trace->ds.begin, trace->ds.end);
        } else {
                /*
                 * It is quite likely that the buffer did not overflow.
                 * Let's just check the delta trace.
                 */
                conf->error =
                        ds_selftest_bts_read(conf->tracer, trace, top,
                                             trace->ds.top);
        }
        if (conf->error < 0)
                return;

        conf->error = 0;
}

static int ds_suspend_bts_wrap(struct bts_tracer *tracer)
{
        ds_suspend_bts(tracer);
        return 0;
}

static int ds_resume_bts_wrap(struct bts_tracer *tracer)
{
        ds_resume_bts(tracer);
        return 0;
}

static void ds_release_bts_noirq_wrap(void *tracer)
{
        (void)ds_release_bts_noirq(tracer);
}

static int ds_selftest_bts_bad_release_noirq(int cpu,
                                             struct bts_tracer *tracer)
{
        int error = -EPERM;

        /* Try to release the tracer on the wrong cpu. */
        get_cpu();
        if (cpu != smp_processor_id()) {
                error = ds_release_bts_noirq(tracer);
                if (error != -EPERM)
                        printk(KERN_CONT "release on wrong cpu...");
        }
        put_cpu();

        return error ? 0 : -1;
}

static int ds_selftest_bts_bad_request_cpu(int cpu, void *buffer)
{
        struct bts_tracer *tracer;
        int error;

        /* Try to request cpu tracing while task tracing is active. */
        tracer = ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE, NULL,
                                    (size_t)-1, BTS_KERNEL);
        error = PTR_ERR(tracer);
        if (!IS_ERR(tracer)) {
                ds_release_bts(tracer);
                error = 0;
        }

        if (error != -EPERM)
                printk(KERN_CONT "cpu/task tracing overlap...");

        return error ? 0 : -1;
}

static int ds_selftest_bts_bad_request_task(void *buffer)
{
        struct bts_tracer *tracer;
        int error;

        /* Try to request task tracing while cpu tracing is active. */
        tracer = ds_request_bts_task(current, buffer, BUFFER_SIZE, NULL,
                                     (size_t)-1, BTS_KERNEL);
        error = PTR_ERR(tracer);
        if (!IS_ERR(tracer)) {
                error = 0;
                ds_release_bts(tracer);
        }

        if (error != -EPERM)
                printk(KERN_CONT "task/cpu tracing overlap...");

        return error ? 0 : -1;
}

int ds_selftest_bts(void)
{
        struct ds_selftest_bts_conf conf;
        unsigned char buffer[BUFFER_SIZE], *small_buffer;
        unsigned long irq;
        int cpu;

        printk(KERN_INFO "[ds] bts selftest...");
        conf.error = 0;

        small_buffer = (unsigned char *)ALIGN((unsigned long)buffer, 8) + 8;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                conf.suspend = ds_suspend_bts_wrap;
                conf.resume = ds_resume_bts_wrap;
                conf.tracer =
                        ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE,
                                           NULL, (size_t)-1, BTS_KERNEL);
                ds_selftest_bts_cpu(&conf);
                if (conf.error >= 0)
                        conf.error = ds_selftest_bts_bad_request_task(buffer);
                ds_release_bts(conf.tracer);
                if (conf.error < 0)
                        goto out;

                conf.suspend = ds_suspend_bts_noirq;
                conf.resume = ds_resume_bts_noirq;
                conf.tracer =
                        ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE,
                                           NULL, (size_t)-1, BTS_KERNEL);
                smp_call_function_single(cpu, ds_selftest_bts_cpu, &conf, 1);
                if (conf.error >= 0) {
                        conf.error =
                                ds_selftest_bts_bad_release_noirq(cpu,
                                                                  conf.tracer);
                        /* We must not release the tracer twice. */
                        if (conf.error < 0)
                                conf.tracer = NULL;
                }
                if (conf.error >= 0)
                        conf.error = ds_selftest_bts_bad_request_task(buffer);
                smp_call_function_single(cpu, ds_release_bts_noirq_wrap,
                                         conf.tracer, 1);
                if (conf.error < 0)
                        goto out;
        }

        conf.suspend = ds_suspend_bts_wrap;
        conf.resume = ds_resume_bts_wrap;
        conf.tracer =
                ds_request_bts_task(current, buffer, BUFFER_SIZE,
                                    NULL, (size_t)-1, BTS_KERNEL);
        ds_selftest_bts_cpu(&conf);
        if (conf.error >= 0)
                conf.error = ds_selftest_bts_bad_request_cpu(0, buffer);
        ds_release_bts(conf.tracer);
        if (conf.error < 0)
                goto out;

        conf.suspend = ds_suspend_bts_noirq;
        conf.resume = ds_resume_bts_noirq;
        conf.tracer =
                ds_request_bts_task(current, small_buffer, SMALL_BUFFER_SIZE,
                                    NULL, (size_t)-1, BTS_KERNEL);
        local_irq_save(irq);
        ds_selftest_bts_cpu(&conf);
        if (conf.error >= 0)
                conf.error = ds_selftest_bts_bad_request_cpu(0, buffer);
        ds_release_bts_noirq(conf.tracer);
        local_irq_restore(irq);
        if (conf.error < 0)
                goto out;

        conf.error = 0;
out:
        put_online_cpus();
        printk(KERN_CONT "%s.\n", (conf.error ? "failed" : "passed"));

        return conf.error;
}

int ds_selftest_pebs(void)
{
        return 0;
}
@@ -1,15 +0,0 @@
/*
 * Debug Store support - selftest
 *
 *
 * Copyright (C) 2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2009
 */

#ifdef CONFIG_X86_DS_SELFTEST
extern int ds_selftest_bts(void);
extern int ds_selftest_pebs(void);
#else
static inline int ds_selftest_bts(void) { return 0; }
static inline int ds_selftest_pebs(void) { return 0; }
#endif
@@ -1,312 +0,0 @@
/*
 * h/w branch tracer for x86 based on BTS
 *
 * Copyright (C) 2008-2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/ds.h>

#include "trace_output.h"
#include "trace.h"


#define BTS_BUFFER_SIZE (1 << 13)

static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);

#define this_tracer per_cpu(hwb_tracer, smp_processor_id())

static int trace_hw_branches_enabled __read_mostly;
static int trace_hw_branches_suspended __read_mostly;
static struct trace_array *hw_branch_trace __read_mostly;


static void bts_trace_init_cpu(int cpu)
{
        per_cpu(hwb_tracer, cpu) =
                ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
                                   BTS_BUFFER_SIZE, NULL, (size_t)-1,
                                   BTS_KERNEL);

        if (IS_ERR(per_cpu(hwb_tracer, cpu)))
                per_cpu(hwb_tracer, cpu) = NULL;
}

static int bts_trace_init(struct trace_array *tr)
{
        int cpu;

        hw_branch_trace = tr;
        trace_hw_branches_enabled = 0;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                bts_trace_init_cpu(cpu);

                if (likely(per_cpu(hwb_tracer, cpu)))
                        trace_hw_branches_enabled = 1;
        }
        trace_hw_branches_suspended = 0;
        put_online_cpus();

        /* If we could not enable tracing on a single cpu, we fail. */
        return trace_hw_branches_enabled ? 0 : -EOPNOTSUPP;
}

static void bts_trace_reset(struct trace_array *tr)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (likely(per_cpu(hwb_tracer, cpu))) {
                        ds_release_bts(per_cpu(hwb_tracer, cpu));
                        per_cpu(hwb_tracer, cpu) = NULL;
                }
        }
        trace_hw_branches_enabled = 0;
        trace_hw_branches_suspended = 0;
        put_online_cpus();
}

static void bts_trace_start(struct trace_array *tr)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                if (likely(per_cpu(hwb_tracer, cpu)))
                        ds_resume_bts(per_cpu(hwb_tracer, cpu));
        trace_hw_branches_suspended = 0;
        put_online_cpus();
}

static void bts_trace_stop(struct trace_array *tr)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                if (likely(per_cpu(hwb_tracer, cpu)))
                        ds_suspend_bts(per_cpu(hwb_tracer, cpu));
        trace_hw_branches_suspended = 1;
        put_online_cpus();
}

static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                /* The notification is sent with interrupts enabled. */
                if (trace_hw_branches_enabled) {
                        bts_trace_init_cpu(cpu);

                        if (trace_hw_branches_suspended &&
                            likely(per_cpu(hwb_tracer, cpu)))
                                ds_suspend_bts(per_cpu(hwb_tracer, cpu));
                }
                break;

        case CPU_DOWN_PREPARE:
                /* The notification is sent with interrupts enabled. */
                if (likely(per_cpu(hwb_tracer, cpu))) {
                        ds_release_bts(per_cpu(hwb_tracer, cpu));
                        per_cpu(hwb_tracer, cpu) = NULL;
                }
        }

        return NOTIFY_DONE;
}

static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
        .notifier_call = bts_hotcpu_handler
};

static void bts_trace_print_header(struct seq_file *m)
{
        seq_puts(m, "# CPU#        TO  <-  FROM\n");
}

static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
{
        unsigned long symflags = TRACE_ITER_SYM_OFFSET;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *seq = &iter->seq;
        struct hw_branch_entry *it;

        trace_assign_type(it, entry);

        if (entry->type == TRACE_HW_BRANCHES) {
                if (trace_seq_printf(seq, "%4d  ", iter->cpu) &&
                    seq_print_ip_sym(seq, it->to, symflags) &&
                    trace_seq_printf(seq, "\t  <-  ") &&
                    seq_print_ip_sym(seq, it->from, symflags) &&
                    trace_seq_printf(seq, "\n"))
                        return TRACE_TYPE_HANDLED;
                return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_UNHANDLED;
}

void trace_hw_branch(u64 from, u64 to)
{
        struct ftrace_event_call *call = &event_hw_branch;
        struct trace_array *tr = hw_branch_trace;
        struct ring_buffer_event *event;
        struct ring_buffer *buf;
        struct hw_branch_entry *entry;
        unsigned long irq1;
        int cpu;

        if (unlikely(!tr))
                return;

        if (unlikely(!trace_hw_branches_enabled))
                return;

        local_irq_save(irq1);
        cpu = raw_smp_processor_id();
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;

        buf = tr->buffer;
        event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, from);
        entry->ent.type = TRACE_HW_BRANCHES;
        entry->from = from;
        entry->to = to;
        if (!filter_check_discard(call, entry, buf, event))
                trace_buffer_unlock_commit(buf, event, 0, 0);

out:
        atomic_dec(&tr->data[cpu]->disabled);
        local_irq_restore(irq1);
}

static void trace_bts_at(const struct bts_trace *trace, void *at)
{
        struct bts_struct bts;
        int err = 0;

        WARN_ON_ONCE(!trace->read);
        if (!trace->read)
                return;

        err = trace->read(this_tracer, at, &bts);
        if (err < 0)
                return;

        switch (bts.qualifier) {
        case BTS_BRANCH:
                trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
                break;
        }
}

/*
 * Collect the trace on the current cpu and write it into the ftrace buffer.
 *
 * pre: tracing must be suspended on the current cpu
 */
static void trace_bts_cpu(void *arg)
{
        struct trace_array *tr = (struct trace_array *)arg;
        const struct bts_trace *trace;
        unsigned char *at;

        if (unlikely(!tr))
                return;

        if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
                return;

        if (unlikely(!this_tracer))
                return;

        trace = ds_read_bts(this_tracer);
        if (!trace)
                return;

        for (at = trace->ds.top; (void *)at < trace->ds.end;
             at += trace->ds.size)
                trace_bts_at(trace, at);

        for (at = trace->ds.begin; (void *)at < trace->ds.top;
             at += trace->ds.size)
                trace_bts_at(trace, at);
}

static void trace_bts_prepare(struct trace_iterator *iter)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                if (likely(per_cpu(hwb_tracer, cpu)))
                        ds_suspend_bts(per_cpu(hwb_tracer, cpu));
        /*
         * We need to collect the trace on the respective cpu since ftrace
         * implicitly adds the record for the current cpu.
         * Once that is more flexible, we could collect the data from any cpu.
         */
        on_each_cpu(trace_bts_cpu, iter->tr, 1);

        for_each_online_cpu(cpu)
                if (likely(per_cpu(hwb_tracer, cpu)))
                        ds_resume_bts(per_cpu(hwb_tracer, cpu));
        put_online_cpus();
}

static void trace_bts_close(struct trace_iterator *iter)
{
        tracing_reset_online_cpus(iter->tr);
}

void trace_hw_branch_oops(void)
{
        if (this_tracer) {
                ds_suspend_bts_noirq(this_tracer);
                trace_bts_cpu(hw_branch_trace);
                ds_resume_bts_noirq(this_tracer);
        }
}

struct tracer bts_tracer __read_mostly =
{
        .name           = "hw-branch-tracer",
        .init           = bts_trace_init,
        .reset          = bts_trace_reset,
        .print_header   = bts_trace_print_header,
        .print_line     = bts_trace_print_line,
        .start          = bts_trace_start,
        .stop           = bts_trace_stop,
        .open           = trace_bts_prepare,
        .close          = trace_bts_close,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_hw_branches,
#endif /* CONFIG_FTRACE_SELFTEST */
};

__init static int init_bts_trace(void)
{
        register_hotcpu_notifier(&bts_hotcpu_notifier);
        return register_tracer(&bts_tracer);
}
device_initcall(init_bts_trace);
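With this code gone, the same silicon is reached through perf instead,
per the first line of the commit message. As a rough userspace sketch of
my own (not part of this commit): as I understand the v2.6.32+ x86 perf
code, it switches to BTS transparently when a hardware branch-instructions
event is opened with a sample period of 1, delivering the branch source
and target as PERF_SAMPLE_IP and PERF_SAMPLE_ADDR samples. The helper
name open_branch_trace is mine:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open a per-task branch-tracing event; the kernel picks the BTS
 * fast path because sample_period == 1. Returns a perf fd whose
 * mmap'ed ring buffer carries one sample per retired branch. */
static int open_branch_trace(pid_t pid)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
        attr.sample_period = 1;                 /* every branch */
        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

        return syscall(__NR_perf_event_open, &attr, pid,
                       -1 /* any cpu */, -1 /* no group */, 0);
}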