@@ -782,6 +782,10 @@ retry:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
 	event->oncpu = smp_processor_id();
+
+	/*
+	 * Unthrottle events, since we scheduled we might have missed several
+	 * ticks already, also for a heavily scheduling task there is little
+	 * guarantee it'll get a tick in a timely manner.
+	 */
+	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+		perf_log_throttle(event, 1);
+		event->hw.interrupts = 0;
+	}
+
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	}
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
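
For context (not part of the patch): as I read the surrounding perf code, the PMU overflow path parks event->hw.interrupts at MAX_INTERRUPTS once an event has used up its sampling budget for the current tick, and the hunk added to event_sched_in() above logs an unthrottle and clears that counter when the event is scheduled back in, so a task that rarely sees a timer tick is not left throttled indefinitely. The userspace-only sketch below illustrates that state machine under those assumptions; fake_hw, fake_event_overflow(), fake_event_sched_in() and max_per_tick are made-up names for illustration, not kernel APIs.

#include <stdio.h>

#define MAX_INTERRUPTS (~0ULL)

/* Made-up stand-in for the few perf_event::hw fields the patch touches. */
struct fake_hw {
	unsigned long long interrupts;   /* samples this tick, MAX_INTERRUPTS = throttled */
	unsigned long long max_per_tick; /* per-tick sampling budget (assumed threshold)  */
};

/*
 * Simulated PMU interrupt path: count the sample and throttle the event
 * once the per-tick budget is used up (roughly what the overflow handler
 * does when it sets hw.interrupts to MAX_INTERRUPTS).
 */
static int fake_event_overflow(struct fake_hw *hw)
{
	if (hw->interrupts == MAX_INTERRUPTS)
		return 1;                        /* already throttled */

	if (++hw->interrupts >= hw->max_per_tick) {
		hw->interrupts = MAX_INTERRUPTS; /* mark as throttled */
		printf("throttle\n");            /* ~ perf_log_throttle(event, 0) */
		return 1;
	}
	return 0;
}

/*
 * Mirrors the hunk added to event_sched_in(): if the event was left in the
 * throttled state, log the unthrottle and reset the counter so it can sample
 * again without waiting for the next timer tick.
 */
static void fake_event_sched_in(struct fake_hw *hw)
{
	if (hw->interrupts == MAX_INTERRUPTS) {
		printf("unthrottle\n");          /* ~ perf_log_throttle(event, 1) */
		hw->interrupts = 0;
	}
}

int main(void)
{
	struct fake_hw hw = { .interrupts = 0, .max_per_tick = 2 };

	fake_event_overflow(&hw);  /* 1st sample: within budget         */
	fake_event_overflow(&hw);  /* 2nd sample: budget hit, throttled */
	fake_event_sched_in(&hw);  /* scheduled back in: unthrottled    */
	return 0;
}

Running the sketch prints "throttle" once the simulated budget is exceeded and "unthrottle" when the simulated sched-in resets the counter, mirroring the perf_log_throttle(event, 0) / perf_log_throttle(event, 1) pairing the patch relies on.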