Convert x86 to use a per-cpu preemption count. The reason for doing so is that accessing per-cpu variables is a lot cheaper than accessing thread_info variables.

We still need to save/restore the actual preemption count due to PREEMPT_ACTIVE, so we place the per-cpu __preempt_count variable in the same cache-line as the other hot __switch_to() variables such as current_task.

NOTE: this save/restore is required even for !PREEMPT kernels as cond_resched() also relies on preempt_count's PREEMPT_ACTIVE to ignore task_struct::state.

Also rename thread_info::preempt_count to ensure nobody is 'accidentally' still poking at it.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-gzn5rfsf8trgjoqx8hyayy3q@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
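As background for the save/restore described above, here is a minimal sketch (an assumption for illustration, not code from this commit) of what the context-switch side could look like, using the renamed thread_info::saved_preempt_count field from the header below together with the generic this_cpu_read()/this_cpu_write() accessors; the helper name switch_preempt_count() is hypothetical.

/* Hypothetical helper sketching the __switch_to()-time save/restore. */
static inline void switch_preempt_count(struct task_struct *prev_p,
                                        struct task_struct *next_p)
{
        /*
         * Stash the outgoing task's count; with PREEMPT_ACTIVE (and
         * cond_resched() on !PREEMPT) prev and next may carry different
         * counts, so the per-cpu value alone is not enough.
         */
        task_thread_info(prev_p)->saved_preempt_count =
                this_cpu_read(__preempt_count);
        /* Install the incoming task's count into the hot per-cpu variable. */
        this_cpu_write(__preempt_count,
                       task_thread_info(next_p)->saved_preempt_count);
}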
parent a233f1120c
commit c2daa3bed5
@@ -0,0 +1,98 @@
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <linux/thread_info.h>

DECLARE_PER_CPU(int, __preempt_count);

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
        return __this_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
        __this_cpu_write_4(__preempt_count, pc);
}

/*
 * must be macros to avoid header recursion hell
 */
#define task_preempt_count(p) \
        (task_thread_info(p)->saved_preempt_count & ~PREEMPT_NEED_RESCHED)

#define init_task_preempt_count(p) do { \
        task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
        task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
        per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
        __this_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
        __this_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
        return !(__this_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
        __this_cpu_add_4(__preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
        __this_cpu_add_4(__preempt_count, -val);
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
        GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
}

/*
 * Returns true when we need to resched -- even if we can not.
 */
static __always_inline bool need_resched(void)
{
        return unlikely(test_preempt_need_resched());
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(void)
{
        return unlikely(!__this_cpu_read_4(__preempt_count));
}

#endif /* __ASM_PREEMPT_H */
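To illustrate the payoff of folding the inverted NEED_RESCHED bit into the count, here is a hedged sketch of how a generic preempt_enable() could sit on top of __preempt_count_dec_and_test(); the macro body is an assumption for illustration and is not part of this patch.

/*
 * Sketch only: because PREEMPT_NEED_RESCHED is kept inverted, the count
 * reaches zero exactly when the last preempt disable goes away AND a
 * reschedule is pending, so one decrement-and-test covers both checks.
 */
#define preempt_enable() \
do { \
        barrier(); \
        if (unlikely(__preempt_count_dec_and_test())) \
                preempt_schedule(); \
} while (0)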