/*
 * bitops.c: atomic operations which got too long to be inlined all over
 *	the place.
 *
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Grant Grundler (grundler@cup.hp.com)
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/atomic.h>

#ifdef CONFIG_SMP
/* Hashed locks used to serialize atomic and bitops updates on SMP. */
raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
};
#endif
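
/*
 * How callers reach these locks: on SMP, _atomic_spin_lock_irqsave()
 * (defined in asm/atomic.h) hashes the target address into
 * __atomic_hash[].  A minimal sketch of such a hash, assuming
 * cacheline-granular bucketing -- illustrative, not necessarily the
 * exact macro used by this tree:
 *
 *	#define ATOMIC_HASH(a) \
 *		(&__atomic_hash[(((unsigned long)(a)) / L1_CACHE_BYTES) & \
 *				(ATOMIC_HASH_SIZE - 1)])
 *
 * Hashing by address spreads contention: concurrent updates to
 * unrelated words usually take different locks.
 */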

#ifdef __LP64__
unsigned long __xchg64(unsigned long x, unsigned long *ptr)
{
	unsigned long temp, flags;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = *ptr;
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return temp;
}
#endif
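
/*
 * For context: a generic xchg() macro dispatches on operand size to the
 * helpers in this file.  A sketch of that dispatch, assuming the usual
 * size-switch pattern (its presumed home is asm/system.h; see the real
 * header for the authoritative version):
 *
 *	switch (size) {
 *	case 8: return __xchg64(x, (unsigned long *) ptr);  // 64-bit only
 *	case 4: return __xchg32((int) x, (int *) ptr);
 *	case 1: return __xchg8((char) x, (char *) ptr);
 *	}
 */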

unsigned long __xchg32(int x, int *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;	/* XXX - sign extension wanted? */
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}

unsigned long __xchg8(char x, char *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;	/* XXX - sign extension wanted? */
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}

#ifdef __LP64__
/* Compare-and-exchange: store new only if *ptr still equals old. */
unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
{
	unsigned long flags;
	unsigned long prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return prev;
}
#endif

unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)prev;
}
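
/*
 * Usage sketch (hypothetical caller, not part of this file): the
 * classic compare-and-swap retry loop built on the helper above.
 * `counter' is an assumed example variable:
 *
 *	extern unsigned int *counter;	// hypothetical target word
 *	unsigned int old, new;
 *
 *	do {
 *		old = *counter;
 *		new = old + 1;
 *	} while (__cmpxchg_u32(counter, old, new) != old);
 */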