kernel_samsung_sm7125/include/asm-parisc/processor.h

/*
* include/asm-parisc/processor.h
*
* Copyright (C) 1994 Linus Torvalds
* Copyright (C) 2001 Grant Grundler
*/
#ifndef __ASM_PARISC_PROCESSOR_H
#define __ASM_PARISC_PROCESSOR_H
#ifndef __ASSEMBLY__
#include <linux/threads.h>
#include <linux/spinlock_types.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/system.h>
#endif /* __ASSEMBLY__ */
#define KERNEL_STACK_SIZE (4*PAGE_SIZE)
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
#ifdef CONFIG_PA20
#define current_ia(x) __asm__("mfia %0" : "=r"(x))
#else /* mfia added in pa2.0 */
#define current_ia(x) __asm__("blr 0,%0\n\tnop" : "=r"(x))
#endif
#define current_text_addr() ({ void *pc; current_ia(pc); pc; })
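/*
 * Usage sketch (hypothetical caller, not part of this header): grab the
 * address of the currently executing instruction for a debug trace.  On
 * PA2.0 current_ia() expands to a single mfia; on older CPUs it falls
 * back to the blr/nop sequence above.
 *
 *	void *pc = current_text_addr();
 *	printk(KERN_DEBUG "executing near %p\n", pc);
 */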
#define TASK_SIZE (current->thread.task_size)
#define TASK_UNMAPPED_BASE (current->thread.map_base)
#define DEFAULT_TASK_SIZE32 (0xFFF00000UL)
#define DEFAULT_MAP_BASE32 (0x40000000UL)
#ifdef __LP64__
#define DEFAULT_TASK_SIZE (MAX_ADDRESS-0xf000000)
#define DEFAULT_MAP_BASE (0x200000000UL)
#else
#define DEFAULT_TASK_SIZE DEFAULT_TASK_SIZE32
#define DEFAULT_MAP_BASE DEFAULT_MAP_BASE32
#endif
#ifndef __ASSEMBLY__
/*
* Data detected about CPUs at boot time which is the same for all CPUs.
* HP boxes are SMP - i.e. identical processors.
*
* FIXME: some CPU rev info may be processor specific...
*/
struct system_cpuinfo_parisc {
	unsigned int	cpu_count;
	unsigned int	cpu_hz;
	unsigned int	hversion;
	unsigned int	sversion;
	enum cpu_type	cpu_type;

	struct {
		struct pdc_model model;
		unsigned long versions;
		unsigned long cpuid;
		unsigned long capabilities;
		char sys_model_name[81];	/* PDC-ROM returns this model name */
	} pdc;

	char	*cpu_name;	/* e.g. "PA7300LC (PCX-L2)" */
	char	*family_name;	/* e.g. "1.1e" */
};
/* Per CPU data structure - i.e. varies per CPU. */
struct cpuinfo_parisc {
	unsigned long it_value;		/* Interval Timer at last timer Intr */
	unsigned long it_delta;		/* Interval delta (tic_10ms / HZ * 100) */
	unsigned long irq_count;	/* number of IRQs since boot */
	unsigned long irq_max_cr16;	/* longest time to handle a single IRQ */
	unsigned long cpuid;		/* aka slot_number or set to NO_PROC_ID */
	unsigned long hpa;		/* Host Physical address */
	unsigned long txn_addr;		/* MMIO addr of EIR or id_eid */
#ifdef CONFIG_SMP
	spinlock_t lock;		/* synchronization for IPIs */
	unsigned long pending_ipi;	/* bitmap of type ipi_message_type */
	unsigned long ipi_count;	/* number of IPI interrupts */
#endif
	unsigned long bh_count;		/* number of times bh was invoked */
	unsigned long prof_counter;	/* per CPU profiling support */
	unsigned long prof_multiplier;	/* per CPU profiling support */
	unsigned long fp_rev;
	unsigned long fp_model;
	unsigned int state;
	struct parisc_device *dev;
	unsigned long loops_per_jiffy;
};
extern struct system_cpuinfo_parisc boot_cpu_data;
extern struct cpuinfo_parisc cpu_data[NR_CPUS];
#define current_cpu_data cpu_data[smp_processor_id()]
#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
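/*
 * Worked example (illustrative value only): CPU_HVERSION drops the low
 * four revision bits of the PDC-reported hversion, so a raw hversion of
 * 0x5e10 yields a CPU_HVERSION of 0x5e1.
 */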
typedef struct {
	int seg;
} mm_segment_t;
#define ARCH_MIN_TASKALIGN 8
struct thread_struct {
	struct pt_regs regs;
	unsigned long task_size;
	unsigned long map_base;
	unsigned long flags;
};
/* Thread struct flags. */
#define PARISC_UAC_NOPRINT (1UL << 0) /* see prctl and unaligned.c */
#define PARISC_UAC_SIGBUS (1UL << 1)
#define PARISC_KERNEL_DEATH (1UL << 31) /* see die_if_kernel()... */
#define PARISC_UAC_SHIFT 0
#define PARISC_UAC_MASK (PARISC_UAC_NOPRINT|PARISC_UAC_SIGBUS)
#define SET_UNALIGN_CTL(task, value) \
	({ \
		(task)->thread.flags = (((task)->thread.flags & ~PARISC_UAC_MASK) \
					| (((value) << PARISC_UAC_SHIFT) & \
					   PARISC_UAC_MASK)); \
		0; \
	})
#define GET_UNALIGN_CTL(task, addr) \
	({ \
		put_user(((task)->thread.flags & PARISC_UAC_MASK) \
			 >> PARISC_UAC_SHIFT, (int __user *)(addr)); \
	})
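/*
 * Usage sketch (hypothetical user-space caller, assuming <sys/prctl.h>):
 * these macros back prctl(PR_SET_UNALIGN)/prctl(PR_GET_UNALIGN), so a
 * process can choose how its unaligned accesses are handled:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);   // deliver SIGBUS instead of fixing up
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT);  // fix up silently, no warning in the log
 */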
#define INIT_THREAD { \
	.regs = {	.gr	= { 0, }, \
			.fr	= { 0, }, \
			.sr	= { 0, }, \
			.iasq	= { 0, }, \
			.iaoq	= { 0, }, \
			.cr27	= 0, \
		}, \
	.task_size	= DEFAULT_TASK_SIZE, \
	.map_base	= DEFAULT_MAP_BASE, \
	.flags		= 0 \
}
/*
* Return saved PC of a blocked thread. This is used by ps mostly.
*/
unsigned long thread_saved_pc(struct task_struct *t);
void show_trace(struct task_struct *task, unsigned long *stack);
/*
* Start user thread in another space.
*
* Note that we set both the iaoq and r31 to the new pc. When
* the kernel initially calls execve it will return through an
* rfi path that will use the values in the iaoq. The execve
* syscall path will return through the gateway page, which
* uses r31 as the branch target.
*
* For ELF we clear r23, because the dynamic linker uses it to pass
* the address of the finalizer function.
*
* We also initialize sr3 to an illegal value (illegal for our
* implementation, not for the architecture).
*/
typedef unsigned int elf_caddr_t;
#define start_thread_som(regs, new_pc, new_sp) do { \
	unsigned long *sp = (unsigned long *)new_sp; \
	__u32 spaceid = (__u32)current->mm->context; \
	unsigned long pc = (unsigned long)new_pc; \
	/* offset pc for priv. level */ \
	pc |= 3; \
	\
	set_fs(USER_DS); \
	regs->iasq[0] = spaceid; \
	regs->iasq[1] = spaceid; \
	regs->iaoq[0] = pc; \
	regs->iaoq[1] = pc + 4; \
	regs->sr[2] = LINUX_GATEWAY_SPACE; \
	regs->sr[3] = 0xffff; \
	regs->sr[4] = spaceid; \
	regs->sr[5] = spaceid; \
	regs->sr[6] = spaceid; \
	regs->sr[7] = spaceid; \
	regs->gr[ 0] = USER_PSW; \
	regs->gr[30] = ((new_sp) + 63) & ~63; \
	regs->gr[31] = pc; \
	\
	get_user(regs->gr[26], &sp[0]); \
	get_user(regs->gr[25], &sp[-1]); \
	get_user(regs->gr[24], &sp[-2]); \
	get_user(regs->gr[23], &sp[-3]); \
} while(0)
/* The ELF ABI wants things done a "wee bit" differently than
* SOM does. Supporting this behavior here avoids
* having our own version of create_elf_tables.
*
* Oh, and yes, that is not a typo, we are really passing argc in r25
* and argv in r24 (rather than r26 and r25). This is because that's
* where __libc_start_main wants them.
*
* Duplicated from dl-machine.h for the benefit of readers:
*
* Our initial stack layout is rather different from everyone else's
* due to the unique PA-RISC ABI. As far as I know it looks like
* this:
----------------------------------- (user startup code creates this frame)
| 32 bytes of magic |
|---------------------------------|
| 32 bytes argument/sp save area |
|---------------------------------| (bprm->p)
| ELF auxiliary info |
| (up to 28 words) |
|---------------------------------|
| NULL |
|---------------------------------|
| Environment pointers |
|---------------------------------|
| NULL |
|---------------------------------|
| Argument pointers |
|---------------------------------| <- argv
| argc (1 word) |
|---------------------------------| <- bprm->exec (HACK!)
| N bytes of slack |
|---------------------------------|
| filename passed to execve |
|---------------------------------| (mm->env_end)
| env strings |
|---------------------------------| (mm->env_start, mm->arg_end)
| arg strings |
|---------------------------------|
| additional faked arg strings if |
| we're invoked via binfmt_script |
|---------------------------------| (mm->arg_start)
The stack base is at TASK_SIZE - rlim_max.
On downward-growing arches, it looks like this:
stack base at TASK_SIZE
| filename passed to execve
| env strings
| arg strings
| faked arg strings
| slack
| ELF
| envps
| argvs
| argc
* The pleasant part of this is that if we need to skip arguments we
* can just decrement argc and move argv, because the stack pointer
* is utterly unrelated to the location of the environment and
* argument vectors.
*
* Note that the S/390 people took the easy way out and hacked their
* GCC to make the stack grow downwards.
*
* Final Note: For entry from syscall, the W (wide) bit of the PSW
* is stuffed into the lowest bit of the user sp (%r30), so we fill
* it in here from the current->personality
*/
#ifdef __LP64__
#define USER_WIDE_MODE (personality(current->personality) == PER_LINUX)
#else
#define USER_WIDE_MODE 0
#endif
#define start_thread(regs, new_pc, new_sp) do { \
	elf_addr_t *sp = (elf_addr_t *)new_sp; \
	__u32 spaceid = (__u32)current->mm->context; \
	elf_addr_t pc = (elf_addr_t)new_pc | 3; \
	elf_caddr_t *argv = (elf_caddr_t *)bprm->exec + 1; \
	\
	set_fs(USER_DS); \
	regs->iasq[0] = spaceid; \
	regs->iasq[1] = spaceid; \
	regs->iaoq[0] = pc; \
	regs->iaoq[1] = pc + 4; \
	regs->sr[2] = LINUX_GATEWAY_SPACE; \
	regs->sr[3] = 0xffff; \
	regs->sr[4] = spaceid; \
	regs->sr[5] = spaceid; \
	regs->sr[6] = spaceid; \
	regs->sr[7] = spaceid; \
	regs->gr[ 0] = USER_PSW | (USER_WIDE_MODE ? PSW_W : 0); \
	regs->fr[ 0] = 0LL; \
	regs->fr[ 1] = 0LL; \
	regs->fr[ 2] = 0LL; \
	regs->fr[ 3] = 0LL; \
	regs->gr[30] = (((unsigned long)sp + 63) & ~63) | (USER_WIDE_MODE ? 1 : 0); \
	regs->gr[31] = pc; \
	\
	get_user(regs->gr[25], (argv - 1)); \
	regs->gr[24] = (long) argv; \
	regs->gr[23] = 0; \
} while(0)
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)
extern void map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm);
extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.regs.iaoq[0])
#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
/*
* PA 2.0 defines data prefetch instructions on page 6-11 of the Kane book.
* In addition, many implementations do hardware prefetching of both
* instructions and data.
*
* PA7300LC (page 14-4 of the ERS) also implements prefetching by a load
* to gr0 but not in a way that Linux can use. If the load would cause an
* interruption (eg due to prefetching 0), it is suppressed on PA2.0
* processors, but not on 7300LC.
*/
#ifdef CONFIG_PREFETCH
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
extern inline void prefetch(const void *addr)
{
	__asm__("ldw 0(%0), %%r0" : : "r" (addr));
}

extern inline void prefetchw(const void *addr)
{
	__asm__("ldd 0(%0), %%r0" : : "r" (addr));
}
#endif
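/*
 * Usage sketch (hypothetical caller; p, head and do_something() are made
 * up for illustration): the usual pattern is to start fetching the next
 * node while still working on the current one, e.g. when walking a singly
 * linked list.  The NULL check matters because of the PA7300LC caveat
 * described above.
 *
 *	for (p = head; p != NULL; p = p->next) {
 *		if (p->next)
 *			prefetch(p->next);
 *		do_something(p);
 *	}
 */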
#define cpu_relax() barrier()
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PARISC_PROCESSOR_H */