-----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.12 (GNU/Linux) iQIcBAABAgAGBQJS4dWNAAoJEI9vqH3mFV2sargP/RCdVvdJm3TxYcNYoKAw6kTs wn1AWQ1vnu77YIoG2YamWytakahvB5v4Elm5UbHc49eCIFb/+wJpvxLNKbAtY0EP bRllFETalWnPnZO0R0tTvEikpDfzM7/OGYCjuOmTeyata6/KOxVsusTjvar28NPA gVp9BwHKZyk5NORkvW3RBRbfga9slUXqbg4rPhD3gFxLpe8aGuO6T6HazBfRadQM pI0zq2VE59LSEyuZrreHiq5EYvmp+eBFR8bJhQp1lnpKc4lMJm5JKYPY+jlPPtu0 bPYLexjb42Gz2KbDtqLqTnd6GEPj50RS15tr9BTX9n3uCd0sIBO46K5FXVy+ALxr DXbyU6R1miZ01Il0A2IuC2j1cVdXb7nSytP35lLRQ9WCXua4HwQO0LL9KPAf9N0X WKlNPEkOr9XR9lO/1JcYVZgsygF/hHGgImf5AEfTVdtWpAj3bhMBhqFuKo0/h6vr 7PwQCTaiL2Hks+d8YS2VyrQy3DMg48MbYmfdriJQFKKzhdyHEhvlnawP5uoIVfAC s43cvj6EsbUdRULf5qKDupQFJpM15qpmYiLizHFIfUTiTQGI0WMaA76rZMqZ2AsQ z0q2edzUV8nBQFdR74eKKPF1QJyXihiaXzug+SZqPdvXKfjPrF+5eZ7/EvuaEBZy qS7PIt5nkTeZ4stP/SWV =JT1d -----END PGP SIGNATURE----- Merge tag 'xtensa-next-20140123' of git://github.com/czankel/xtensa-linux Pull Xtensa patches from Chris Zankel: "The major changes are adding support for SMP for Xtensa, fixing and cleaning up the ISS (simulator) network driver, and better support for device trees" * tag 'xtensa-next-20140123' of git://github.com/czankel/xtensa-linux: (40 commits) xtensa: implement ndelay xtensa: clean up udelay xtensa: enable HAVE_PERF_EVENTS xtensa: remap io area defined in device tree xtensa: support default device tree buses xtensa: initialize device tree clock sources xtensa: xtfpga: fix definitions of platform devices xtensa: standardize devicetree cpu compatible strings xtensa: avoid duplicate of IO range definitions xtensa: fix ATOMCTL register documentation xtensa: Enable irqs after cpu is set online xtensa: ISS: raise network polling rate to 10 times/sec xtensa: remove unused XTENSA_ISS_NETWORK Kconfig parameter xtensa: ISS: avoid simple_strtoul usage xtensa: Switch to sched_clock_register() xtensa: implement CPU hotplug xtensa: add SMP support xtensa: add MX irqchip xtensa: clear timer IRQ unconditionally in its handler xtensa: clean up 
do_interrupt/do_IRQ ...tirimbino
commit
9b83d851a2
@ -0,0 +1,147 @@ |
||||
/*
|
||||
* Atomic futex routines |
||||
* |
||||
* Based on the PowerPC implementataion |
||||
* |
||||
* This program is free software; you can redistribute it and/or modify |
||||
* it under the terms of the GNU General Public License version 2 as |
||||
* published by the Free Software Foundation. |
||||
* |
||||
* Copyright (C) 2013 TangoTec Ltd. |
||||
* |
||||
* Baruch Siach <baruch@tkos.co.il> |
||||
*/ |
||||
|
||||
#ifndef _ASM_XTENSA_FUTEX_H |
||||
#define _ASM_XTENSA_FUTEX_H |
||||
|
||||
#ifdef __KERNEL__ |
||||
|
||||
#include <linux/futex.h> |
||||
#include <linux/uaccess.h> |
||||
#include <linux/errno.h> |
||||
|
||||
/*
 * Perform futex operation @insn atomically on the user word at @uaddr
 * using an S32C1I compare-and-swap retry loop:
 *   1: load the old value into %0,
 *      run @insn to compute the new value into %1 (reads @oparg as %4),
 *      arm SCOMPARE1 with the old value,
 *   2: s32c1i stores %1 only if the word still equals SCOMPARE1; on
 *      contention %1 receives the current value and we retry from 1.
 * A fault on either user access is routed through the .fixup/__ex_table
 * entries to label 5, which returns -EFAULT in @ret; on success @ret
 * is 0 and @oldval holds the previous value of the word.
 */
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
	__asm__ __volatile( \
	"1:	l32i	%0, %2, 0\n" \
		insn "\n" \
	"	wsr	%0, scompare1\n" \
	"2:	s32c1i	%1, %2, 0\n" \
	"	bne	%1, %0, 1b\n" \
	"	movi	%1, 0\n" \
	"3:\n" \
	"	.section .fixup,\"ax\"\n" \
	"	.align 4\n" \
	"4:	.long	3b\n" \
	"5:	l32r	%0, 4b\n" \
	"	movi	%1, %3\n" \
	"	jx	%0\n" \
	"	.previous\n" \
	"	.section __ex_table,\"a\"\n" \
	"	.long	1b,5b,2b,5b\n" \
	"	.previous\n" \
	: "=&r" (oldval), "=&r" (ret) \
	: "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
	: "memory")
||||
|
||||
/*
 * Decode and execute an encoded futex operation on the user word at
 * @uaddr, then evaluate the encoded comparison against the old value.
 * Returns the comparison result (0/1), or a negative errno:
 * -EFAULT if @uaddr is not writable user memory, -ENOSYS if the core
 * lacks S32C1I or the op/cmp code is unknown.
 */
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;	/* FUTEX_OP_* opcode */
	int cmp = (encoded_op >> 24) & 15;	/* FUTEX_OP_CMP_* code */
	int oparg = (encoded_op << 8) >> 20;	/* sign-extended 12-bit arg */
	int cmparg = (encoded_op << 20) >> 20;	/* sign-extended 12-bit arg */
	int oldval = 0, ret;
	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;	/* argument encodes a shift count */

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

#if !XCHAL_HAVE_S32C1I
	/* No compare-and-swap instruction: atomic futex ops unsupported. */
	return -ENOSYS;
#endif

	/* User access below runs with pagefaults disabled; a fault takes
	 * the fixup path inside __futex_atomic_op and yields -EFAULT. */
	pagefault_disable();

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("mov %1, %4", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("add %1, %0, %4", ret, oldval, uaddr,
				oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("or %1, %0, %4", ret, oldval, uaddr,
				oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("and %1, %0, %4", ret, oldval, uaddr,
				~oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("xor %1, %0, %4", ret, oldval, uaddr,
				oparg);
		break;
	default:
		ret = -ENOSYS;
	}

	pagefault_enable();

	if (ret)
		return ret;

	/* Compare the pre-operation value against cmparg as requested. */
	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
	case FUTEX_OP_CMP_NE: return (oldval != cmparg);
	case FUTEX_OP_CMP_LT: return (oldval < cmparg);
	case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
	case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
	case FUTEX_OP_CMP_GT: return (oldval > cmparg);
	}

	return -ENOSYS;
}
||||
|
||||
static inline int |
||||
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
||||
u32 oldval, u32 newval) |
||||
{ |
||||
int ret = 0; |
||||
u32 prev; |
||||
|
||||
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
||||
return -EFAULT; |
||||
|
||||
#if !XCHAL_HAVE_S32C1I |
||||
return -ENOSYS; |
||||
#endif |
||||
|
||||
__asm__ __volatile__ ( |
||||
" # futex_atomic_cmpxchg_inatomic\n" |
||||
"1: l32i %1, %3, 0\n" |
||||
" mov %0, %5\n" |
||||
" wsr %1, scompare1\n" |
||||
"2: s32c1i %0, %3, 0\n" |
||||
"3:\n" |
||||
" .section .fixup,\"ax\"\n" |
||||
" .align 4\n" |
||||
"4: .long 3b\n" |
||||
"5: l32r %1, 4b\n" |
||||
" movi %0, %6\n" |
||||
" jx %1\n" |
||||
" .previous\n" |
||||
" .section __ex_table,\"a\"\n" |
||||
" .long 1b,5b,2b,5b\n" |
||||
" .previous\n" |
||||
: "+r" (ret), "=&r" (prev), "+m" (*uaddr) |
||||
: "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) |
||||
: "memory"); |
||||
|
||||
*uval = prev; |
||||
return ret; |
||||
} |
||||
|
||||
#endif /* __KERNEL__ */ |
||||
#endif /* _ASM_XTENSA_FUTEX_H */ |
@ -0,0 +1,46 @@ |
||||
/*
|
||||
* Xtensa MX interrupt distributor |
||||
* |
||||
* This file is subject to the terms and conditions of the GNU General Public |
||||
* License. See the file "COPYING" in the main directory of this archive |
||||
* for more details. |
||||
* |
||||
* Copyright (C) 2008 - 2013 Tensilica Inc. |
||||
*/ |
||||
|
||||
#ifndef _XTENSA_MXREGS_H
#define _XTENSA_MXREGS_H

/*
 * MX interrupt distributor external registers, accessed via RER/WER.
 *
 * RER/WER at, as	Read/write external register
 *	at: value
 *	as: address
 *
 *	Address	Value
 *	00nn	0...0p..p	Interrupt Routing, route IRQ n to processor p
 *	01pp	0...0d..d	16 bits (d) 'ored' as single IPI to processor p
 *	0180	0...0m..m	Clear enable specified by mask (m)
 *	0184	0...0m..m	Set enable specified by mask (m)
 *	0190	0...0x..x	8-bit IPI partition register
 *				VVVVVVVVPPPPUUUUUUUUUUUUUUUUU
 *				V (10-bit) Release/Version
 *				P ( 4-bit) Number of cores - 1
 *				U (18-bit) ID
 *	01a0	i.......i	32-bit ConfigID
 *	0200	0...0m..m	RunStall core 'n'
 *	0220	c		Cache coherency enabled
 */

#define MIROUT(irq)	(0x000 + (irq))	/* routing for external IRQ */
#define MIPICAUSE(cpu)	(0x100 + (cpu))	/* per-CPU IPI cause bits */
#define MIPISET(cause)	(0x140 + (cause))	/* raise IPI cause on target CPUs */
#define MIENG		0x180		/* interrupt enable: clear by mask */
#define MIENGSET	0x184		/* interrupt enable: set by mask */
#define MIASG		0x188		/* Read Global Assert Register */
#define MIASGSET	0x18c		/* Set Global Assert Register */
#define MIPIPART	0x190		/* IPI partition register */
#define SYSCFGID	0x1a0		/* system configuration ID */
#define MPSCORE		0x200		/* RunStall mask, one bit per core */
#define CCON		0x220		/* cache coherency enable */

#endif /* _XTENSA_MXREGS_H */
@ -0,0 +1,4 @@ |
||||
/*
 * Xtensa perf_event header: defines no architecture-specific PMU bits;
 * it exists so generic perf code can include <asm/perf_event.h>.
 */
#ifndef __ASM_XTENSA_PERF_EVENT_H
#define __ASM_XTENSA_PERF_EVENT_H

#endif /* __ASM_XTENSA_PERF_EVENT_H */
@ -1,27 +1,43 @@ |
||||
/*
|
||||
* include/asm-xtensa/smp.h |
||||
* |
||||
* This file is subject to the terms and conditions of the GNU General Public |
||||
* License. See the file "COPYING" in the main directory of this archive |
||||
* for more details. |
||||
* |
||||
* Copyright (C) 2001 - 2005 Tensilica Inc. |
||||
* Copyright (C) 2001 - 2013 Tensilica Inc. |
||||
*/ |
||||
|
||||
/*
 * BUG FIX: the previous text was a garbled merge that interleaved the
 * removed pre-SMP declarations (boot_cpu_data, cpu_data, and an
 * *unclosed* struct xtensa_cpuinfo) with the new SMP interface, leaving
 * the header syntactically invalid.  This is the coherent post-merge
 * interface; no caller-visible declaration is removed from the new API.
 */
#ifndef _XTENSA_SMP_H
#define _XTENSA_SMP_H

#ifdef CONFIG_SMP

#define raw_smp_processor_id()	(current_thread_info()->cpu)
#define cpu_logical_map(cpu)	(cpu)

/* Boot stack pointer handed to a secondary core by __cpu_up(). */
struct start_info {
	unsigned long stack;
};
extern struct start_info start_info;

struct cpumask;
void arch_send_call_function_ipi_mask(const struct cpumask *mask);
void arch_send_call_function_single_ipi(int cpu);

void smp_init_cpus(void);
void secondary_init_irq(void);
void ipi_init(void);
struct seq_file;
void show_ipi_list(struct seq_file *p, int prec);

#ifdef CONFIG_HOTPLUG_CPU

void __cpu_die(unsigned int cpu);
int __cpu_disable(void);
void cpu_die(void);
void cpu_restart(void);

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */

#endif /* _XTENSA_SMP_H */
||||
|
@ -0,0 +1,20 @@ |
||||
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H

#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif

/* Spinlock word; 0 means unlocked (see __ARCH_SPIN_LOCK_UNLOCKED). */
typedef struct {
	volatile unsigned int slock;
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }

/* Reader/writer lock word; 0 means unlocked. */
typedef struct {
	volatile unsigned int lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }

#endif
@ -0,0 +1,85 @@ |
||||
/* |
||||
* Xtensa Secondary Processors startup code. |
||||
* |
||||
* This file is subject to the terms and conditions of the GNU General Public |
||||
* License. See the file "COPYING" in the main directory of this archive |
||||
* for more details. |
||||
* |
||||
* Copyright (C) 2001 - 2013 Tensilica Inc. |
||||
* |
||||
* Joe Taylor <joe@tensilica.com>
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
|
||||
* Pete Delaney <piet@tensilica.com>
|
||||
*/ |
||||
|
||||
#include <linux/linkage.h> |
||||
|
||||
#include <asm/cacheasm.h> |
||||
#include <asm/initialize_mmu.h> |
||||
#include <asm/mxregs.h> |
||||
#include <asm/regs.h> |
||||
|
||||
|
||||
	/* Reset vector executed by secondary cores when released from
	 * RunStall; ends by jumping to the common _startup entry. */
	.section	.SecondaryResetVector.text, "ax"


ENTRY(_SecondaryResetVector)
	_j	_SetupOCD

	.begin  no-absolute-literals
	.literal_position

_SetupOCD:
	/*
	 * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
	 * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
	 * xt-gdb to single step via DEBUG exceptions received directly
	 * by ocd.
	 */
	movi	a1, 1
	movi	a0, 0
	wsr	a1, windowstart
	wsr	a0, windowbase
	rsync

	movi	a1, LOCKLEVEL
	wsr	a1, ps
	rsync

_SetupMMU:
	/* Offset lets the remapped copy below land on the same code. */
	Offset = _SetupMMU - _SecondaryResetVector

#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
	initialize_mmu
#endif

	/*
	 * Start Secondary Processors with NULL pointer to boot params.
	 */
	movi	a2, 0				#  a2 == NULL
	movi	a3, _startup
	jx	a3

	.end    no-absolute-literals


	.section	.SecondaryResetVector.remapped_text, "ax"
	.global         _RemappedSecondaryResetVector

	.org 0                                  # Need to do org before literals

_RemappedSecondaryResetVector:
	.begin  no-absolute-literals
	.literal_position

	_j	_RemappedSetupMMU
	/* Pad so _RemappedSetupMMU sits at the same offset as _SetupMMU. */
	. = _RemappedSecondaryResetVector + Offset

_RemappedSetupMMU:

#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
	initialize_mmu
#endif

	.end    no-absolute-literals
@ -0,0 +1,592 @@ |
||||
/*
|
||||
* Xtensa SMP support functions. |
||||
* |
||||
* This file is subject to the terms and conditions of the GNU General Public |
||||
* License. See the file "COPYING" in the main directory of this archive |
||||
* for more details. |
||||
* |
||||
* Copyright (C) 2008 - 2013 Tensilica Inc. |
||||
* |
||||
* Chris Zankel <chris@zankel.net> |
||||
* Joe Taylor <joe@tensilica.com> |
||||
* Pete Delaney <piet@tensilica.com |
||||
*/ |
||||
|
||||
#include <linux/cpu.h> |
||||
#include <linux/cpumask.h> |
||||
#include <linux/delay.h> |
||||
#include <linux/init.h> |
||||
#include <linux/interrupt.h> |
||||
#include <linux/irqdomain.h> |
||||
#include <linux/irq.h> |
||||
#include <linux/kdebug.h> |
||||
#include <linux/module.h> |
||||
#include <linux/reboot.h> |
||||
#include <linux/seq_file.h> |
||||
#include <linux/smp.h> |
||||
#include <linux/thread_info.h> |
||||
|
||||
#include <asm/cacheflush.h> |
||||
#include <asm/kdebug.h> |
||||
#include <asm/mmu_context.h> |
||||
#include <asm/mxregs.h> |
||||
#include <asm/platform.h> |
||||
#include <asm/tlbflush.h> |
||||
#include <asm/traps.h> |
||||
|
||||
#ifdef CONFIG_SMP |
||||
# if XCHAL_HAVE_S32C1I == 0 |
||||
# error "The S32C1I option is required for SMP." |
||||
# endif |
||||
#endif |
||||
|
||||
static void system_invalidate_dcache_range(unsigned long start, |
||||
unsigned long size); |
||||
static void system_flush_invalidate_dcache_range(unsigned long start, |
||||
unsigned long size); |
||||
|
||||
/* IPI (Inter Processor Interrupt) */

#define IPI_IRQ		0

static irqreturn_t ipi_interrupt(int irq, void *dev_id);
static struct irqaction ipi_irqaction = {
	.handler =	ipi_interrupt,
	.flags =	IRQF_PERCPU,	/* one IPI line per CPU */
	.name =		"ipi",
};

/* Map hwirq IPI_IRQ in the default domain and install the handler. */
void ipi_init(void)
{
	unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
	setup_irq(irq, &ipi_irqaction);
}

static inline unsigned int get_core_count(void)
{
	/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
	unsigned int syscfgid = get_er(SYSCFGID);
	return ((syscfgid >> 18) & 0xf) + 1;
}

static inline int get_core_id(void)
{
	/*
	 * Low bits of SYSCFGID contain the core id.
	 * NOTE(review): the original comment claimed "bits 0...18" but the
	 * mask keeps only 14 bits (0..13) — confirm against the SYSCFGID
	 * layout documented in mxregs.h (U field is described as 18-bit).
	 */
	unsigned int core_id = get_er(SYSCFGID);
	return core_id & 0x3fff;
}
||||
|
||||
/* Mark each of the first @max_cpus CPUs as present for bring-up. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	for (cpu = 0; cpu < max_cpus; cpu++)
		set_cpu_present(cpu, true);
}
||||
|
||||
/* Discover how many cores the MX reports and mark each one possible. */
void __init smp_init_cpus(void)
{
	unsigned int cpu;
	unsigned int ncpus = get_core_count();
	unsigned int core_id = get_core_id();

	pr_info("%s: Core Count = %d\n", __func__, ncpus);
	pr_info("%s: Core Id = %d\n", __func__, core_id);

	for (cpu = 0; cpu < ncpus; cpu++)
		set_cpu_possible(cpu, true);
}
||||
|
||||
/* Boot-CPU setup: must run on core 0; seed its ASID cache. */
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();
	BUG_ON(cpu != 0);	/* the boot processor is always core 0 */
	cpu_asid_cache(cpu) = ASID_USER_FIRST;
}

/* Nothing left to do once all CPUs are online. */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
||||
|
||||
/* Clear from xt-gdb (via .xt-gdb) to park secondaries for debugging. */
static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);

/*
 * C entry point for a secondary core, reached from the secondary reset
 * vector via _startup.  Initializes MMU/trap/IRQ/timer state, joins
 * init_mm, marks itself online and signals __cpu_up() through
 * cpu_running before entering the idle loop.  Never returns.
 */
void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
	/* Debug hook: spin in waiti forever if secondaries are disabled. */
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			__func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		__func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */

	secondary_trap_init();

	/* All kernel threads share the same mm context. */

	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	preempt_disable();
	trace_hardirqs_off();

	calibrate_delay();

	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	/* Visible to __cpu_up()'s cpu_online() check after the timeout. */
	set_cpu_online(cpu, true);

	local_irq_enable();

	complete(&cpu_running);

	cpu_startup_entry(CPUHP_ONLINE);
}
||||
|
||||
static void mx_cpu_start(void *p) |
||||
{ |
||||
unsigned cpu = (unsigned)p; |
||||
unsigned long run_stall_mask = get_er(MPSCORE); |
||||
|
||||
set_er(run_stall_mask & ~(1u << cpu), MPSCORE); |
||||
pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n", |
||||
__func__, cpu, run_stall_mask, get_er(MPSCORE)); |
||||
} |
||||
|
||||
static void mx_cpu_stop(void *p) |
||||
{ |
||||
unsigned cpu = (unsigned)p; |
||||
unsigned long run_stall_mask = get_er(MPSCORE); |
||||
|
||||
set_er(run_stall_mask | (1u << cpu), MPSCORE); |
||||
pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n", |
||||
__func__, cpu, run_stall_mask, get_er(MPSCORE)); |
||||
} |
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
/* CPU id handed to a waking core; flushed to memory for non-coherent reads. */
unsigned long cpu_start_id __cacheline_aligned;
#endif
/* Handshake word: boot CPU writes non-zero, secondary clears it when awake. */
unsigned long cpu_start_ccount;

/*
 * Release secondary @cpu from RunStall and wait for it to acknowledge
 * by clearing cpu_start_ccount.  Performs two handshake rounds with a
 * shared ~1s jiffies deadline; returns 0 on success, -EIO (after
 * re-stalling the core) if it never responded.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	cpu_start_id = cpu;
	/* Push the id to memory so the not-yet-coherent core sees it. */
	system_flush_invalidate_dcache_range(
			(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);	/* 0 is reserved as the cleared marker */

		cpu_start_ccount = ccount;

		while (time_before(jiffies, timeout)) {
			mb();
			if (!cpu_start_ccount)
				break;	/* secondary consumed the token */
		}

		if (cpu_start_ccount) {
			/* No response: stall the core again and give up. */
			smp_call_function_single(0, mx_cpu_stop,
					(void *)cpu, 1);
			cpu_start_ccount = 0;
			return -EIO;
		}
	}
	return 0;
}
||||
|
||||
/*
 * Bring secondary @cpu online: give it an ASID if needed, publish the
 * idle task's stack via start_info, boot it, then wait up to 1s for
 * secondary_start_kernel() to signal cpu_running and mark itself
 * online.  Returns 0 on success or a negative errno.
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret = 0;

	if (cpu_asid_cache(cpu) == 0)
		cpu_asid_cache(cpu) = ASID_USER_FIRST;

	start_info.stack = (unsigned long)task_pt_regs(idle);
	wmb();	/* publish the stack before waking the core */

	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
			__func__, cpu, idle, start_info.stack);

	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		wait_for_completion_timeout(&cpu_running,
				msecs_to_jiffies(1000));
		/* Timeout alone is not fatal; only failing to go online is. */
		if (!cpu_online(cpu))
			ret = -EIO;
	}

	if (ret)
		pr_err("CPU %u failed to boot\n", cpu);

	return ret;
}
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU |
||||
|
||||
/*
|
||||
* __cpu_disable runs on the processor to be shutdown. |
||||
*/ |
||||
int __cpu_disable(void) |
||||
{ |
||||
unsigned int cpu = smp_processor_id(); |
||||
|
||||
/*
|
||||
* Take this CPU offline. Once we clear this, we can't return, |
||||
* and we must not schedule until we're ready to give up the cpu. |
||||
*/ |
||||
set_cpu_online(cpu, false); |
||||
|
||||
/*
|
||||
* OK - migrate IRQs away from this CPU |
||||
*/ |
||||
migrate_irqs(); |
||||
|
||||
/*
|
||||
* Flush user cache and TLB mappings, and then remove this CPU |
||||
* from the vm mask set of all processes. |
||||
*/ |
||||
local_flush_cache_all(); |
||||
local_flush_tlb_all(); |
||||
invalidate_page_directory(); |
||||
|
||||
clear_tasks_mm_cpumask(cpu); |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static void platform_cpu_kill(unsigned int cpu) |
||||
{ |
||||
smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true); |
||||
} |
||||
|
||||
/*
|
||||
* called on the thread which is asking for a CPU to be shutdown - |
||||
* waits until shutdown has completed, or it is timed out. |
||||
*/ |
||||
void __cpu_die(unsigned int cpu) |
||||
{ |
||||
unsigned long timeout = jiffies + msecs_to_jiffies(1000); |
||||
while (time_before(jiffies, timeout)) { |
||||
system_invalidate_dcache_range((unsigned long)&cpu_start_id, |
||||
sizeof(cpu_start_id)); |
||||
if (cpu_start_id == -cpu) { |
||||
platform_cpu_kill(cpu); |
||||
return; |
||||
} |
||||
} |
||||
pr_err("CPU%u: unable to kill\n", cpu); |
||||
} |
||||
|
||||
void arch_cpu_idle_dead(void) |
||||
{ |
||||
cpu_die(); |
||||
} |
||||
/*
|
||||
* Called from the idle thread for the CPU which has been shutdown. |
||||
* |
||||
* Note that we disable IRQs here, but do not re-enable them |
||||
* before returning to the caller. This is also the behaviour |
||||
* of the other hotplug-cpu capable cores, so presumably coming |
||||
* out of idle fixes this. |
||||
*/ |
||||
void __ref cpu_die(void) |
||||
{ |
||||
idle_task_exit(); |
||||
local_irq_disable(); |
||||
__asm__ __volatile__( |
||||
" movi a2, cpu_restart\n" |
||||
" jx a2\n"); |
||||
} |
||||
|
||||
#endif /* CONFIG_HOTPLUG_CPU */ |
||||
|
||||
/* Message types multiplexed onto the per-CPU MIPICAUSE register. */
enum ipi_msg_type {
	IPI_RESCHEDULE = 0,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

/* /proc/interrupts labels, indexed by ipi_msg_type. */
static const struct {
	const char *short_text;
	const char *long_text;
} ipi_text[] = {
	{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
	{ .short_text = "CAL", .long_text = "Function call interrupts" },
	{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};

/* Per-CPU delivery counters, one slot per IPI message type. */
struct ipi_data {
	unsigned long ipi_count[IPI_MAX];
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);
||||
|
||||
static void send_ipi_message(const struct cpumask *callmask, |
||||
enum ipi_msg_type msg_id) |
||||
{ |
||||
int index; |
||||
unsigned long mask = 0; |
||||
|
||||
for_each_cpu(index, callmask) |
||||
if (index != smp_processor_id()) |
||||
mask |= 1 << index; |
||||
|
||||
set_er(mask, MIPISET(msg_id)); |
||||
} |
||||
|
||||
/* Cross-call: ask every CPU in @mask to run its queued function calls. */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

/* Cross-call: ask a single CPU to run its queued function calls. */
void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

/* Poke @cpu so its scheduler runs; delivered as IPI_RESCHEDULE. */
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
||||
|
||||
void smp_send_stop(void) |
||||
{ |
||||
struct cpumask targets; |
||||
|
||||
cpumask_copy(&targets, cpu_online_mask); |
||||
cpumask_clear_cpu(smp_processor_id(), &targets); |
||||
send_ipi_message(&targets, IPI_CPU_STOP); |
||||
} |
||||
|
||||
/* IPI_CPU_STOP handler: mark this CPU offline and halt it. */
static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);
	machine_halt();
}
||||
|
||||
/*
 * IPI handler: read this CPU's pending cause bits, acknowledge each one
 * and bump its counter, then dispatch the recognized message types.
 */
irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	unsigned int msg;
	unsigned i;

	msg = get_er(MIPICAUSE(cpu));
	for (i = 0; i < IPI_MAX; i++)
		if (msg & (1 << i)) {
			set_er(1 << i, MIPICAUSE(cpu));	/* ack this cause */
			++ipi->ipi_count[i];
		}

	if (msg & (1 << IPI_RESCHEDULE))
		scheduler_ipi();
	if (msg & (1 << IPI_CALL_FUNC))
		generic_smp_call_function_interrupt();
	if (msg & (1 << IPI_CPU_STOP))
		ipi_cpu_stop(cpu);

	return IRQ_HANDLED;
}
||||
|
||||
/* /proc/interrupts backend: one row per IPI type with per-CPU counts. */
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu;
	unsigned i;

	for (i = 0; i < IPI_MAX; ++i) {
		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
		for_each_online_cpu(cpu)
			seq_printf(p, " %10lu",
					per_cpu(ipi_data, cpu).ipi_count[i]);
		seq_printf(p, " %s\n", ipi_text[i].long_text);
	}
}

/* Profiling timer multiplier is not used on xtensa; accept and log it. */
int setup_profiling_timer(unsigned int multiplier)
{
	pr_debug("setup_profiling_timer %d\n", multiplier);
	return 0;
}
||||
|
||||
/* TLB flush functions */

/* Argument bundle for the on_each_cpu() trampolines below. */
struct flush_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

/* Flush the whole TLB on every online CPU. */
void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

static void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm(arg);
}

/* Flush all TLB entries of @mm on every online CPU. */
void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}

static void ipi_flush_tlb_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_page(fd->vma, fd->addr1);
}

/* Flush one page's TLB entry on every online CPU. */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = addr,
	};
	on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}

static void ipi_flush_tlb_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

/* Flush a [start, end) TLB range on every online CPU. */
void flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}
||||
|
||||
/* Cache flush functions */

static void ipi_flush_cache_all(void *arg)
{
	local_flush_cache_all();
}

/* Flush all caches on every online CPU. */
void flush_cache_all(void)
{
	on_each_cpu(ipi_flush_cache_all, NULL, 1);
}

static void ipi_flush_cache_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}

/* Flush one page on every online CPU; addr2 carries the pfn. */
void flush_cache_page(struct vm_area_struct *vma,
		unsigned long address, unsigned long pfn)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = address,
		.addr2 = pfn,
	};
	on_each_cpu(ipi_flush_cache_page, &fd, 1);
}

static void ipi_flush_cache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}

/* Flush a [start, end) cache range on every online CPU. */
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_cache_range, &fd, 1);
}

static void ipi_flush_icache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_icache_range(fd->addr1, fd->addr2);
}

/* Flush a [start, end) icache range on every online CPU. */
void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_icache_range, &fd, 1);
}
||||
|
||||
/* ------------------------------------------------------------------------- */

static void ipi_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__invalidate_dcache_range(fd->addr1, fd->addr2);
}

/* Invalidate a dcache range on every online CPU (addr2 carries size). */
static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
}

static void ipi_flush_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
}

/* Write back + invalidate a dcache range on every online CPU. */
static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
}
@ -0,0 +1,164 @@ |
||||
/*
|
||||
* Xtensa MX interrupt distributor |
||||
* |
||||
* Copyright (C) 2002 - 2013 Tensilica, Inc. |
||||
* |
||||
* This file is subject to the terms and conditions of the GNU General Public |
||||
* License. See the file "COPYING" in the main directory of this archive |
||||
* for more details. |
||||
*/ |
||||
|
||||
#include <linux/interrupt.h> |
||||
#include <linux/irqdomain.h> |
||||
#include <linux/irq.h> |
||||
#include <linux/of.h> |
||||
|
||||
#include <asm/mxregs.h> |
||||
|
||||
#include "irqchip.h" |
||||
|
||||
/* hwirqs 0..1 are IPI lines; MX external IRQs start at hwirq
 * HW_IRQ_MX_BASE, offset by HW_IRQ_EXTERN_BASE in the DT translation. */
#define HW_IRQ_IPI_COUNT	2
#define HW_IRQ_MX_BASE		2
#define HW_IRQ_EXTERN_BASE	3

/* Per-CPU shadow of the INTENABLE special register. */
static DEFINE_PER_CPU(unsigned int, cached_irq_mask);

/* Domain map: IPI hwirqs get a per-CPU level handler; everything else
 * falls through to the generic xtensa mapping. */
static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
		irq_hw_number_t hw)
{
	if (hw < HW_IRQ_IPI_COUNT) {
		struct irq_chip *irq_chip = d->host_data;
		irq_set_chip_and_handler_name(irq, irq_chip,
				handle_percpu_irq, "ipi");
		irq_set_status_flags(irq, IRQ_LEVEL);
		return 0;
	}
	return xtensa_irq_map(d, irq, hw);
}

/*
 * Device Tree IRQ specifier translation function which works with one or
 * two cell bindings. First cell value maps directly to the hwirq number.
 * Second cell if present specifies whether hwirq number is external (1) or
 * internal (0).
 */
static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
		struct device_node *ctrlr,
		const u32 *intspec, unsigned int intsize,
		unsigned long *out_hwirq, unsigned int *out_type)
{
	return xtensa_irq_domain_xlate(intspec, intsize,
			intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
			out_hwirq, out_type);
}

static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
	.xlate = xtensa_mx_irq_domain_xlate,
	.map = xtensa_mx_irq_map,
};
||||
|
||||
/* Per-CPU IRQ bring-up: enable all external edge/level sources in both
 * the cached mask and the INTENABLE special register. */
void secondary_init_irq(void)
{
	__this_cpu_write(cached_irq_mask,
			XCHAL_INTTYPE_MASK_EXTERN_EDGE |
			XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
	set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
			XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
}
||||
|
||||
/* Mask an interrupt: external MX lines are disabled in the distributor
 * (MIENG clears by mask); core-internal lines in the INTENABLE shadow. */
static void xtensa_mx_irq_mask(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
				XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
					HW_IRQ_MX_BASE), MIENG);
	} else {
		mask = __this_cpu_read(cached_irq_mask) & ~mask;
		__this_cpu_write(cached_irq_mask, mask);
		set_sr(mask, intenable);
	}
}

/* Unmask an interrupt: mirror image of xtensa_mx_irq_mask() using
 * MIENGSET for external lines. */
static void xtensa_mx_irq_unmask(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
				XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
					HW_IRQ_MX_BASE), MIENGSET);
	} else {
		mask |= __this_cpu_read(cached_irq_mask);
		__this_cpu_write(cached_irq_mask, mask);
		set_sr(mask, intenable);
	}
}
||||
|
||||
/* Enable @d: give the board/variant hook first shot, then unmask. */
static void xtensa_mx_irq_enable(struct irq_data *d)
{
	variant_irq_enable(d->hwirq);
	xtensa_mx_irq_unmask(d);
}
||||
|
||||
/* Disable @d: mask first, then let the board/variant hook act (reverse
 * order of xtensa_mx_irq_enable). */
static void xtensa_mx_irq_disable(struct irq_data *d)
{
	xtensa_mx_irq_mask(d);
	variant_irq_disable(d->hwirq);
}
||||
|
||||
static void xtensa_mx_irq_ack(struct irq_data *d) |
||||
{ |
||||
set_sr(1 << d->hwirq, intclear); |
||||
} |
||||
|
||||
static int xtensa_mx_irq_retrigger(struct irq_data *d) |
||||
{ |
||||
set_sr(1 << d->hwirq, intset); |
||||
return 1; |
||||
} |
||||
|
||||
/*
 * Route the MX-distributed interrupt @d to one CPU chosen from @dest by
 * writing that CPU's bit into the distributor's MIROUT register.
 *
 * NOTE(review): affinity is collapsed to a single CPU via cpumask_any()
 * and the effective affinity is not reported back to the IRQ core —
 * confirm this is intended.  @force is ignored.  Always returns 0.
 */
static int xtensa_mx_irq_set_affinity(struct irq_data *d,
		const struct cpumask *dest, bool force)
{
	unsigned mask = 1u << cpumask_any(dest);

	set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
	return 0;

}
||||
|
||||
/* irq_chip for the Xtensa MX interrupt distributor (SMP-capable cores):
 * per-CPU INTENABLE handling plus MIENG/MIENGSET/MIROUT routing. */
static struct irq_chip xtensa_mx_irq_chip = {
	.name = "xtensa-mx",
	.irq_enable = xtensa_mx_irq_enable,
	.irq_disable = xtensa_mx_irq_disable,
	.irq_mask = xtensa_mx_irq_mask,
	.irq_unmask = xtensa_mx_irq_unmask,
	.irq_ack = xtensa_mx_irq_ack,
	.irq_retrigger = xtensa_mx_irq_retrigger,
	.irq_set_affinity = xtensa_mx_irq_set_affinity,
};
||||
|
||||
/*
 * Legacy (non-device-tree) setup: register a legacy IRQ domain covering
 * all NR_IRQS with a 1:1 hwirq mapping, make it the default host, and
 * run the per-CPU enable step on the boot CPU.  Always returns 0.
 */
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
	struct irq_domain *domain;

	domain = irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
				       &xtensa_mx_irq_domain_ops,
				       &xtensa_mx_irq_chip);
	irq_set_default_host(domain);
	secondary_init_irq();
	return 0;
}
||||
|
||||
static int __init xtensa_mx_init(struct device_node *np, |
||||
struct device_node *interrupt_parent) |
||||
{ |
||||
struct irq_domain *root_domain = |
||||
irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops, |
||||
&xtensa_mx_irq_chip); |
||||
irq_set_default_host(root_domain); |
||||
secondary_init_irq(); |
||||
return 0; |
||||
} |
||||
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init); |
@ -0,0 +1,108 @@ |
||||
/*
|
||||
* Xtensa built-in interrupt controller |
||||
* |
||||
* Copyright (C) 2002 - 2013 Tensilica, Inc. |
||||
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar |
||||
* |
||||
* This file is subject to the terms and conditions of the GNU General Public |
||||
* License. See the file "COPYING" in the main directory of this archive |
||||
* for more details. |
||||
* |
||||
* Chris Zankel <chris@zankel.net> |
||||
* Kevin Chea |
||||
*/ |
||||
|
||||
#include <linux/interrupt.h> |
||||
#include <linux/irqdomain.h> |
||||
#include <linux/irq.h> |
||||
#include <linux/of.h> |
||||
|
||||
#include "irqchip.h" |
||||
|
||||
/* Software shadow of the INTENABLE special register; kept in sync by
 * xtensa_irq_mask()/xtensa_irq_unmask() below. */
unsigned int cached_irq_mask;
||||
|
||||
/*
 * Device Tree IRQ specifier translation function which works with one or
 * two cell bindings. First cell value maps directly to the hwirq number.
 * Second cell if present specifies whether hwirq number is external (1) or
 * internal (0).
 */
static int xtensa_pic_irq_domain_xlate(struct irq_domain *d,
		struct device_node *ctrlr,
		const u32 *intspec, unsigned int intsize,
		unsigned long *out_hwirq, unsigned int *out_type)
{
	/*
	 * The built-in PIC has no external offset, so the internal and
	 * external candidate hwirq numbers passed to the common helper
	 * are both intspec[0].
	 */
	return xtensa_irq_domain_xlate(intspec, intsize,
			intspec[0], intspec[0],
			out_hwirq, out_type);
}
||||
|
||||
/* DT translation and mapping callbacks for the built-in PIC domain. */
static const struct irq_domain_ops xtensa_irq_domain_ops = {
	.xlate = xtensa_pic_irq_domain_xlate,
	.map = xtensa_irq_map,
};
||||
|
||||
static void xtensa_irq_mask(struct irq_data *d) |
||||
{ |
||||
cached_irq_mask &= ~(1 << d->hwirq); |
||||
set_sr(cached_irq_mask, intenable); |
||||
} |
||||
|
||||
static void xtensa_irq_unmask(struct irq_data *d) |
||||
{ |
||||
cached_irq_mask |= 1 << d->hwirq; |
||||
set_sr(cached_irq_mask, intenable); |
||||
} |
||||
|
||||
/* Enable @d: give the board/variant hook first shot, then unmask. */
static void xtensa_irq_enable(struct irq_data *d)
{
	variant_irq_enable(d->hwirq);
	xtensa_irq_unmask(d);
}
||||
|
||||
/* Disable @d: mask first, then let the board/variant hook act (reverse
 * order of xtensa_irq_enable). */
static void xtensa_irq_disable(struct irq_data *d)
{
	xtensa_irq_mask(d);
	variant_irq_disable(d->hwirq);
}
||||
|
||||
static void xtensa_irq_ack(struct irq_data *d) |
||||
{ |
||||
set_sr(1 << d->hwirq, intclear); |
||||
} |
||||
|
||||
static int xtensa_irq_retrigger(struct irq_data *d) |
||||
{ |
||||
set_sr(1 << d->hwirq, intset); |
||||
return 1; |
||||
} |
||||
|
||||
/* irq_chip for the Xtensa built-in (uniprocessor) interrupt controller;
 * all control goes through the INTENABLE/INTSET/INTCLEAR special
 * registers, so there is no set_affinity. */
static struct irq_chip xtensa_irq_chip = {
	.name = "xtensa",
	.irq_enable = xtensa_irq_enable,
	.irq_disable = xtensa_irq_disable,
	.irq_mask = xtensa_irq_mask,
	.irq_unmask = xtensa_irq_unmask,
	.irq_ack = xtensa_irq_ack,
	.irq_retrigger = xtensa_irq_retrigger,
};
||||
|
||||
/*
 * Legacy (non-device-tree) setup: register a legacy IRQ domain covering
 * all NR_IRQS with a 1:1 hwirq mapping and make it the default host.
 * Always returns 0.
 */
int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
{
	struct irq_domain *domain;

	domain = irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
				       &xtensa_irq_domain_ops,
				       &xtensa_irq_chip);
	irq_set_default_host(domain);
	return 0;
}
||||
|
||||
static int __init xtensa_pic_init(struct device_node *np, |
||||
struct device_node *interrupt_parent) |
||||
{ |
||||
struct irq_domain *root_domain = |
||||
irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops, |
||||
&xtensa_irq_chip); |
||||
irq_set_default_host(root_domain); |
||||
return 0; |
||||
} |
||||
IRQCHIP_DECLARE(xtensa_irq_chip, "cdns,xtensa-pic", xtensa_pic_init); |
@ -0,0 +1,17 @@ |
||||
/*
|
||||
* Xtensa MX interrupt distributor |
||||
* |
||||
* Copyright (C) 2002 - 2013 Tensilica, Inc. |
||||
* |
||||
* This file is subject to the terms and conditions of the GNU General Public |
||||
* License. See the file "COPYING" in the main directory of this archive |
||||
* for more details. |
||||
*/ |
||||
|
||||
#ifndef __LINUX_IRQCHIP_XTENSA_MX_H
#define __LINUX_IRQCHIP_XTENSA_MX_H

struct device_node;
/* Set up the MX interrupt distributor on legacy (non-DT) boots;
 * @interrupt_parent is currently unused.  Returns 0. */
int xtensa_mx_init_legacy(struct device_node *interrupt_parent);

#endif /* __LINUX_IRQCHIP_XTENSA_MX_H */
@ -0,0 +1,18 @@ |
||||
/*
|
||||
* Xtensa built-in interrupt controller |
||||
* |
||||
* Copyright (C) 2002 - 2013 Tensilica, Inc. |
||||
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar |
||||
* |
||||
* This file is subject to the terms and conditions of the GNU General Public |
||||
* License. See the file "COPYING" in the main directory of this archive |
||||
* for more details. |
||||
*/ |
||||
|
||||
#ifndef __LINUX_IRQCHIP_XTENSA_PIC_H
#define __LINUX_IRQCHIP_XTENSA_PIC_H

struct device_node;
/* Set up the built-in Xtensa PIC on legacy (non-DT) boots;
 * @interrupt_parent is currently unused.  Returns 0. */
int xtensa_pic_init_legacy(struct device_node *interrupt_parent);

#endif /* __LINUX_IRQCHIP_XTENSA_PIC_H */
Loading…
Reference in new issue