Merge "Merge android-4.14-p.70 (e601ab6) into msm-4.14"

Branch: tirimbino
Author: qctecmdr Service, committed by Gerrit - the friendly Code Review server
Commit: 2330aa6c4c

Changed files (number of changed lines in parentheses):
  1. Makefile (12)
  2. arch/Kconfig (3)
  3. arch/alpha/kernel/osf_sys.c (51)
  4. arch/arc/Kconfig (3)
  5. arch/arc/Makefile (15)
  6. arch/arc/include/asm/cache.h (4)
  7. arch/arc/include/asm/delay.h (3)
  8. arch/arc/include/asm/mach_desc.h (2)
  9. arch/arc/kernel/irq.c (2)
  10. arch/arc/kernel/process.c (45)
  11. arch/arc/mm/cache.c (7)
  12. arch/arc/plat-eznps/include/plat/ctop.h (10)
  13. arch/arc/plat-eznps/mtm.c (6)
  14. arch/arm/boot/dts/am3517.dtsi (5)
  15. arch/arm/boot/dts/am437x-sk-evm.dts (2)
  16. arch/arm/boot/dts/armada-385-synology-ds116.dts (2)
  17. arch/arm/boot/dts/bcm-cygnus.dtsi (24)
  18. arch/arm/boot/dts/bcm-nsp.dtsi (32)
  19. arch/arm/boot/dts/bcm5301x.dtsi (2)
  20. arch/arm/boot/dts/da850.dtsi (6)
  21. arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi (2)
  22. arch/arm/boot/dts/tegra30-cardhu.dtsi (1)
  23. arch/arm/configs/imx_v4_v5_defconfig (2)
  24. arch/arm/mach-davinci/board-da850-evm.c (2)
  25. arch/arm/mach-omap2/omap-smp.c (41)
  26. arch/arm/mach-pxa/irq.c (4)
  27. arch/arm/mach-rockchip/Kconfig (1)
  28. arch/arm/mm/init.c (9)
  29. arch/arm/probes/kprobes/core.c (4)
  30. arch/arm/probes/kprobes/test-core.c (1)
  31. arch/arm64/Kconfig (1)
  32. arch/arm64/Kconfig.platforms (1)
  33. arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi (2)
  34. arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi (8)
  35. arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts (4)
  36. arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts (4)
  37. arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi (4)
  38. arch/arm64/boot/dts/qcom/msm8916.dtsi (4)
  39. arch/arm64/boot/dts/rockchip/rk3328.dtsi (2)
  40. arch/arm64/include/asm/cache.h (5)
  41. arch/arm64/include/asm/cpucaps.h (3)
  42. arch/arm64/kernel/cpu_errata.c (23)
  43. arch/arm64/kernel/cpufeature.c (4)
  44. arch/arm64/kernel/probes/kprobes.c (2)
  45. arch/arm64/kernel/smp.c (2)
  46. arch/arm64/mm/dma-mapping.c (9)
  47. arch/arm64/mm/init.c (8)
  48. arch/m68k/include/asm/mcf_pgalloc.h (4)
  49. arch/mips/Makefile (12)
  50. arch/mips/bcm47xx/setup.c (6)
  51. arch/mips/include/asm/mipsregs.h (3)
  52. arch/mips/include/asm/processor.h (15)
  53. arch/mips/kernel/ptrace.c (2)
  54. arch/mips/kernel/ptrace32.c (2)
  55. arch/mips/lib/multi3.c (6)
  56. arch/openrisc/kernel/entry.S (8)
  57. arch/openrisc/kernel/head.S (9)
  58. arch/openrisc/kernel/traps.c (2)
  59. arch/parisc/include/asm/spinlock.h (8)
  60. arch/parisc/kernel/syscall.S (24)
  61. arch/powerpc/include/asm/fadump.h (3)
  62. arch/powerpc/include/asm/uaccess.h (13)
  63. arch/powerpc/kernel/exceptions-64s.S (6)
  64. arch/powerpc/kernel/fadump.c (89)
  65. arch/powerpc/mm/mmu_context_iommu.c (17)
  66. arch/powerpc/net/bpf_jit_comp64.c (29)
  67. arch/powerpc/platforms/85xx/t1042rdb_diu.c (4)
  68. arch/powerpc/platforms/powernv/pci-ioda.c (37)
  69. arch/powerpc/platforms/pseries/ras.c (4)
  70. arch/powerpc/sysdev/mpic_msgr.c (2)
  71. arch/s390/include/asm/qdio.h (1)
  72. arch/s390/kernel/crash_dump.c (15)
  73. arch/s390/lib/mem.S (12)
  74. arch/s390/mm/fault.c (2)
  75. arch/s390/mm/page-states.c (2)
  76. arch/s390/net/bpf_jit_comp.c (3)
  77. arch/s390/numa/numa.c (16)
  78. arch/s390/pci/pci.c (2)
  79. arch/sparc/include/asm/Kbuild (1)
  80. arch/sparc/kernel/sys_sparc_32.c (14)
  81. arch/sparc/kernel/sys_sparc_64.c (14)
  82. arch/sparc/kernel/time_64.c (2)
  83. arch/x86/Kconfig (1)
  84. arch/x86/boot/compressed/Makefile (8)
  85. arch/x86/entry/vdso/Makefile (6)
  86. arch/x86/events/amd/ibs.c (6)
  87. arch/x86/events/core.c (2)
  88. arch/x86/include/asm/irqflags.h (3)
  89. arch/x86/include/asm/mce.h (1)
  90. arch/x86/include/asm/pgtable-3level.h (7)
  91. arch/x86/include/asm/processor.h (6)
  92. arch/x86/include/asm/tlbflush.h (40)
  93. arch/x86/include/asm/vgtod.h (2)
  94. arch/x86/kernel/cpu/bugs.c (50)
  95. arch/x86/kernel/cpu/common.c (1)
  96. arch/x86/kernel/cpu/intel.c (3)
  97. arch/x86/kernel/cpu/microcode/intel.c (5)
  98. arch/x86/kernel/dumpstack.c (4)
  99. arch/x86/kernel/kexec-bzimage64.c (2)
  100. arch/x86/kernel/kvmclock.c (1)
Some files were not shown because too many files have changed in this diff.

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 66
SUBLEVEL = 70
EXTRAVERSION =
NAME = Petit Gorille
@ -357,9 +357,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
else if [ -x /bin/bash ]; then echo /bin/bash; \
else echo sh; fi ; fi)
HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS)
HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS)
HOST_LFS_LIBS := $(shell getconf LFS_LIBS)
HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
HOSTCC = gcc
HOSTCXX = g++
@ -501,9 +501,13 @@ KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
endif
RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
export RETPOLINE_CFLAGS
export RETPOLINE_VDSO_CFLAGS
ifeq ($(config-targets),1)
# ===========================================================================

@ -336,6 +336,9 @@ config HAVE_ARCH_JUMP_LABEL
config HAVE_RCU_TABLE_FREE
bool
config HAVE_RCU_TABLE_INVALIDATE
bool
config ARCH_HAVE_NMI_SAFE_CMPXCHG
bool

@ -530,24 +530,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
SYSCALL_DEFINE1(osf_utsname, char __user *, name)
{
int error;
char tmp[5 * 32];
down_read(&uts_sem);
error = -EFAULT;
if (copy_to_user(name + 0, utsname()->sysname, 32))
goto out;
if (copy_to_user(name + 32, utsname()->nodename, 32))
goto out;
if (copy_to_user(name + 64, utsname()->release, 32))
goto out;
if (copy_to_user(name + 96, utsname()->version, 32))
goto out;
if (copy_to_user(name + 128, utsname()->machine, 32))
goto out;
error = 0;
out:
memcpy(tmp + 0 * 32, utsname()->sysname, 32);
memcpy(tmp + 1 * 32, utsname()->nodename, 32);
memcpy(tmp + 2 * 32, utsname()->release, 32);
memcpy(tmp + 3 * 32, utsname()->version, 32);
memcpy(tmp + 4 * 32, utsname()->machine, 32);
up_read(&uts_sem);
return error;
if (copy_to_user(name, tmp, sizeof(tmp)))
return -EFAULT;
return 0;
}
SYSCALL_DEFINE0(getpagesize)
@ -567,18 +562,21 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
{
int len, err = 0;
char *kname;
char tmp[32];
if (namelen > 32)
if (namelen < 0 || namelen > 32)
namelen = 32;
down_read(&uts_sem);
kname = utsname()->domainname;
len = strnlen(kname, namelen);
if (copy_to_user(name, kname, min(len + 1, namelen)))
err = -EFAULT;
len = min(len + 1, namelen);
memcpy(tmp, kname, len);
up_read(&uts_sem);
return err;
if (copy_to_user(name, tmp, len))
return -EFAULT;
return 0;
}
/*
@ -739,13 +737,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
};
unsigned long offset;
const char *res;
long len, err = -EINVAL;
long len;
char tmp[__NEW_UTS_LEN + 1];
offset = command-1;
if (offset >= ARRAY_SIZE(sysinfo_table)) {
/* Digital UNIX has a few unpublished interfaces here */
printk("sysinfo(%d)", command);
goto out;
return -EINVAL;
}
down_read(&uts_sem);
@ -753,13 +752,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
len = strlen(res)+1;
if ((unsigned long)len > (unsigned long)count)
len = count;
if (copy_to_user(buf, res, len))
err = -EFAULT;
else
err = 0;
memcpy(tmp, res, len);
up_read(&uts_sem);
out:
return err;
if (copy_to_user(buf, tmp, len))
return -EFAULT;
return 0;
}
SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
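The osf_utsname, osf_getdomainname and osf_sysinfo hunks above all apply the same fix: copy the utsname data into a kernel stack buffer while uts_sem is held, release the semaphore, and only then call copy_to_user(), which can fault and sleep. A minimal sketch of the pattern (hypothetical helper, not code from the patch):

	static long copy_uts_field_to_user(char __user *ubuf, size_t ulen)
	{
		char tmp[__NEW_UTS_LEN + 1];
		size_t len;

		down_read(&uts_sem);
		len = min(ulen, sizeof(tmp));
		memcpy(tmp, utsname()->nodename, len);	/* snapshot under the lock */
		up_read(&uts_sem);

		if (copy_to_user(ubuf, tmp, len))	/* may sleep; lock already dropped */
			return -EFAULT;
		return 0;
	}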

@ -45,6 +45,9 @@ config ARC
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
config MIGHT_HAVE_PCI
bool

@ -16,7 +16,7 @@ endif
KBUILD_DEFCONFIG := nsim_700_defconfig
cflags-y += -fno-common -pipe -fno-builtin -D__linux__
cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
@ -140,16 +140,3 @@ dtbs: scripts
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
# Hacks to enable final link due to absence of link-time branch relexation
# and gcc choosing optimal(shorter) branches at -O3
#
# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
# However lib/decompress_inflate.o (.init.text) calls
# zlib_inflate_workspacesize (.text) causing relocation errors.
# Thus forcing all exten calls in this file to be long calls
export CFLAGS_decompress_inflate.o = -mmedium-calls
export CFLAGS_initramfs.o = -mmedium-calls
ifdef CONFIG_SMP
export CFLAGS_core.o = -mmedium-calls
endif

@ -48,7 +48,9 @@
})
/* Largest line length for either L1 or L2 is 128 bytes */
#define ARCH_DMA_MINALIGN 128
#define SMP_CACHE_BYTES 128
#define cache_line_size() SMP_CACHE_BYTES
#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);

@ -17,8 +17,11 @@
#ifndef __ASM_ARC_UDELAY_H
#define __ASM_ARC_UDELAY_H
#include <asm-generic/types.h>
#include <asm/param.h> /* HZ */
extern unsigned long loops_per_jiffy;
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__(

@ -34,9 +34,7 @@ struct machine_desc {
const char *name;
const char **dt_compat;
void (*init_early)(void);
#ifdef CONFIG_SMP
void (*init_per_cpu)(unsigned int);
#endif
void (*init_machine)(void);
void (*init_late)(void);

@ -31,10 +31,10 @@ void __init init_IRQ(void)
/* a SMP H/w block could do IPI IRQ request here */
if (plat_smp_ops.init_per_cpu)
plat_smp_ops.init_per_cpu(smp_processor_id());
#endif
if (machine_desc->init_per_cpu)
machine_desc->init_per_cpu(smp_processor_id());
#endif
}
/*

@ -47,7 +47,8 @@ SYSCALL_DEFINE0(arc_gettls)
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
{
struct pt_regs *regs = current_pt_regs();
int uval = -EFAULT;
u32 uval;
int ret;
/*
* This is only for old cores lacking LLOCK/SCOND, which by defintion
@ -60,23 +61,47 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
/* Z indicates to userspace if operation succeded */
regs->status32 &= ~STATUS_Z_MASK;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr));
if (!ret)
goto fail;
again:
preempt_disable();
if (__get_user(uval, uaddr))
goto done;
ret = __get_user(uval, uaddr);
if (ret)
goto fault;
if (uval != expected)
goto out;
ret = __put_user(new, uaddr);
if (ret)
goto fault;
if (uval == expected) {
if (!__put_user(new, uaddr))
regs->status32 |= STATUS_Z_MASK;
}
done:
out:
preempt_enable();
return uval;
fault:
preempt_enable();
if (unlikely(ret != -EFAULT))
goto fail;
down_read(&current->mm->mmap_sem);
ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
FAULT_FLAG_WRITE, NULL);
up_read(&current->mm->mmap_sem);
if (likely(!ret))
goto again;
fail:
force_sig(SIGSEGV, current);
return ret;
}
#ifdef CONFIG_ISA_ARCV2

@ -1035,7 +1035,7 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
unsigned long pfn)
{
unsigned int paddr = pfn << PAGE_SHIFT;
phys_addr_t paddr = pfn << PAGE_SHIFT;
u_vaddr &= PAGE_MASK;
@ -1055,8 +1055,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
unsigned long u_vaddr)
{
/* TBD: do we really need to clear the kernel mapping */
__flush_dcache_page(page_address(page), u_vaddr);
__flush_dcache_page(page_address(page), page_address(page));
__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
__flush_dcache_page((phys_addr_t)page_address(page),
(phys_addr_t)page_address(page));
}

@ -21,6 +21,7 @@
#error "Incorrect ctop.h include"
#endif
#include <linux/types.h>
#include <soc/nps/common.h>
/* core auxiliary registers */
@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst {
};
/* AUX registers definition */
struct nps_host_reg_aux_dpc {
union {
struct {
u32 ien:1, men:1, hen:1, reserved:29;
};
u32 value;
};
};
struct nps_host_reg_aux_udmc {
union {
struct {

@ -15,6 +15,8 @@
*/
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu)
/* Verify and set the value of the mtm hs counter */
static int __init set_mtm_hs_ctr(char *ctr_str)
{
long hs_ctr;
int hs_ctr;
int ret;
ret = kstrtol(ctr_str, 0, &hs_ctr);
ret = kstrtoint(ctr_str, 0, &hs_ctr);
if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) {
pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n",

@ -87,6 +87,11 @@
};
};
/* Table Table 5-79 of the TRM shows 480ab000 is reserved */
&usb_otg_hs {
status = "disabled";
};
&iva {
status = "disabled";
};

@ -535,6 +535,8 @@
touchscreen-size-x = <480>;
touchscreen-size-y = <272>;
wakeup-source;
};
tlv320aic3106: tlv320aic3106@1b {

@ -170,7 +170,7 @@
3700 5
3900 6
4000 7>;
cooling-cells = <2>;
#cooling-cells = <2>;
};
gpio-leds {

@ -216,7 +216,7 @@
reg = <0x18008000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};
@ -245,7 +245,7 @@
reg = <0x1800b000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};
@ -256,7 +256,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <0>;
@ -278,10 +278,10 @@
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
<GIC_SPI 97 IRQ_TYPE_NONE>,
<GIC_SPI 98 IRQ_TYPE_NONE>,
<GIC_SPI 99 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
};
};
@ -291,7 +291,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <1>;
@ -313,10 +313,10 @@
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 102 IRQ_TYPE_NONE>,
<GIC_SPI 103 IRQ_TYPE_NONE>,
<GIC_SPI 104 IRQ_TYPE_NONE>,
<GIC_SPI 105 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
};
};

@ -391,7 +391,7 @@
reg = <0x38000 0x50>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
dma-coherent;
status = "disabled";
@ -496,7 +496,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_NONE>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <0>;
@ -519,10 +519,10 @@
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 127 IRQ_TYPE_NONE>,
<GIC_SPI 128 IRQ_TYPE_NONE>,
<GIC_SPI 129 IRQ_TYPE_NONE>,
<GIC_SPI 130 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
brcm,pcie-msi-inten;
};
};
@ -533,7 +533,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <1>;
@ -556,10 +556,10 @@
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 133 IRQ_TYPE_NONE>,
<GIC_SPI 134 IRQ_TYPE_NONE>,
<GIC_SPI 135 IRQ_TYPE_NONE>,
<GIC_SPI 136 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
brcm,pcie-msi-inten;
};
};
@ -570,7 +570,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <2>;
@ -593,10 +593,10 @@
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>,
<GIC_SPI 140 IRQ_TYPE_NONE>,
<GIC_SPI 141 IRQ_TYPE_NONE>,
<GIC_SPI 142 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
brcm,pcie-msi-inten;
};
};

@ -365,7 +365,7 @@
i2c0: i2c@18009000 {
compatible = "brcm,iproc-i2c";
reg = <0x18009000 0x50>;
interrupts = <GIC_SPI 121 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clock-frequency = <100000>;

@ -518,11 +518,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0x226000 0x1000>;
interrupts = <42 IRQ_TYPE_EDGE_BOTH
43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH
45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH
47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH
49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>;
interrupts = <42 43 44 45 46 47 48 49 50>;
ti,ngpio = <144>;
ti,davinci-gpio-unbanked = <0>;
status = "disabled";

@ -644,7 +644,7 @@
dsa,member = <0 0>;
eeprom-length = <512>;
interrupt-parent = <&gpio6>;
interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
#interrupt-cells = <2>;

@ -206,6 +206,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x70>;
reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
};
};

@ -144,9 +144,11 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
CONFIG_USB_CHIPIDEA_ULPI=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_GADGET=y
CONFIG_USB_ETH=m
CONFIG_USB_ULPI_BUS=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y

@ -773,7 +773,7 @@ static struct gpiod_lookup_table mmc_gpios_table = {
GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
GPIO_ACTIVE_LOW),
GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
GPIO_ACTIVE_LOW),
GPIO_ACTIVE_HIGH),
},
};

@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void)
static inline void omap5_erratum_workaround_801819(void) { }
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/*
* Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
* ICIALLU) to activate the workaround for secondary Core.
* NOTE: it is assumed that the primary core's configuration is done
* by the boot loader (kernel will detect a misconfiguration and complain
* if this is not done).
*
* In General Purpose(GP) devices, ACR bit settings can only be done
* by ROM code in "secure world" using the smc call and there is no
* option to update the "firmware" on such devices. This also works for
* High security(HS) devices, as a backup option in case the
* "update" is not done in the "security firmware".
*/
static void omap5_secondary_harden_predictor(void)
{
u32 acr, acr_mask;
asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
/*
* ACTLR[0] (Enable invalidates of BTB with ICIALLU)
*/
acr_mask = BIT(0);
/* Do we already have it done.. if yes, skip expensive smc */
if ((acr & acr_mask) == acr_mask)
return;
acr |= acr_mask;
omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
__func__, smp_processor_id());
}
#else
static inline void omap5_secondary_harden_predictor(void) { }
#endif
static void omap4_secondary_init(unsigned int cpu)
{
/*
@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu)
set_cntfreq();
/* Configure ACR to disable streaming WA for 801819 */
omap5_erratum_workaround_801819();
/* Enable ACR to allow for ICUALLU workaround */
omap5_secondary_harden_predictor();
}
/*

@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
{
int i;
for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
void __iomem *base = irq_base(i);
saved_icmr[i] = __raw_readl(base + ICMR);
@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
{
int i;
for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
void __iomem *base = irq_base(i);
__raw_writel(saved_icmr[i], base + ICMR);
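A worked example of the loop-bound change above, assuming a hypothetical platform with 34 internal IRQs (illustrative only, not from the patch):

	int banks_old = 34 / 32;		/* 1: the second ICMR/ICLR bank is skipped       */
	int banks_new = DIV_ROUND_UP(34, 32);	/* 2: the partial bank is also saved and restored */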

@ -18,6 +18,7 @@ config ARCH_ROCKCHIP
select ARM_GLOBAL_TIMER
select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
select ZONE_DMA if ARM_LPAE
select PM
help
Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
containing the RK2928, RK30xx and RK31xx series.

@ -795,19 +795,28 @@ static int __mark_rodata_ro(void *unused)
return 0;
}
static int kernel_set_to_readonly __read_mostly;
void mark_rodata_ro(void)
{
kernel_set_to_readonly = 1;
stop_machine(__mark_rodata_ro, NULL, NULL);
}
void set_kernel_text_rw(void)
{
if (!kernel_set_to_readonly)
return;
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
current->active_mm);
}
void set_kernel_text_ro(void)
{
if (!kernel_set_to_readonly)
return;
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
current->active_mm);
}

@ -291,8 +291,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
break;
case KPROBE_REENTER:
/* A nested probe was hit in FIQ, it is a BUG */
pr_warn("Unrecoverable kprobe detected at %p.\n",
p->addr);
pr_warn("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
/* fall through */
default:
/* impossible cases */

@ -1517,7 +1517,6 @@ fail:
print_registers(&result_regs);
if (mem) {
pr_err("current_stack=%p\n", current_stack);
pr_err("expected_memory:\n");
print_memory(expected_memory, mem_size);
pr_err("result_memory:\n");

@ -742,7 +742,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
config HOLES_IN_ZONE
def_bool y
depends on NUMA
source kernel/Kconfig.preempt
source kernel/Kconfig.hz

@ -205,6 +205,7 @@ config ARCH_ROCKCHIP
select GPIOLIB
select PINCTRL
select PINCTRL_ROCKCHIP
select PM
select ROCKCHIP_TIMER
help
This enables support for the ARMv8 based Rockchip chipsets,

@ -7,7 +7,7 @@
&apb {
mali: gpu@c0000 {
compatible = "amlogic,meson-gxbb-mali", "arm,mali-450";
compatible = "amlogic,meson-gxl-mali", "arm,mali-450";
reg = <0x0 0xc0000 0x0 0x40000>;
interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,

@ -118,7 +118,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_NONE>;
interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <0>;
@ -149,7 +149,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_NONE>;
interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <4>;
@ -566,7 +566,7 @@
reg = <0x66080000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 394 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};
@ -594,7 +594,7 @@
reg = <0x660b0000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 395 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};

@ -43,6 +43,10 @@
enet-phy-lane-swap;
};
&sdio0 {
mmc-ddr-1_8v;
};
&uart2 {
status = "okay";
};

@ -42,3 +42,7 @@
&gphy0 {
enet-phy-lane-swap;
};
&sdio0 {
mmc-ddr-1_8v;
};

@ -409,7 +409,7 @@
reg = <0x000b0000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 177 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};
@ -453,7 +453,7 @@
reg = <0x000e0000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 178 IRQ_TYPE_NONE>;
interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};

@ -1132,14 +1132,14 @@
port@0 {
reg = <0>;
etf_out: endpoint {
etf_in: endpoint {
slave-mode;
remote-endpoint = <&funnel0_out>;
};
};
port@1 {
reg = <0>;
etf_in: endpoint {
etf_out: endpoint {
remote-endpoint = <&replicator_in>;
};
};

@ -331,7 +331,7 @@
reg = <0x0 0xff120000 0x0 0x100>;
interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
clock-names = "sclk_uart", "pclk_uart";
clock-names = "baudclk", "apb_pclk";
dmas = <&dmac 4>, <&dmac 5>;
#dma-cells = <2>;
pinctrl-names = "default";

@ -20,9 +20,14 @@
#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
#define CTR_DMINLINE_SHIFT 16
#define CTR_IMINLINE_SHIFT 0
#define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15
#define CTR_CACHE_MINLINE_MASK \
(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
#define ICACHE_POLICY_VPIPT 0

@ -45,7 +45,8 @@
#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
#define ARM64_HW_DBM 26
#define ARM64_SSBD 27
#define ARM64_MISMATCHED_CACHE_TYPE 28
#define ARM64_NCAPS 28
#define ARM64_NCAPS 29
#endif /* __ASM_CPUCAPS_H */

@ -17,8 +17,8 @@
*/
#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
@ -47,12 +47,18 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
}
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
int scope)
{
u64 mask = CTR_CACHE_MINLINE_MASK;
/* Skip matching the min line sizes for cache type check */
if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
mask ^= arm64_ftr_reg_ctrel0.strict_mask;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
return (read_cpuid_cachetype() & mask) !=
(arm64_ftr_reg_ctrel0.sys_val & mask);
}
static int cpu_enable_trap_ctr_access(void *__unused)
@ -475,7 +481,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.desc = "Mismatched cache line size",
.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
.matches = has_mismatched_cache_line_size,
.matches = has_mismatched_cache_type,
.def_scope = SCOPE_LOCAL_CPU,
.enable = cpu_enable_trap_ctr_access,
},
{
.desc = "Mismatched cache type",
.capability = ARM64_MISMATCHED_CACHE_TYPE,
.matches = has_mismatched_cache_type,
.def_scope = SCOPE_LOCAL_CPU,
.enable = cpu_enable_trap_ctr_access,
},

@ -180,14 +180,14 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
/*
* Linux can handle differing I-cache policies. Userspace JITs will
* make use of *minLine.
* If we have differing I-cache policies, report it as the weakest - VIPT.
*/
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
ARM64_FTR_END,
};

@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
break;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
pr_warn("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
BUG();
break;

@ -218,7 +218,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
*/
asmlinkage void secondary_start_kernel(void)
asmlinkage notrace void secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
unsigned int cpu;

@ -770,13 +770,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
size >> PAGE_SHIFT);
return NULL;
}
if (!coherent)
__dma_flush_area(page_to_virt(page), iosize);
addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
prot,
__builtin_return_address(0));
if (!addr) {
if (addr) {
memset(addr, 0, size);
if (!coherent)
__dma_flush_area(page_to_virt(page), iosize);
} else {
iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
dma_release_from_contiguous(dev, page,
size >> PAGE_SHIFT);

@ -288,11 +288,13 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
#endif /* CONFIG_NUMA */
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
int pfn_valid(unsigned long pfn)
{
return (pfn & PFN_MASK) == pfn && memblock_is_map_memory(pfn << PAGE_SHIFT);
phys_addr_t addr = pfn << PAGE_SHIFT;
if ((addr >> PAGE_SHIFT) != pfn)
return 0;
return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif
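A minimal sketch of what the new round-trip check rejects, assuming 4 KiB pages (PAGE_SHIFT == 12) and a deliberately bogus pfn (illustrative only):

	unsigned long pfn = 1UL << 55;	/* hypothetical out-of-range pfn */
	phys_addr_t addr = pfn << 12;	/* bit 67 is shifted out, so addr wraps to 0 */

	/* (addr >> 12) != pfn, so pfn_valid() now returns 0 instead of asking
	 * memblock about a wrapped-around physical address. */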

@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
pgtable_page_dtor(page);
__free_page(page);
}
@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
return page;
}
extern inline void pte_free(struct mm_struct *mm, struct page *page)
static inline void pte_free(struct mm_struct *mm, struct page *page)
{
pgtable_page_dtor(page);
__free_page(page);
}

@ -155,15 +155,11 @@ cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap
cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap
cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-Wa,-mips32 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-Wa,-mips32r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
-Wa,-mips64 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
-Wa,-mips64r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \

@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
*/
if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
cpu_wait = NULL;
/*
* BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
* Enable ExternalSync for sync instruction to take effect
*/
set_c0_config7(MIPS_CONF7_ES);
break;
#endif
}

@ -680,8 +680,6 @@
#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
/* ExternalSync */
#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
@ -2747,7 +2745,6 @@ __BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
__BUILD_SET_C0(config7)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)

@ -141,7 +141,7 @@ struct mips_fpu_struct {
#define NUM_DSP_REGS 6
typedef __u32 dspreg_t;
typedef unsigned long dspreg_t;
struct mips_dsp_state {
dspreg_t dspr[NUM_DSP_REGS];
@ -388,7 +388,20 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
#ifdef CONFIG_CPU_LOONGSON3
/*
* Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
* tight read loop is executed, because reads take priority over writes & the
* hardware (incorrectly) doesn't ensure that writes will eventually occur.
*
* Since spin loops of any kind should have a cpu_relax() in them, force an SFB
* flush from cpu_relax() such that any pending writes will become visible as
* expected.
*/
#define cpu_relax() smp_mb()
#else
#define cpu_relax() barrier()
#endif
/*
* Return_address is a replacement for __builtin_return_address(count)
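A minimal example of the situation the Loongson-3 comment above describes (illustrative only): a CPU posts a write and then spins in a tight read loop; without the smp_mb() in cpu_relax(), the posted write can sit in the store-fill-buffer indefinitely and the other CPU never sees it.

	WRITE_ONCE(request, 1);		/* ask the other CPU to do work          */
	while (!READ_ONCE(done))	/* tight read loop starves the SFB ...   */
		cpu_relax();		/* ... unless cpu_relax() flushes it     */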

@ -847,7 +847,7 @@ long arch_ptrace(struct task_struct *child, long request,
goto out;
}
dregs = __get_dsp_regs(child);
tmp = (unsigned long) (dregs[addr - DSP_BASE]);
tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:

@ -141,7 +141,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
goto out;
}
dregs = __get_dsp_regs(child);
tmp = (unsigned long) (dregs[addr - DSP_BASE]);
tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:

@ -4,12 +4,12 @@
#include "libgcc.h"
/*
* GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
* specific case only we'll implement it here.
* GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
* that specific case only we implement that intrinsic here.
*
* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
*/
#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
/* multiply 64-bit values, low 64-bits returned */
static inline long long notrace dmulu(long long a, long long b)

@ -221,12 +221,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
l.addi r3,r1,0 // pt_regs
/* r4 set be EXCEPTION_HANDLE */ // effective address of fault
/*
* __PHX__: TODO
*
* all this can be written much simpler. look at
* DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
*/
#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
l.lwz r6,PT_PC(r3) // address of an offending insn
l.lwz r6,0(r6) // instruction that caused pf
@ -258,7 +252,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
#else
l.lwz r6,PT_SR(r3) // SR
l.mfspr r6,r0,SPR_SR // SR
l.andi r6,r6,SPR_SR_DSX // check for delay slot exception
l.sfne r6,r0 // exception happened in delay slot
l.bnf 7f

@ -141,8 +141,7 @@
* r4 - EEAR exception EA
* r10 - current pointing to current_thread_info struct
* r12 - syscall 0, since we didn't come from syscall
* r13 - temp it actually contains new SR, not needed anymore
* r31 - handler address of the handler we'll jump to
* r30 - handler address of the handler we'll jump to
*
* handler has to save remaining registers to the exception
* ksp frame *before* tainting them!
@ -178,6 +177,7 @@
/* r1 is KSP, r30 is __pa(KSP) */ ;\
tophys (r30,r1) ;\
l.sw PT_GPR12(r30),r12 ;\
/* r4 use for tmp before EA */ ;\
l.mfspr r12,r0,SPR_EPCR_BASE ;\
l.sw PT_PC(r30),r12 ;\
l.mfspr r12,r0,SPR_ESR_BASE ;\
@ -197,7 +197,10 @@
/* r12 == 1 if we come from syscall */ ;\
CLEAR_GPR(r12) ;\
/* ----- turn on MMU ----- */ ;\
l.ori r30,r0,(EXCEPTION_SR) ;\
/* Carry DSX into exception SR */ ;\
l.mfspr r30,r0,SPR_SR ;\
l.andi r30,r30,SPR_SR_DSX ;\
l.ori r30,r30,(EXCEPTION_SR) ;\
l.mtspr r0,r30,SPR_ESR_BASE ;\
/* r30: EA address of handler */ ;\
LOAD_SYMBOL_2_GPR(r30,handler) ;\

@ -358,7 +358,7 @@ static inline int in_delay_slot(struct pt_regs *regs)
return 0;
}
#else
return regs->sr & SPR_SR_DSX;
return mfspr(SPR_SR) & SPR_SR_DSX;
#endif
}

@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
{
volatile unsigned int *a;
mb();
a = __ldcw_align(x);
while (__ldcw(a) == 0)
while (*a == 0)
@ -30,16 +29,15 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
local_irq_disable();
} else
cpu_relax();
mb();
}
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
mb();
a = __ldcw_align(x);
*a = 1;
mb();
*a = 1;
}
static inline int arch_spin_trylock(arch_spinlock_t *x)
@ -47,10 +45,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
volatile unsigned int *a;
int ret;
mb();
a = __ldcw_align(x);
ret = __ldcw(a) != 0;
mb();
return ret;
}

@ -629,12 +629,12 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
1: ldw,ma 0(%r26), %r28
1: ldw 0(%r26), %r28
sub,<> %r28, %r25, %r0
2: stw,ma %r24, 0(%r26)
2: stw %r24, 0(%r26)
/* Free lock */
sync
stw,ma %r20, 0(%sr2,%r20)
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
@ -798,30 +798,30 @@ cas2_action:
ldo 1(%r0),%r28
/* 8bit CAS */
13: ldb,ma 0(%r26), %r29
13: ldb 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
14: stb,ma %r24, 0(%r26)
14: stb %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
15: ldh,ma 0(%r26), %r29
15: ldh 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
16: sth,ma %r24, 0(%r26)
16: sth %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
17: ldw,ma 0(%r26), %r29
17: ldw 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
18: stw,ma %r24, 0(%r26)
18: stw %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
@ -829,10 +829,10 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
19: ldd,ma 0(%r26), %r29
19: ldd 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
20: std,ma %r24, 0(%r26)
20: std %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
@ -851,7 +851,7 @@ cas2_action:
cas2_end:
/* Free lock */
sync
stw,ma %r20, 0(%sr2,%r20)
stw %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */

@ -195,9 +195,6 @@ struct fadump_crash_info_header {
struct cpumask online_mask;
};
/* Crash memory ranges */
#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
struct fad_crash_memory_ranges {
unsigned long long base;
unsigned long long size;

@ -223,10 +223,17 @@ do { \
} \
} while (0)
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
*/
#define __long_type(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err; \
unsigned long __gu_val; \
__long_type(*(ptr)) __gu_val; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
@ -239,7 +246,7 @@ do { \
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \
__long_type(*(ptr)) __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
might_fault(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
@ -251,7 +258,7 @@ do { \
#define __get_user_nosleep(x, ptr, size) \
({ \
long __gu_err; \
unsigned long __gu_val; \
__long_type(*(ptr)) __gu_val; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
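A small userspace sketch of what the __long_type() macro added above selects (GCC/Clang extensions; the sizes in the comment assume ILP32 vs LP64 targets), which is why a 64-bit __get_user() value is no longer truncated through an unsigned long on 32-bit kernels:

	#include <stdio.h>

	#define __long_type(x) \
		__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

	int main(void)
	{
		unsigned long long v64 = 0;
		unsigned int v32 = 0;

		/* ILP32 target: prints 8 4; LP64 target: prints 8 8 */
		printf("%zu %zu\n", sizeof(__long_type(v64)), sizeof(__long_type(v32)));
		return 0;
	}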

@ -1452,6 +1452,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
TRAMP_REAL_BEGIN(rfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
std r1,PACA_EXRFI+EX_R12(r13)
ld r1,PACAKSAVE(r13)
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
@ -1486,12 +1488,15 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
ld r1,PACA_EXRFI+EX_R12(r13)
GET_SCRATCH0(r13);
rfid
TRAMP_REAL_BEGIN(hrfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
std r1,PACA_EXRFI+EX_R12(r13)
ld r1,PACAKSAVE(r13)
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
@ -1526,6 +1531,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
ld r1,PACA_EXRFI+EX_R12(r13)
GET_SCRATCH0(r13);
hrfid

@ -47,8 +47,10 @@ static struct fadump_mem_struct fdm;
static const struct fadump_mem_struct *fdm_active;
static DEFINE_MUTEX(fadump_mutex);
struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
struct fad_crash_memory_ranges *crash_memory_ranges;
int crash_memory_ranges_size;
int crash_mem_ranges;
int max_crash_mem_ranges;
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node,
@ -843,38 +845,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
return 0;
}
static inline void fadump_add_crash_memory(unsigned long long base,
static void free_crash_memory_ranges(void)
{
kfree(crash_memory_ranges);
crash_memory_ranges = NULL;
crash_memory_ranges_size = 0;
max_crash_mem_ranges = 0;
}
/*
* Allocate or reallocate crash memory ranges array in incremental units
* of PAGE_SIZE.
*/
static int allocate_crash_memory_ranges(void)
{
struct fad_crash_memory_ranges *new_array;
u64 new_size;
new_size = crash_memory_ranges_size + PAGE_SIZE;
pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
new_size);
new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
if (new_array == NULL) {
pr_err("Insufficient memory for setting up crash memory ranges\n");
free_crash_memory_ranges();
return -ENOMEM;
}
crash_memory_ranges = new_array;
crash_memory_ranges_size = new_size;
max_crash_mem_ranges = (new_size /
sizeof(struct fad_crash_memory_ranges));
return 0;
}
static inline int fadump_add_crash_memory(unsigned long long base,
unsigned long long end)
{
if (base == end)
return;
return 0;
if (crash_mem_ranges == max_crash_mem_ranges) {
int ret;
ret = allocate_crash_memory_ranges();
if (ret)
return ret;
}
pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
crash_mem_ranges, base, end - 1, (end - base));
crash_memory_ranges[crash_mem_ranges].base = base;
crash_memory_ranges[crash_mem_ranges].size = end - base;
crash_mem_ranges++;
return 0;
}
static void fadump_exclude_reserved_area(unsigned long long start,
static int fadump_exclude_reserved_area(unsigned long long start,
unsigned long long end)
{
unsigned long long ra_start, ra_end;
int ret = 0;
ra_start = fw_dump.reserve_dump_area_start;
ra_end = ra_start + fw_dump.reserve_dump_area_size;
if ((ra_start < end) && (ra_end > start)) {
if ((start < ra_start) && (end > ra_end)) {
fadump_add_crash_memory(start, ra_start);
fadump_add_crash_memory(ra_end, end);
ret = fadump_add_crash_memory(start, ra_start);
if (ret)
return ret;
ret = fadump_add_crash_memory(ra_end, end);
} else if (start < ra_start) {
fadump_add_crash_memory(start, ra_start);
ret = fadump_add_crash_memory(start, ra_start);
} else if (ra_end < end) {
fadump_add_crash_memory(ra_end, end);
ret = fadump_add_crash_memory(ra_end, end);
}
} else
fadump_add_crash_memory(start, end);
ret = fadump_add_crash_memory(start, end);
return ret;
}
static int fadump_init_elfcore_header(char *bufp)
@ -914,10 +966,11 @@ static int fadump_init_elfcore_header(char *bufp)
* Traverse through memblock structure and setup crash memory ranges. These
* ranges will be used create PT_LOAD program headers in elfcore header.
*/
static void fadump_setup_crash_memory_ranges(void)
static int fadump_setup_crash_memory_ranges(void)
{
struct memblock_region *reg;
unsigned long long start, end;
int ret;
pr_debug("Setup crash memory ranges.\n");
crash_mem_ranges = 0;
@ -928,7 +981,9 @@ static void fadump_setup_crash_memory_ranges(void)
* specified during fadump registration. We need to create a separate
* program header for this chunk with the correct offset.
*/
fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
if (ret)
return ret;
for_each_memblock(memory, reg) {
start = (unsigned long long)reg->base;
@ -948,8 +1003,12 @@ static void fadump_setup_crash_memory_ranges(void)
}
/* add this range excluding the reserved dump area. */
fadump_exclude_reserved_area(start, end);
ret = fadump_exclude_reserved_area(start, end);
if (ret)
return ret;
}
return 0;
}
/*
@ -1072,6 +1131,7 @@ static int register_fadump(void)
{
unsigned long addr;
void *vaddr;
int ret;
/*
* If no memory is reserved then we can not register for firmware-
@ -1080,7 +1140,9 @@ static int register_fadump(void)
if (!fw_dump.reserve_dump_area_size)
return -ENODEV;
fadump_setup_crash_memory_ranges();
ret = fadump_setup_crash_memory_ranges();
if (ret)
return ret;
addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
/* Initialize fadump crash info header. */
@ -1158,6 +1220,7 @@ void fadump_cleanup(void)
} else if (fw_dump.dump_registered) {
/* Un-register Firmware-assisted dump if it was registered. */
fadump_unregister_dump(&fdm);
free_crash_memory_ranges();
}
}

@ -130,6 +130,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
long i, j, ret = 0, locked_entries = 0;
unsigned int pageshift;
unsigned long flags;
unsigned long cur_ua;
struct page *page = NULL;
mutex_lock(&mem_list_mutex);
@ -178,7 +179,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
}
for (i = 0; i < entries; ++i) {
if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
cur_ua = ua + (i << PAGE_SHIFT);
if (1 != get_user_pages_fast(cur_ua,
1/* pages */, 1/* iswrite */, &page)) {
ret = -EFAULT;
for (j = 0; j < i; ++j)
@ -197,7 +199,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
if (is_migrate_cma_page(page)) {
if (mm_iommu_move_page_from_cma(page))
goto populate;
if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
if (1 != get_user_pages_fast(cur_ua,
1/* pages */, 1/* iswrite */,
&page)) {
ret = -EFAULT;
@ -211,20 +213,21 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
}
populate:
pageshift = PAGE_SHIFT;
if (PageCompound(page)) {
if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
pte_t *pte;
struct page *head = compound_head(page);
unsigned int compshift = compound_order(head);
unsigned int pteshift;
local_irq_save(flags); /* disables as well */
pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
local_irq_restore(flags);
pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);
/* Double check it is still the same pinned page */
if (pte && pte_page(*pte) == head &&
pageshift == compshift)
pageshift = max_t(unsigned int, pageshift,
pteshift == compshift + PAGE_SHIFT)
pageshift = max_t(unsigned int, pteshift,
PAGE_SHIFT);
local_irq_restore(flags);
}
mem->pageshift = min(mem->pageshift, pageshift);
mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;

@ -322,6 +322,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
u64 imm64;
u8 *func;
u32 true_cond;
u32 tmp_idx;
/*
* addrs[] maps a BPF bytecode address into a real offset from
@ -681,11 +682,7 @@ emit_clear:
case BPF_STX | BPF_XADD | BPF_W:
/* Get EA into TMP_REG_1 */
PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
/* error if EA is not word-aligned */
PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
PPC_LI(b2p[BPF_REG_0], 0);
PPC_JMP(exit_addr);
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
/* add value from src_reg into this */
@ -693,32 +690,16 @@ emit_clear:
/* store result back */
PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
/* we're done if this succeeded */
PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
/* otherwise, let's try once more */
PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
/* exit if the store was not successful */
PPC_LI(b2p[BPF_REG_0], 0);
PPC_BCC(COND_NE, exit_addr);
PPC_BCC_SHORT(COND_NE, tmp_idx);
break;
/* *(u64 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_DW:
PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
/* error if EA is not doubleword-aligned */
PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
PPC_LI(b2p[BPF_REG_0], 0);
PPC_JMP(exit_addr);
PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
tmp_idx = ctx->idx * 4;
PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
PPC_LI(b2p[BPF_REG_0], 0);
PPC_BCC(COND_NE, exit_addr);
PPC_BCC_SHORT(COND_NE, tmp_idx);
break;
/*

@ -9,8 +9,10 @@
* option) any later version.
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@ -150,3 +152,5 @@ static int __init t1042rdb_diu_init(void)
}
early_initcall(t1042rdb_diu_init);
MODULE_LICENSE("GPL");

@ -3286,12 +3286,49 @@ static void pnv_pci_ioda_create_dbgfs(void)
#endif /* CONFIG_DEBUG_FS */
}
static void pnv_pci_enable_bridge(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
struct pci_bus *child;
/* Empty bus ? bail */
if (list_empty(&bus->devices))
return;
/*
* If there's a bridge associated with that bus enable it. This works
* around races in the generic code if the enabling is done during
* parallel probing. This can be removed once those races have been
* fixed.
*/
if (dev) {
int rc = pci_enable_device(dev);
if (rc)
pci_err(dev, "Error enabling bridge (%d)\n", rc);
pci_set_master(dev);
}
/* Perform the same to child busses */
list_for_each_entry(child, &bus->children, node)
pnv_pci_enable_bridge(child);
}
static void pnv_pci_enable_bridges(void)
{
struct pci_controller *hose;
list_for_each_entry(hose, &hose_list, list_node)
pnv_pci_enable_bridge(hose->bus);
}
static void pnv_pci_ioda_fixup(void)
{
pnv_pci_ioda_setup_PEs();
pnv_pci_ioda_setup_iommu_api();
pnv_pci_ioda_create_dbgfs();
pnv_pci_enable_bridges();
#ifdef CONFIG_EEH
eeh_init();
eeh_addr_cache_build();

@ -360,7 +360,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
}
savep = __va(regs->gpr[3]);
regs->gpr[3] = savep[0]; /* restore original r3 */
regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
/* If it isn't an extended log we can use the per cpu 64bit buffer */
h = (struct rtas_error_log *)&savep[1];
@ -371,7 +371,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
int len, error_log_length;
error_log_length = 8 + rtas_error_extended_log_length(h);
len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
memcpy(global_mce_data_buf, h, len);
errhdr = (struct rtas_error_log *)global_mce_data_buf;

@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
/* IO map the message register block. */
of_address_to_resource(np, 0, &rsrc);
msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
if (!msgr_block_addr) {
dev_err(&dev->dev, "Failed to iomap MPIC message registers");
return -EFAULT;
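The mpic_msgr change above works because a struct resource's end address is inclusive; the resource_size() helper (include/linux/ioport.h) is essentially the following, so the removed open-coded subtraction mapped one byte too few:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;	/* end is inclusive */
	}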

@ -262,7 +262,6 @@ struct qdio_outbuf_state {
void *user;
};
#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
#define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
#define CHSC_AC1_INITIATE_INPUTQ 0x80

@ -404,11 +404,13 @@ static void *get_vmcoreinfo_old(unsigned long *size)
if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
sizeof(nt_name) - 1))
return NULL;
if (strcmp(nt_name, "VMCOREINFO") != 0)
if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
return NULL;
vmcoreinfo = kzalloc_panic(note.n_descsz);
if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz))
if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
kfree(vmcoreinfo);
return NULL;
}
*size = note.n_descsz;
return vmcoreinfo;
}
@ -418,15 +420,20 @@ static void *get_vmcoreinfo_old(unsigned long *size)
*/
static void *nt_vmcoreinfo(void *ptr)
{
const char *name = VMCOREINFO_NOTE_NAME;
unsigned long size;
void *vmcoreinfo;
vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
if (!vmcoreinfo)
if (vmcoreinfo)
return nt_init_name(ptr, 0, vmcoreinfo, size, name);
vmcoreinfo = get_vmcoreinfo_old(&size);
if (!vmcoreinfo)
return ptr;
return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
kfree(vmcoreinfo);
return ptr;
}
/*

@ -17,7 +17,7 @@
ENTRY(memmove)
ltgr %r4,%r4
lgr %r1,%r2
bzr %r14
jz .Lmemmove_exit
aghi %r4,-1
clgr %r2,%r3
jnh .Lmemmove_forward
@ -36,6 +36,7 @@ ENTRY(memmove)
.Lmemmove_forward_remainder:
larl %r5,.Lmemmove_mvc
ex %r4,0(%r5)
.Lmemmove_exit:
BR_EX %r14
.Lmemmove_reverse:
ic %r0,0(%r4,%r3)
@ -65,7 +66,7 @@ EXPORT_SYMBOL(memmove)
*/
ENTRY(memset)
ltgr %r4,%r4
bzr %r14
jz .Lmemset_exit
ltgr %r3,%r3
jnz .Lmemset_fill
aghi %r4,-1
@ -80,12 +81,13 @@ ENTRY(memset)
.Lmemset_clear_remainder:
larl %r3,.Lmemset_xc
ex %r4,0(%r3)
.Lmemset_exit:
BR_EX %r14
.Lmemset_fill:
stc %r3,0(%r2)
cghi %r4,1
lgr %r1,%r2
ber %r14
je .Lmemset_fill_exit
aghi %r4,-2
srlg %r3,%r4,8
ltgr %r3,%r3
@ -97,6 +99,7 @@ ENTRY(memset)
.Lmemset_fill_remainder:
larl %r3,.Lmemset_mvc
ex %r4,0(%r3)
.Lmemset_fill_exit:
BR_EX %r14
.Lmemset_xc:
xc 0(1,%r1),0(%r1)
@ -111,7 +114,7 @@ EXPORT_SYMBOL(memset)
*/
ENTRY(memcpy)
ltgr %r4,%r4
bzr %r14
jz .Lmemcpy_exit
aghi %r4,-1
srlg %r5,%r4,8
ltgr %r5,%r5
@ -120,6 +123,7 @@ ENTRY(memcpy)
.Lmemcpy_remainder:
larl %r5,.Lmemcpy_mvc
ex %r4,0(%r5)
.Lmemcpy_exit:
BR_EX %r14
.Lmemcpy_loop:
mvc 0(256,%r1),0(%r3)

@ -486,6 +486,8 @@ retry:
/* No reason to continue if interrupted by SIGKILL. */
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
fault = VM_FAULT_SIGNAL;
if (flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_up;
goto out;
}
if (unlikely(fault & VM_FAULT_ERROR))

@ -271,7 +271,7 @@ void arch_set_page_states(int make_stable)
list_for_each(l, &zone->free_area[order].free_list[t]) {
page = list_entry(l, struct page, lru);
if (make_stable)
set_page_stable_dat(page, 0);
set_page_stable_dat(page, order);
else
set_page_unused(page, order);
}

@ -518,8 +518,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
/* br %r1 */
_EMIT2(0x07f1);
} else {
/* larl %r1,.+14 */
EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
/* ex 0,S390_lowcore.br_r1_tampoline */
EMIT4_DISP(0x44000000, REG_0, REG_0,
offsetof(struct lowcore, br_r1_trampoline));
@ -1403,6 +1401,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
goto free_addrs;
}
if (bpf_jit_prog(&jit, fp)) {
bpf_jit_binary_free(header);
fp = orig_fp;
goto free_addrs;
}

@ -134,26 +134,14 @@ void __init numa_setup(void)
{
pr_info("NUMA mode: %s\n", mode->name);
nodes_clear(node_possible_map);
/* Initially attach all possible CPUs to node 0. */
cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
if (mode->setup)
mode->setup();
numa_setup_memory();
memblock_dump_all();
}
/*
* numa_init_early() - Initialization initcall
*
* This runs when only one CPU is online and before the first
* topology update is called for by the scheduler.
*/
static int __init numa_init_early(void)
{
/* Attach all possible CPUs to node 0 for now. */
cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
return 0;
}
early_initcall(numa_init_early);
/*
* numa_init_late() - Initialization initcall
*

@ -420,6 +420,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
hwirq = 0;
for_each_pci_msi_entry(msi, pdev) {
rc = -EIO;
if (hwirq >= msi_vecs)
break;
irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
if (irq < 0)
return -ENOMEM;

@ -14,6 +14,7 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += module.h
generic-y += msi.h
generic-y += preempt.h
generic-y += rwsem.h
generic-y += serial.h

@ -205,6 +205,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
asmlinkage long sys_getdomainname(char __user *name, int len)
{
int nlen, err;
char tmp[__NEW_UTS_LEN + 1];
if (len < 0)
return -EINVAL;
@ -214,13 +215,16 @@ asmlinkage long sys_getdomainname(char __user *name, int len)
nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL;
if (nlen > len)
goto out;
goto out_unlock;
memcpy(tmp, utsname()->domainname, nlen);
err = -EFAULT;
if (!copy_to_user(name, utsname()->domainname, nlen))
err = 0;
up_read(&uts_sem);
out:
if (copy_to_user(name, tmp, nlen))
return -EFAULT;
return 0;
out_unlock:
up_read(&uts_sem);
return err;
}

@ -528,6 +528,7 @@ extern void check_pending(int signum);
SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
int nlen, err;
char tmp[__NEW_UTS_LEN + 1];
if (len < 0)
return -EINVAL;
@ -537,13 +538,16 @@ SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL;
if (nlen > len)
goto out;
goto out_unlock;
memcpy(tmp, utsname()->domainname, nlen);
err = -EFAULT;
if (!copy_to_user(name, utsname()->domainname, nlen))
err = 0;
up_read(&uts_sem);
out:
if (copy_to_user(name, tmp, nlen))
return -EFAULT;
return 0;
out_unlock:
up_read(&uts_sem);
return err;
}
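
The two getdomainname() hunks above apply the same pattern: snapshot the string into an on-stack buffer while uts_sem is held, drop the semaphore, and only then copy toward user space, so a faulting copy_to_user() can no longer run (and potentially sleep) under the lock. Below is a minimal user-space sketch of that pattern; the pthread rwlock, the copy_out() stand-in and the get_domainname() wrapper are illustrative assumptions, not kernel code.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NAME_LEN 65                      /* stands in for __NEW_UTS_LEN + 1 */

static pthread_rwlock_t uts_lock = PTHREAD_RWLOCK_INITIALIZER;
static char domainname[NAME_LEN] = "example.local";

/* Stand-in for copy_to_user(): may fault or block, so call it unlocked. */
static int copy_out(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;                        /* 0 == success, as with copy_to_user() */
}

static int get_domainname(char *buf, int len)
{
	char tmp[NAME_LEN];
	int nlen;

	pthread_rwlock_rdlock(&uts_lock);
	nlen = (int)strlen(domainname) + 1;
	if (nlen > len) {
		pthread_rwlock_unlock(&uts_lock);
		return -1;               /* -EINVAL in the kernel version */
	}
	memcpy(tmp, domainname, nlen);   /* snapshot under the lock */
	pthread_rwlock_unlock(&uts_lock);

	return copy_out(buf, tmp, nlen) ? -2 : 0;   /* -EFAULT analogue */
}

int main(void)
{
	char buf[NAME_LEN];
	int rc = get_domainname(buf, sizeof(buf));

	printf("rc=%d domainname=%s\n", rc, rc ? "" : buf);
	return 0;
}

Build with -pthread; the point is only the ordering (copy under the lock into tmp, copy out after unlocking), not the locking primitive itself.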

@ -813,7 +813,7 @@ static void __init get_tick_patch(void)
}
}
static void init_tick_ops(struct sparc64_tick_ops *ops)
static void __init init_tick_ops(struct sparc64_tick_ops *ops)
{
unsigned long freq, quotient, tick;

@ -170,6 +170,7 @@ config X86
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE
select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
select HAVE_STACK_VALIDATION if X86_64

@ -104,9 +104,13 @@ define cmd_check_data_rel
done
endef
# We need to run two commands under "if_changed", so merge them into a
# single invocation.
quiet_cmd_check-and-link-vmlinux = LD $@
cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
$(call if_changed,check_data_rel)
$(call if_changed,ld)
$(call if_changed,check-and-link-vmlinux)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE

@ -74,9 +74,9 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
$(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-fno-omit-frame-pointer -foptimize-sibling-calls \
-DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
-DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
#
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@ -147,11 +147,13 @@ KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
$(obj)/vdso32.so.dbg: FORCE \

@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
struct perf_event *event = pcpu->event;
struct hw_perf_event *hwc = &event->hw;
struct hw_perf_event *hwc;
struct perf_sample_data data;
struct perf_raw_record raw;
struct pt_regs regs;
@ -602,6 +602,10 @@ fail:
return 0;
}
if (WARN_ON_ONCE(!event))
goto fail;
hwc = &event->hw;
msr = hwc->config_base;
buf = ibs_data.regs;
rdmsrl(msr, *buf);
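
The IBS hunk above moves the hwc = &event->hw assignment after the WARN_ON_ONCE(!event) check, so nothing derived from a possibly-NULL event pointer is formed before the check. A tiny stand-alone sketch of the same ordering; the struct names and the read_config() helper are hypothetical, not kernel code.

#include <stdio.h>

struct hw_event { unsigned long config_base; };
struct event    { int state; struct hw_event hw; };

static unsigned long read_config(struct event *event)
{
	struct hw_event *hwc;

	if (!event)              /* validate first ... */
		return 0;

	hwc = &event->hw;        /* ... then form the member pointer */
	return hwc->config_base;
}

int main(void)
{
	struct event e = { .state = 1, .hw = { .config_base = 0x123 } };

	printf("%#lx %#lx\n", read_config(&e), read_config(NULL));
	return 0;
}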

@ -2462,7 +2462,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
perf_callchain_store(entry, regs->ip);
if (!current->mm)
if (!nmi_uaccess_okay())
return;
if (perf_callchain_user32(regs, entry))

@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void)
return flags;
}
static inline void native_restore_fl(unsigned long flags)
extern inline void native_restore_fl(unsigned long flags);
extern inline void native_restore_fl(unsigned long flags)
{
asm volatile("push %0 ; popf"
: /* no output */

@ -200,6 +200,7 @@ enum mce_notifier_prios {
MCE_PRIO_LOWEST = 0,
};
struct notifier_block;
extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);

@ -2,6 +2,8 @@
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H
#include <asm/atomic64_32.h>
/*
* Intel Physical Address Extension (PAE) Mode - three-level page
* tables on PPro+ CPUs.
@ -147,10 +149,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
pte_t res;
/* xchg acts as a barrier before the setting of the high bits */
res.pte_low = xchg(&ptep->pte_low, 0);
res.pte_high = ptep->pte_high;
ptep->pte_high = 0;
res.pte = (pteval_t)atomic64_xchg((atomic64_t *)ptep, 0);
return res;
}
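
The pgtable-3level.h hunk above replaces the two-step clear (xchg of the low word, then a plain read and store of the high word) with a single atomic64_xchg(), so there is never a window in which only half of the PAE entry is cleared. A user-space analogue, assuming GCC/Clang __atomic builtins stand in for atomic64_xchg() (link with -latomic on targets without native 64-bit atomics):

#include <stdint.h>
#include <stdio.h>

/* A PAE pte is one 64-bit entry that 32-bit code would otherwise touch in halves. */
typedef struct { uint64_t val; } pte_t;

static uint64_t ptep_get_and_clear(pte_t *ptep)
{
	/* One indivisible 64-bit exchange: old value out, zero in. */
	return __atomic_exchange_n(&ptep->val, 0, __ATOMIC_SEQ_CST);
}

int main(void)
{
	pte_t pte = { .val = 0x8000000000000067ULL };	/* NX bit plus low flags */
	uint64_t old = ptep_get_and_clear(&pte);

	printf("old=%#llx now=%#llx\n",
	       (unsigned long long)old, (unsigned long long)pte.val);
	return 0;
}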

@ -132,6 +132,8 @@ struct cpuinfo_x86 {
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;
/* Address space bits used by the cache internally */
u8 x86_cache_bits;
} __randomize_layout;
struct cpuid_regs {
@ -180,9 +182,9 @@ extern const struct seq_operations cpuinfo_op;
extern void cpu_detect(struct cpuinfo_x86 *c);
static inline unsigned long l1tf_pfn_limit(void)
static inline unsigned long long l1tf_pfn_limit(void)
{
return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
extern void early_cpu_init(void);

@ -175,8 +175,16 @@ struct tlb_state {
* are on. This means that it may not match current->active_mm,
* which will contain the previous user mm when we're in lazy TLB
* mode even if we've already switched back to swapper_pg_dir.
*
* During switch_mm_irqs_off(), loaded_mm will be set to
* LOADED_MM_SWITCHING during the brief interrupts-off window
* when CR3 and loaded_mm would otherwise be inconsistent. This
* is for nmi_uaccess_okay()'s benefit.
*/
struct mm_struct *loaded_mm;
#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
u16 loaded_mm_asid;
u16 next_asid;
/* last user mm's ctx id */
@ -246,6 +254,38 @@ struct tlb_state {
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
/*
* Blindly accessing user memory from NMI context can be dangerous
* if we're in the middle of switching the current user task or
* switching the loaded mm. It can also be dangerous if we
* interrupted some kernel code that was temporarily using a
* different mm.
*/
static inline bool nmi_uaccess_okay(void)
{
struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
struct mm_struct *current_mm = current->mm;
VM_WARN_ON_ONCE(!loaded_mm);
/*
* The condition we want to check is
* current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
* if we're running in a VM with shadow paging, and nmi_uaccess_okay()
* is supposed to be reasonably fast.
*
* Instead, we check the almost equivalent but somewhat conservative
* condition below, and we rely on the fact that switch_mm_irqs_off()
* sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
*/
if (loaded_mm != current_mm)
return false;
VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
return true;
}
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{

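As a usage note (not part of this patch): nmi_uaccess_okay() is meant to gate any user-memory access performed from NMI context, exactly as the perf_callchain_user() hunk further up does before walking the user stack. A kernel-flavoured sketch under that assumption; read_user_word_in_nmi() is a hypothetical wrapper, while copy_from_user_nmi() is the existing x86 helper that returns the number of bytes it failed to copy.

/* Hypothetical helper, not from this patch: read one word of user memory
 * from NMI context, bailing out when CR3 and loaded_mm may be inconsistent. */
static unsigned long read_user_word_in_nmi(const void __user *uptr)
{
	unsigned long word = 0;

	if (!nmi_uaccess_okay())	/* mm being switched, or wrong mm loaded */
		return 0;

	/* copy_from_user_nmi() returns the number of bytes NOT copied. */
	if (copy_from_user_nmi(&word, uptr, sizeof(word)))
		return 0;

	return word;
}
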
@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void)
*
* If RDPID is available, use it.
*/
alternative_io ("lsl %[p],%[seg]",
alternative_io ("lsl %[seg],%[p]",
".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
X86_FEATURE_RDPID,
[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));

@ -652,6 +652,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
/*
* These CPUs all support 44bits physical address space internally in the
* cache but CPUID can report a smaller number of physical address bits.
*
* The L1TF mitigation uses the top most address bit for the inversion of
* non present PTEs. When the installed memory reaches into the top most
* address bit due to memory holes, which has been observed on machines
* which report 36bits physical address bits and have 32G RAM installed,
* then the mitigation range check in l1tf_select_mitigation() triggers.
* This is a false positive because the mitigation is still possible due to
* the fact that the cache uses 44bit internally. Use the cache bits
* instead of the reported physical bits and adjust them on the affected
* machines to 44bit if the reported bits are less than 44.
*/
static void override_cache_bits(struct cpuinfo_x86 *c)
{
if (c->x86 != 6)
return;
switch (c->x86_model) {
case INTEL_FAM6_NEHALEM:
case INTEL_FAM6_WESTMERE:
case INTEL_FAM6_SANDYBRIDGE:
case INTEL_FAM6_IVYBRIDGE:
case INTEL_FAM6_HASWELL_CORE:
case INTEL_FAM6_HASWELL_ULT:
case INTEL_FAM6_HASWELL_GT3E:
case INTEL_FAM6_BROADWELL_CORE:
case INTEL_FAM6_BROADWELL_GT3E:
case INTEL_FAM6_SKYLAKE_MOBILE:
case INTEL_FAM6_SKYLAKE_DESKTOP:
case INTEL_FAM6_KABYLAKE_MOBILE:
case INTEL_FAM6_KABYLAKE_DESKTOP:
if (c->x86_cache_bits < 44)
c->x86_cache_bits = 44;
break;
}
}
static void __init l1tf_select_mitigation(void)
{
u64 half_pa;
@ -659,6 +698,8 @@ static void __init l1tf_select_mitigation(void)
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return;
override_cache_bits(&boot_cpu_data);
switch (l1tf_mitigation) {
case L1TF_MITIGATION_OFF:
case L1TF_MITIGATION_FLUSH_NOWARN:
@ -678,14 +719,13 @@ static void __init l1tf_select_mitigation(void)
return;
#endif
/*
* This is extremely unlikely to happen because almost all
* systems have far more MAX_PA/2 than RAM can be fit into
* DIMM slots.
*/
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
half_pa);
pr_info("However, doing so will make a part of your RAM unusable.\n");
pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
return;
}
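
The comment above boils down to simple arithmetic: with PAGE_SHIFT = 12, the patched l1tf_pfn_limit() puts the MAX_PA/2 boundary at 2^(bits-1) bytes, i.e. 32 GiB for a CPU that reports 36 physical address bits but 8 TiB once the 44-bit cache width is used instead, which is why a 36-bit machine with 32 GB of RAM tripped the warning. A small stand-alone illustration, with pfn_limit() re-declared locally and parameterised on the address bits:

#include <stdio.h>

#define PAGE_SHIFT 12
#define BIT_ULL(nr) (1ULL << (nr))

/* Mirrors the patched l1tf_pfn_limit(), taking the address bits as a parameter. */
static unsigned long long pfn_limit(unsigned int cache_bits)
{
	return BIT_ULL(cache_bits - 1 - PAGE_SHIFT);
}

int main(void)
{
	unsigned int bits[] = { 36, 44 };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned long long half_pa = pfn_limit(bits[i]) << PAGE_SHIFT;

		printf("%u address bits -> MAX_PA/2 boundary at %llu GiB\n",
		       bits[i], half_pa >> 30);
	}
	return 0;
}

This prints 32 GiB for 36 bits and 8192 GiB for 44 bits, matching the false-positive scenario described in the comment.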

@ -890,6 +890,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
}
}
#endif
c->x86_cache_bits = c->x86_phys_bits;
}
static const __initconst struct x86_cpu_id cpu_no_speculation[] = {

@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return false;
if (c->x86 != 6)
return false;
for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
if (c->x86_model == spectre_bad_microcodes[i].model &&
c->x86_stepping == spectre_bad_microcodes[i].stepping)

@ -190,8 +190,11 @@ static void save_microcode_patch(void *data, unsigned int size)
p = memdup_patch(data, size);
if (!p)
pr_err("Error allocating buffer %p\n", data);
else
else {
list_replace(&iter->plist, &p->plist);
kfree(iter->data);
kfree(iter);
}
}
}

@ -17,6 +17,7 @@
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>
#include <linux/kasan.h>
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
@ -298,7 +299,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
* We're not going to return, but we might be on an IST stack or
* have very little stack space left. Rewind the stack and kill
* the task.
* Before we rewind the stack, we have to tell KASAN that we're going to
* reuse the task stack and that existing poisons are invalid.
*/
kasan_unpoison_task_stack(current);
rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

@ -532,7 +532,7 @@ static int bzImage64_cleanup(void *loader_data)
static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
{
return verify_pefile_signature(kernel, kernel_len,
NULL,
VERIFY_USE_SECONDARY_KEYRING,
VERIFYING_KEXEC_PE_SIGNATURE);
}
#endif

@ -143,6 +143,7 @@ static unsigned long kvm_get_tsc_khz(void)
src = &hv_clock[cpu].pvti;
tsc_khz = pvclock_tsc_khz(src);
put_cpu();
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
return tsc_khz;
}

Some files were not shown because too many files have changed in this diff.