This is the 4.14.77 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlvIM0wACgkQONu9yGCS
 aT5bkRAAqvQaEuy5Cm0BNrn5/f3SIo5H9NcEipxNX8/5KMG2FKT982VkxW5NFDBa
 AiHuzSUSAwufpBJai5TS0VhyjzkhYucoKsmgiIC+Or5rGZGWIkf6yw9LJXwEs3Jl
 xtuhCjs23CqlBjivPFMmFeDN74eIoPcLxjagLjjvSPEusYM8MNXedf87MUd1CNa3
 E07k/8CpD8RiDgQBi8ehjL19TSD4OhhhdPzQqEx1nGLsyiBrv4KsfOSOslZPUw0m
 Wv5X5JJnyk0+nUhtq2KJwkgTqz54IMJzmGvaMPGLrZq2ArwT6ac3Ru3rMYZaDido
 TTy4f3mHb25upZHJ2QjCeuKxFpQMJfvkObj/rB4fOQSu/7MERiKK4jINWRL/g1h8
 g5tKOf16uR7c3ItjXHnracoc3hKSEr4KguvoLit+CQAslcaXSnyxdKmRuEy9Akar
 5nK6GsbxzHnT55ZsgxYMaCIfy4J9K2E61qoggJ+Dif8Cm97CGMkzb2mxygvKLXiV
 ROQ8DWyDvOqUerYBVu06aRfXNZWsBPzyZFsgKUYTKzYelY0AaiFe69D19NazfTGC
 XiFWhSgTlgzhBgtTJByrcGAUBelORfQP+nzB2rtZ3qfacGaJUskRpZn/Ii96a2Cb
 RBVyxs5oarmsdhSqn/0rTXsnOGsz3kIfQFl/AlbzmeRzhD+rbxE=
 =Z+mk
 -----END PGP SIGNATURE-----

Merge 4.14.77 into android-4.14-p

Changes in 4.14.77
	bnxt_en: Fix TX timeout during netpoll.
	bnxt_en: free hwrm resources, if driver probe fails.
	bonding: avoid possible dead-lock
	ip6_tunnel: be careful when accessing the inner header
	ip_tunnel: be careful when accessing the inner header
	ipv4: fix use-after-free in ip_cmsg_recv_dstaddr()
	ipv6: take rcu lock in rawv6_send_hdrinc()
	net: dsa: bcm_sf2: Call setup during switch resume
	net: hns: fix for unmapping problem when SMMU is on
	net: ipv4: update fnhe_pmtu when first hop's MTU changes
	net/ipv6: Display all addresses in output of /proc/net/if_inet6
	netlabel: check for IPV4MASK in addrinfo_get
	net: mvpp2: Extract the correct ethtype from the skb for tx csum offload
	net: mvpp2: fix a txq_done race condition
	net: sched: Add policy validation for tc attributes
	net: systemport: Fix wake-up interrupt race during resume
	net/usb: cancel pending work when unbinding smsc75xx
	qlcnic: fix Tx descriptor corruption on 82xx devices
	qmi_wwan: Added support for Gemalto's Cinterion ALASxx WWAN interface
	rtnetlink: fix rtnl_fdb_dump() for ndmsg header
	rtnl: limit IFLA_NUM_TX_QUEUES and IFLA_NUM_RX_QUEUES to 4096
	sctp: update dst pmtu with the correct daddr
	team: Forbid enslaving team device to itself
	tipc: fix flow control accounting for implicit connect
	udp: Unbreak modules that rely on external __skb_recv_udp() availability
	net: stmmac: Fixup the tail addr setting in xmit path
	net/packet: fix packet drop as of virtio gso
	net: dsa: bcm_sf2: Fix unbind ordering
	net/mlx5e: Set vlan masks for all offloaded TC rules
	net: aquantia: memory corruption on jumbo frames
	net/mlx5: E-Switch, Fix out of bound access when setting vport rate
	bonding: pass link-local packets to bonding master also.
	bonding: fix warning message
	nfp: avoid soft lockups under control message storm
	bnxt_en: don't try to offload VLAN 'modify' action
	net-ethtool: ETHTOOL_GUFO did not and should not require CAP_NET_ADMIN
	tcp/dccp: fix lockdep issue when SYN is backlogged
	inet: make sure to grab rcu_read_lock before using ireq->ireq_opt
	ASoC: rt5514: Fix the issue of the delay volume applied again
	ASoC: wm8804: Add ACPI support
	ASoC: sigmadsp: safeload should not have lower byte limit
	selftests/efivarfs: add required kernel configs
	selftests: memory-hotplug: add required configs
	ASoC: rsnd: adg: care clock-frequency size
	ASoC: rsnd: don't fallback to PIO mode when -EPROBE_DEFER
	Bluetooth: hci_ldisc: Free rw_semaphore on close
	mfd: omap-usb-host: Fix dts probe of children
	scsi: iscsi: target: Don't use stack buffer for scatterlist
	scsi: qla2xxx: Fix an endian bug in fcpcmd_is_corrupted()
	sound: enable interrupt after dma buffer initialization
	sound: don't call skl_init_chip() to reset intel skl soc
	hv_netvsc: fix schedule in RCU context
	stmmac: fix valid numbers of unicast filter entries
	net: macb: disable scatter-gather for macb on sama5d3
	ARM: dts: at91: add new compatibility string for macb on sama5d3
	PCI: hv: support reporting serial number as slot information
	clk: x86: add "ether_clk" alias for Bay Trail / Cherry Trail
	clk: x86: Stop marking clocks as CLK_IS_CRITICAL
	x86/kvm/lapic: always disable MMIO interface in x2APIC mode
	drm/amdgpu: Fix SDMA HQD destroy error on gfx_v7
	mm/vmstat.c: fix outdated vmstat_text
	MIPS: VDSO: Always map near top of user memory
	mach64: detect the dot clock divider correctly on sparc
	percpu: stop leaking bitmap metadata blocks
	perf script python: Fix export-to-postgresql.py occasional failure
	perf script python: Fix export-to-sqlite.py sample columns
	s390/cio: Fix how vfio-ccw checks pinned pages
	dm cache: destroy migration_cache if cache target registration failed
	dm: fix report zone remapping to account for partition offset
	dm linear: eliminate linear_end_io call if CONFIG_DM_ZONED disabled
	dm linear: fix linear_end_io conditional definition
	cgroup: Fix dom_cgrp propagation when enabling threaded mode
	mmc: block: avoid multiblock reads for the last sector in SPI mode
	pinctrl: mcp23s08: fix irq and irqchip setup order
	arm64: perf: Reject stand-alone CHAIN events for PMUv3
	mm/thp: fix call to mmu_notifier in set_pmd_migration_entry() v2
	mm: Preserve _PAGE_DEVMAP across mprotect() calls
	i2c: i2c-scmi: fix for i2c_smbus_write_block_data
	xhci: Don't print a warning when setting link state for disabled ports
	mm: introduce NR_INDIRECTLY_RECLAIMABLE_BYTES
	mm: treat indirectly reclaimable memory as available in MemAvailable
	dcache: account external names as indirectly reclaimable memory
	mm: treat indirectly reclaimable memory as free in overcommit logic
	mm: don't show nr_indirectly_reclaimable in /proc/vmstat
	ARM: add more CPU part numbers for Cortex and Brahma B15 CPUs
	ARM: bugs: prepare processor bug infrastructure
	ARM: bugs: hook processor bug checking into SMP and suspend paths
	ARM: bugs: add support for per-processor bug checking
	ARM: spectre: add Kconfig symbol for CPUs vulnerable to Spectre
	ARM: spectre-v2: harden branch predictor on context switches
	ARM: spectre-v2: add Cortex A8 and A15 validation of the IBE bit
	ARM: spectre-v2: harden user aborts in kernel space
	ARM: spectre-v2: add firmware based hardening
	ARM: spectre-v2: warn about incorrect context switching functions
	ARM: KVM: invalidate BTB on guest exit for Cortex-A12/A17
	ARM: KVM: invalidate icache on guest exit for Cortex-A15
	ARM: spectre-v2: KVM: invalidate icache on guest exit for Brahma B15
	ARM: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling
	ARM: KVM: report support for SMCCC_ARCH_WORKAROUND_1
	ARM: spectre-v1: add speculation barrier (csdb) macros
	ARM: spectre-v1: add array_index_mask_nospec() implementation
	ARM: spectre-v1: fix syscall entry
	ARM: signal: copy registers using __copy_from_user()
	ARM: vfp: use __copy_from_user() when restoring VFP state
	ARM: oabi-compat: copy semops using __copy_from_user()
	ARM: use __inttype() in get_user()
	ARM: spectre-v1: use get_user() for __get_user()
	ARM: spectre-v1: mitigate user accesses
	perf tools: Fix snprint warnings for gcc 8
	Linux 4.14.77

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit c2214bce1a
 Documentation/devicetree/bindings/net/macb.txt | 1
 Makefile | 2
 arch/arm/boot/dts/sama5d3_emac.dtsi | 2
 arch/arm/include/asm/assembler.h | 12
 arch/arm/include/asm/barrier.h | 32
 arch/arm/include/asm/bugs.h | 6
 arch/arm/include/asm/cp15.h | 3
 arch/arm/include/asm/cputype.h | 8
 arch/arm/include/asm/kvm_asm.h | 2
 arch/arm/include/asm/kvm_host.h | 14
 arch/arm/include/asm/kvm_mmu.h | 23
 arch/arm/include/asm/proc-fns.h | 4
 arch/arm/include/asm/system_misc.h | 15
 arch/arm/include/asm/thread_info.h | 4
 arch/arm/include/asm/uaccess.h | 26
 arch/arm/kernel/Makefile | 1
 arch/arm/kernel/bugs.c | 18
 arch/arm/kernel/entry-common.S | 18
 arch/arm/kernel/entry-header.S | 25
 arch/arm/kernel/signal.c | 58
 arch/arm/kernel/smp.c | 4
 arch/arm/kernel/suspend.c | 2
 arch/arm/kernel/sys_oabi-compat.c | 8
 arch/arm/kvm/hyp/hyp-entry.S | 112
 arch/arm/lib/copy_from_user.S | 9
 arch/arm/mm/Kconfig | 23
 arch/arm/mm/Makefile | 2
 arch/arm/mm/fault.c | 3
 arch/arm/mm/proc-macros.S | 3
 arch/arm/mm/proc-v7-2level.S | 6
 arch/arm/mm/proc-v7-bugs.c | 174
 arch/arm/mm/proc-v7.S | 154
 arch/arm/vfp/vfpmodule.c | 17
 arch/arm64/kernel/perf_event.c | 7
 arch/mips/include/asm/processor.h | 10
 arch/mips/kernel/process.c | 25
 arch/mips/kernel/vdso.c | 18
 arch/powerpc/include/asm/book3s/64/pgtable.h | 4
 arch/x86/include/asm/pgtable_types.h | 2
 arch/x86/include/uapi/asm/kvm.h | 1
 arch/x86/kvm/lapic.c | 22
 drivers/bluetooth/hci_ldisc.c | 2
 drivers/clk/x86/clk-pmc-atom.c | 18
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 2
 drivers/i2c/busses/i2c-scmi.c | 1
 drivers/md/dm-cache-target.c | 5
 drivers/md/dm-flakey.c | 2
 drivers/md/dm.c | 27
 drivers/mfd/omap-usb-host.c | 11
 drivers/mmc/core/block.c | 10
 drivers/net/bonding/bond_main.c | 65
 drivers/net/dsa/bcm_sf2.c | 12
 drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 32
 drivers/net/ethernet/broadcom/bcmsysport.c | 22
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 23
 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 20
 drivers/net/ethernet/cadence/macb_main.c | 8
 drivers/net/ethernet/hisilicon/hns/hnae.c | 2
 drivers/net/ethernet/hisilicon/hns/hns_enet.c | 30
 drivers/net/ethernet/marvell/mvpp2.c | 20
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 3
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 4
 drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 17
 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 8
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 3
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 3
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 3
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 12
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 8
 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 5
 drivers/net/hyperv/netvsc_drv.c | 9
 drivers/net/team/team.c | 5
 drivers/net/usb/qmi_wwan.c | 1
 drivers/net/usb/smsc75xx.c | 1
 drivers/pci/host/pci-hyperv.c | 37
 drivers/perf/arm_pmu.c | 8
 drivers/pinctrl/pinctrl-mcp23s08.c | 13
 drivers/s390/cio/vfio_ccw_cp.c | 2
 drivers/scsi/qla2xxx/qla_target.h | 4
 drivers/target/iscsi/iscsi_target.c | 22
 drivers/usb/host/xhci-hub.c | 18
 drivers/video/fbdev/aty/atyfb.h | 3
 drivers/video/fbdev/aty/atyfb_base.c | 7
 drivers/video/fbdev/aty/mach64_ct.c | 10
 fs/dcache.c | 38
 include/linux/cgroup-defs.h | 1
 include/linux/mmzone.h | 1
 include/linux/netdevice.h | 7
 include/linux/perf/arm_pmu.h | 1
 include/linux/virtio_net.h | 18
 include/net/bonding.h | 7
 include/net/inet_sock.h | 6
 include/net/ip_fib.h | 1
 include/sound/hdaudio.h | 1
 kernel/cgroup/cgroup.c | 25
 mm/huge_memory.c | 6
 mm/page_alloc.c | 7
 mm/percpu.c | 1
 mm/util.c | 7
 mm/vmstat.c | 6
 100 files changed (some files were not shown because too many files changed in this diff)

@@ -10,6 +10,7 @@ Required properties:
Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
the Cadence GEM, or the generic form: "cdns,gem".
Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 76
SUBLEVEL = 77
EXTRAVERSION =
NAME = Petit Gorille

@@ -41,7 +41,7 @@
};
macb1: ethernet@f802c000 {
compatible = "cdns,at91sam9260-macb", "cdns,macb";
compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf802c000 0x100>;
interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";

@@ -447,11 +447,23 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.size \name , . - \name
.endm
.macro csdb
#ifdef CONFIG_THUMB2_KERNEL
.inst.w 0xf3af8014
#else
.inst 0xe320f014
#endif
.endm
.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
adds \tmp, \addr, #\size - 1
sbcccs \tmp, \tmp, \limit
bcs \bad
#ifdef CONFIG_CPU_SPECTRE
movcs \addr, #0
csdb
#endif
#endif
.endm

@@ -17,6 +17,12 @@
#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
#ifdef CONFIG_THUMB2_KERNEL
#define CSDB ".inst.w 0xf3af8014"
#else
#define CSDB ".inst 0xe320f014"
#endif
#define csdb() __asm__ __volatile__(CSDB : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
@@ -37,6 +43,13 @@
#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#endif
#ifndef CSDB
#define CSDB
#endif
#ifndef csdb
#define csdb()
#endif
#ifdef CONFIG_ARM_HEAVY_MB
extern void (*soc_mb)(void);
extern void arm_heavy_mb(void);
@@ -63,6 +76,25 @@ extern void arm_heavy_mb(void);
#define __smp_rmb() __smp_mb()
#define __smp_wmb() dmb(ishst)
#ifdef CONFIG_CPU_SPECTRE
static inline unsigned long array_index_mask_nospec(unsigned long idx,
unsigned long sz)
{
unsigned long mask;
asm volatile(
"cmp %1, %2\n"
" sbc %0, %1, %1\n"
CSDB
: "=r" (mask)
: "r" (idx), "Ir" (sz)
: "cc");
return mask;
}
#define array_index_mask_nospec array_index_mask_nospec
#endif
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */

@@ -10,12 +10,14 @@
#ifndef __ASM_BUGS_H
#define __ASM_BUGS_H
#ifdef CONFIG_MMU
extern void check_writebuffer_bugs(void);
#define check_bugs() check_writebuffer_bugs()
#ifdef CONFIG_MMU
extern void check_bugs(void);
extern void check_other_bugs(void);
#else
#define check_bugs() do { } while (0)
#define check_other_bugs() do { } while (0)
#endif
#endif

@@ -65,6 +65,9 @@
#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
extern unsigned long cr_alignment; /* defined in entry-armv.S */
static inline unsigned long get_cr(void)

@@ -77,8 +77,16 @@
#define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
#define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
#define ARM_CPU_PART_CORTEX_A53 0x4100d030
#define ARM_CPU_PART_CORTEX_A57 0x4100d070
#define ARM_CPU_PART_CORTEX_A72 0x4100d080
#define ARM_CPU_PART_CORTEX_A73 0x4100d090
#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0
#define ARM_CPU_PART_MASK 0xff00fff0
/* Broadcom cores */
#define ARM_CPU_PART_BRAHMA_B15 0x420000f0
/* DEC implemented cores */
#define ARM_CPU_PART_SA1100 0x4400a110

@@ -61,8 +61,6 @@ struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);

@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cputype.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
@@ -298,8 +299,17 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
static inline bool kvm_arm_harden_branch_predictor(void)
{
/* No way to detect it yet, pretend it is not there. */
return false;
switch(read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
case ARM_CPU_PART_BRAHMA_B15:
case ARM_CPU_PART_CORTEX_A12:
case ARM_CPU_PART_CORTEX_A15:
case ARM_CPU_PART_CORTEX_A17:
return true;
#endif
default:
return false;
}
}
#define KVM_SSBD_UNKNOWN -1

@@ -246,7 +246,28 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
static inline void *kvm_get_hyp_vector(void)
{
return kvm_ksym_ref(__kvm_hyp_vector);
switch(read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
case ARM_CPU_PART_CORTEX_A12:
case ARM_CPU_PART_CORTEX_A17:
{
extern char __kvm_hyp_vector_bp_inv[];
return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
}
case ARM_CPU_PART_BRAHMA_B15:
case ARM_CPU_PART_CORTEX_A15:
{
extern char __kvm_hyp_vector_ic_inv[];
return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
}
#endif
default:
{
extern char __kvm_hyp_vector[];
return kvm_ksym_ref(__kvm_hyp_vector);
}
}
}
static inline int kvm_map_vectors(void)

@@ -36,6 +36,10 @@ extern struct processor {
* Set up any processor specifics
*/
void (*_proc_init)(void);
/*
* Check for processor bugs
*/
void (*check_bugs)(void);
/*
* Disable any processor specifics
*/

@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/reboot.h>
#include <linux/percpu.h>
extern void cpu_init(void);
@@ -15,6 +16,20 @@ void soft_restart(unsigned long);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
extern void (*arm_pm_idle)(void);
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
typedef void (*harden_branch_predictor_fn_t)(void);
DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
static inline void harden_branch_predictor(void)
{
harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
smp_processor_id());
if (fn)
fn();
}
#else
#define harden_branch_predictor() do { } while (0)
#endif
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
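
The per-CPU function pointer above lets each core install whichever workaround its part number needs, and the fault path then invokes it without caring which one it got. A user-space sketch of that dispatch pattern follows; the array stands in for the per-CPU variable and all names are illustrative, not kernel API:

	#include <stdio.h>

	typedef void (*harden_fn_t)(void);

	#define NR_CPUS_SKETCH 4
	static harden_fn_t harden_fn[NR_CPUS_SKETCH]; /* per-CPU stand-in */

	static void bpiall_stub(void)
	{
		puts("BPIALL: branch predictor invalidated");
	}

	/* Mirrors harden_branch_predictor(): run the CPU's registered
	 * workaround if one was installed, otherwise do nothing. */
	static void harden_branch_predictor_sketch(int cpu)
	{
		harden_fn_t fn = harden_fn[cpu];

		if (fn)
			fn();
	}

	int main(void)
	{
		harden_fn[0] = bpiall_stub;  /* cpu_v7_spectre_init() role */
		harden_branch_predictor_sketch(0); /* runs the workaround  */
		harden_branch_predictor_sketch(1); /* no-op: none set      */
		return 0;
	}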

@@ -126,8 +126,8 @@ struct user_vfp_exc;
extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
struct user_vfp_exc __user *);
extern int vfp_restore_user_hwstate(struct user_vfp __user *,
struct user_vfp_exc __user *);
extern int vfp_restore_user_hwstate(struct user_vfp *,
struct user_vfp_exc *);
#endif
/*

@@ -84,6 +84,13 @@ static inline void set_fs(mm_segment_t fs)
: "cc"); \
flag; })
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
*/
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
@@ -153,7 +160,7 @@ extern int __get_user_64t_4(void *);
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
register typeof(x) __r2 asm("r2"); \
register __inttype(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
@@ -243,6 +250,16 @@ static inline void set_fs(mm_segment_t fs)
#define user_addr_max() \
(uaccess_kernel() ? ~0UL : get_fs())
#ifdef CONFIG_CPU_SPECTRE
/*
* When mitigating Spectre variant 1, it is not worth fixing the non-
* verifying accessors, because we need to add verification of the
* address space there. Force these to use the standard get_user()
* version instead.
*/
#define __get_user(x, ptr) get_user(x, ptr)
#else
/*
* The "__xxx" versions of the user access functions do not verify the
* address space - it must have been done previously with a separate
@@ -259,12 +276,6 @@ static inline void set_fs(mm_segment_t fs)
__gu_err; \
})
#define __get_user_error(x, ptr, err) \
({ \
__get_user_err((x), (ptr), err); \
(void) 0; \
})
#define __get_user_err(x, ptr, err) \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
@@ -324,6 +335,7 @@ do { \
#define __get_user_asm_word(x, addr, err) \
__get_user_asm(x, addr, err, ldr)
#endif
#define __put_user_switch(x, ptr, __err, __fn) \
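
The point of __inttype() is that the register temporary in get_user() is always an unsigned integer wide enough for the access, rather than inheriting an arbitrary user-supplied type. A small compile-time check of what the macro selects, as a self-contained user-space sketch:

	#define __inttype(x) \
		__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), \
						 0ULL, 0UL))

	int main(void)
	{
		char c = 0;
		long long ll = 0;

		/* anything that fits in a machine word becomes unsigned long */
		_Static_assert(sizeof(__inttype(c)) == sizeof(unsigned long),
			       "narrow types widen to unsigned long");
		/* 64-bit values keep their full width */
		_Static_assert(sizeof(__inttype(ll)) == sizeof(unsigned long long),
			       "wide types stay unsigned long long");
		return (int)(c + ll);
	}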

@@ -31,6 +31,7 @@ else
obj-y += entry-armv.o
endif
obj-$(CONFIG_MMU) += bugs.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o

@@ -0,0 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <asm/bugs.h>
#include <asm/proc-fns.h>
void check_other_bugs(void)
{
#ifdef MULTI_CPU
if (processor.check_bugs)
processor.check_bugs();
#endif
}
void __init check_bugs(void)
{
check_writebuffer_bugs();
check_other_bugs();
}

@@ -241,9 +241,7 @@ local_restart:
tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace
cmp scno, #NR_syscalls @ check upper syscall limit
badr lr, ret_fast_syscall @ return address
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
invoke_syscall tbl, scno, r10, ret_fast_syscall
add r1, sp, #S_OFF
2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
@@ -277,14 +275,8 @@ __sys_trace:
mov r1, scno
add r0, sp, #S_OFF
bl syscall_trace_enter
badr lr, __sys_trace_return @ return address
mov scno, r0 @ syscall number (possibly new)
add r1, sp, #S_R0 + S_OFF @ pointer to regs
cmp scno, #NR_syscalls @ check upper syscall limit
ldmccia r1, {r0 - r6} @ have to reload r0 - r6
stmccia sp, {r4, r5} @ and update the stack args
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
mov scno, r0
invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
cmp scno, #-1 @ skip the syscall?
bne 2b
add sp, sp, #S_OFF @ restore stack
@@ -362,6 +354,10 @@ sys_syscall:
bic scno, r0, #__NR_OABI_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
cmpne scno, #NR_syscalls @ check range
#ifdef CONFIG_CPU_SPECTRE
movhs scno, #0
csdb
#endif
stmloia sp, {r5, r6} @ shuffle args
movlo r0, r1
movlo r1, r2

@@ -378,6 +378,31 @@
#endif
.endm
.macro invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
mov \tmp, \nr
cmp \tmp, #NR_syscalls @ check upper syscall limit
movcs \tmp, #0
csdb
badr lr, \ret @ return address
.if \reload
add r1, sp, #S_R0 + S_OFF @ pointer to regs
ldmccia r1, {r0 - r6} @ reload r0-r6
stmccia sp, {r4, r5} @ update stack arguments
.endif
ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine
#else
cmp \nr, #NR_syscalls @ check upper syscall limit
badr lr, \ret @ return address
.if \reload
add r1, sp, #S_R0 + S_OFF @ pointer to regs
ldmccia r1, {r0 - r6} @ reload r0-r6
stmccia sp, {r4, r5} @ update stack arguments
.endif
ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine
#endif
.endm
/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6.
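
In C terms, the CONFIG_CPU_SPECTRE path of invoke_syscall clamps the table index before it can feed a speculative load, while the architectural range check still decides whether a handler runs at all. A hedged sketch of that logic (NR_SYSCALLS and the table are illustrative, not the real values):

	#include <errno.h>

	#define NR_SYSCALLS 400		/* illustrative, not the real count */

	typedef long (*syscall_fn_t)(void);

	long invoke_syscall_sketch(syscall_fn_t *table, unsigned int nr)
	{
		unsigned int idx = nr;

		if (idx >= NR_SYSCALLS)
			idx = 0;	/* movcs \tmp, #0: speculation clamp */
		/* the assembly places a csdb barrier here */

		if (nr < NR_SYSCALLS)	/* ldrcc: architectural range check */
			return table[idx]();
		return -ENOSYS;		/* out-of-range falls through */
	}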

@@ -149,22 +149,18 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
static int restore_vfp_context(char __user **auxp)
{
struct vfp_sigframe __user *frame =
(struct vfp_sigframe __user *)*auxp;
unsigned long magic;
unsigned long size;
int err = 0;
__get_user_error(magic, &frame->magic, err);
__get_user_error(size, &frame->size, err);
struct vfp_sigframe frame;
int err;
err = __copy_from_user(&frame, *auxp, sizeof(frame));
if (err)
return -EFAULT;
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return err;
if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
return -EINVAL;
*auxp += size;
return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
*auxp += sizeof(frame);
return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
}
#endif
@@ -184,6 +180,7 @@ struct rt_sigframe {
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
struct sigcontext context;
char __user *aux;
sigset_t set;
int err;
@@ -192,23 +189,26 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
if (err == 0)
set_current_blocked(&set);
__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
if (err == 0) {
regs->ARM_r0 = context.arm_r0;
regs->ARM_r1 = context.arm_r1;
regs->ARM_r2 = context.arm_r2;
regs->ARM_r3 = context.arm_r3;
regs->ARM_r4 = context.arm_r4;
regs->ARM_r5 = context.arm_r5;
regs->ARM_r6 = context.arm_r6;
regs->ARM_r7 = context.arm_r7;
regs->ARM_r8 = context.arm_r8;
regs->ARM_r9 = context.arm_r9;
regs->ARM_r10 = context.arm_r10;
regs->ARM_fp = context.arm_fp;
regs->ARM_ip = context.arm_ip;
regs->ARM_sp = context.arm_sp;
regs->ARM_lr = context.arm_lr;
regs->ARM_pc = context.arm_pc;
regs->ARM_cpsr = context.arm_cpsr;
}
err |= !valid_user_regs(regs);
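
The rewrite replaces sixteen independent __get_user_error() calls with one __copy_from_user() into a kernel-side struct, shrinking the user-access window and avoiding the unverified __get_user accessors that the Spectre-v1 hardening now forbids. A user-space sketch of the pattern, with memcpy standing in for __copy_from_user and deliberately trimmed, hypothetical structs:

	#include <errno.h>
	#include <string.h>

	struct sig_ctx { unsigned long r0, r1, pc; };	/* trimmed stand-in */
	struct regs_sketch { unsigned long r0, r1, pc; };

	/* memcpy stands in for __copy_from_user(); 0 means success */
	static unsigned long copy_from_user_sketch(void *to, const void *from,
						   unsigned long n)
	{
		memcpy(to, from, n);
		return 0;
	}

	int restore_sigframe_sketch(struct regs_sketch *regs,
				    const struct sig_ctx *user_ctx)
	{
		struct sig_ctx k;

		/* one bulk, verified copy instead of a field-by-field
		 * sequence of unverified __get_user_error() calls */
		if (copy_from_user_sketch(&k, user_ctx, sizeof(k)))
			return -EFAULT;

		regs->r0 = k.r0;	/* plain loads from the kernel copy */
		regs->r1 = k.r1;
		regs->pc = k.pc;
		return 0;
	}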

@@ -31,6 +31,7 @@
#include <linux/irq_work.h>
#include <linux/atomic.h>
#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
@@ -402,6 +403,9 @@ asmlinkage void secondary_start_kernel(void)
* before we continue - which happens after __cpu_up returns.
*/
set_cpu_online(cpu, true);
check_other_bugs();
complete(&cpu_running);
local_irq_enable();

@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
@@ -36,6 +37,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
cpu_switch_mm(mm->pgd, mm);
local_flush_bp_all();
local_flush_tlb_all();
check_other_bugs();
}
return ret;

@@ -329,9 +329,11 @@ asmlinkage long sys_oabi_semtimedop(int semid,
return -ENOMEM;
err = 0;
for (i = 0; i < nsops; i++) {
__get_user_error(sops[i].sem_num, &tsops->sem_num, err);
__get_user_error(sops[i].sem_op, &tsops->sem_op, err);
__get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
struct oabi_sembuf osb;
err |= __copy_from_user(&osb, tsops, sizeof(osb));
sops[i].sem_num = osb.sem_num;
sops[i].sem_op = osb.sem_op;
sops[i].sem_flg = osb.sem_flg;
tsops++;
}
if (timeout) {

@@ -16,6 +16,7 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/arm-smccc.h>
#include <linux/linkage.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
@@ -71,6 +72,90 @@ __kvm_hyp_vector:
W(b) hyp_irq
W(b) hyp_fiq
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
.align 5
__kvm_hyp_vector_ic_inv:
.global __kvm_hyp_vector_ic_inv
/*
* We encode the exception entry in the bottom 3 bits of
* SP, and we have to guarantee to be 8 bytes aligned.
*/
W(add) sp, sp, #1 /* Reset 7 */
W(add) sp, sp, #1 /* Undef 6 */
W(add) sp, sp, #1 /* Syscall 5 */
W(add) sp, sp, #1 /* Prefetch abort 4 */
W(add) sp, sp, #1 /* Data abort 3 */
W(add) sp, sp, #1 /* HVC 2 */
W(add) sp, sp, #1 /* IRQ 1 */
W(nop) /* FIQ 0 */
mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
isb
b decode_vectors
.align 5
__kvm_hyp_vector_bp_inv:
.global __kvm_hyp_vector_bp_inv
/*
* We encode the exception entry in the bottom 3 bits of
* SP, and we have to guarantee to be 8 bytes aligned.
*/
W(add) sp, sp, #1 /* Reset 7 */
W(add) sp, sp, #1 /* Undef 6 */
W(add) sp, sp, #1 /* Syscall 5 */
W(add) sp, sp, #1 /* Prefetch abort 4 */
W(add) sp, sp, #1 /* Data abort 3 */
W(add) sp, sp, #1 /* HVC 2 */
W(add) sp, sp, #1 /* IRQ 1 */
W(nop) /* FIQ 0 */
mcr p15, 0, r0, c7, c5, 6 /* BPIALL */
isb
decode_vectors:
#ifdef CONFIG_THUMB2_KERNEL
/*
* Yet another silly hack: Use VPIDR as a temp register.
* Thumb2 is really a pain, as SP cannot be used with most
* of the bitwise instructions. The vect_br macro ensures
* things gets cleaned-up.
*/
mcr p15, 4, r0, c0, c0, 0 /* VPIDR */
mov r0, sp
and r0, r0, #7
sub sp, sp, r0
push {r1, r2}
mov r1, r0
mrc p15, 4, r0, c0, c0, 0 /* VPIDR */
mrc p15, 0, r2, c0, c0, 0 /* MIDR */
mcr p15, 4, r2, c0, c0, 0 /* VPIDR */
#endif
.macro vect_br val, targ
ARM( eor sp, sp, #\val )
ARM( tst sp, #7 )
ARM( eorne sp, sp, #\val )
THUMB( cmp r1, #\val )
THUMB( popeq {r1, r2} )
beq \targ
.endm
vect_br 0, hyp_fiq
vect_br 1, hyp_irq
vect_br 2, hyp_hvc
vect_br 3, hyp_dabt
vect_br 4, hyp_pabt
vect_br 5, hyp_svc
vect_br 6, hyp_undef
vect_br 7, hyp_reset
#endif
.macro invalid_vector label, cause
.align
\label: mov r0, #\cause
@@ -118,7 +203,7 @@ hyp_hvc:
lsr r2, r2, #16
and r2, r2, #0xff
cmp r2, #0
bne guest_trap @ Guest called HVC
bne guest_hvc_trap @ Guest called HVC
/*
* Getting here means host called HVC, we shift parameters and branch
@@ -149,7 +234,14 @@ hyp_hvc:
bx ip
1:
push {lr}
/*
* Pushing r2 here is just a way of keeping the stack aligned to
* 8 bytes on any path that can trigger a HYP exception. Here,
* we may well be about to jump into the guest, and the guest
* exit would otherwise be badly decoded by our fancy
* "decode-exception-without-a-branch" code...
*/
push {r2, lr}
mov lr, r0
mov r0, r1
@@ -159,7 +251,21 @@
THUMB( orr lr, #1)
blx lr @ Call the HYP function
pop {lr}
pop {r2, lr}
eret
guest_hvc_trap:
movw r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
ldr r0, [sp] @ Guest's r0
teq r0, r2
bne guest_trap
add sp, sp, #12
@ Returns:
@ r0 = 0
@ r1 = HSR value (perfectly predictable)
@ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
mov r0, #0
eret
guest_trap:

@@ -90,6 +90,15 @@
.text
ENTRY(arm_copy_from_user)
#ifdef CONFIG_CPU_SPECTRE
get_thread_info r3
ldr r3, [r3, #TI_ADDR_LIMIT]
adds ip, r1, r2 @ ip=addr+size
sub r3, r3, #1 @ addr_limit - 1
cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
movcs r1, #0 @ addr = NULL
csdb
#endif
#include "copy_template.S"

@@ -415,6 +415,7 @@ config CPU_V7
select CPU_CP15_MPU if !MMU
select CPU_HAS_ASID if MMU
select CPU_PABRT_V7
select CPU_SPECTRE if MMU
select CPU_THUMB_CAPABLE
select CPU_TLB_V7 if MMU
@@ -826,6 +827,28 @@ config CPU_BPREDICT_DISABLE
help
Say Y here to disable branch prediction. If unsure, say N.
config CPU_SPECTRE
bool
config HARDEN_BRANCH_PREDICTOR
bool "Harden the branch predictor against aliasing attacks" if EXPERT
depends on CPU_SPECTRE
default y
help
Speculation attacks against some high-performance processors rely
on being able to manipulate the branch predictor for a victim
context by executing aliasing branches in the attacker context.
Such attacks can be partially mitigated against by clearing
internal branch predictor state and limiting the prediction
logic in some situations.
This config option will take CPU-specific actions to harden
the branch predictor against aliasing attacks and may rely on
specific instruction sequences or control bits being set by
the system firmware.
If unsure, say Y.
config TLS_REG_EMUL
bool
select NEED_KUSER_HELPERS

@@ -95,7 +95,7 @@ obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o
obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
obj-$(CONFIG_CPU_V6) += proc-v6.o
obj-$(CONFIG_CPU_V6K) += proc-v6.o
obj-$(CONFIG_CPU_V7) += proc-v7.o
obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o
obj-$(CONFIG_CPU_V7M) += proc-v7m.o
AFLAGS_proc-v6.o :=-Wa,-march=armv6

@@ -164,6 +164,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
{
struct siginfo si;
if (addr > TASK_SIZE)
harden_branch_predictor();
#ifdef CONFIG_DEBUG_USER
if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
((user_debug & UDBG_BUS) && (sig == SIGBUS))) {

@@ -273,13 +273,14 @@
mcr p15, 0, ip, c7, c10, 4 @ data write barrier
.endm
.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
.type \name\()_processor_functions, #object
.align 2
ENTRY(\name\()_processor_functions)
.word \dabort
.word \pabort
.word cpu_\name\()_proc_init
.word \bugs
.word cpu_\name\()_proc_fin
.word cpu_\name\()_reset
.word cpu_\name\()_do_idle

@@ -41,11 +41,6 @@
* even on Cortex-A8 revisions not affected by 430973.
* If IBE is not set, the flush BTAC/BTB won't do anything.
*/
ENTRY(cpu_ca8_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r1, r1 @ get mm->context.id
@@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm)
#endif
bx lr
ENDPROC(cpu_v7_switch_mm)
ENDPROC(cpu_ca8_switch_mm)
/*
* cpu_v7_set_pte_ext(ptep, pte)

@@ -0,0 +1,174 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/arm-smccc.h>
#include <linux/kernel.h>
#include <linux/psci.h>
#include <linux/smp.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/proc-fns.h>
#include <asm/system_misc.h>
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
static void harden_branch_predictor_bpiall(void)
{
write_sysreg(0, BPIALL);
}
static void harden_branch_predictor_iciallu(void)
{
write_sysreg(0, ICIALLU);
}
static void __maybe_unused call_smc_arch_workaround_1(void)
{
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static void __maybe_unused call_hvc_arch_workaround_1(void)
{
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static void cpu_v7_spectre_init(void)
{
const char *spectre_v2_method = NULL;
int cpu = smp_processor_id();
if (per_cpu(harden_branch_predictor_fn, cpu))
return;
switch (read_cpuid_part()) {
case ARM_CPU_PART_CORTEX_A8:
case ARM_CPU_PART_CORTEX_A9:
case ARM_CPU_PART_CORTEX_A12:
case ARM_CPU_PART_CORTEX_A17:
case ARM_CPU_PART_CORTEX_A73:
case ARM_CPU_PART_CORTEX_A75:
if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
harden_branch_predictor_bpiall;
spectre_v2_method = "BPIALL";
break;
case ARM_CPU_PART_CORTEX_A15:
case ARM_CPU_PART_BRAHMA_B15:
if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
harden_branch_predictor_iciallu;
spectre_v2_method = "ICIALLU";
break;
#ifdef CONFIG_ARM_PSCI
default:
/* Other ARM CPUs require no workaround */
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
break;
/* fallthrough */
/* Cortex A57/A72 require firmware workaround */
case ARM_CPU_PART_CORTEX_A57:
case ARM_CPU_PART_CORTEX_A72: {
struct arm_smccc_res res;
if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
break;
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 != 0)
break;
if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
call_hvc_arch_workaround_1;
processor.switch_mm = cpu_v7_hvc_switch_mm;
spectre_v2_method = "hypervisor";
break;
case PSCI_CONDUIT_SMC:
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 != 0)
break;
if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
call_smc_arch_workaround_1;
processor.switch_mm = cpu_v7_smc_switch_mm;
spectre_v2_method = "firmware";
break;
default:
break;
}
}
#endif
}
if (spectre_v2_method)
pr_info("CPU%u: Spectre v2: using %s workaround\n",
smp_processor_id(), spectre_v2_method);
return;
bl_error:
pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
cpu);
}
#else
static void cpu_v7_spectre_init(void)
{
}
#endif
static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
u32 mask, const char *msg)
{
u32 aux_cr;
asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
if ((aux_cr & mask) != mask) {
if (!*warned)
pr_err("CPU%u: %s", smp_processor_id(), msg);
*warned = true;
return false;
}
return true;
}
static DEFINE_PER_CPU(bool, spectre_warned);
static bool check_spectre_auxcr(bool *warned, u32 bit)
{
return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
cpu_v7_check_auxcr_set(warned, bit,
"Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
}
void cpu_v7_ca8_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
cpu_v7_spectre_init();
}
void cpu_v7_ca15_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
cpu_v7_spectre_init();
}
void cpu_v7_bugs_init(void)
{
cpu_v7_spectre_init();
}

@@ -9,6 +9,7 @@
*
* This is the "shell" of the ARMv7 processor support.
*/
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
@@ -93,6 +94,37 @@ ENTRY(cpu_v7_dcache_clean_area)
ret lr
ENDPROC(cpu_v7_dcache_clean_area)
#ifdef CONFIG_ARM_PSCI
.arch_extension sec
ENTRY(cpu_v7_smc_switch_mm)
stmfd sp!, {r0 - r3}
movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
smc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
ENDPROC(cpu_v7_smc_switch_mm)
.arch_extension virt
ENTRY(cpu_v7_hvc_switch_mm)
stmfd sp!, {r0 - r3}
movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
hvc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
ENDPROC(cpu_v7_hvc_switch_mm)
#endif
ENTRY(cpu_v7_iciallu_switch_mm)
mov r3, #0
mcr p15, 0, r3, c7, c5, 0 @ ICIALLU
b cpu_v7_switch_mm
ENDPROC(cpu_v7_iciallu_switch_mm)
ENTRY(cpu_v7_bpiall_switch_mm)
mov r3, #0
mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB
b cpu_v7_switch_mm
ENDPROC(cpu_v7_bpiall_switch_mm)
string cpu_v7_name, "ARMv7 Processor"
.align
@@ -158,31 +190,6 @@ ENTRY(cpu_v7_do_resume)
ENDPROC(cpu_v7_do_resume)
#endif
/*
* Cortex-A8
*/
globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca8_reset, cpu_v7_reset
globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
#endif
/*
* Cortex-A9 processor functions
*/
globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca9mp_reset, cpu_v7_reset
globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
.globl cpu_ca9mp_suspend_size
.equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
#ifdef CONFIG_ARM_CPU_SUSPEND
@@ -546,12 +553,79 @@ __v7_setup_stack:
__INITDATA
.weak cpu_v7_bugs_init
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
@ generic v7 bpiall on context switch
globl_equ cpu_v7_bpiall_proc_init, cpu_v7_proc_init
globl_equ cpu_v7_bpiall_proc_fin, cpu_v7_proc_fin
globl_equ cpu_v7_bpiall_reset, cpu_v7_reset
globl_equ cpu_v7_bpiall_do_idle, cpu_v7_do_idle
globl_equ cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_v7_bpiall_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_v7_bpiall_suspend_size, cpu_v7_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend
globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume
#endif
define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
#else
#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
#endif
#ifndef CONFIG_ARM_LPAE
define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
@ Cortex-A8 - always needs bpiall switch_mm implementation
globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca8_reset, cpu_v7_reset
globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_ca8_switch_mm, cpu_v7_bpiall_switch_mm
globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
#endif
define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
@ Cortex-A9 - needs more registers preserved across suspend/resume
@ and bpiall switch_mm for hardening
globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca9mp_reset, cpu_v7_reset
globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
globl_equ cpu_ca9mp_switch_mm, cpu_v7_bpiall_switch_mm
#else
globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
#endif
globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#endif
@ Cortex-A15 - needs iciallu switch_mm for hardening
globl_equ cpu_ca15_proc_init, cpu_v7_proc_init
globl_equ cpu_ca15_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca15_reset, cpu_v7_reset
globl_equ cpu_ca15_do_idle, cpu_v7_do_idle
globl_equ cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
globl_equ cpu_ca15_switch_mm, cpu_v7_iciallu_switch_mm
#else
globl_equ cpu_ca15_switch_mm, cpu_v7_switch_mm
#endif
globl_equ cpu_ca15_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_ca15_suspend_size, cpu_v7_suspend_size
globl_equ cpu_ca15_do_suspend, cpu_v7_do_suspend
globl_equ cpu_ca15_do_resume, cpu_v7_do_resume
define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
#ifdef CONFIG_CPU_PJ4B
define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
#endif
@@ -658,7 +732,7 @@ __v7_ca7mp_proc_info:
__v7_ca12mp_proc_info:
.long 0x410fc0d0
.long 0xff0ffff0
__v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
__v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
/*
@@ -668,7 +742,7 @@ __v7_ca12mp_proc_info:
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
__v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
__v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
/*
@@ -678,7 +752,7 @@ __v7_ca15mp_proc_info:
__v7_b15mp_proc_info:
.long 0x420f00f0
.long 0xff0ffff0
__v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup
__v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions
.size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
/*
@@ -688,9 +762,25 @@ __v7_b15mp_proc_info:
__v7_ca17mp_proc_info:
.long 0x410fc0e0
.long 0xff0ffff0
__v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
__v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
/* ARM Ltd. Cortex A73 processor */
.type __v7_ca73_proc_info, #object
__v7_ca73_proc_info:
.long 0x410fd090
.long 0xff0ffff0
__v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca73_proc_info, . - __v7_ca73_proc_info
/* ARM Ltd. Cortex A75 processor */
.type __v7_ca75_proc_info, #object
__v7_ca75_proc_info:
.long 0x410fd0a0
.long 0xff0ffff0
__v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca75_proc_info, . - __v7_ca75_proc_info
/*
* Qualcomm Inc. Krait processors.
*/

@@ -597,13 +597,11 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
}
/* Sanitise and restore the current VFP state from the provided structures. */
int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
struct user_vfp_exc __user *ufp_exc)
int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
unsigned long fpexc;
int err = 0;
/* Disable VFP to avoid corrupting the new thread state. */
vfp_flush_hwstate(thread);
@@ -612,17 +610,16 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
* Copy the floating point registers. There can be unused
* registers see asm/hwcap.h for details.
*/
err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
sizeof(hwstate->fpregs));
memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
/*
* Copy the status and control register.
*/
__get_user_error(hwstate->fpscr, &ufp->fpscr, err);
hwstate->fpscr = ufp->fpscr;
/*
* Sanitise and restore the exception registers.
*/
__get_user_error(fpexc, &ufp_exc->fpexc, err);
fpexc = ufp_exc->fpexc;
/* Ensure the VFP is enabled. */
fpexc |= FPEXC_EN;
@@ -631,10 +628,10 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
hwstate->fpexc = fpexc;
__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
hwstate->fpinst = ufp_exc->fpinst;
hwstate->fpinst2 = ufp_exc->fpinst2;
return err ? -EFAULT : 0;
return 0;
}
/*

@@ -824,6 +824,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
return 0;
}
static int armv8pmu_filter_match(struct perf_event *event)
{
unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
}
static void armv8pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
@@ -970,6 +976,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->reset = armv8pmu_reset,
cpu_pmu->max_period = (1LLU << 32) - 1,
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
cpu_pmu->filter_match = armv8pmu_filter_match;
return 0;
}

@@ -13,6 +13,7 @@
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/sizes.h>
#include <linux/threads.h>
#include <asm/cachectl.h>
@@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_count;
#endif
/*
* One page above the stack is used for branch delay slot "emulation".
* See dsemul.c for details.
*/
#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
extern unsigned long mips_stack_top(void);
#define STACK_TOP mips_stack_top()
/*
* This decides where the kernel will search for a free chunk of vm

@@ -31,6 +31,7 @@
#include <linux/prctl.h>
#include <linux/nmi.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
@@ -38,6 +39,7 @@
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/mips-cps.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
@@ -644,6 +646,29 @@ out:
return pc;
}
unsigned long mips_stack_top(void)
{
unsigned long top = TASK_SIZE & PAGE_MASK;
/* One page for branch delay slot "emulation" */
top -= PAGE_SIZE;
/* Space for the VDSO, data page & GIC user page */
top -= PAGE_ALIGN(current->thread.abi->vdso->size);
top -= PAGE_SIZE;
top -= mips_gic_present() ? PAGE_SIZE : 0;
/* Space for cache colour alignment */
if (cpu_has_dc_aliases)
top -= shm_align_mask + 1;
/* Space to randomize the VDSO base */
if (current->flags & PF_RANDOMIZE)
top -= VDSO_RANDOMIZE_SIZE;
return top;
}
/*
* Don't forget that the stack pointer must be aligned on a 8 bytes
* boundary for 32-bits ABI and 16 bytes for 64-bits ABI.

@@ -15,6 +15,7 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
@@ -97,6 +98,21 @@ void update_vsyscall_tz(void)
}
}
static unsigned long vdso_base(void)
{
unsigned long base;
/* Skip the delay slot emulation page */
base = STACK_TOP + PAGE_SIZE;
if (current->flags & PF_RANDOMIZE) {
base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
base = PAGE_ALIGN(base);
}
return base;
}
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mips_vdso_image *image = current->thread.abi->vdso;
@@ -137,7 +153,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (cpu_has_dc_aliases)
size += shm_align_mask + 1;
base = get_unmapped_area(NULL, 0, size, 0, 0);
base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
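
The new vdso_base() picks a page-aligned hint up to VDSO_RANDOMIZE_SIZE above the delay-slot emulation page instead of letting get_unmapped_area() start from 0. A user-space sketch of the arithmetic; the STACK_TOP value and the random input are illustrative, and the function argument stands in for get_random_int():

	#include <assert.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
	#define VDSO_RANDOMIZE_SIZE (1UL << 20)	/* 1 MiB, 32-bit case */

	/* Sketch of vdso_base(): a page-aligned hint somewhere in the
	 * first 1 MiB above the delay-slot emulation page. */
	static unsigned long vdso_base_sketch(unsigned long stack_top,
					      unsigned long random)
	{
		unsigned long base = stack_top + PAGE_SIZE;

		base += random & (VDSO_RANDOMIZE_SIZE - 1);
		return PAGE_ALIGN(base);
	}

	int main(void)
	{
		unsigned long top = 0x70000000UL; /* illustrative STACK_TOP */
		unsigned long base = vdso_base_sketch(top, 0x12345);

		assert(base >= top + PAGE_SIZE);
		assert(base <= top + PAGE_SIZE + VDSO_RANDOMIZE_SIZE);
		assert((base & (PAGE_SIZE - 1)) == 0);
		return 0;
	}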

@@ -102,7 +102,7 @@
*/
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
_PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
_PAGE_SOFT_DIRTY)
_PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
/*
* user access blocked by key
*/
@@ -120,7 +120,7 @@
*/
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
_PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
_PAGE_SOFT_DIRTY)
_PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
/*
* Mask of bits returned by pte_pgprot()
*/

@@ -124,7 +124,7 @@
*/
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SOFT_DIRTY)
_PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
/*
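
pte_modify() preserves exactly the bits named in _PAGE_CHG_MASK and rebuilds everything else from the new protection, so a flag missing from the mask is silently dropped by an mprotect() round-trip. A toy demonstration of why _PAGE_DEVMAP now has to be in the mask; the bit values below are made up, not the real x86 layout:

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_DIRTY	(1u << 1)	/* toy values, not real bits */
	#define PAGE_DEVMAP	(1u << 3)

	#define CHG_MASK_OLD	(PAGE_DIRTY)
	#define CHG_MASK_NEW	(PAGE_DIRTY | PAGE_DEVMAP)

	/* pte_modify() keeps only the bits in the preservation mask
	 * and merges in the new protection bits */
	static uint32_t pte_modify_sketch(uint32_t pte, uint32_t newprot,
					  uint32_t chg_mask)
	{
		return (pte & chg_mask) | newprot;
	}

	int main(void)
	{
		uint32_t pte = PAGE_DEVMAP | PAGE_DIRTY;

		/* before the fix: mprotect() lost _PAGE_DEVMAP */
		assert(!(pte_modify_sketch(pte, 0, CHG_MASK_OLD) & PAGE_DEVMAP));
		/* after: the bit survives because it is in the mask */
		assert(pte_modify_sketch(pte, 0, CHG_MASK_NEW) & PAGE_DEVMAP);
		return 0;
	}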

@@ -360,5 +360,6 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
#endif /* _ASM_X86_KVM_H */

@@ -1282,9 +1282,8 @@ EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
return kvm_apic_hw_enabled(apic) &&
addr >= apic->base_address &&
addr < apic->base_address + LAPIC_MMIO_LENGTH;
return addr >= apic->base_address &&
addr < apic->base_address + LAPIC_MMIO_LENGTH;
}
static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
@@ -1296,6 +1295,15 @@ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
if (!kvm_check_has_quirk(vcpu->kvm,
KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
return -EOPNOTSUPP;
memset(data, 0xff, len);
return 0;
}
kvm_lapic_reg_read(apic, offset, len, data);
return 0;
@@ -1806,6 +1814,14 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
if (!kvm_check_has_quirk(vcpu->kvm,
KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
return -EOPNOTSUPP;
return 0;
}
/*
* APIC register must be aligned on 128-bits boundary.
* 32/64/128 bits registers must be accessed thru 32 bits.

@@ -539,6 +539,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
}
clear_bit(HCI_UART_PROTO_SET, &hu->flags);
percpu_free_rwsem(&hu->proto_lock);
kfree(hu);
}

@@ -55,6 +55,7 @@ struct clk_plt_data {
u8 nparents;
struct clk_plt *clks[PMC_CLK_NUM];
struct clk_lookup *mclk_lookup;
struct clk_lookup *ether_clk_lookup;
};
/* Return an index in parent table */
@@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
spin_lock_init(&pclk->lock);
/*
* If the clock was already enabled by the firmware mark it as critical
* to avoid it being gated by the clock framework if no driver owns it.
*/
if (plt_clk_is_enabled(&pclk->hw))
init.flags |= CLK_IS_CRITICAL;
ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
if (ret) {
pclk = ERR_PTR(ret);
@@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev)
goto err_unreg_clk_plt;
}
data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
"ether_clk", NULL);
if (!data->ether_clk_lookup) {
err = -ENOMEM;
goto err_drop_mclk;
}
plt_clk_free_parent_names_loop(parent_names, data->nparents);
platform_set_drvdata(pdev, data);
return 0;
err_drop_mclk:
clkdev_drop(data->mclk_lookup);
err_unreg_clk_plt:
plt_clk_unregister_loop(data, i);
plt_clk_unregister_parents(data);
@@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev)
data = platform_get_drvdata(pdev);
clkdev_drop(data->ether_clk_lookup);
clkdev_drop(data->mclk_lookup);
plt_clk_unregister_loop(data, PMC_CLK_NUM);
plt_clk_unregister_parents(data);

@@ -576,7 +576,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
while (true) {
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
if (timeout <= 0)
return -ETIME;

@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
mt_params[3].type = ACPI_TYPE_INTEGER;
mt_params[3].integer.value = len;
mt_params[4].type = ACPI_TYPE_BUFFER;
mt_params[4].buffer.length = len;
mt_params[4].buffer.pointer = data->block + 1;
}
break;

@@ -3571,14 +3571,13 @@ static int __init dm_cache_init(void)
int r;
migration_cache = KMEM_CACHE(dm_cache_migration, 0);
if (!migration_cache) {
dm_unregister_target(&cache_target);
if (!migration_cache)
return -ENOMEM;
}
r = dm_register_target(&cache_target);
if (r) {
DMERR("cache target registration failed: %d", r);
kmem_cache_destroy(migration_cache);
return r;
}

@@ -463,7 +463,9 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
.version = {1, 5, 0},
#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_ZONED_HM,
#endif
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,

@@ -1034,12 +1034,14 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
/*
* The zone descriptors obtained with a zone report indicate
* zone positions within the target device. The zone descriptors
* must be remapped to match their position within the dm device.
* A target may call dm_remap_zone_report after completion of a
* REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
* from the target device mapping to the dm device.
* The zone descriptors obtained with a zone report indicate zone positions
* within the target backing device, regardless of whether that device is a
* partition and regardless of the target mapping start sector on the device
* or partition. The zone descriptors' start sector and write pointer position
* to match their relative position within the dm device.
* A target may call dm_remap_zone_report() after completion of a
* REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
* backing device.
*/
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
@@ -1050,6 +1052,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
struct blk_zone *zone;
unsigned int nr_rep = 0;
unsigned int ofst;
sector_t part_offset;
struct bio_vec bvec;
struct bvec_iter iter;
void *addr;
@@ -1057,6 +1060,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
if (bio->bi_status)
return;
/*
* bio sector was incremented by the request size on completion. Taking
* into account the original request sector, the target start offset on
* the backing device and the target mapping offset (ti->begin), this
* recovers the partition's start sector on the backing device. The
* partition offset is always 0 if the target uses a whole device.
*/
part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
/*
* Remap the start sector of the reported zones. For sequential zones,
* also remap the write pointer position.
@@ -1074,6 +1086,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
/* Set zones start sector */
while (hdr->nr_zones && ofst < bvec.bv_len) {
zone = addr + ofst;
zone->start -= part_offset;
if (zone->start >= start + ti->len) {
hdr->nr_zones = 0;
break;
@@ -1085,7 +1098,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
else if (zone->cond == BLK_ZONE_COND_EMPTY)
zone->wp = zone->start;
else
zone->wp = zone->wp + ti->begin - start;
zone->wp = zone->wp + ti->begin - start - part_offset;
}
ofst += sizeof(struct blk_zone);
hdr->nr_zones--;

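Editor's note: the remapping above is plain sector arithmetic: first strip the partition offset the backing disk reported, then translate from the target's start on the backing store (start) into its position on the dm device (ti->begin). A worked example with made-up sector numbers:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
        sector_t part_offset = 2048;  /* backing dev is a partition at 2048 */
        sector_t start = 0;           /* target maps the backing dev from 0 */
        sector_t ti_begin = 1024;     /* ...to sector 1024 of the dm device */
        sector_t zone_start = 2048 + 4096;  /* as reported by the whole disk */

        /* Same two steps as the patch: zone->start -= part_offset, then
         * shift into dm-device coordinates. */
        zone_start -= part_offset;
        zone_start = zone_start + ti_begin - start;
        printf("zone begins at dm sector %llu\n", zone_start); /* 5120 */
        return 0;
    }
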
@ -548,8 +548,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev,
}
static const struct of_device_id usbhs_child_match_table[] = {
{ .compatible = "ti,omap-ehci", },
{ .compatible = "ti,omap-ohci", },
{ .compatible = "ti,ehci-omap", },
{ .compatible = "ti,ohci-omap3", },
{ }
};
@ -875,6 +875,7 @@ static struct platform_driver usbhs_omap_driver = {
.pm = &usbhsomap_dev_pm_ops,
.of_match_table = usbhs_omap_dt_ids,
},
.probe = usbhs_omap_probe,
.remove = usbhs_omap_remove,
};
@ -884,9 +885,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
static int __init omap_usbhs_drvinit(void)
static int omap_usbhs_drvinit(void)
{
return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
return platform_driver_register(&usbhs_omap_driver);
}
/*
@ -898,7 +899,7 @@ static int __init omap_usbhs_drvinit(void)
*/
fs_initcall_sync(omap_usbhs_drvinit);
static void __exit omap_usbhs_drvexit(void)
static void omap_usbhs_drvexit(void)
{
platform_driver_unregister(&usbhs_omap_driver);
}

@ -1613,6 +1613,16 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blocks = card->host->max_blk_count;
if (brq->data.blocks > 1) {
/*
* Some SD cards in SPI mode return a CRC error or even lock up
* completely when trying to read the last block using a
* multiblock read command.
*/
if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
(blk_rq_pos(req) + blk_rq_sectors(req) ==
get_capacity(md->disk)))
brq->data.blocks--;
/*
* After a read error, we redo the request one sector
* at a time in order to accurately determine which

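Editor's note: the workaround above simply shortens any multiblock read that would touch the card's last sector, so that sector gets fetched by a separate single-block read instead. A sketch of the check with hypothetical geometry:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long capacity = 1000; /* sectors, hypothetical card */
        unsigned long long pos = 992;       /* request start */
        unsigned int blocks = 8;            /* request ends at sector 1000 */

        /* Mirror of the fix: in SPI mode, never let a multiblock read
         * cover the final sector; drop one block from the transfer. */
        if (blocks > 1 && pos + blocks == capacity)
            blocks--;
        printf("multiblock read now covers %u blocks\n", blocks);
        return 0;
    }
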
@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
int mod);
static void bond_netdev_notify_work(struct work_struct *work);
/*---------------------------- General routines -----------------------------*/
@ -1176,9 +1177,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
}
}
/* don't change skb->dev for link-local packets */
if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
/* Link-local multicast packets should be passed to the
* stack on the link they arrive as well as pass them to the
* bond-master device. These packets are mostly usable when
* stack receives it with the link on which they arrive
* (e.g. LLDP) they also must be available on master. Some of
* the use cases include (but are not limited to): LLDP agents
* that must be able to operate both on enslaved interfaces as
* well as on bonds themselves; linux bridges that must be able
* to process/pass BPDUs from attached bonds when any kind of
* STP version is enabled on the network.
*/
if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) {
nskb->dev = bond->dev;
nskb->queue_mapping = 0;
netif_rx(nskb);
}
return RX_HANDLER_PASS;
}
if (bond_should_deliver_exact_match(skb, slave, bond))
return RX_HANDLER_EXACT;
@ -1254,6 +1273,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
return NULL;
}
}
INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
return slave;
}
@ -1261,6 +1282,7 @@ static void bond_free_slave(struct slave *slave)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
cancel_delayed_work_sync(&slave->notify_work);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
kfree(SLAVE_AD_INFO(slave));
@ -1282,39 +1304,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
info->link_failure_count = slave->link_failure_count;
}
static void bond_netdev_notify(struct net_device *dev,
struct netdev_bonding_info *info)
{
rtnl_lock();
netdev_bonding_info_change(dev, info);
rtnl_unlock();
}
static void bond_netdev_notify_work(struct work_struct *_work)
{
struct netdev_notify_work *w =
container_of(_work, struct netdev_notify_work, work.work);
struct slave *slave = container_of(_work, struct slave,
notify_work.work);
if (rtnl_trylock()) {
struct netdev_bonding_info binfo;
bond_netdev_notify(w->dev, &w->bonding_info);
dev_put(w->dev);
kfree(w);
bond_fill_ifslave(slave, &binfo.slave);
bond_fill_ifbond(slave->bond, &binfo.master);
netdev_bonding_info_change(slave->dev, &binfo);
rtnl_unlock();
} else {
queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
}
}
void bond_queue_slave_event(struct slave *slave)
{
struct bonding *bond = slave->bond;
struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
if (!nnw)
return;
dev_hold(slave->dev);
nnw->dev = slave->dev;
bond_fill_ifslave(slave, &nnw->bonding_info.slave);
bond_fill_ifbond(bond, &nnw->bonding_info.master);
INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
queue_delayed_work(slave->bond->wq, &nnw->work, 0);
queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}
void bond_lower_state_changed(struct slave *slave)

@ -772,7 +772,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int port;
int ret;
ret = bcm_sf2_sw_rst(priv);
@ -784,12 +783,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, true);
for (port = 0; port < DSA_MAX_PORTS; port++) {
if ((1 << port) & ds->enabled_port_mask)
bcm_sf2_port_setup(ds, port, NULL);
else if (dsa_is_cpu_port(ds, port))
bcm_sf2_imp_setup(ds, port);
}
ds->ops->setup(ds);
return 0;
}
@ -1270,10 +1264,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
/* Disable all ports and interrupts */
priv->wol_ports_mask = 0;
bcm_sf2_sw_suspend(priv->dev->ds);
dsa_unregister_switch(priv->dev->ds);
/* Disable all ports and interrupts */
bcm_sf2_sw_suspend(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
return 0;

@ -222,9 +222,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
}
/* for single fragment packets use build_skb() */
if (buff->is_eop) {
if (buff->is_eop &&
buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
skb = build_skb(page_address(buff->page),
buff->len + AQ_SKB_ALIGN);
AQ_CFG_RX_FRAME_MAX);
if (unlikely(!skb)) {
err = -ENOMEM;
goto err_exit;
@ -244,18 +245,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff->len - ETH_HLEN,
SKB_TRUESIZE(buff->len - ETH_HLEN));
for (i = 1U, next_ = buff->next,
buff_ = &self->buff_ring[next_]; true;
next_ = buff_->next,
buff_ = &self->buff_ring[next_], ++i) {
skb_add_rx_frag(skb, i, buff_->page, 0,
buff_->len,
SKB_TRUESIZE(buff->len -
ETH_HLEN));
buff_->is_cleaned = 1;
if (buff_->is_eop)
break;
if (!buff->is_eop) {
for (i = 1U, next_ = buff->next,
buff_ = &self->buff_ring[next_];
true; next_ = buff_->next,
buff_ = &self->buff_ring[next_], ++i) {
skb_add_rx_frag(skb, i,
buff_->page, 0,
buff_->len,
SKB_TRUESIZE(buff->len -
ETH_HLEN));
buff_->is_cleaned = 1;
if (buff_->is_eop)
break;
}
}
}

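Editor's note: build_skb() turns the receive buffer itself into the skb's data area and needs room at the tail for struct skb_shared_info, so the fast path is only safe when the frame leaves that reserve free; oversized (jumbo) frames previously corrupted memory. A sketch of the size gate, with stand-in values for the two driver constants:

    #include <stdio.h>

    #define RX_FRAME_MAX 2048u   /* stand-in for AQ_CFG_RX_FRAME_MAX */
    #define SKB_RESERVE  320u    /* stand-in for AQ_SKB_ALIGN */

    static const char *rx_path(unsigned int len, int is_eop)
    {
        /* Single-fragment frames may borrow the page only if the
         * shared-info reserve still fits behind the payload. */
        if (is_eop && len <= RX_FRAME_MAX - SKB_RESERVE)
            return "build_skb fast path";
        return "allocate-and-copy path";
    }

    int main(void)
    {
        printf("1500 bytes: %s\n", rx_path(1500, 1));
        printf("1900 bytes: %s\n", rx_path(1900, 1));
        return 0;
    }
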
@ -1001,14 +1001,22 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
u32 reg;
/* Stop monitoring MPD interrupt */
intrl2_0_mask_set(priv, INTRL2_0_MPD);
/* Clear the MagicPacket detection logic */
reg = umac_readl(priv, UMAC_MPD_CTRL);
reg &= ~MPD_EN;
umac_writel(priv, reg, UMAC_MPD_CTRL);
reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
if (reg & INTRL2_0_MPD)
netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
if (reg & INTRL2_0_BRCM_MATCH_TAG) {
reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
RXCHK_BRCM_TAG_MATCH_MASK;
netdev_info(priv->netdev,
"Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
}
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
@ -1043,11 +1051,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
bcm_sysport_tx_reclaim_all(priv);
if (priv->irq0_stat & INTRL2_0_MPD) {
netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
bcm_sysport_resume_from_wol(priv);
}
if (!priv->is_lite)
goto out;
@ -2248,9 +2251,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
/* UniMAC receive needs to be turned on */
umac_enable_set(priv, CMD_RX_EN, 1);
/* Enable the interrupt wake-up source */
intrl2_0_mask_clear(priv, INTRL2_0_MPD);
netif_dbg(priv, wol, ndev, "entered WOL mode\n");
return 0;

@ -1864,8 +1864,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
if (unlikely(tx_pkts > bp->tx_wake_thresh))
if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
rx_pkts = budget;
raw_cons = NEXT_RAW_CMP(raw_cons);
break;
}
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
if (likely(budget))
rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
@ -1893,7 +1896,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
}
raw_cons = NEXT_RAW_CMP(raw_cons);
if (rx_pkts == budget)
if (rx_pkts && rx_pkts == budget)
break;
}
@ -2007,8 +2010,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
while (1) {
work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
if (work_done >= budget)
if (work_done >= budget) {
if (!budget)
BNXT_CP_DB_REARM(cpr->cp_doorbell,
cpr->cp_raw_cons);
break;
}
if (!bnxt_has_work(bp, cpr)) {
if (napi_complete_done(napi, work_done))
@ -2957,10 +2964,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
bp->hwrm_cmd_resp_dma_addr);
bp->hwrm_cmd_resp_addr = NULL;
if (bp->hwrm_cmd_resp_addr) {
dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
bp->hwrm_cmd_resp_dma_addr);
bp->hwrm_cmd_resp_addr = NULL;
}
if (bp->hwrm_dbg_resp_addr) {
dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
bp->hwrm_dbg_resp_addr,
@ -8210,6 +8218,7 @@ init_err_cleanup_tc:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
bnxt_free_hwrm_resources(bp);
bnxt_cleanup_pci(bp);
init_err_free:

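Editor's note: bnxt_poll_work() is also invoked with budget == 0 from netpoll, where only tx completions may be reaped. The old "rx_pkts == budget" exit fired immediately in that case and left tx work behind; requiring rx_pkts to be non-zero keeps the loop draining. A toy illustration of the changed test:

    #include <stdio.h>

    int main(void)
    {
        int budget = 0;     /* netpoll: flush tx only, no rx allowed */
        int rx_pkts = 0;

        if (rx_pkts == budget)
            printf("old test: stop at once, tx completions left behind\n");
        if (rx_pkts && rx_pkts == budget)
            printf("new test: stop\n");
        else
            printf("new test: keep walking the completion ring\n");
        return 0;
    }
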
@ -78,17 +78,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
return 0;
}
static void bnxt_tc_parse_vlan(struct bnxt *bp,
struct bnxt_tc_actions *actions,
const struct tc_action *tc_act)
static int bnxt_tc_parse_vlan(struct bnxt *bp,
struct bnxt_tc_actions *actions,
const struct tc_action *tc_act)
{
if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
switch (tcf_vlan_action(tc_act)) {
case TCA_VLAN_ACT_POP:
actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
break;
case TCA_VLAN_ACT_PUSH:
actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int bnxt_tc_parse_actions(struct bnxt *bp,
@ -122,7 +128,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
/* Push/pop VLAN */
if (is_tcf_vlan(tc_act)) {
bnxt_tc_parse_vlan(bp, actions, tc_act);
rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
if (rc)
return rc;
continue;
}
}

@ -3301,6 +3301,13 @@ static const struct macb_config at91sam9260_config = {
.init = macb_init,
};
static const struct macb_config sama5d3macb_config = {
.caps = MACB_CAPS_SG_DISABLED
| MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
};
static const struct macb_config pc302gem_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
.dma_burst_length = 16,
@ -3368,6 +3375,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,gem", .data = &pc302gem_config },
{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },

@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
if (cb->type == DESC_TYPE_SKB)
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
else
else if (cb->length)
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
}

@ -40,9 +40,9 @@
#define SKB_TMP_LEN(SKB) \
(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
int send_sz, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
{
struct hnae_desc *desc = &ring->desc[ring->next_to_use];
struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
desc->tx.send_size = cpu_to_le16((u16)size);
desc->tx.send_size = cpu_to_le16((u16)send_sz);
/* config bd buffer end */
hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
ring_ptr_move_fw(ring, next_to_use);
}
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
{
fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
buf_num, type, mtu);
}
static const struct acpi_device_id hns_enet_acpi_match[] = {
{ "HISI00C1", 0 },
{ "HISI00C2", 0 },
@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
/* when the frag size is bigger than hardware, split this frag */
for (k = 0; k < frag_buf_num; k++)
fill_v2_desc(ring, priv,
(k == frag_buf_num - 1) ?
fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
(k == frag_buf_num - 1) ?
sizeoflast : BD_MAX_SEND_SIZE,
dma + BD_MAX_SEND_SIZE * k,
frag_end && (k == frag_buf_num - 1) ? 1 : 0,
buf_num,
(type == DESC_TYPE_SKB && !k) ?
dma + BD_MAX_SEND_SIZE * k,
frag_end && (k == frag_buf_num - 1) ? 1 : 0,
buf_num,
(type == DESC_TYPE_SKB && !k) ?
DESC_TYPE_SKB : DESC_TYPE_PAGE,
mtu);
mtu);
}
netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,

@ -33,6 +33,7 @@
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <linux/if_vlan.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
@ -5101,7 +5102,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
}
/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
@ -6065,14 +6066,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
u8 l4_proto;
__be16 l3_proto = vlan_get_protocol(skb);
if (skb->protocol == htons(ETH_P_IP)) {
if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
} else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* Read l4_protocol from one of IPv6 extra headers */
@ -6084,7 +6086,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
}
return mvpp2_txq_desc_csum(skb_network_offset(skb),
skb->protocol, ip_hdr_len, l4_proto);
l3_proto, ip_hdr_len, l4_proto);
}
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
@ -6532,10 +6534,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
if (cause_tx) {
cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
if (port->has_tx_irqs) {
cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
if (cause_tx) {
cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
}
}
/* Process RX packets */

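Editor's note: for a VLAN-tagged skb, skb->protocol holds ETH_P_8021Q rather than the L3 protocol the checksum-offload logic needs; vlan_get_protocol() digs out the inner ethertype. A userspace analogue of that lookup on a raw frame (offsets for a single 802.1Q tag):

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_P_8021Q 0x8100

    static uint16_t get_be16(const uint8_t *p)
    {
        return (uint16_t)((p[0] << 8) | p[1]);  /* network byte order */
    }

    /* Rough analogue of vlan_get_protocol(): skip one VLAN tag if present. */
    static uint16_t l3_ethertype(const uint8_t *frame)
    {
        uint16_t outer = get_be16(frame + 12);
        return outer == ETH_P_8021Q ? get_be16(frame + 16) : outer;
    }

    int main(void)
    {
        uint8_t frame[20] = {0};
        frame[12] = 0x81; frame[13] = 0x00; /* 802.1Q tag */
        frame[16] = 0x08; frame[17] = 0x00; /* inner IPv4 */
        printf("ethertype for csum setup: 0x%04x\n", l3_ethertype(frame));
        return 0;
    }
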
@ -864,6 +864,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
}
} else {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {

@ -1922,7 +1922,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
u32 max_guarantee = 0;
int i;
for (i = 0; i <= esw->total_vports; i++) {
for (i = 0; i < esw->total_vports; i++) {
evport = &esw->vports[i];
if (!evport->enabled || evport->info.min_rate < max_guarantee)
continue;
@ -1942,7 +1942,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
int err;
int i;
for (i = 0; i <= esw->total_vports; i++) {
for (i = 0; i < esw->total_vports; i++) {
evport = &esw->vports[i];
if (!evport->enabled)
continue;

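Editor's note: both loops above index esw->vports[], an array with total_vports entries, so "<=" read one element past the end. The shape of the fix, reduced to a counter:

    #include <stdio.h>

    int main(void)
    {
        int total_vports = 4, old = 0, fixed = 0, i;

        for (i = 0; i <= total_vports; i++) /* visits index 4 of vports[4] */
            old++;
        for (i = 0; i < total_vports; i++)
            fixed++;
        printf("old loop: %d touches, fixed loop: %d (array size %d)\n",
               old, fixed, total_vports);
        return 0;
    }
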
@ -2058,14 +2058,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
return true;
}
static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
{
struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
struct nfp_net *nn = r_vec->nfp_net;
struct nfp_net_dp *dp = &nn->dp;
unsigned int budget = 512;
while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
continue;
return budget;
}
static void nfp_ctrl_poll(unsigned long arg)
@ -2077,9 +2080,13 @@ static void nfp_ctrl_poll(unsigned long arg)
__nfp_ctrl_tx_queued(r_vec);
spin_unlock_bh(&r_vec->lock);
nfp_ctrl_rx(r_vec);
nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
if (nfp_ctrl_rx(r_vec)) {
nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
} else {
tasklet_schedule(&r_vec->tasklet);
nn_dp_warn(&r_vec->nfp_net->dp,
"control message budget exceeded!\n");
}
}
/* Setup and Configuration

@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
int (*config_loopback) (struct qlcnic_adapter *, u8);
int (*clear_loopback) (struct qlcnic_adapter *, u8);
int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
int (*get_board_info) (struct qlcnic_adapter *);
void (*set_mac_filter_count) (struct qlcnic_adapter *);
void (*free_mac_list) (struct qlcnic_adapter *);
@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
}
static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
u64 *addr, u16 id)
u64 *addr, u16 vlan,
struct qlcnic_host_tx_ring *tx_ring)
{
adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
}
static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)

@ -2134,7 +2134,8 @@ out:
}
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
u16 vlan_id)
u16 vlan_id,
struct qlcnic_host_tx_ring *tx_ring)
{
u8 mac[ETH_ALEN];
memcpy(&mac, addr, ETH_ALEN);

@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
u16 vlan, struct qlcnic_host_tx_ring *ring);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);

@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev);
void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
u64 *uaddr, u16 vlan_id);
u64 *uaddr, u16 vlan_id,
struct qlcnic_host_tx_ring *tx_ring);
int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
struct ethtool_coalesce *);
int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);

@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
}
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
u16 vlan_id)
u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
struct qlcnic_mac_req *mac_req;
struct qlcnic_vlan_req *vlan_req;
struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u32 producer;
u64 word;
@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
struct sk_buff *skb)
struct sk_buff *skb,
struct qlcnic_host_tx_ring *tx_ring)
{
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
vlan_id);
vlan_id, tx_ring);
tmp_fil->ftime = jiffies;
return;
}
@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (!fil)
return;
qlcnic_change_filter(adapter, &src_addr, vlan_id);
qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
fil->ftime = jiffies;
fil->vlan_id = vlan_id;
memcpy(fil->faddr, &src_addr, ETH_ALEN);
@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
if (adapter->drv_mac_learn)
qlcnic_send_filter(adapter, first_desc, skb);
qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;

@ -2190,8 +2190,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
tx_q->tx_tail_addr = tx_q->dma_tx_phy +
(DMA_TX_SIZE * sizeof(struct dma_desc));
tx_q->tx_tail_addr = tx_q->dma_tx_phy;
priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
tx_q->tx_tail_addr,
chan);
@ -2963,6 +2962,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
queue);
@ -3178,9 +3178,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->synopsys_id < DWMAC_CORE_4_00)
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
else
else {
tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
queue);
}
return NETDEV_TX_OK;

@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
* Description:
* This function validates the number of Unicast address entries supported
* by a particular Synopsys 10/100/1000 controller. The Synopsys controller
* supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
* supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter
* logic. This function validates a valid, supported configuration is
* selected, and defaults to 1 Unicast address if an unsupported
* configuration is selected.
@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
int x = ucast_entries;
switch (x) {
case 1:
case 32:
case 1 ... 32:
case 64:
case 128:
break;

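Editor's note: "case 1 ... 32:" is the GNU case-range extension the kernel relies on. Per the corrected comment, the controller natively supports any entry count from 1 to 32 (plus 64 and 128), so intermediate values no longer fall through to the single-entry default. A compact, compilable demo of the construct:

    #include <stdio.h>

    /* GNU C case ranges (accepted by gcc and clang). */
    static int validate_ucast_entries(int n)
    {
        switch (n) {
        case 1 ... 32:
        case 64:
        case 128:
            return n;
        default:
            return 1;   /* unsupported: default to one entry */
        }
    }

    int main(void)
    {
        printf("16 -> %d\n", validate_ucast_entries(16));   /* kept */
        printf("100 -> %d\n", validate_ucast_entries(100)); /* clamped */
        return 0;
    }
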
@ -2110,17 +2110,15 @@ static int netvsc_remove(struct hv_device *dev)
cancel_delayed_work_sync(&ndev_ctx->dwork);
rcu_read_lock();
nvdev = rcu_dereference(ndev_ctx->nvdev);
if (nvdev)
rtnl_lock();
nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (nvdev)
cancel_work_sync(&nvdev->subchan_work);
/*
* Call to the vsc driver to let it know that the device is being
* removed. Also blocks mtu and channel changes.
*/
rtnl_lock();
vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
if (vf_netdev)
netvsc_unregister_vf(vf_netdev);
@ -2132,7 +2130,6 @@ static int netvsc_remove(struct hv_device *dev)
list_del(&ndev_ctx->list);
rtnl_unlock();
rcu_read_unlock();
hv_set_drvdata(dev, NULL);

@ -1165,6 +1165,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return -EBUSY;
}
if (dev == port_dev) {
netdev_err(dev, "Cannot enslave team device to itself\n");
return -EINVAL;
}
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",

@ -1233,6 +1233,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/

@ -1517,6 +1517,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
if (pdata) {
cancel_work_sync(&pdata->set_multicast);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;

@ -100,6 +100,9 @@ static enum pci_protocol_version_t pci_protocol_version;
#define STATUS_REVISION_MISMATCH 0xC0000059
/* space for 32bit serial number as string */
#define SLOT_NAME_SIZE 11
/*
* Message Types
*/
@ -516,6 +519,7 @@ struct hv_pci_dev {
struct list_head list_entry;
refcount_t refs;
enum hv_pcichild_state state;
struct pci_slot *pci_slot;
struct pci_function_description desc;
bool reported_missing;
struct hv_pcibus_device *hbus;
@ -1481,6 +1485,34 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
}
/*
* Assign entries in sysfs pci slot directory.
*
* Note that this function does not need to lock the children list
* because it is called from pci_devices_present_work which
* is serialized with hv_eject_device_work because they are on the
* same ordered workqueue. Therefore hbus->children list will not change
* even when pci_create_slot sleeps.
*/
static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
{
struct hv_pci_dev *hpdev;
char name[SLOT_NAME_SIZE];
int slot_nr;
list_for_each_entry(hpdev, &hbus->children, list_entry) {
if (hpdev->pci_slot)
continue;
slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
name, NULL);
if (!hpdev->pci_slot)
pr_warn("pci_create slot %s failed\n", name);
}
}
/**
* create_root_hv_pci_bus() - Expose a new root PCI bus
* @hbus: Root PCI bus, as understood by this driver
@ -1504,6 +1536,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
pci_bus_assign_resources(hbus->pci_bus);
hv_pci_assign_slots(hbus);
pci_bus_add_devices(hbus->pci_bus);
pci_unlock_rescan_remove();
hbus->state = hv_pcibus_installed;
@ -1787,6 +1820,7 @@ static void pci_devices_present_work(struct work_struct *work)
*/
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
hv_pci_assign_slots(hbus);
pci_unlock_rescan_remove();
break;
@ -1895,6 +1929,9 @@ static void hv_eject_device_work(struct work_struct *work)
list_del(&hpdev->list_entry);
spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
if (hpdev->pci_slot)
pci_destroy_slot(hpdev->pci_slot);
memset(&ctxt, 0, sizeof(ctxt));
ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;

@ -483,7 +483,13 @@ static int armpmu_filter_match(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
unsigned int cpu = smp_processor_id();
return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
int ret;
ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
if (ret && armpmu->filter_match)
return armpmu->filter_match(event);
return ret;
}
static ssize_t armpmu_cpumask_show(struct device *dev,

@ -643,6 +643,14 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
return err;
}
return 0;
}
static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
{
struct gpio_chip *chip = &mcp->chip;
int err;
err = gpiochip_irqchip_add_nested(chip,
&mcp23s08_irq_chip,
0,
@ -907,7 +915,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
}
if (mcp->irq && mcp->irq_controller) {
ret = mcp23s08_irq_setup(mcp);
ret = mcp23s08_irqchip_setup(mcp);
if (ret)
goto fail;
}
@ -932,6 +940,9 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
goto fail;
}
if (mcp->irq)
ret = mcp23s08_irq_setup(mcp);
fail:
if (ret < 0)
dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);

@ -172,7 +172,7 @@ static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
for (i = 0; i < pat->pat_nr; i++, pa++)
for (j = 0; j < pa->pa_nr; j++)
if (pa->pa_iova_pfn[i] == iova_pfn)
if (pa->pa_iova_pfn[j] == iova_pfn)
return true;
return false;

@ -374,8 +374,8 @@ struct atio_from_isp {
static inline int fcpcmd_is_corrupted(struct atio *atio)
{
if (atio->entry_type == ATIO_TYPE7 &&
(le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
FCP_CMD_LENGTH_MIN))
((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
FCP_CMD_LENGTH_MIN))
return 1;
else
return 0;

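Editor's note: le16_to_cpu() has to run before the mask. On a big-endian CPU the conversion is a byte swap, and masking the still-swapped value keeps the wrong bits; on little-endian the two orders happen to agree, which is how the bug stayed hidden. A demo simulating the big-endian view (mask value illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define LENGTH_MASK 0x0fff  /* low 12 bits carry the command length */

    static uint16_t swab16(uint16_t v) /* le16_to_cpu() on big-endian */
    {
        return (uint16_t)((v << 8) | (v >> 8));
    }

    int main(void)
    {
        /* Wire value 0x7120 (attrs 0x7, length 0x120), stored little-
         * endian, read back as a raw u16 by a big-endian CPU: */
        uint16_t raw = 0x2071;

        printf("mask then swap: 0x%04x\n", swab16(raw & LENGTH_MASK));
        printf("swap then mask: 0x%04x\n", swab16(raw) & LENGTH_MASK);
        return 0;   /* prints 0x7100 (garbage) vs 0x0120 (correct) */
    }
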
@ -1421,7 +1421,8 @@ static void iscsit_do_crypto_hash_buf(
sg_init_table(sg, ARRAY_SIZE(sg));
sg_set_buf(sg, buf, payload_length);
sg_set_buf(sg + 1, pad_bytes, padding);
if (padding)
sg_set_buf(sg + 1, pad_bytes, padding);
ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
@ -3942,10 +3943,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
{
int ret;
u8 buffer[ISCSI_HDR_LEN], opcode;
u8 *buffer, opcode;
u32 checksum = 0, digest = 0;
struct kvec iov;
buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return;
while (!kthread_should_stop()) {
/*
* Ensure that both TX and RX per connection kthreads
@ -3953,7 +3958,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
*/
iscsit_thread_check_cpumask(conn, current, 0);
memset(buffer, 0, ISCSI_HDR_LEN);
memset(&iov, 0, sizeof(struct kvec));
iov.iov_base = buffer;
@ -3962,7 +3966,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
if (ret != ISCSI_HDR_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
return;
break;
}
if (conn->conn_ops->HeaderDigest) {
@ -3972,7 +3976,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
if (ret != ISCSI_CRC_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
return;
break;
}
iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
@ -3996,7 +4000,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
}
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
return;
break;
opcode = buffer[0] & ISCSI_OPCODE_MASK;
@ -4007,13 +4011,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
" while in Discovery Session, rejecting.\n", opcode);
iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
buffer);
return;
break;
}
ret = iscsi_target_rx_opcode(conn, buffer);
if (ret < 0)
return;
break;
}
kfree(buffer);
}
int iscsi_target_rx_thread(void *arg)

@ -1236,17 +1236,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = readl(port_array[wIndex]);
break;
}
/* Software should not attempt to set
* port link state above '3' (U3) and the port
* must be enabled.
*/
if ((temp & PORT_PE) == 0 ||
(link_state > USB_SS_PORT_LS_U3)) {
xhci_warn(xhci, "Cannot set link state.\n");
/* Port must be enabled */
if (!(temp & PORT_PE)) {
retval = -ENODEV;
break;
}
/* Can't set port link state above '3' (U3) */
if (link_state > USB_SS_PORT_LS_U3) {
xhci_warn(xhci, "Cannot set port %d link state %d\n",
wIndex, link_state);
goto error;
}
if (link_state == USB_SS_PORT_LS_U3) {
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
wIndex + 1);

@ -333,6 +333,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
extern const u8 aty_postdividers[8];
/*
* Hardware cursor support
@ -359,7 +361,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
extern void aty_reset_engine(const struct atyfb_par *par);
extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);

@ -3087,17 +3087,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
/*
* PLL Reference Divider M:
*/
M = pll_regs[2];
M = pll_regs[PLL_REF_DIV];
/*
* PLL Feedback Divider N (Dependent on CLOCK_CNTL):
*/
N = pll_regs[7 + (clock_cntl & 3)];
N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
/*
* PLL Post Divider P (Dependent on CLOCK_CNTL):
*/
P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
/*
* PLL Divider Q:

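Editor's note: the old code computed P as 1 << field, but the Mach64 post divider is a table lookup (added as aty_postdividers[] in mach64_ct.c below) whose 3-bit index is split across two PLL registers: two bits per clock in VCLK_POST_DIV and one high bit in PLL_EXT_CNTL. A sketch of the fixed lookup with invented register contents:

    #include <stdio.h>

    static const unsigned char postdividers[8] = {1, 2, 4, 8, 3, 5, 6, 12};

    int main(void)
    {
        unsigned int clock = 0;            /* clock_cntl & 3 */
        unsigned int vclk_post_div = 0x02; /* invented register values */
        unsigned int pll_ext_cntl = 0x10;

        /* Only the table reaches the divide-by-3/5/6/12 settings the
         * power-of-two formula could never produce. */
        unsigned int idx = ((vclk_post_div >> (clock << 1)) & 3) |
                           ((pll_ext_cntl >> (2 + clock)) & 4);
        printf("P = %u (index %u)\n", postdividers[idx], idx); /* P = 6 */
        return 0;
    }
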
@ -115,7 +115,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
*/
#define Maximum_DSP_PRECISION 7
static u8 postdividers[] = {1,2,4,8,3};
const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
{
@ -222,7 +222,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
pll->vclk_post_div += (q < 64*8);
pll->vclk_post_div += (q < 32*8);
}
pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
// pll->vclk_post_div <<= 6;
pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
@ -513,7 +513,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
u8 mclk_fb_div, pll_ext_cntl;
pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
mclk_fb_div <<= 1;
@ -535,7 +535,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
xpost_div += (q < 64*8);
xpost_div += (q < 32*8);
}
pll->ct.xclk_post_div_real = postdividers[xpost_div];
pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
#ifdef CONFIG_PPC
@ -584,7 +584,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
mpost_div += (q < 64*8);
mpost_div += (q < 32*8);
}
sclk_post_div_real = postdividers[mpost_div];
sclk_post_div_real = aty_postdividers[mpost_div];
pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
pll->ct.spll_cntl2 = mpost_div << 4;
#ifdef DEBUG

@ -270,11 +270,25 @@ static void __d_free(struct rcu_head *head)
kmem_cache_free(dentry_cache, dentry);
}
static void __d_free_external_name(struct rcu_head *head)
{
struct external_name *name = container_of(head, struct external_name,
u.head);
mod_node_page_state(page_pgdat(virt_to_page(name)),
NR_INDIRECTLY_RECLAIMABLE_BYTES,
-ksize(name));
kfree(name);
}
static void __d_free_external(struct rcu_head *head)
{
struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
kfree(external_name(dentry));
kmem_cache_free(dentry_cache, dentry);
__d_free_external_name(&external_name(dentry)->u.head);
kmem_cache_free(dentry_cache, dentry);
}
static inline int dname_external(const struct dentry *dentry)
@ -305,7 +319,7 @@ void release_dentry_name_snapshot(struct name_snapshot *name)
struct external_name *p;
p = container_of(name->name, struct external_name, name[0]);
if (unlikely(atomic_dec_and_test(&p->u.count)))
kfree_rcu(p, u.head);
call_rcu(&p->u.head, __d_free_external_name);
}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
@ -1605,6 +1619,7 @@ EXPORT_SYMBOL(d_invalidate);
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
struct external_name *ext = NULL;
struct dentry *dentry;
char *dname;
int err;
@ -1625,14 +1640,13 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
dname = dentry->d_iname;
} else if (name->len > DNAME_INLINE_LEN-1) {
size_t size = offsetof(struct external_name, name[1]);
struct external_name *p = kmalloc(size + name->len,
GFP_KERNEL_ACCOUNT);
if (!p) {
ext = kmalloc(size + name->len, GFP_KERNEL_ACCOUNT);
if (!ext) {
kmem_cache_free(dentry_cache, dentry);
return NULL;
}
atomic_set(&p->u.count, 1);
dname = p->name;
atomic_set(&ext->u.count, 1);
dname = ext->name;
if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
kasan_unpoison_shadow(dname,
round_up(name->len + 1, sizeof(unsigned long)));
@ -1675,6 +1689,12 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
}
}
if (unlikely(ext)) {
pg_data_t *pgdat = page_pgdat(virt_to_page(ext));
mod_node_page_state(pgdat, NR_INDIRECTLY_RECLAIMABLE_BYTES,
ksize(ext));
}
this_cpu_inc(nr_dentry);
return dentry;
@ -2769,7 +2789,7 @@ static void copy_name(struct dentry *dentry, struct dentry *target)
dentry->d_name.hash_len = target->d_name.hash_len;
}
if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
kfree_rcu(old_name, u.head);
call_rcu(&old_name->u.head, __d_free_external_name);
}
static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)

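Editor's note: externally stored dentry names are kmalloc'd yet effectively reclaimable, since dropping dentries frees them; the patch therefore charges their real slab footprint, via ksize(), to the new NR_INDIRECTLY_RECLAIMABLE_BYTES counter on allocation and subtracts it in the RCU free path. A userspace analogue of that accounting, using glibc's malloc_usable_size() in place of ksize():

    #include <stdio.h>
    #include <stdlib.h>
    #include <malloc.h>    /* malloc_usable_size(), glibc */

    static long indirectly_reclaimable_bytes; /* stand-in for node stat */

    int main(void)
    {
        char *name = malloc(37); /* a long dentry name */
        if (!name)
            return 1;

        /* Charge what the allocator really handed out, not the request
         * size, just as the patch uses ksize() over the name length. */
        indirectly_reclaimable_bytes += (long)malloc_usable_size(name);
        printf("charged %ld bytes\n", indirectly_reclaimable_bytes);

        indirectly_reclaimable_bytes -= (long)malloc_usable_size(name);
        free(name);
        printf("balance after free: %ld\n", indirectly_reclaimable_bytes);
        return 0;
    }
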
@ -353,6 +353,7 @@ struct cgroup {
* specific task are charged to the dom_cgrp.
*/
struct cgroup *dom_cgrp;
struct cgroup *old_dom_cgrp; /* used while enabling threaded */
/*
* list of pidlists, up to two for each namespace (one for procs, one

@ -180,6 +180,7 @@ enum node_stat_item {
NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
NR_VM_NODE_STAT_ITEMS
};

@ -2307,6 +2307,13 @@ struct netdev_notifier_info {
struct net_device *dev;
};
struct netdev_notifier_info_ext {
struct netdev_notifier_info info; /* must be first */
union {
u32 mtu;
} ext;
};
struct netdev_notifier_change_info {
struct netdev_notifier_info info; /* must be first */
unsigned int flags_changed;

@ -110,6 +110,7 @@ struct arm_pmu {
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
int (*filter_match)(struct perf_event *event);
int num_events;
u64 max_period;
bool secure_access; /* 32-bit ARM only */

@ -5,6 +5,24 @@
#include <linux/if_vlan.h>
#include <uapi/linux/virtio_net.h>
static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
const struct virtio_net_hdr *hdr)
{
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_UDP:
skb->protocol = cpu_to_be16(ETH_P_IP);
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
skb->protocol = cpu_to_be16(ETH_P_IPV6);
break;
default:
return -EINVAL;
}
return 0;
}
static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
const struct virtio_net_hdr *hdr,
bool little_endian)

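Editor's note: the new helper above lets receivers cope with senders that request GSO but leave skb->protocol unset: the GSO type itself says whether the payload is IPv4 or IPv6, and anything else is rejected rather than passed on to the GSO stack. A standalone mirror (constant names abbreviated, values as in the virtio spec):

    #include <stdio.h>
    #include <stdint.h>

    #define GSO_TCPV4 1    /* VIRTIO_NET_HDR_GSO_TCPV4 */
    #define GSO_UDP   3    /* VIRTIO_NET_HDR_GSO_UDP */
    #define GSO_TCPV6 4    /* VIRTIO_NET_HDR_GSO_TCPV6 */
    #define GSO_ECN   0x80 /* VIRTIO_NET_HDR_GSO_ECN */

    static int proto_from_gso(uint8_t gso_type, uint16_t *proto)
    {
        switch (gso_type & ~GSO_ECN) {
        case GSO_TCPV4:
        case GSO_UDP:
            *proto = 0x0800;    /* ETH_P_IP */
            return 0;
        case GSO_TCPV6:
            *proto = 0x86DD;    /* ETH_P_IPV6 */
            return 0;
        default:
            return -1;          /* bogus header: drop the packet */
        }
    }

    int main(void)
    {
        uint16_t proto;
        if (!proto_from_gso(GSO_TCPV4 | GSO_ECN, &proto))
            printf("inferred protocol 0x%04x\n", proto);
        return 0;
    }
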
@ -139,12 +139,6 @@ struct bond_parm_tbl {
int mode;
};
struct netdev_notify_work {
struct delayed_work work;
struct net_device *dev;
struct netdev_bonding_info bonding_info;
};
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
@ -172,6 +166,7 @@ struct slave {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
#endif
struct delayed_work notify_work;
struct kobject kobj;
struct rtnl_link_stats64 slave_stats;
};

@ -129,12 +129,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
return sk->sk_bound_dev_if;
}
static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
{
return rcu_dereference_check(ireq->ireq_opt,
refcount_read(&ireq->req.rsk_refcnt) > 0);
}
struct inet_cork {
unsigned int flags;
__be32 addr;

@ -372,6 +372,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,

@ -357,6 +357,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus);
void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset);
void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,

@ -2780,11 +2780,12 @@ restart:
}
/**
* cgroup_save_control - save control masks of a subtree
* cgroup_save_control - save control masks and dom_cgrp of a subtree
* @cgrp: root of the target subtree
*
* Save ->subtree_control and ->subtree_ss_mask to the respective old_
* prefixed fields for @cgrp's subtree including @cgrp itself.
* Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the
* respective old_ prefixed fields for @cgrp's subtree including @cgrp
* itself.
*/
static void cgroup_save_control(struct cgroup *cgrp)
{
@ -2794,6 +2795,7 @@ static void cgroup_save_control(struct cgroup *cgrp)
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
dsct->old_subtree_control = dsct->subtree_control;
dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
dsct->old_dom_cgrp = dsct->dom_cgrp;
}
}
@ -2819,11 +2821,12 @@ static void cgroup_propagate_control(struct cgroup *cgrp)
}
/**
* cgroup_restore_control - restore control masks of a subtree
* cgroup_restore_control - restore control masks and dom_cgrp of a subtree
* @cgrp: root of the target subtree
*
* Restore ->subtree_control and ->subtree_ss_mask from the respective old_
* prefixed fields for @cgrp's subtree including @cgrp itself.
* Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the
* respective old_ prefixed fields for @cgrp's subtree including @cgrp
* itself.
*/
static void cgroup_restore_control(struct cgroup *cgrp)
{
@ -2833,6 +2836,7 @@ static void cgroup_restore_control(struct cgroup *cgrp)
cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
dsct->subtree_control = dsct->old_subtree_control;
dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
dsct->dom_cgrp = dsct->old_dom_cgrp;
}
}
@ -3140,6 +3144,8 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
{
struct cgroup *parent = cgroup_parent(cgrp);
struct cgroup *dom_cgrp = parent->dom_cgrp;
struct cgroup *dsct;
struct cgroup_subsys_state *d_css;
int ret;
lockdep_assert_held(&cgroup_mutex);
@ -3169,12 +3175,13 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
*/
cgroup_save_control(cgrp);
cgrp->dom_cgrp = dom_cgrp;
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
if (dsct == cgrp || cgroup_is_threaded(dsct))
dsct->dom_cgrp = dom_cgrp;
ret = cgroup_apply_control(cgrp);
if (!ret)
parent->nr_threaded_children++;
else
cgrp->dom_cgrp = cgrp;
cgroup_finalize_control(cgrp, ret);
return ret;

@ -2843,9 +2843,6 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
if (!(pvmw->pmd && !pvmw->pte))
return;
mmu_notifier_invalidate_range_start(mm, address,
address + HPAGE_PMD_SIZE);
flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
pmdval = *pvmw->pmd;
pmdp_invalidate(vma, address, pvmw->pmd);
@ -2858,9 +2855,6 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
set_pmd_at(mm, address, pvmw->pmd, pmdswp);
page_remove_rmap(page, true);
put_page(page);
mmu_notifier_invalidate_range_end(mm, address,
address + HPAGE_PMD_SIZE);
}
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)

@ -4569,6 +4569,13 @@ long si_mem_available(void)
min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
wmark_low);
/*
* Part of the kernel memory, which can be released under memory
* pressure.
*/
available += global_node_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
PAGE_SHIFT;
if (available < 0)
available = 0;
return available;

@ -1208,6 +1208,7 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
if (!chunk)
return;
pcpu_mem_free(chunk->md_blocks);
pcpu_mem_free(chunk->bound_map);
pcpu_mem_free(chunk->alloc_map);
pcpu_mem_free(chunk);

@ -635,6 +635,13 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
*/
free += global_node_page_state(NR_SLAB_RECLAIMABLE);
/*
* Part of the kernel memory, which can be released
* under memory pressure.
*/
free += global_node_page_state(
NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
/*
* Leave reserved pages. The pages are not for anonymous pages.
*/

@ -1090,6 +1090,7 @@ const char * const vmstat_text[] = {
"nr_vmscan_immediate_reclaim",
"nr_dirtied",
"nr_written",
"", /* nr_indirectly_reclaimable */
/* enum writeback_stat_item counters */
"nr_dirty_threshold",
@ -1214,7 +1215,6 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_DEBUG_VM_VMACACHE
"vmacache_find_calls",
"vmacache_find_hits",
"vmacache_full_flushes",
#endif
#ifdef CONFIG_SWAP
"swap_ra",
@ -1673,6 +1673,10 @@ static int vmstat_show(struct seq_file *m, void *arg)
unsigned long *l = arg;
unsigned long off = l - (unsigned long *)m->private;
/* Skip hidden vmstat items. */
if (*vmstat_text[off] == '\0')
return 0;
seq_puts(m, vmstat_text[off]);
seq_put_decimal_ull(m, " ", *l);
seq_putc(m, '\n');

Some files were not shown because too many files have changed in this diff.