Merge android-4.14-p.70 (e601ab6) into msm-4.14

* refs/heads/tmp-e601ab6:
  Linux 4.14.70
  arm64: Handle mismatched cache type
  arm64: Fix mismatched cache line size detection
  ASoC: wm8994: Fix missing break in switch
  arm64: cpu_errata: include required headers
  x86: kvm: avoid unused variable warning
  kvm: x86: Set highest physical address bits in non-present/reserved SPTEs
  Revert "ARM: imx_v6_v7_defconfig: Select ULPI support"
  irda: Only insert new objects into the global database via setsockopt
  irda: Fix memory leak caused by repeated binds of irda socket
  s390/lib: use expoline for all bcr instructions
  kbuild: make missing $DEPMOD a Warning instead of an Error
  drm/i915: Increase LSPCON timeout
  x86/xen: don't write ptes directly in 32-bit PV guests
  x86/pae: use 64 bit atomic xchg function in native_ptep_get_and_clear
  usb: dwc3: core: Fix ULPI PHYs and prevent phy_get/ulpi_init during suspend/resume
  HID: add quirk for another PIXART OEM mouse used by HP
  mm: Fix devm_memremap_pages() collision handling
  lightnvm: pblk: free padded entries in write buffer
  sched/deadline: Fix switching to -deadline
  debugobjects: Make stack check warning more informative
  uapi/linux/keyctl.h: don't use C++ reserved keyword as a struct member name
  drm/amdgpu:add VCN booting with firmware loaded by PSP
  drm/amdgpu:add VCN support in PSP driver
  drm/amdgpu:add new firmware id for VCN
  drm/amdgpu:add tmr mc address into amdgpu_firmware_info
  drm/amdgpu: update tmr mc address
  drm/edid: Add 6 bpc quirk for SDC panel in Lenovo B50-80
  drm/amd/pp/Polaris12: Fix a chunk of registers missed to program
  drm/amdgpu: Fix RLC safe mode test in gfx_v9_0_enter_rlc_safe_mode
  drm/i915/lpe: Mark LPE audio runtime pm as "no callbacks"
  ARM: rockchip: Force CONFIG_PM on Rockchip systems
  arm64: rockchip: Force CONFIG_PM on Rockchip systems
  btrfs: Don't remove block group that still has pinned down bytes
  btrfs: relocation: Only remove reloc rb_trees if reloc control has been initialized
  btrfs: replace: Reset on-disk dev stats value after replace
  btrfs: Exit gracefully when chunk map cannot be inserted to the tree
  kvm: nVMX: Fix fault vector for VMX operation at CPL > 0
  KVM: vmx: track host_state.loaded using a loaded_vmcs pointer
  clk: rockchip: Add pclk_rkpwm_pmu to PMU critical clocks in rk3399
  powerpc/pseries: Avoid using the size greater than RTAS_ERROR_LOG_MAX.
  powerpc/64s: Make rfi_flush_fallback a little more robust
  powerpc/platforms/85xx: fix t1042rdb_diu.c build errors & warning
  SMB3: Number of requests sent should be displayed for SMB3 not just CIFS
  smb3: fix reset of bytes read and written stats
  cfq: Suppress compiler warnings about comparisons
  RDS: IB: fix 'passing zero to ERR_PTR()' warning
  selftests/powerpc: Kill child processes on SIGINT
  iommu/omap: Fix cache flushes on L2 table entries
  ASoC: rt5677: Fix initialization of rt5677_of_match.data
  staging: comedi: ni_mio_common: fix subdevice flags for PFI subdevice
  dm kcopyd: avoid softlockup in run_complete_job
  PCI: mvebu: Fix I/O space end address calculation
  xen/balloon: fix balloon initialization for PVH Dom0
  Input: do not use WARN() in input_alloc_absinfo()
  NFSv4: Fix error handling in nfs4_sp4_select_mode()
  scsi: aic94xx: fix an error code in aic94xx_init()
  ACPI / scan: Initialize status to ACPI_STA_DEFAULT
  s390/dasd: fix panic for failed online processing
  s390/dasd: fix hanging offline processing due to canceled worker
  block: bvec_nr_vecs() returns value for wrong slab
  perf probe powerpc: Fix trace event post-processing
  powerpc: Fix size calculation using resource_size()
  powerpc/uaccess: Enable get_user(u64, *p) on 32-bit
  f2fs: fix to clear PG_checked flag in set_page_dirty()
  net/9p: fix error path of p9_virtio_probe
  net/9p/trans_fd.c: fix race by holding the lock
  irqchip/bcm7038-l1: Hide cpu offline callback when building for !SMP
  perf tools: Check for null when copying nsinfo.
  net: hns3: Fix for phy link issue when using marvell phy driver
  net: hns3: Fix for command format parsing error in hclge_is_all_function_id_zero
  RDMA/hns: Fix usage of bitmap allocation functions return values
  tcp, ulp: add alias for all ulp modules
  netfilter: fix memory leaks on netlink_dump_start error
  platform/x86: asus-nb-wmi: Add keymap entry for lid flip action on UX360
  mfd: sm501: Set coherent_dma_mask when creating subdevices
  ipvs: fix race between ip_vs_conn_new() and ip_vs_del_dest()
  s390/kdump: Fix memleak in nt_vmcoreinfo
  netfilter: ip6t_rpfilter: set F_IFACE for linklocal addresses
  platform/x86: intel_punit_ipc: fix build errors
  fs/dcache.c: fix kmemcheck splat at take_dentry_name_snapshot()
  mm/fadvise.c: fix signed overflow UBSAN complaint
  pwm: meson: Fix mux clock names
  IB/hfi1: Invalid NUMA node information can cause a divide by zero
  x86/mce: Add notifier_block forward declaration
  virtio: pci-legacy: Validate queue pfn
  scripts: modpost: check memory allocation results
  fat: validate ->i_start before using
  fs/proc/kcore.c: use __pa_symbol() for KCORE_TEXT list entries
  hfsplus: fix NULL dereference in hfsplus_lookup()
  reiserfs: change j_timestamp type to time64_t
  fork: don't copy inconsistent signal handler state to child
  sunrpc: Don't use stack buffer with scatterlist
  hfs: prevent crash on exit from failed search
  hfsplus: don't return 0 when fill_super() failed
  cifs: check if SMB2 PDU size has been padded and suppress the warning
  net: sched: action_ife: take reference to meta module
  act_ife: fix a potential deadlock
  act_ife: move tcfa_lock down to where necessary
  hv_netvsc: Fix a deadlock by getting rtnl lock earlier in netvsc_probe()
  hv_netvsc: ignore devices that are not PCI
  vhost: correctly check the iova range when waking virtqueue
  mlxsw: spectrum_switchdev: Do not leak RIFs when removing bridge
  sctp: hold transport before accessing its asoc in sctp_transport_get_next
  nfp: wait for posted reconfigs when disabling the device
  tipc: fix a missing rhashtable_walk_exit()
  net/sched: act_pedit: fix dump of extended layered op
  vti6: remove !skb->ignore_df check from vti6_xmit()
  tcp: do not restart timewait timer on rst reception
  r8169: add support for NCube 8168 network card
  qlge: Fix netdev features configuration.
  net: sched: Fix memory exposure from short TCA_U32_SEL
  net: macb: do not disable MDIO bus at open/close time
  net: bcmgenet: use MAC link status for fixed phy
  ipv4: tcp: send zero IPID for RST and ACK sent in SYN-RECV and TIME-WAIT state
  act_ife: fix a potential use-after-free

Conflicts:
	arch/arm64/include/asm/cpucaps.h
	arch/arm64/kernel/cpu_errata.c
	drivers/usb/dwc3/core.c

Change-Id: If27731291adb25e780b5eb7f202762f6ea065cd8
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
commit 9cf1fbab82
 Makefile | 2
 arch/arm/configs/imx_v6_v7_defconfig | 2
 arch/arm/mach-rockchip/Kconfig | 1
 arch/arm64/Kconfig.platforms | 1
 arch/arm64/include/asm/cache.h | 5
 arch/arm64/include/asm/cpucaps.h | 3
 arch/arm64/kernel/cpu_errata.c | 25
 arch/arm64/kernel/cpufeature.c | 4
 arch/powerpc/include/asm/uaccess.h | 13
 arch/powerpc/kernel/exceptions-64s.S | 6
 arch/powerpc/platforms/85xx/t1042rdb_diu.c | 4
 arch/powerpc/platforms/pseries/ras.c | 2
 arch/powerpc/sysdev/mpic_msgr.c | 2
 arch/s390/kernel/crash_dump.c | 17
 arch/s390/lib/mem.S | 12
 arch/x86/include/asm/mce.h | 1
 arch/x86/include/asm/pgtable-3level.h | 7
 arch/x86/kvm/mmu.c | 43
 arch/x86/kvm/vmx.c | 26
 arch/x86/kvm/x86.c | 12
 arch/x86/xen/mmu_pv.c | 7
 block/bio.c | 2
 block/cfq-iosched.c | 22
 drivers/acpi/scan.c | 5
 drivers/clk/rockchip/clk-rk3399.c | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 5
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 4
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 17
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 3
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 40
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 43
 drivers/gpu/drm/drm_edid.c | 3
 drivers/gpu/drm/i915/intel_lpe_audio.c | 4
 drivers/gpu/drm/i915/intel_lspcon.c | 2
 drivers/hid/hid-ids.h | 1
 drivers/hid/usbhid/hid-quirks.c | 1
 drivers/infiniband/hw/hfi1/affinity.c | 24
 drivers/infiniband/hw/hns/hns_roce_pd.c | 2
 drivers/infiniband/hw/hns/hns_roce_qp.c | 5
 drivers/input/input.c | 16
 drivers/iommu/omap-iommu.c | 4
 drivers/irqchip/irq-bcm7038-l1.c | 4
 drivers/lightnvm/pblk-core.c | 1
 drivers/lightnvm/pblk-write.c | 7
 drivers/md/dm-kcopyd.c | 2
 drivers/mfd/sm501.c | 1
 drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3
 drivers/net/ethernet/broadcom/genet/bcmmii.c | 10
 drivers/net/ethernet/cadence/macb_main.c | 9
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 2
 drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2
 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 11
 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 20
 drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 48
 drivers/net/ethernet/qlogic/qlge/qlge_main.c | 23
 drivers/net/ethernet/realtek/r8169.c | 1
 drivers/net/hyperv/netvsc_drv.c | 16
 drivers/pci/host/pci-mvebu.c | 2
 drivers/platform/x86/asus-nb-wmi.c | 1
 drivers/platform/x86/intel_punit_ipc.c | 1
 drivers/pwm/pwm-meson.c | 3
 drivers/s390/block/dasd_eckd.c | 10
 drivers/scsi/aic94xx/aic94xx_init.c | 4
 drivers/staging/comedi/drivers/ni_mio_common.c | 3
 drivers/staging/irda/net/af_irda.c | 13
 drivers/usb/dwc3/core.c | 46
 drivers/usb/dwc3/core.h | 5
 drivers/vhost/vhost.c | 2
 drivers/virtio/virtio_pci_legacy.c | 14
 drivers/xen/xen-balloon.c | 2
 fs/btrfs/dev-replace.c | 6
 fs/btrfs/extent-tree.c | 2
 fs/btrfs/relocation.c | 23
 fs/btrfs/volumes.c | 8
 fs/cifs/cifs_debug.c | 8
 fs/cifs/smb2misc.c | 7
 fs/cifs/smb2pdu.c | 2
 fs/dcache.c | 3
 fs/f2fs/data.c | 4
 fs/fat/cache.c | 19
 fs/fat/fat.h | 5
 fs/fat/fatent.c | 6
 fs/hfs/brec.c | 7
 fs/hfsplus/dir.c | 4
 fs/hfsplus/super.c | 4
 fs/nfs/nfs4proc.c | 2
 fs/proc/kcore.c | 4
 fs/reiserfs/reiserfs.h | 2
 include/linux/pci_ids.h | 2
 include/net/tcp.h | 4
 include/uapi/linux/keyctl.h | 2
 kernel/fork.c | 2
 kernel/memremap.c | 11
 kernel/sched/deadline.c | 11
 lib/debugobjects.c | 7
 mm/fadvise.c | 8
 net/9p/trans_fd.c | 10
 net/9p/trans_virtio.c | 3
 Some files were not shown because too many files have changed in this diff.

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 69
SUBLEVEL = 70
EXTRAVERSION =
NAME = Petit Gorille

@@ -289,7 +289,6 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
CONFIG_USB_CHIPIDEA_ULPI=y
CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -326,7 +325,6 @@ CONFIG_USB_GADGETFS=m
CONFIG_USB_FUNCTIONFS=m
CONFIG_USB_MASS_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_USB_ULPI_BUS=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y

@@ -18,6 +18,7 @@ config ARCH_ROCKCHIP
select ARM_GLOBAL_TIMER
select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
select ZONE_DMA if ARM_LPAE
select PM
help
Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
containing the RK2928, RK30xx and RK31xx series.

@@ -197,6 +197,7 @@ config ARCH_ROCKCHIP
select GPIOLIB
select PINCTRL
select PINCTRL_ROCKCHIP
select PM
select ROCKCHIP_TIMER
help
This enables support for the ARMv8 based Rockchip chipsets,

@@ -20,9 +20,14 @@
#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
#define CTR_DMINLINE_SHIFT 16
#define CTR_IMINLINE_SHIFT 0
#define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15
#define CTR_CACHE_MINLINE_MASK \
(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
#define ICACHE_POLICY_VPIPT 0

@@ -45,7 +45,8 @@
#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
#define ARM64_HW_DBM 26
#define ARM64_SSBD 27
#define ARM64_MISMATCHED_CACHE_TYPE 28
#define ARM64_NCAPS 28
#define ARM64_NCAPS 29
#endif /* __ASM_CPUCAPS_H */

@@ -17,8 +17,8 @@
*/
#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
@@ -47,12 +47,18 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
}
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
int scope)
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
int scope)
{
u64 mask = CTR_CACHE_MINLINE_MASK;
/* Skip matching the min line sizes for cache type check */
if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
mask ^= arm64_ftr_reg_ctrel0.strict_mask;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
return (read_cpuid_cachetype() & mask) !=
(arm64_ftr_reg_ctrel0.sys_val & mask);
}
static int cpu_enable_trap_ctr_access(void *__unused)
@@ -475,7 +481,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.desc = "Mismatched cache line size",
.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
.matches = has_mismatched_cache_line_size,
.matches = has_mismatched_cache_type,
.def_scope = SCOPE_LOCAL_CPU,
.enable = cpu_enable_trap_ctr_access,
},
{
.desc = "Mismatched cache type",
.capability = ARM64_MISMATCHED_CACHE_TYPE,
.matches = has_mismatched_cache_type,
.def_scope = SCOPE_LOCAL_CPU,
.enable = cpu_enable_trap_ctr_access,
},

@@ -180,14 +180,14 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
/*
* Linux can handle differing I-cache policies. Userspace JITs will
* make use of *minLine.
* If we have differing I-cache policies, report it as the weakest - VIPT.
*/
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
ARM64_FTR_END,
};

@@ -223,10 +223,17 @@ do { \
} \
} while (0)
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
*/
#define __long_type(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err; \
unsigned long __gu_val; \
__long_type(*(ptr)) __gu_val; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
@@ -239,7 +246,7 @@ do { \
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \
__long_type(*(ptr)) __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
might_fault(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
@@ -251,7 +258,7 @@ do { \
#define __get_user_nosleep(x, ptr, size) \
({ \
long __gu_err; \
unsigned long __gu_val; \
__long_type(*(ptr)) __gu_val; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \

@@ -1452,6 +1452,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
TRAMP_REAL_BEGIN(rfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
std r1,PACA_EXRFI+EX_R12(r13)
ld r1,PACAKSAVE(r13)
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
@@ -1486,12 +1488,15 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
ld r1,PACA_EXRFI+EX_R12(r13)
GET_SCRATCH0(r13);
rfid
TRAMP_REAL_BEGIN(hrfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
std r1,PACA_EXRFI+EX_R12(r13)
ld r1,PACAKSAVE(r13)
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
@@ -1526,6 +1531,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
ld r1,PACA_EXRFI+EX_R12(r13)
GET_SCRATCH0(r13);
hrfid

@@ -9,8 +9,10 @@
* option) any later version.
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -150,3 +152,5 @@ static int __init t1042rdb_diu_init(void)
}
early_initcall(t1042rdb_diu_init);
MODULE_LICENSE("GPL");

@@ -371,7 +371,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
int len, error_log_length;
error_log_length = 8 + rtas_error_extended_log_length(h);
len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
memcpy(global_mce_data_buf, h, len);
errhdr = (struct rtas_error_log *)global_mce_data_buf;

@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
/* IO map the message register block. */
of_address_to_resource(np, 0, &rsrc);
msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
if (!msgr_block_addr) {
dev_err(&dev->dev, "Failed to iomap MPIC message registers");
return -EFAULT;

@@ -404,11 +404,13 @@ static void *get_vmcoreinfo_old(unsigned long *size)
if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
sizeof(nt_name) - 1))
return NULL;
if (strcmp(nt_name, "VMCOREINFO") != 0)
if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
return NULL;
vmcoreinfo = kzalloc_panic(note.n_descsz);
if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz))
if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
kfree(vmcoreinfo);
return NULL;
}
*size = note.n_descsz;
return vmcoreinfo;
}
@@ -418,15 +420,20 @@ static void *get_vmcoreinfo_old(unsigned long *size)
*/
static void *nt_vmcoreinfo(void *ptr)
{
const char *name = VMCOREINFO_NOTE_NAME;
unsigned long size;
void *vmcoreinfo;
vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
if (!vmcoreinfo)
vmcoreinfo = get_vmcoreinfo_old(&size);
if (vmcoreinfo)
return nt_init_name(ptr, 0, vmcoreinfo, size, name);
vmcoreinfo = get_vmcoreinfo_old(&size);
if (!vmcoreinfo)
return ptr;
return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
kfree(vmcoreinfo);
return ptr;
}
/*

@@ -17,7 +17,7 @@
ENTRY(memmove)
ltgr %r4,%r4
lgr %r1,%r2
bzr %r14
jz .Lmemmove_exit
aghi %r4,-1
clgr %r2,%r3
jnh .Lmemmove_forward
@@ -36,6 +36,7 @@ ENTRY(memmove)
.Lmemmove_forward_remainder:
larl %r5,.Lmemmove_mvc
ex %r4,0(%r5)
.Lmemmove_exit:
BR_EX %r14
.Lmemmove_reverse:
ic %r0,0(%r4,%r3)
@@ -65,7 +66,7 @@ EXPORT_SYMBOL(memmove)
*/
ENTRY(memset)
ltgr %r4,%r4
bzr %r14
jz .Lmemset_exit
ltgr %r3,%r3
jnz .Lmemset_fill
aghi %r4,-1
@@ -80,12 +81,13 @@ ENTRY(memset)
.Lmemset_clear_remainder:
larl %r3,.Lmemset_xc
ex %r4,0(%r3)
.Lmemset_exit:
BR_EX %r14
.Lmemset_fill:
stc %r3,0(%r2)
cghi %r4,1
lgr %r1,%r2
ber %r14
je .Lmemset_fill_exit
aghi %r4,-2
srlg %r3,%r4,8
ltgr %r3,%r3
@@ -97,6 +99,7 @@ ENTRY(memset)
.Lmemset_fill_remainder:
larl %r3,.Lmemset_mvc
ex %r4,0(%r3)
.Lmemset_fill_exit:
BR_EX %r14
.Lmemset_xc:
xc 0(1,%r1),0(%r1)
@@ -111,7 +114,7 @@ EXPORT_SYMBOL(memset)
*/
ENTRY(memcpy)
ltgr %r4,%r4
bzr %r14
jz .Lmemcpy_exit
aghi %r4,-1
srlg %r5,%r4,8
ltgr %r5,%r5
@@ -120,6 +123,7 @@ ENTRY(memcpy)
.Lmemcpy_remainder:
larl %r5,.Lmemcpy_mvc
ex %r4,0(%r5)
.Lmemcpy_exit:
BR_EX %r14
.Lmemcpy_loop:
mvc 0(256,%r1),0(%r3)

@@ -200,6 +200,7 @@ enum mce_notifier_prios {
MCE_PRIO_LOWEST = 0,
};
struct notifier_block;
extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);

@@ -2,6 +2,8 @@
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H
#include <asm/atomic64_32.h>
/*
* Intel Physical Address Extension (PAE) Mode - three-level page
* tables on PPro+ CPUs.
@@ -147,10 +149,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
pte_t res;
/* xchg acts as a barrier before the setting of the high bits */
res.pte_low = xchg(&ptep->pte_low, 0);
res.pte_high = ptep->pte_high;
ptep->pte_high = 0;
res.pte = (pteval_t)atomic64_xchg((atomic64_t *)ptep, 0);
return res;
}

@@ -220,6 +220,17 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
PT64_EPT_EXECUTABLE_MASK;
static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
/*
* This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
* to guard against L1TF attacks.
*/
static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
/*
* The number of high-order 1 bits to use in the mask above.
*/
static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
static void mmu_spte_set(u64 *sptep, u64 spte);
static void mmu_free_roots(struct kvm_vcpu *vcpu);
@@ -308,9 +319,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
{
unsigned int gen = kvm_current_mmio_generation(vcpu);
u64 mask = generation_mmio_spte_mask(gen);
u64 gpa = gfn << PAGE_SHIFT;
access &= ACC_WRITE_MASK | ACC_USER_MASK;
mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
mask |= shadow_mmio_value | access;
mask |= gpa | shadow_nonpresent_or_rsvd_mask;
mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
<< shadow_nonpresent_or_rsvd_mask_len;
trace_mark_mmio_spte(sptep, gfn, access, gen);
mmu_spte_set(sptep, mask);
@@ -323,8 +338,14 @@ static bool is_mmio_spte(u64 spte)
static gfn_t get_mmio_spte_gfn(u64 spte)
{
u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
return (spte & ~mask) >> PAGE_SHIFT;
u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
shadow_nonpresent_or_rsvd_mask;
u64 gpa = spte & ~mask;
gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
& shadow_nonpresent_or_rsvd_mask;
return gpa >> PAGE_SHIFT;
}
static unsigned get_mmio_spte_access(u64 spte)
@@ -381,7 +402,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
void kvm_mmu_clear_all_pte_masks(void)
static void kvm_mmu_reset_all_pte_masks(void)
{
shadow_user_mask = 0;
shadow_accessed_mask = 0;
@@ -391,6 +412,18 @@ void kvm_mmu_clear_all_pte_masks(void)
shadow_mmio_mask = 0;
shadow_present_mask = 0;
shadow_acc_track_mask = 0;
/*
* If the CPU has 46 or less physical address bits, then set an
* appropriate mask to guard against L1TF attacks. Otherwise, it is
* assumed that the CPU is not vulnerable to L1TF.
*/
if (boot_cpu_data.x86_phys_bits <
52 - shadow_nonpresent_or_rsvd_mask_len)
shadow_nonpresent_or_rsvd_mask =
rsvd_bits(boot_cpu_data.x86_phys_bits -
shadow_nonpresent_or_rsvd_mask_len,
boot_cpu_data.x86_phys_bits - 1);
}
static int is_cpuid_PSE36(void)
@@ -5473,7 +5506,7 @@ static void mmu_destroy_caches(void)
int kvm_mmu_module_init(void)
{
kvm_mmu_clear_all_pte_masks();
kvm_mmu_reset_all_pte_masks();
pte_list_desc_cache = kmem_cache_create("pte_list_desc",
sizeof(struct pte_list_desc),

@@ -749,17 +749,21 @@ struct vcpu_vmx {
/*
* loaded_vmcs points to the VMCS currently used in this vcpu. For a
* non-nested (L1) guest, it always points to vmcs01. For a nested
* guest (L2), it points to a different VMCS.
* guest (L2), it points to a different VMCS. loaded_cpu_state points
* to the VMCS whose state is loaded into the CPU registers that only
* need to be switched when transitioning to/from the kernel; a NULL
* value indicates that host state is loaded.
*/
struct loaded_vmcs vmcs01;
struct loaded_vmcs *loaded_vmcs;
struct loaded_vmcs *loaded_cpu_state;
bool __launched; /* temporary, used in vmx_vcpu_run */
struct msr_autoload {
struct vmx_msrs guest;
struct vmx_msrs host;
} msr_autoload;
struct {
int loaded;
u16 fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
u16 ds_sel, es_sel;
@@ -2336,10 +2340,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
int i;
if (vmx->host_state.loaded)
if (vmx->loaded_cpu_state)
return;
vmx->host_state.loaded = 1;
vmx->loaded_cpu_state = vmx->loaded_vmcs;
/*
* Set host fs and gs selectors. Unfortunately, 22.2.3 does not
* allow segment selectors with cpl > 0 or ti == 1.
@@ -2390,11 +2395,14 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
if (!vmx->host_state.loaded)
if (!vmx->loaded_cpu_state)
return;
WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
vmx->loaded_cpu_state = NULL;
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu))
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
@@ -7582,7 +7590,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
/* CPL=0 must be checked manually. */
if (vmx_get_cpl(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
kvm_inject_gp(vcpu, 0);
return 1;
}
@@ -7646,7 +7654,7 @@ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
if (vmx_get_cpl(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
kvm_inject_gp(vcpu, 0);
return 0;
}
@@ -9944,8 +9952,8 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
return;
cpu = get_cpu();
vmx->loaded_vmcs = vmcs;
vmx_vcpu_put(vcpu);
vmx->loaded_vmcs = vmcs;
vmx_vcpu_load(vcpu, cpu);
vcpu->cpu = cpu;
put_cpu();

@@ -6194,20 +6194,22 @@ static void kvm_set_mmio_spte_mask(void)
* Set the reserved bits and the present bit of an paging-structure
* entry to generate page fault with PFER.RSV = 1.
*/
/* Mask the reserved physical address bits. */
mask = rsvd_bits(maxphyaddr, 51);
/*
* Mask the uppermost physical address bit, which would be reserved as
* long as the supported physical address width is less than 52.
*/
mask = 1ull << 51;
/* Set the present bit. */
mask |= 1ull;
#ifdef CONFIG_X86_64
/*
* If reserved bit is not supported, clear the present bit to disable
* mmio page fault.
*/
if (maxphyaddr == 52)
if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
mask &= ~1ull;
#endif
kvm_mmu_set_mmio_spte_mask(mask, mask);
}

@@ -425,14 +425,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
trace_xen_mmu_set_pte_atomic(ptep, pte);
set_64bit((u64 *)ptep, native_pte_val(pte));
__xen_set_pte(ptep, pte);
}
static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
trace_xen_mmu_pte_clear(mm, addr, ptep);
if (!xen_batched_set_pte(ptep, native_make_pte(0)))
native_pte_clear(mm, addr, ptep);
__xen_set_pte(ptep, native_make_pte(0));
}
static void xen_pmd_clear(pmd_t *pmdp)
@@ -1543,7 +1542,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
pte_val_ma(pte));
#endif
native_set_pte(ptep, pte);
__xen_set_pte(ptep, pte);
}
/* Early in boot, while setting up the initial pagetable, assume

@@ -156,7 +156,7 @@ out:
unsigned int bvec_nr_vecs(unsigned short idx)
{
return bvec_slabs[idx].nr_vecs;
return bvec_slabs[--idx].nr_vecs;
}
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)

@@ -4741,12 +4741,13 @@ USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct cfq_data *cfqd = e->elevator_data; \
unsigned int __data; \
unsigned int __data, __min = (MIN), __max = (MAX); \
\
cfq_var_store(&__data, (page)); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
__data = (MAX); \
if (__data < __min) \
__data = __min; \
else if (__data > __max) \
__data = __max; \
if (__CONV) \
*(__PTR) = (u64)__data * NSEC_PER_MSEC; \
else \
@@ -4775,12 +4776,13 @@ STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX,
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct cfq_data *cfqd = e->elevator_data; \
unsigned int __data; \
unsigned int __data, __min = (MIN), __max = (MAX); \
\
cfq_var_store(&__data, (page)); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
__data = (MAX); \
if (__data < __min) \
__data = __min; \
else if (__data > __max) \
__data = __max; \
*(__PTR) = (u64)__data * NSEC_PER_USEC; \
return count; \
}

@@ -1599,7 +1599,8 @@ static int acpi_add_single_object(struct acpi_device **child,
* Note this must be done before the get power-/wakeup_dev-flags calls.
*/
if (type == ACPI_BUS_TYPE_DEVICE)
acpi_bus_get_status(device);
if (acpi_bus_get_status(device) < 0)
acpi_set_device_status(device, 0);
acpi_bus_get_power_flags(device);
acpi_bus_get_wakeup_device_flags(device);
@@ -1677,7 +1678,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
* acpi_add_single_object updates this once we've an acpi_device
* so that acpi_bus_get_status' quirk handling can be used.
*/
*sta = 0;
*sta = ACPI_STA_DEFAULT;
break;
case ACPI_TYPE_PROCESSOR:
*type = ACPI_BUS_TYPE_PROCESSOR;

@@ -1522,6 +1522,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
"pclk_pmu_src",
"fclk_cm0s_src_pmu",
"clk_timer_src_pmu",
"pclk_rkpwm_pmu",
};
static void __init rk3399_clk_init(struct device_node *np)

@@ -134,6 +134,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
msleep(1);
}
if (ucode) {
ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
}
return ret;
}

@@ -172,6 +172,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_SMC,
AMDGPU_UCODE_ID_UVD,
AMDGPU_UCODE_ID_VCE,
AMDGPU_UCODE_ID_VCN,
AMDGPU_UCODE_ID_MAXIMUM,
};
@@ -204,6 +205,9 @@ struct amdgpu_firmware_info {
void *kaddr;
/* ucode_size_bytes */
uint32_t ucode_size;
/* starting tmr mc address */
uint32_t tmr_mc_addr_lo;
uint32_t tmr_mc_addr_hi;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);

@@ -93,9 +93,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
version_major, version_minor, family_id);
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+ AMDGPU_VCN_SESSION_SIZE * 40;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
&adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
@@ -191,11 +192,13 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
unsigned offset;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr += le32_to_cpu(hdr->ucode_size_bytes);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr += le32_to_cpu(hdr->ucode_size_bytes);
}
memset_io(ptr, 0, size);
}

@@ -3113,7 +3113,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
/* wait for RLC_SAFE_MODE */
for (i = 0; i < adev->usec_timeout; i++) {
if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}

@@ -78,6 +78,9 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
case AMDGPU_UCODE_ID_VCE:
*type = GFX_FW_TYPE_VCE;
break;
case AMDGPU_UCODE_ID_VCN:
*type = GFX_FW_TYPE_VCN;
break;
case AMDGPU_UCODE_ID_MAXIMUM:
default:
return -EINVAL;

@@ -91,6 +91,16 @@ static int vcn_v1_0_sw_init(void *handle)
if (r)
return r;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
DRM_INFO("PSP loading VCN firmware\n");
}
r = amdgpu_vcn_resume(adev);
if (r)
return r;
@@ -248,26 +258,38 @@ static int vcn_v1_0_resume(void *handle)
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
uint32_t offset;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
offset = 0;
} else {
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
offset = size;
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.gpu_addr + size));
lower_32_bits(adev->vcn.gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.gpu_addr + size));
upper_32_bits(adev->vcn.gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));

@@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
{ ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },

@@ -122,6 +122,9 @@ static const struct edid_quirk {
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
{ "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
{ "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },

@@ -128,9 +128,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
kfree(rsc);
pm_runtime_forbid(&platdev->dev);
pm_runtime_set_active(&platdev->dev);
pm_runtime_enable(&platdev->dev);
pm_runtime_no_callbacks(&platdev->dev);
return platdev;

@@ -75,7 +75,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
lspcon_mode_name(mode));
wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode ||
current_mode == DRM_LSPCON_MODE_INVALID, 100);
current_mode == DRM_LSPCON_MODE_INVALID, 400);
if (current_mode != mode)
DRM_DEBUG_KMS("LSPCON mode hasn't settled\n");

@@ -528,6 +528,7 @@
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e

@@ -99,6 +99,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },

@@ -146,7 +146,7 @@ int node_affinity_init(void)
while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
node = pcibus_to_node(dev->bus);
if (node < 0)
node = numa_node_id();
goto out;
hfi1_per_node_cntr[node]++;
}
@@ -154,6 +154,18 @@ int node_affinity_init(void)
}
return 0;
out:
/*
* Invalid PCI NUMA node information found, note it, and populate
* our database 1:1.
*/
pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n");
pr_err("HFI: System BIOS may need to be upgraded\n");
for (node = 0; node < node_affinity.num_possible_nodes; node++)
hfi1_per_node_cntr[node] = 1;
return 0;
}
void node_affinity_destroy(void)
@@ -227,8 +239,14 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
const struct cpumask *local_mask;
int curr_cpu, possible, i;
if (node < 0)
node = numa_node_id();
/*
* If the BIOS does not have the NUMA node information set, select
* NUMA 0 so we get consistent performance.
*/
if (node < 0) {
dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
node = 0;
}
dd->node = node;
local_mask = cpumask_of_node(dd->node);

@@ -35,7 +35,7 @@
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
{
return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
}
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)

@@ -114,7 +114,10 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
base) ?
-ENOMEM :
0;
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)

@@ -480,11 +480,19 @@ EXPORT_SYMBOL(input_inject_event);
*/
void input_alloc_absinfo(struct input_dev *dev)
{
if (!dev->absinfo)
dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo),
GFP_KERNEL);
if (dev->absinfo)
return;
WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__);
dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
if (!dev->absinfo) {
dev_err(dev->dev.parent ?: &dev->dev,
"%s: unable to allocate memory\n", __func__);
/*
* We will handle this allocation failure in
* input_register_device() when we refuse to register input
* device with ABS bits but without absinfo.
*/
}
}
EXPORT_SYMBOL(input_alloc_absinfo);

@@ -529,7 +529,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
pte_ready:
iopte = iopte_offset(iopgd, da);
*pt_dma = virt_to_phys(iopte);
*pt_dma = iopgd_page_paddr(iopgd);
dev_vdbg(obj->dev,
"%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
__func__, da, iopgd, *iopgd, iopte, *iopte);
@@ -717,7 +717,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
}
bytes *= nent;
memset(iopte, 0, nent * sizeof(*iopte));
pt_dma = virt_to_phys(iopte);
pt_dma = iopgd_page_paddr(iopgd);
flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
/*

@@ -217,6 +217,7 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
return 0;
}
#ifdef CONFIG_SMP
static void bcm7038_l1_cpu_offline(struct irq_data *d)
{
struct cpumask *mask = irq_data_get_affinity_mask(d);
@@ -241,6 +242,7 @@ static void bcm7038_l1_cpu_offline(struct irq_data *d)
}
irq_set_affinity_locked(d, &new_affinity, false);
}
#endif
static int __init bcm7038_l1_init_one(struct device_node *dn,
unsigned int idx,
@@ -293,7 +295,9 @@ static struct irq_chip bcm7038_l1_irq_chip = {
.irq_mask = bcm7038_l1_mask,
.irq_unmask = bcm7038_l1_unmask,
.irq_set_affinity = bcm7038_l1_set_affinity,
#ifdef CONFIG_SMP
.irq_cpu_offline = bcm7038_l1_cpu_offline,
#endif
};
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,

@@ -190,7 +190,6 @@ void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
WARN_ON(off + nr_pages != bio->bi_vcnt);
bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
for (i = off; i < nr_pages + off; i++) {
bv = bio->bi_io_vec[i];
mempool_free(bv.bv_page, pblk->page_bio_pool);

@@ -33,6 +33,10 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
bio_endio(original_bio);
}
if (c_ctx->nr_padded)
pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
c_ctx->nr_padded);
#ifdef CONFIG_NVM_DEBUG
atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
#endif
@@ -521,7 +525,8 @@ static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
struct bio *bio = rqd->bio;
if (c_ctx->nr_padded)
pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
c_ctx->nr_padded);
}
static int pblk_submit_write(struct pblk *pblk)

@@ -484,6 +484,8 @@ static int run_complete_job(struct kcopyd_job *job)
if (atomic_dec_and_test(&kc->nr_jobs))
wake_up(&kc->destroyq);
cond_resched();
return 0;
}

@@ -714,6 +714,7 @@ sm501_create_subdev(struct sm501_devdata *sm, char *name,
smdev->pdev.name = name;
smdev->pdev.id = sm->pdev_id;
smdev->pdev.dev.parent = sm->dev;
smdev->pdev.dev.coherent_dma_mask = 0xffffffff;
if (res_count) {
smdev->pdev.resource = (struct resource *)(smdev+1);

@@ -185,6 +185,9 @@ struct bcmgenet_mib_counters {
#define UMAC_MAC1 0x010
#define UMAC_MAX_FRAME_LEN 0x014
#define UMAC_MODE 0x44
#define MODE_LINK_STATUS (1 << 5)
#define UMAC_EEE_CTRL 0x064
#define EN_LPI_RX_PAUSE (1 << 0)
#define EN_LPI_TX_PFC (1 << 1)

@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
struct fixed_phy_status *status)
{
if (dev && dev->phydev && status)
status->link = dev->phydev->link;
struct bcmgenet_priv *priv;
u32 reg;
if (dev && dev->phydev && status) {
priv = netdev_priv(dev);
reg = bcmgenet_umac_readl(priv, UMAC_MODE);
status->link = !!(reg & MODE_LINK_STATUS);
}
return 0;
}

@@ -1884,14 +1884,17 @@ static void macb_reset_hw(struct macb *bp)
{
struct macb_queue *queue;
unsigned int q;
u32 ctrl = macb_readl(bp, NCR);
/* Disable RX and TX (XXX: Should we halt the transmission
* more gracefully?)
*/
macb_writel(bp, NCR, 0);
ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
/* Clear the stats registers (XXX: Update stats first?) */
macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
ctrl |= MACB_BIT(CLRSTAT);
macb_writel(bp, NCR, ctrl);
/* Clear all status flags */
macb_writel(bp, TSR, -1);
@@ -2070,7 +2073,7 @@ static void macb_init_hw(struct macb *bp)
}
/* Enable TX and RX */
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
}
/* The hash address register is 64 bits long and takes up two

@@ -3105,7 +3105,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
#define HCLGE_FUNC_NUMBER_PER_DESC 6
int i, j;
for (i = 0; i < HCLGE_DESC_NUMBER; i++)
for (i = 1; i < HCLGE_DESC_NUMBER; i++)
for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
if (desc[i].data[j])
return false;

@@ -187,6 +187,8 @@ int hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return 0;
phydev->supported &= ~SUPPORTED_FIBRE;
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
PHY_INTERFACE_MODE_SGMII);

@@ -395,6 +395,8 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
/* spectrum_kvdl.c */
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,

@@ -5131,6 +5131,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_vr_put(vr);
}
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev)
{
struct mlxsw_sp_rif *rif;
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!rif)
return;
mlxsw_sp_rif_destroy(rif);
}
static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)

@@ -140,6 +140,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
}
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
void *data)
{
struct mlxsw_sp *mlxsw_sp = data;
mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
return 0;
}
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev)
{
mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
netdev_walk_all_upper_dev_rcu(dev,
mlxsw_sp_bridge_device_upper_rif_destroy,
mlxsw_sp);
}
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev)
@@ -176,6 +194,8 @@ static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
struct mlxsw_sp_bridge_device *bridge_device)
{
mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
bridge_device->dev);
list_del(&bridge_device->list);
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;

@@ -227,29 +227,16 @@ done:
spin_unlock_bh(&nn->reconfig_lock);
}
/**
* nfp_net_reconfig() - Reconfigure the firmware
* @nn: NFP Net device to reconfigure
* @update: The value for the update field in the BAR config
*
* Write the update word to the BAR and ping the reconfig queue. The
* poll until the firmware has acknowledged the update by zeroing the
* update word.
*
* Return: Negative errno on error, 0 on success
*/
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
{
bool cancelled_timer = false;
u32 pre_posted_requests;
int ret;
spin_lock_bh(&nn->reconfig_lock);
nn->reconfig_sync_present = true;
if (nn->reconfig_timer_active) {
del_timer(&nn->reconfig_timer);
nn->reconfig_timer_active = false;
cancelled_timer = true;
}
@@ -258,14 +245,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
spin_unlock_bh(&nn->reconfig_lock);
if (cancelled_timer)
if (cancelled_timer) {
del_timer_sync(&nn->reconfig_timer);
nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
}
/* Run the posted reconfigs which were issued before we started */
if (pre_posted_requests) {
nfp_net_reconfig_start(nn, pre_posted_requests);
nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
}
}
static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
{
nfp_net_reconfig_sync_enter(nn);
spin_lock_bh(&nn->reconfig_lock);
nn->reconfig_sync_present = false;
spin_unlock_bh(&nn->reconfig_lock);
}
/**
* nfp_net_reconfig() - Reconfigure the firmware
* @nn: NFP Net device to reconfigure
* @update: The value for the update field in the BAR config
*
* Write the update word to the BAR and ping the reconfig queue. The
* poll until the firmware has acknowledged the update by zeroing the
* update word.
*
* Return: Negative errno on error, 0 on success
*/
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
int ret;
nfp_net_reconfig_sync_enter(nn);
nfp_net_reconfig_start(nn, update);
ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
@@ -3560,6 +3576,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
*/
void nfp_net_free(struct nfp_net *nn)
{
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
if (nn->xdp_prog)
bpf_prog_put(nn->xdp_prog);
@@ -3829,4 +3846,5 @@ void nfp_net_clean(struct nfp_net *nn)
return;
unregister_netdev(nn->dp.netdev);
nfp_net_reconfig_wait_posted(nn);
}

@@ -2386,26 +2386,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
return status;
}
static netdev_features_t qlge_fix_features(struct net_device *ndev,
netdev_features_t features)
{
int err;
/* Update the behavior of vlan accel in the adapter */
err = qlge_update_hw_vlan_features(ndev, features);
if (err)
return err;
return features;
}
static int qlge_set_features(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
int err;
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
/* Update the behavior of vlan accel in the adapter */
err = qlge_update_hw_vlan_features(ndev, features);
if (err)
return err;
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
qlge_vlan_mode(ndev, features);
}
return 0;
}
@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_set_mac_address = qlge_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = qlge_tx_timeout,
.ndo_fix_features = qlge_fix_features,
.ndo_set_features = qlge_set_features,
.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,

@@ -329,6 +329,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
{ PCI_VENDOR_ID_DLINK, 0x4300,
PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },

@@ -29,6 +29,7 @@
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
@@ -1895,11 +1896,15 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
{
struct net_device *ndev;
struct net_device_context *net_device_ctx;
struct device *pdev = vf_netdev->dev.parent;
struct netvsc_device *netvsc_dev;
if (vf_netdev->addr_len != ETH_ALEN)
return NOTIFY_DONE;
if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
return NOTIFY_DONE;
/*
* We will use the MAC address to locate the synthetic interface to
* associate with the VF interface. If we don't find a matching
@@ -2039,6 +2044,16 @@ static int netvsc_probe(struct hv_device *dev,
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
* all subchannels to show up, but that may not happen because
* netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
* -> ... -> device_add() -> ... -> __device_attach() can't get
* the device lock, so all the subchannels can't be processed --
* finally netvsc_subchan_work() hangs for ever.
*/
rtnl_lock();
if (nvdev->num_chn > 1)
schedule_work(&nvdev->subchan_work);
@@ -2057,7 +2072,6 @@ static int netvsc_probe(struct hv_device *dev,
else
net->max_mtu = ETH_DATA_LEN;
-rtnl_lock();
ret = register_netdevice(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");

@@ -1220,7 +1220,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
pcie->realio.start = PCIBIOS_MIN_IO;
pcie->realio.end = min_t(resource_size_t,
IO_SPACE_LIMIT,
-resource_size(&pcie->io));
+resource_size(&pcie->io) - 1);
} else
pcie->realio = pcie->io;
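
The mvebu hunk is an off-by-one fix: a struct resource describes an inclusive range, so a window of resource_size() bytes ends at size - 1, not at size. A stand-alone sketch of the corrected arithmetic (the constants are illustrative, not the driver's actual values):

#include <stdint.h>
#include <stdio.h>

#define IO_SPACE_LIMIT 0xffffffULL /* illustrative clamp, not the kernel constant */

int main(void)
{
        uint64_t io_size = 0x10000; /* stands in for resource_size(&pcie->io) */
        /* A resource end is inclusive, so the last valid offset is size - 1. */
        uint64_t end = (io_size - 1 < IO_SPACE_LIMIT) ? io_size - 1 : IO_SPACE_LIMIT;

        printf("end=0x%llx, covered bytes=%llu\n",
               (unsigned long long)end, (unsigned long long)(end + 1));
        return 0;
}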

@@ -487,6 +487,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
{ KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
{ KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
{ KE_END, 0},
};

@@ -17,6 +17,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <asm/intel_punit_ipc.h>

@@ -432,7 +432,6 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
struct meson_pwm_channel *channels)
{
struct device *dev = meson->chip.dev;
-struct device_node *np = dev->of_node;
struct clk_init_data init;
unsigned int i;
char name[255];
@@ -441,7 +440,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
for (i = 0; i < meson->chip.npwm; i++) {
struct meson_pwm_channel *channel = &channels[i];
-snprintf(name, sizeof(name), "%pOF#mux%u", np, i);
+snprintf(name, sizeof(name), "%s#mux%u", dev_name(dev), i);
init.name = name;
init.ops = &clk_mux_ops;

@@ -1778,6 +1778,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
struct dasd_eckd_private *private = device->private;
int i;
if (!private)
return;
dasd_alias_disconnect_device_from_lcu(device);
private->ned = NULL;
private->sneq = NULL;
@@ -2032,8 +2035,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device)
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
-cancel_work_sync(&device->reload_device);
-cancel_work_sync(&device->kick_validate);
+if (cancel_work_sync(&device->reload_device))
+dasd_put_device(device);
+if (cancel_work_sync(&device->kick_validate))
+dasd_put_device(device);
return 0;
};
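
The second dasd hunk pairs cancel_work_sync() with a reference drop: queuing those works takes a device reference that the worker normally releases, so when cancel_work_sync() returns true (the work was still pending and never ran) the canceller must drop that reference itself, or offline processing waits on a count that never reaches zero. A user-space sketch of the invariant (plain counters stand in for dasd_get_device()/dasd_put_device() and the work API; names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static int refcount;      /* stands in for the dasd_device reference count */
static bool work_pending; /* stands in for a queued struct work_struct */

static void queue_reload(void)
{
        refcount++; /* reference taken for the worker, as with dasd_get_device() */
        work_pending = true;
}

/* Like cancel_work_sync(): returns true when a still-pending work was cancelled. */
static bool cancel_reload(void)
{
        bool was_pending = work_pending;

        work_pending = false;
        return was_pending;
}

int main(void)
{
        queue_reload();
        if (cancel_reload())
                refcount--; /* drop the reference the cancelled worker never dropped */
        printf("refcount=%d (0 means balanced)\n", refcount);
        return 0;
}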

@@ -1030,8 +1030,10 @@ static int __init aic94xx_init(void)
aic94xx_transport_template =
sas_domain_attach_transport(&aic94xx_transport_functions);
-if (!aic94xx_transport_template)
+if (!aic94xx_transport_template) {
+err = -ENOMEM;
goto out_destroy_caches;
+}
err = pci_register_driver(&aic94xx_pci_driver);
if (err)

@@ -5453,11 +5453,11 @@ static int ni_E_init(struct comedi_device *dev,
/* Digital I/O (PFI) subdevice */
s = &dev->subdevices[NI_PFI_DIO_SUBDEV];
s->type = COMEDI_SUBD_DIO;
-s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->maxdata = 1;
if (devpriv->is_m_series) {
s->n_chan = 16;
s->insn_bits = ni_pfi_insn_bits;
+s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
ni_writew(dev, s->state, NI_M_PFI_DO_REG);
for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
@@ -5466,6 +5466,7 @@ static int ni_E_init(struct comedi_device *dev,
}
} else {
s->n_chan = 10;
+s->subdev_flags = SDF_INTERNAL;
}
s->insn_config = ni_pfi_insn_config;

@@ -775,6 +775,13 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return -EINVAL;
lock_sock(sk);
/* Ensure that the socket is not already bound */
if (self->ias_obj) {
err = -EINVAL;
goto out;
}
#ifdef CONFIG_IRDA_ULTRA
/* Special care for Ultra sockets */
if ((sk->sk_type == SOCK_DGRAM) &&
@@ -2012,7 +2019,11 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
err = -EINVAL;
goto out;
}
-irias_insert_object(ias_obj);
+/* Only insert newly allocated objects */
+if (free_ias)
+irias_insert_object(ias_obj);
kfree(ias_opt);
break;
case IRLMP_IAS_DEL:

@@ -518,6 +518,22 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
}
static int dwc3_core_ulpi_init(struct dwc3 *dwc)
{
int intf;
int ret = 0;
intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);
if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
(intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
dwc->hsphy_interface &&
!strncmp(dwc->hsphy_interface, "ulpi", 4)))
ret = dwc3_ulpi_init(dwc);
return ret;
}
/**
* dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
* @dwc: Pointer to our controller context structure
@@ -529,7 +545,6 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
static int dwc3_phy_setup(struct dwc3 *dwc)
{
u32 reg;
-int ret;
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
@@ -604,9 +619,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
}
/* FALLTHROUGH */
case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
-ret = dwc3_ulpi_init(dwc);
-if (ret)
-return ret;
/* FALLTHROUGH */
default:
break;
@@ -767,6 +779,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
}
static int dwc3_core_get_phy(struct dwc3 *dwc);
static int dwc3_core_ulpi_init(struct dwc3 *dwc);
/**
* dwc3_core_init - Low-level initialization of DWC3 Core
@@ -806,17 +819,27 @@ int dwc3_core_init(struct dwc3 *dwc)
dwc->maximum_speed = USB_SPEED_HIGH;
}
-ret = dwc3_core_get_phy(dwc);
+ret = dwc3_phy_setup(dwc);
if (ret)
goto err0;
-ret = dwc3_core_soft_reset(dwc);
-if (ret)
-goto err0;
+if (!dwc->ulpi_ready) {
+ret = dwc3_core_ulpi_init(dwc);
+if (ret)
+goto err0;
+dwc->ulpi_ready = true;
+}
-ret = dwc3_phy_setup(dwc);
+if (!dwc->phys_ready) {
+ret = dwc3_core_get_phy(dwc);
+if (ret)
+goto err0a;
+dwc->phys_ready = true;
+}
+ret = dwc3_core_soft_reset(dwc);
if (ret)
-goto err0;
+goto err0a;
dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
@@ -925,6 +948,9 @@ err1:
phy_exit(dwc->usb2_generic_phy);
phy_exit(dwc->usb3_generic_phy);
err0a:
dwc3_ulpi_exit(dwc);
err0:
return ret;
}
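
The core.c changes make dwc3_core_init() safe to run twice: it is called again on resume, and the new ulpi_ready/phys_ready flags turn ULPI registration and PHY lookup into one-time steps while the soft reset still runs every time. A minimal sketch of that once-only-init pattern (stub functions, not the dwc3 API):

#include <stdbool.h>
#include <stdio.h>

struct ctrl {
        bool ulpi_ready;
        bool phys_ready;
};

static int ulpi_init(void)  { puts("ulpi_init");  return 0; }
static int get_phys(void)   { puts("get_phys");   return 0; }
static int soft_reset(void) { puts("soft_reset"); return 0; }

/* Called at probe time and again at resume; the flags keep the
 * one-time steps from running twice. */
static int core_init(struct ctrl *c)
{
        int ret;

        if (!c->ulpi_ready) {
                ret = ulpi_init();
                if (ret)
                        return ret;
                c->ulpi_ready = true;
        }
        if (!c->phys_ready) {
                ret = get_phys();
                if (ret)
                        return ret;
                c->phys_ready = true;
        }
        return soft_reset(); /* still runs on every init */
}

int main(void)
{
        struct ctrl c = { false, false };

        core_init(&c); /* probe: all three steps */
        core_init(&c); /* resume: only soft_reset */
        return 0;
}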

@@ -900,7 +900,9 @@ struct dwc3_scratchpad_array {
* @usb3_phy: pointer to USB3 PHY
* @usb2_generic_phy: pointer to USB2 PHY
* @usb3_generic_phy: pointer to USB3 PHY
* @phys_ready: flag to indicate that PHYs are ready
* @ulpi: pointer to ulpi interface
* @ulpi_ready: flag to indicate that ULPI is initialized
* @isoch_delay: wValue from Set Isochronous Delay request;
* @u2sel: parameter from Set SEL request.
* @u2pel: parameter from Set SEL request.
@@ -1019,7 +1021,10 @@ struct dwc3 {
struct phy *usb2_generic_phy;
struct phy *usb3_generic_phy;
bool phys_ready;
struct ulpi *ulpi;
bool ulpi_ready;
void __iomem *regs;
size_t regs_size;

@@ -960,7 +960,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
list_for_each_entry_safe(node, n, &d->pending_list, node) {
struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
if (msg->iova <= vq_msg->iova &&
-msg->iova + msg->size - 1 > vq_msg->iova &&
+msg->iova + msg->size - 1 >= vq_msg->iova &&
vq_msg->type == VHOST_IOTLB_MISS) {
vhost_poll_queue(&node->vq->poll);
list_del(&node->node);

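The vhost change is an inclusive-bound fix: the last byte an IOTLB update covers is iova + size - 1, so a pending miss at exactly that address only matches with >=; the old strict > skipped it and the virtqueue waited forever. A tiny stand-alone sketch of the corrected overlap test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when [iova, iova + size - 1] covers addr, mirroring the fixed test. */
static bool update_covers_miss(uint64_t iova, uint64_t size, uint64_t addr)
{
        return iova <= addr && iova + size - 1 >= addr;
}

int main(void)
{
        /* A one-byte update at exactly the missed address: the old strict '>'
         * comparison returned false here and the queue was never woken. */
        printf("covers=%d\n", update_covers_miss(0x1000, 1, 0x1000));
        return 0;
}
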
@@ -122,6 +122,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtqueue *vq;
u16 num;
int err;
u64 q_pfn;
/* Select the queue we're interested in */
iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
@@ -141,9 +142,17 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!vq)
return ERR_PTR(-ENOMEM);
q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
if (q_pfn >> 32) {
dev_err(&vp_dev->pci_dev->dev,
"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
0x1ULL << (32 + PAGE_SHIFT - 30));
err = -E2BIG;
goto out_del_vq;
}
/* activate the queue */
-iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
-vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
@@ -160,6 +169,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
out_deactivate:
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
out_del_vq:
vring_del_virtqueue(vq);
return ERR_PTR(err);
}
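
The legacy QUEUE_PFN register is only 32 bits wide, so a descriptor ring placed high enough in RAM would have had its page frame number silently truncated; the new q_pfn >> 32 test rejects the queue instead. A sketch of the check (PAGE_SHIFT fixed at 12 here purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumption: 4 KiB pages */

/* Writes the 32-bit PFN and returns 0, or returns -1 when the ring sits too
 * high for the 32-bit legacy register, mirroring the q_pfn >> 32 test above. */
static int legacy_queue_pfn(uint64_t desc_addr, uint32_t *pfn)
{
        uint64_t q_pfn = desc_addr >> PAGE_SHIFT;

        if (q_pfn >> 32)
                return -1;
        *pfn = (uint32_t)q_pfn;
        return 0;
}

int main(void)
{
        uint32_t pfn;

        printf("%d\n", legacy_queue_pfn(0x100000000ULL, &pfn)); /* fits: 0 */
        printf("%d\n", legacy_queue_pfn(1ULL << 45, &pfn));     /* too high: -1 */
        return 0;
}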

@@ -81,7 +81,7 @@ static void watch_target(struct xenbus_watch *watch,
static_max = new_target;
else
static_max >>= PAGE_SHIFT - 10;
-target_diff = xen_pv_domain() ? 0
+target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
: static_max - balloon_stats.target_pages;
}

@@ -588,6 +588,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
btrfs_rm_dev_replace_unblocked(fs_info);
/*
* Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
* update on-disk dev stats value during commit transaction
*/
atomic_inc(&tgt_device->dev_stats_ccnt);
/*
* this is again a consistent state where no dev_replace procedure
* is running, the target device is part of the filesystem, the

@@ -10757,7 +10757,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
/* Don't want to race with allocators so take the groups_sem */
down_write(&space_info->groups_sem);
spin_lock(&block_group->lock);
-if (block_group->reserved ||
+if (block_group->reserved || block_group->pinned ||
btrfs_block_group_used(&block_group->item) ||
block_group->ro ||
list_is_singular(&block_group->list)) {

@@ -1334,18 +1334,19 @@ static void __del_reloc_root(struct btrfs_root *root)
struct mapping_node *node = NULL;
struct reloc_control *rc = fs_info->reloc_ctl;
-spin_lock(&rc->reloc_root_tree.lock);
-rb_node = tree_search(&rc->reloc_root_tree.rb_root,
-root->node->start);
-if (rb_node) {
-node = rb_entry(rb_node, struct mapping_node, rb_node);
-rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+if (rc) {
+spin_lock(&rc->reloc_root_tree.lock);
+rb_node = tree_search(&rc->reloc_root_tree.rb_root,
+root->node->start);
+if (rb_node) {
+node = rb_entry(rb_node, struct mapping_node, rb_node);
+rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+}
+spin_unlock(&rc->reloc_root_tree.lock);
+if (!node)
+return;
+BUG_ON((struct btrfs_root *)node->data != root);
+}
-spin_unlock(&rc->reloc_root_tree.lock);
-if (!node)
-return;
-BUG_ON((struct btrfs_root *)node->data != root);
spin_lock(&fs_info->trans_lock);
list_del_init(&root->root_list);

@@ -6492,10 +6492,14 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
write_lock(&map_tree->map_tree.lock);
ret = add_extent_mapping(&map_tree->map_tree, em, 0);
write_unlock(&map_tree->map_tree.lock);
-BUG_ON(ret); /* Tree corruption */
+if (ret < 0) {
+btrfs_err(fs_info,
+"failed to add chunk map, start=%llu len=%llu: %d",
+em->start, em->len, ret);
+}
free_extent_map(em);
-return 0;
+return ret;
}
static void fill_device_from_item(struct extent_buffer *leaf,

@@ -289,6 +289,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
atomic_set(&totBufAllocCount, 0);
atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */
spin_lock(&GlobalMid_Lock);
GlobalMaxActiveXid = 0;
GlobalCurrentXid = 0;
spin_unlock(&GlobalMid_Lock);
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
@@ -301,6 +305,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
struct cifs_tcon,
tcon_list);
atomic_set(&tcon->num_smbs_sent, 0);
spin_lock(&tcon->stat_lock);
tcon->bytes_read = 0;
tcon->bytes_written = 0;
spin_unlock(&tcon->stat_lock);
if (server->ops->clear_stats)
server->ops->clear_stats(tcon);
}

@@ -211,6 +211,13 @@ smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *srvr)
if (clc_len == 4 + len + 1)
return 0;
/*
* Some windows servers (win2016) will pad also the final
* PDU in a compound to 8 bytes.
*/
if (((clc_len + 7) & ~7) == len)
return 0;
/*
* MacOS server pads after SMB2.1 write response with 3 bytes
* of junk. Other servers match RFC1001 len to actual

@@ -393,7 +393,7 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
pdu->hdr.smb2_buf_length = cpu_to_be32(total_len);
if (tcon != NULL) {
-#ifdef CONFIG_CIFS_STATS2
+#ifdef CONFIG_CIFS_STATS
uint16_t com_code = le16_to_cpu(smb2_command);
cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
#endif

@@ -291,7 +291,8 @@ void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry
spin_unlock(&dentry->d_lock);
name->name = p->name;
} else {
-memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
+memcpy(name->inline_name, dentry->d_iname,
+dentry->d_name.len + 1);
spin_unlock(&dentry->d_lock);
name->name = name->inline_name;
}
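
The dcache fix bounds the copy by what is valid rather than by buffer capacity: only d_name.len + 1 bytes of d_iname (the name plus its NUL) are meaningful, and copying the full DNAME_INLINE_LEN buffer reads bytes past the stored name. A trivial illustration of copying name plus NUL only (sizes illustrative):

#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32 /* illustrative; the kernel value is arch-dependent */

int main(void)
{
        char inline_name[DNAME_INLINE_LEN] = "a"; /* bytes past the NUL are not meaningful */
        char snapshot[DNAME_INLINE_LEN];
        size_t len = strlen(inline_name);

        /* Copy only the valid bytes (name plus NUL), not the whole buffer. */
        memcpy(snapshot, inline_name, len + 1);
        printf("%s\n", snapshot);
        return 0;
}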

@@ -2530,6 +2530,10 @@ static int f2fs_set_data_page_dirty(struct page *page)
if (!PageUptodate(page))
SetPageUptodate(page);
/* don't remain PG_checked flag which was set during GC */
if (is_cold_data(page))
clear_cold_data(page);
if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
register_inmem_page(inode, page);

@@ -225,7 +225,8 @@ static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
struct super_block *sb = inode->i_sb;
-const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
+struct msdos_sb_info *sbi = MSDOS_SB(sb);
+const int limit = sb->s_maxbytes >> sbi->cluster_bits;
struct fat_entry fatent;
struct fat_cache_id cid;
int nr;
@@ -234,6 +235,12 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
*fclus = 0;
*dclus = MSDOS_I(inode)->i_start;
if (!fat_valid_entry(sbi, *dclus)) {
fat_fs_error_ratelimit(sb,
"%s: invalid start cluster (i_pos %lld, start %08x)",
__func__, MSDOS_I(inode)->i_pos, *dclus);
return -EIO;
}
if (cluster == 0)
return 0;
@@ -250,9 +257,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
/* prevent the infinite loop of cluster chain */
if (*fclus > limit) {
fat_fs_error_ratelimit(sb,
-"%s: detected the cluster chain loop"
-" (i_pos %lld)", __func__,
-MSDOS_I(inode)->i_pos);
+"%s: detected the cluster chain loop (i_pos %lld)",
+__func__, MSDOS_I(inode)->i_pos);
nr = -EIO;
goto out;
}
@@ -262,9 +268,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
goto out;
else if (nr == FAT_ENT_FREE) {
fat_fs_error_ratelimit(sb,
-"%s: invalid cluster chain (i_pos %lld)",
-__func__,
-MSDOS_I(inode)->i_pos);
+"%s: invalid cluster chain (i_pos %lld)",
+__func__, MSDOS_I(inode)->i_pos);
nr = -EIO;
goto out;
} else if (nr == FAT_ENT_EOF) {

@@ -348,6 +348,11 @@ static inline void fatent_brelse(struct fat_entry *fatent)
fatent->fat_inode = NULL;
}
static inline bool fat_valid_entry(struct msdos_sb_info *sbi, int entry)
{
return FAT_START_ENT <= entry && entry < sbi->max_cluster;
}
extern void fat_ent_access_init(struct super_block *sb);
extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
int entry);

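fat_valid_entry() gives the call sites in fatent.c below a single definition of a sane cluster number: FAT entries 0 and 1 are reserved, so a valid entry lies in [FAT_START_ENT, max_cluster). A stand-alone version of the same predicate:

#include <stdbool.h>
#include <stdio.h>

#define FAT_START_ENT 2 /* clusters 0 and 1 are reserved in FAT */

struct sb_info { int max_cluster; };

/* Same predicate as the helper above: entry in [FAT_START_ENT, max_cluster). */
static bool fat_valid_entry(const struct sb_info *sbi, int entry)
{
        return FAT_START_ENT <= entry && entry < sbi->max_cluster;
}

int main(void)
{
        struct sb_info sbi = { 100 };

        printf("%d %d %d\n",
               fat_valid_entry(&sbi, 0),    /* 0: reserved */
               fat_valid_entry(&sbi, 2),    /* 1: first data cluster */
               fat_valid_entry(&sbi, 100)); /* 0: past the end */
        return 0;
}
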
@@ -23,7 +23,7 @@ static void fat12_ent_blocknr(struct super_block *sb, int entry,
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int bytes = entry + (entry >> 1);
-WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
+WARN_ON(!fat_valid_entry(sbi, entry));
*offset = bytes & (sb->s_blocksize - 1);
*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
@@ -33,7 +33,7 @@ static void fat_ent_blocknr(struct super_block *sb, int entry,
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int bytes = (entry << sbi->fatent_shift);
-WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
+WARN_ON(!fat_valid_entry(sbi, entry));
*offset = bytes & (sb->s_blocksize - 1);
*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
@@ -353,7 +353,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
int err, offset;
sector_t blocknr;
-if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
+if (!fat_valid_entry(sbi, entry)) {
fatent_brelse(fatent);
fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
return -EIO;

@@ -75,9 +75,10 @@ int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
if (!fd->bnode) {
if (!tree->root)
hfs_btree_inc_height(tree);
-fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
-if (IS_ERR(fd->bnode))
-return PTR_ERR(fd->bnode);
+node = hfs_bnode_find(tree, tree->leaf_head);
+if (IS_ERR(node))
+return PTR_ERR(node);
+fd->bnode = node;
fd->record = -1;
}
new_node = NULL;

@@ -78,13 +78,13 @@ again:
cpu_to_be32(HFSP_HARDLINK_TYPE) &&
entry.file.user_info.fdCreator ==
cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
+HFSPLUS_SB(sb)->hidden_dir &&
(entry.file.create_date ==
HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
create_date ||
entry.file.create_date ==
HFSPLUS_I(d_inode(sb->s_root))->
-create_date) &&
-HFSPLUS_SB(sb)->hidden_dir) {
+create_date)) {
struct qstr str;
char name[32];

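The reordering in the hfsplus condition works because && short-circuits left to right: HFSPLUS_SB(sb)->hidden_dir is now tested before HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir) dereferences it, where the old code dereferenced first and checked for NULL last. A minimal illustration of the same guard:

#include <stdio.h>

struct inode { long create_date; };

int main(void)
{
        const struct inode *hidden_dir = NULL; /* a volume without a hidden directory */
        long date = 42;

        /* && short-circuits: with the NULL test first, the dereference on the
         * right is never evaluated; the old ordering dereferenced first. */
        if (hidden_dir != NULL && hidden_dir->create_date == date)
                puts("hardlink");
        else
                puts("not a hardlink, and no NULL dereference");
        return 0;
}
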
@@ -524,8 +524,10 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
goto out_put_root;
if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
hfs_find_exit(&fd);
-if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
+if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
+err = -EINVAL;
goto out_put_root;
+}
inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
if (IS_ERR(inode)) {
err = PTR_ERR(inode);

@@ -7497,7 +7497,7 @@ static int nfs4_sp4_select_mode(struct nfs_client *clp,
}
out:
clp->cl_sp4_flags = flags;
-return 0;
+return ret;
}
struct nfs41_exchange_id_data {

@@ -384,8 +384,10 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
phdr->p_flags = PF_R|PF_W|PF_X;
phdr->p_offset = kc_vaddr_to_offset(m->addr) + dataoff;
phdr->p_vaddr = (size_t)m->addr;
-if (m->type == KCORE_RAM || m->type == KCORE_TEXT)
+if (m->type == KCORE_RAM)
phdr->p_paddr = __pa(m->addr);
+else if (m->type == KCORE_TEXT)
+phdr->p_paddr = __pa_symbol(m->addr);
else
phdr->p_paddr = (elf_addr_t)-1;
phdr->p_filesz = phdr->p_memsz = m->size;

@@ -271,7 +271,7 @@ struct reiserfs_journal_list {
struct mutex j_commit_mutex;
unsigned int j_trans_id;
-time_t j_timestamp;
+time64_t j_timestamp; /* write-only but useful for crash dump analysis */
struct reiserfs_list_bitmap *j_list_bitmap;
struct buffer_head *j_commit_bh; /* commit buffer head */
struct reiserfs_journal_cnode *j_realblock;

@@ -3062,4 +3062,6 @@
#define PCI_VENDOR_ID_OCZ 0x1b85
#define PCI_VENDOR_ID_NCUBE 0x10ff
#endif /* _LINUX_PCI_IDS_H */

@@ -2064,6 +2064,10 @@ int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
#define MODULE_ALIAS_TCP_ULP(name) \
__MODULE_INFO(alias, alias_userspace, name); \
__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
/* Call BPF_SOCK_OPS program that returns an int. If the return value
* is < 0, then the BPF op failed (for example if the loaded BPF
* program does not support the chosen operation or there is no BPF

@@ -65,7 +65,7 @@
/* keyctl structures */
struct keyctl_dh_params {
-__s32 private;
+__s32 dh_private;
__s32 prime;
__s32 base;
};

@@ -1361,7 +1361,9 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
return -ENOMEM;
atomic_set(&sig->count, 1);
spin_lock_irq(&current->sighand->siglock);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
spin_unlock_irq(&current->sighand->siglock);
return 0;
}

@@ -248,13 +248,16 @@ int device_private_entry_fault(struct vm_area_struct *vma,
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
-static void pgmap_radix_release(struct resource *res)
+static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff)
{
unsigned long pgoff, order;
mutex_lock(&pgmap_lock);
-foreach_order_pgoff(res, order, pgoff)
+foreach_order_pgoff(res, order, pgoff) {
+if (pgoff >= end_pgoff)
+break;
radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
+}
mutex_unlock(&pgmap_lock);
synchronize_rcu();
@@ -309,7 +312,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
mem_hotplug_done();
untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
-pgmap_radix_release(res);
+pgmap_radix_release(res, -1);
dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
"%s: failed to free all reserved pages\n", __func__);
}
@@ -459,7 +462,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
err_pfn_remap:
err_radix:
-pgmap_radix_release(res);
+pgmap_radix_release(res, pgoff);
devres_free(page_map);
return ERR_PTR(error);
}

@@ -1370,6 +1370,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
update_dl_entity(dl_se, pi_se);
} else if (flags & ENQUEUE_REPLENISH) {
replenish_dl_entity(dl_se, pi_se);
} else if ((flags & ENQUEUE_RESTORE) &&
dl_time_before(dl_se->deadline,
rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
setup_new_dl_entity(dl_se);
}
__enqueue_dl_entity(dl_se);
@@ -2266,13 +2270,6 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
return;
}
-/*
- * If p is boosted we already updated its params in
- * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
- * p's deadline being now already after rq_clock(rq).
- */
-if (dl_time_before(p->dl.deadline, rq_clock(rq)))
-setup_new_dl_entity(&p->dl);
if (rq->curr != p) {
#ifdef CONFIG_SMP

@@ -322,9 +322,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
limit++;
if (is_on_stack)
-pr_warn("object is on stack, but not annotated\n");
+pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
+task_stack_page(current));
else
-pr_warn("object is not on stack, but annotated\n");
+pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
+task_stack_page(current));
WARN_ON(1);
}

@@ -71,8 +71,12 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
goto out;
}
-/* Careful about overflows. Len == 0 means "as much as possible" */
-endbyte = offset + len;
+/*
+ * Careful about overflows. Len == 0 means "as much as possible". Use
+ * unsigned math because signed overflows are undefined and UBSan
+ * complains.
+ */
+endbyte = (u64)offset + (u64)len;
if (!len || endbyte < len)
endbyte = -1;
else

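The fadvise change avoids undefined behaviour rather than a wrong value as such: signed overflow in offset + len is UB in C, so the sum is done in u64, where wraparound is well defined and the endbyte < len test can detect it reliably. A small demonstration of the unsigned variant:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t offset = INT64_MAX - 10; /* large but valid loff_t-style values */
        int64_t len = 100;

        /* Unsigned addition wraps predictably; the same sum done in int64_t
         * would be signed overflow, which is undefined behaviour. */
        uint64_t endbyte = (uint64_t)offset + (uint64_t)len;

        if (!len || endbyte < (uint64_t)len)
                endbyte = UINT64_MAX; /* "as much as possible", like endbyte = -1 */

        printf("endbyte=0x%llx\n", (unsigned long long)endbyte);
        return 0;
}
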
@@ -199,15 +199,14 @@ static void p9_mux_poll_stop(struct p9_conn *m)
static void p9_conn_cancel(struct p9_conn *m, int err)
{
struct p9_req_t *req, *rtmp;
-unsigned long flags;
LIST_HEAD(cancel_list);
p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
-spin_lock_irqsave(&m->client->lock, flags);
+spin_lock(&m->client->lock);
if (m->err) {
-spin_unlock_irqrestore(&m->client->lock, flags);
+spin_unlock(&m->client->lock);
return;
}
@@ -219,7 +218,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
list_move(&req->req_list, &cancel_list);
}
-spin_unlock_irqrestore(&m->client->lock, flags);
list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
@@ -228,6 +226,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
req->t_err = err;
p9_client_cb(m->client, req, REQ_STATUS_ERROR);
}
+spin_unlock(&m->client->lock);
}
static int
@@ -385,8 +384,9 @@ static void p9_read_work(struct work_struct *work)
if (m->req->status != REQ_STATUS_ERROR)
status = REQ_STATUS_RCVD;
list_del(&m->req->req_list);
-spin_unlock(&m->client->lock);
+/* update req->status while holding client->lock */
p9_client_cb(m->client, m->req, status);
+spin_unlock(&m->client->lock);
m->rc.sdata = NULL;
m->rc.offset = 0;
m->rc.capacity = 0;

@@ -571,7 +571,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
if (IS_ERR(chan->vq)) {
err = PTR_ERR(chan->vq);
-goto out_free_vq;
+goto out_free_chan;
}
chan->vq->vdev->priv = chan;
spin_lock_init(&chan->lock);
@@ -624,6 +624,7 @@ out_free_tag:
kfree(tag);
out_free_vq:
vdev->config->del_vqs(vdev);
+out_free_chan:
kfree(chan);
fail:
return err;

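The trans_virtio fix is goto-unwind hygiene: when virtio_find_single_vq() fails there is no vq yet, so jumping to out_free_vq would call del_vqs() on state that was never set up; the new out_free_chan label releases only the channel. Error labels work when they run in reverse order of acquisition, as in this sketch (stubbed and illustrative, not the 9p code):

#include <stdio.h>
#include <stdlib.h>

struct chan { int placeholder; };

static int find_vq(struct chan *c)
{
        (void)c;
        return -1; /* simulate virtio_find_single_vq() failing */
}

static int probe(void)
{
        struct chan *chan;
        int err;

        chan = malloc(sizeof(*chan));
        if (!chan)
                return -12; /* -ENOMEM */

        err = find_vq(chan);
        if (err)
                goto out_free_chan; /* no vq exists yet: skip the del_vqs step */

        /* ... later failures would jump to out_free_vq instead ... */
        return 0;

/* out_free_vq: del_vqs() would run here, just above out_free_chan */
out_free_chan:
        free(chan);
        return err;
}

int main(void)
{
        printf("probe: %d\n", probe());
        return 0;
}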