Merge "Merge android-4.14.117 (74196c0) into msm-4.14"

Author: qctecmdr, 6 years ago. Committed by Gerrit - the friendly Code Review server.
commit 23a9348bc0
Changed files (diffstat count in parentheses):

  1. Documentation/admin-guide/kernel-parameters.txt (6)
  2. Documentation/driver-api/usb/power-management.rst (14)
  3. Documentation/networking/ip-sysctl.txt (1)
  4. Makefile (2)
  5. arch/arm/boot/compressed/head.S (16)
  6. arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts (2)
  7. arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi (4)
  8. arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi (1)
  9. arch/arm/boot/dts/rk3288.dtsi (12)
  10. arch/arm/mach-iop13xx/setup.c (8)
  11. arch/arm/mach-iop13xx/tpmi.c (10)
  12. arch/arm/plat-iop/adma.c (6)
  13. arch/arm/plat-orion/common.c (4)
  14. arch/arm64/include/asm/traps.h (6)
  15. arch/arm64/kernel/armv8_deprecated.c (8)
  16. arch/arm64/kernel/cpufeature.c (2)
  17. arch/arm64/kernel/traps.c (22)
  18. arch/mips/kernel/scall64-o32.S (2)
  19. arch/powerpc/kernel/kvm.c (7)
  20. arch/powerpc/mm/slice.c (10)
  21. arch/s390/include/asm/elf.h (11)
  22. arch/sh/boards/of-generic.c (4)
  23. arch/x86/Makefile (9)
  24. arch/x86/configs/x86_64_cuttlefish_defconfig (1)
  25. arch/x86/events/amd/core.c (111)
  26. arch/x86/kernel/cpu/mcheck/mce-severity.c (5)
  27. drivers/android/binder_alloc.c (18)
  28. drivers/ata/libata-zpodd.c (34)
  29. drivers/block/loop.c (58)
  30. drivers/block/loop.h (1)
  31. drivers/block/xsysace.c (2)
  32. drivers/block/zram/zram_drv.c (5)
  33. drivers/bluetooth/btusb.c (2)
  34. drivers/clk/x86/clk-pmc-atom.c (14)
  35. drivers/dma/sh/rcar-dmac.c (4)
  36. drivers/gpio/gpio-aspeed.c (2)
  37. drivers/gpio/gpiolib-of.c (8)
  38. drivers/gpu/drm/i915/intel_fbdev.c (12)
  39. drivers/gpu/drm/meson/meson_drv.c (9)
  40. drivers/gpu/drm/vc4/vc4_crtc.c (2)
  41. drivers/hid/hid-debug.c (5)
  42. drivers/hid/hid-input.c (1)
  43. drivers/hid/hid-logitech-hidpp.c (8)
  44. drivers/hwtracing/intel_th/gth.c (2)
  45. drivers/i2c/busses/i2c-stm32f7.c (2)
  46. drivers/infiniband/core/security.c (11)
  47. drivers/infiniband/core/verbs.c (41)
  48. drivers/infiniband/sw/rdmavt/mr.c (17)
  49. drivers/infiniband/ulp/srpt/ib_srpt.c (11)
  50. drivers/input/keyboard/snvs_pwrkey.c (6)
  51. drivers/input/rmi4/rmi_f11.c (2)
  52. drivers/input/touchscreen/stmfts.c (30)
  53. drivers/iommu/amd_iommu.c (9)
  54. drivers/iommu/amd_iommu_init.c (7)
  55. drivers/iommu/amd_iommu_types.h (2)
  56. drivers/leds/leds-pca9532.c (8)
  57. drivers/md/dm-integrity.c (6)
  58. drivers/media/i2c/ov7670.c (16)
  59. drivers/net/bonding/bond_sysfs_slave.c (4)
  60. drivers/net/dsa/bcm_sf2_cfp.c (6)
  61. drivers/net/ethernet/broadcom/bnxt/bnxt.c (10)
  62. drivers/net/ethernet/cadence/macb_main.c (10)
  63. drivers/net/ethernet/hisilicon/hns/hnae.c (4)
  64. drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c (33)
  65. drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c (2)
  66. drivers/net/ethernet/hisilicon/hns/hns_enet.c (12)
  67. drivers/net/ethernet/ibm/ehea/ehea_main.c (1)
  68. drivers/net/ethernet/intel/fm10k/fm10k_main.c (2)
  69. drivers/net/ethernet/intel/igb/e1000_defines.h (2)
  70. drivers/net/ethernet/intel/igb/igb_main.c (57)
  71. drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c (2)
  72. drivers/net/ethernet/mellanox/mlx5/core/eswitch.c (6)
  73. drivers/net/ethernet/mellanox/mlx5/core/port.c (4)
  74. drivers/net/ethernet/mellanox/mlxsw/spectrum.c (4)
  75. drivers/net/ethernet/micrel/ks8851.c (36)
  76. drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c (2)
  77. drivers/net/ethernet/stmicro/stmmac/enh_desc.c (12)
  78. drivers/net/ethernet/stmicro/stmmac/norm_desc.c (2)
  79. drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (18)
  80. drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c (8)
  81. drivers/net/ethernet/ti/netcp_ethss.c (8)
  82. drivers/net/ethernet/xilinx/xilinx_axienet_main.c (2)
  83. drivers/net/phy/marvell.c (6)
  84. drivers/net/slip/slhc.c (2)
  85. drivers/net/team/team.c (6)
  86. drivers/net/usb/ipheth.c (33)
  87. drivers/nvme/target/core.c (20)
  88. drivers/platform/x86/pmc_atom.c (21)
  89. drivers/rtc/rtc-da9063.c (7)
  90. drivers/rtc/rtc-sh.c (2)
  91. drivers/s390/net/qeth_l3_main.c (4)
  92. drivers/s390/scsi/zfcp_fc.c (21)
  93. drivers/scsi/qla4xxx/ql4_os.c (2)
  94. drivers/scsi/scsi_devinfo.c (1)
  95. drivers/scsi/scsi_dh.c (1)
  96. drivers/scsi/storvsc_drv.c (13)
  97. drivers/staging/iio/addac/adt7316.c (22)
  98. drivers/staging/rtl8188eu/core/rtw_xmit.c (9)
  99. drivers/staging/rtl8188eu/include/rtw_xmit.h (2)
  100. drivers/staging/rtl8712/rtl8712_cmd.c (10)
Some files were not shown because too many files have changed in this diff.

@ -2689,7 +2689,11 @@
nosmt=force: Force disable SMT, cannot be undone
via the sysfs control file.
nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds
check bypass). With this option data leaks are possible
in the system.
nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
(indirect branch prediction) vulnerability. System may
allow data leaks with this option, which is equivalent
to spectre_v2=off.

@ -370,11 +370,15 @@ autosuspend the interface's device. When the usage counter is = 0
then the interface is considered to be idle, and the kernel may
autosuspend the device.
Drivers need not be concerned about balancing changes to the usage
counter; the USB core will undo any remaining "get"s when a driver
is unbound from its interface. As a corollary, drivers must not call
any of the ``usb_autopm_*`` functions after their ``disconnect``
routine has returned.
Drivers must be careful to balance their overall changes to the usage
counter. Unbalanced "get"s will remain in effect when a driver is
unbound from its interface, preventing the device from going into
runtime suspend should the interface be bound to a driver again. On
the other hand, drivers are allowed to achieve this balance by calling
the ``usb_autopm_*`` functions even after their ``disconnect`` routine
has returned -- say from within a work-queue routine -- provided they
retain an active reference to the interface (via ``usb_get_intf`` and
``usb_put_intf``).
Drivers using the async routines are responsible for their own
synchronization and mutual exclusion.
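The balancing rule above is easy to get wrong. As a minimal sketch of the newly permitted pattern (hypothetical my_dev driver, not part of this merge), a driver may release a probe-time "get" from a work item even after disconnect() has returned, as long as it keeps the interface alive with usb_get_intf():

#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/workqueue.h>

struct my_dev {
        struct usb_interface *intf;     /* reference held via usb_get_intf() */
        struct work_struct release_work;
};

static void my_release_work(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, release_work);

        /* Balance a usb_autopm_get_interface() taken earlier, even though
         * disconnect() has already returned -- allowed because we still
         * hold a reference to the interface. */
        usb_autopm_put_interface(dev->intf);
        usb_put_intf(dev->intf);        /* drop the keep-alive reference */
        kfree(dev);
}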

@ -402,6 +402,7 @@ tcp_min_rtt_wlen - INTEGER
minimum RTT when it is moved to a longer path (e.g., due to traffic
engineering). A longer window makes the filter more resistant to RTT
inflations such as transient congestion. The unit is seconds.
Possible values: 0 - 86400 (1 day)
Default: 300
tcp_moderate_rcvbuf - BOOLEAN
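For illustration only (a userspace sketch, not from this merge), a tool writing this sysctl might mirror the newly documented 0-86400 range before touching /proc/sys/net/ipv4/tcp_min_rtt_wlen:

#include <stdio.h>

static int set_tcp_min_rtt_wlen(int seconds)
{
        FILE *f;

        if (seconds < 0 || seconds > 86400)     /* range documented above */
                return -1;
        f = fopen("/proc/sys/net/ipv4/tcp_min_rtt_wlen", "w");
        if (!f)
                return -1;
        fprintf(f, "%d\n", seconds);
        return fclose(f);
}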

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 114
SUBLEVEL = 117
EXTRAVERSION =
NAME = Petit Gorille

@ -1395,7 +1395,21 @@ ENTRY(efi_stub_entry)
@ Preserve return value of efi_entry() in r4
mov r4, r0
bl cache_clean_flush
@ our cache maintenance code relies on CP15 barrier instructions
@ but since we arrived here with the MMU and caches configured
@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
@ the enable path will be executed on v7+ only.
mrc p15, 0, r1, c1, c0, 0 @ read SCTLR
tst r1, #(1 << 5) @ CP15BEN bit set?
bne 0f
orr r1, r1, #(1 << 5) @ CP15 barrier instructions
mcr p15, 0, r1, c1, c0, 0 @ write SCTLR
ARM( .inst 0xf57ff06f @ v7+ isb )
THUMB( isb )
0: bl cache_clean_flush
bl cache_off
@ Set parameters for booting zImage according to boot protocol

@ -93,7 +93,7 @@
};
&hdmi {
hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
};
&uart0 {

@ -222,7 +222,7 @@
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
vmcc-supply = <&reg_sd3_vmmc>;
cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
bus-witdh = <4>;
bus-width = <4>;
no-1-8-v;
status = "okay";
};
@ -233,7 +233,7 @@
pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
vmcc-supply = <&reg_sd4_vmmc>;
bus-witdh = <8>;
bus-width = <8>;
no-1-8-v;
non-removable;
status = "okay";

@ -89,6 +89,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii";
phy-reset-duration = <10>; /* in msecs */
phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
phy-supply = <&vdd_eth_io_reg>;
status = "disabled";

@ -1181,27 +1181,27 @@
gpu_opp_table: gpu-opp-table {
compatible = "operating-points-v2";
opp@100000000 {
opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <950000>;
};
opp@200000000 {
opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
opp-microvolt = <950000>;
};
opp@300000000 {
opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
opp-microvolt = <1000000>;
};
opp@400000000 {
opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <1100000>;
};
opp@500000000 {
opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
opp-microvolt = <1200000>;
};
opp@600000000 {
opp-600000000 {
opp-hz = /bits/ 64 <600000000>;
opp-microvolt = <1250000>;
};

@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
}
};
static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
static struct iop_adma_platform_data iop13xx_adma_0_data = {
.hw_id = 0,
.pool_size = PAGE_SIZE,
@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
.resource = iop13xx_adma_0_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_0_data,
},
};
@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
.resource = iop13xx_adma_1_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_1_data,
},
};
@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
.resource = iop13xx_adma_2_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_2_data,
},
};

@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
}
};
u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
static struct platform_device iop13xx_tpmi_0_device = {
.name = "iop-tpmi",
.id = 0,
@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
.resource = iop13xx_tpmi_0_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
.resource = iop13xx_tpmi_1_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
.resource = iop13xx_tpmi_2_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
.resource = iop13xx_tpmi_3_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};

@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
.resource = iop3xx_dma_0_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_dma_0_data,
},
};
@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
.resource = iop3xx_dma_1_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_dma_1_data,
},
};
@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
.resource = iop3xx_aau_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_aau_data,
},
};

@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
.resource = orion_xor0_shared_resources,
.dev = {
.dma_mask = &orion_xor_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &orion_xor0_pdata,
},
};
@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
.resource = orion_xor1_shared_resources,
.dev = {
.dma_mask = &orion_xor_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &orion_xor1_pdata,
},
};
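The iop13xx, iop3xx, and orion hunks above all make the same correction: these machines cannot address 64 bits of DMA, so both the streaming and coherent masks drop to 32 bits. These legacy platform devices hard-code the masks; a driver written against the modern DMA API would request the same limit at probe time. A hedged sketch with a hypothetical example_probe():

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        int ret;

        /* Ask for 32-bit streaming and coherent masks in one call. */
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (ret)
                dev_err(&pdev->dev, "no suitable DMA mask available\n");
        return ret;
}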

@ -37,6 +37,12 @@ void unregister_undef_hook(struct undef_hook *hook);
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
/*
* Move regs->pc to next instruction and do necessary setup before it
* is executed.
*/
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size);
static inline int __in_irqentry_text(unsigned long ptr)
{
return ptr >= (unsigned long)&__irqentry_text_start &&

@ -431,7 +431,7 @@ ret:
pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
current->comm, (unsigned long)current->pid, regs->pc);
regs->pc += 4;
arm64_skip_faulting_instruction(regs, 4);
return 0;
fault:
@ -512,7 +512,7 @@ ret:
pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
current->comm, (unsigned long)current->pid, regs->pc);
regs->pc += 4;
arm64_skip_faulting_instruction(regs, 4);
return 0;
}
@ -586,14 +586,14 @@ static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
static int a32_setend_handler(struct pt_regs *regs, u32 instr)
{
int rc = compat_setend_handler(regs, (instr >> 9) & 1);
regs->pc += 4;
arm64_skip_faulting_instruction(regs, 4);
return rc;
}
static int t16_setend_handler(struct pt_regs *regs, u32 instr)
{
int rc = compat_setend_handler(regs, (instr >> 3) & 1);
regs->pc += 2;
arm64_skip_faulting_instruction(regs, 2);
return rc;
}

@ -1504,7 +1504,7 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn)
if (!rc) {
dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
pt_regs_write_reg(regs, dst, val);
regs->pc += 4;
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
return rc;

@ -262,6 +262,18 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
}
}
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
regs->pc += size;
/*
* If we were single stepping, we want to get the step exception after
* we return from the trap.
*/
if (user_mode(regs))
user_fastforward_single_step(current);
}
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);
@ -453,7 +465,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
if (ret)
arm64_notify_segfault(regs, address);
else
regs->pc += 4;
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
@ -463,7 +475,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
pt_regs_write_reg(regs, rt, val);
regs->pc += 4;
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
@ -472,7 +484,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
isb();
pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
regs->pc += 4;
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
@ -480,7 +492,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
pt_regs_write_reg(regs, rt, arch_timer_get_rate());
regs->pc += 4;
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
struct sys64_hook {
@ -845,7 +857,7 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr)
}
/* If thread survives, skip over the BUG instruction and continue: */
regs->pc += AARCH64_INSN_SIZE; /* skip BRK and resume */
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
return DBG_HOOK_HANDLED;
}

@ -125,7 +125,7 @@ trace_a_syscall:
subu t1, v0, __NR_O32_Linux
move a1, v0
bnez t1, 1f /* __NR_syscall at offset 0 */
lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
.set pop
1: jal syscall_trace_enter

@ -22,6 +22,7 @@
#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
static __init void kvm_free_tmp(void)
{
/*
* Inform kmemleak about the hole in the .bss section since the
* corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
*/
kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
free_reserved_area(&kvm_tmp[kvm_tmp_index],
&kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}

@ -31,6 +31,7 @@
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
@ -328,6 +329,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
unsigned long addr, found, prev;
struct vm_unmapped_area_info info;
unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
@ -344,7 +346,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
if (high_limit > DEFAULT_MAP_WINDOW)
addr += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
while (addr > PAGE_SIZE) {
while (addr > min_addr) {
info.high_limit = addr;
if (!slice_scan_available(addr - 1, available, 0, &addr))
continue;
@ -356,8 +358,8 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
* Check if we need to reduce the range, or if we can
* extend it to cover the previous available slice.
*/
if (addr < PAGE_SIZE)
addr = PAGE_SIZE;
if (addr < min_addr)
addr = min_addr;
else if (slice_scan_available(addr - 1, available, 0, &prev)) {
addr = prev;
goto prev_slice;
@ -479,7 +481,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
addr = _ALIGN_UP(addr, page_size);
slice_dbg(" aligned addr=%lx\n", addr);
/* Ignore hint if it's too large or overlaps a VMA */
if (addr > high_limit - len ||
if (addr > high_limit - len || addr < mmap_min_addr ||
!slice_area_is_free(mm, addr, len))
addr = 0;
}

@ -252,11 +252,14 @@ do { \
/*
* Cache aliasing on the latest machines calls for a mapping granularity
* of 512KB. For 64-bit processes use a 512KB alignment and a randomization
* of up to 1GB. For 31-bit processes the virtual address space is limited,
* use no alignment and limit the randomization to 8MB.
* of 512KB for the anonymous mapping base. For 64-bit processes use a
* 512KB alignment and a randomization of up to 1GB. For 31-bit processes
* the virtual address space is limited, use no alignment and limit the
* randomization to 8MB.
* For the additional randomization of the program break use 32MB for
* 64-bit and 8MB for 31-bit.
*/
#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ffffUL)
#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL)
#define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL)
#define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL)
#define STACK_RND_MASK MMAP_RND_MASK
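The new numbers check out: the masks count 4KB pages, so shifting by PAGE_SHIFT (12 on s390) reproduces the sizes named in the comment. A quick C11 sanity check (illustrative, not kernel code):

#include <assert.h>

/* Each mask counts 4KB pages; mask << 12 spans the randomization range. */
static_assert((0x7ffUL   << 12) + 4096 == (8UL  << 20), "31-bit: 8MB");
static_assert((0x1fffUL  << 12) + 4096 == (32UL << 20), "64-bit brk: 32MB");
static_assert((0x3ff80UL << 12) == (1UL << 30) - (512UL << 10),
              "64-bit mmap: up to ~1GB in 512KB-aligned steps");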

@ -180,10 +180,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = {
struct sh_clk_ops;
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
}
void __init plat_irq_setup(void)
void __init __weak plat_irq_setup(void)
{
}

@ -245,6 +245,15 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# Avoid indirect branches in kernel to deal with Spectre
ifdef CONFIG_RETPOLINE
KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
# Additionally, avoid generating expensive indirect jumps which
# are subject to retpolines for small number of switch cases.
# clang turns off jump table generation by default when under
# retpoline builds, however, gcc does not for x86. This has
# only been fixed starting from gcc stable version 8.4.0 and
# onwards, but not for older ones. See gcc bug #86952.
ifndef CONFIG_CC_IS_CLANG
KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
endif
endif
archscripts: scripts_basic
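The construct the comment worries about is a dense switch, which gcc normally lowers to an indirect jump through a table; under retpolines every such dispatch pays a thunk, hence -fno-jump-tables. An illustrative (hypothetical) example of the pattern:

/* Dense switches like this are what gcc would normally compile to an
 * indirect jump through a table -- exactly the construct retpolines
 * make expensive. */
static int dispatch(int op)
{
        switch (op) {
        case 0: return 10;
        case 1: return 11;
        case 2: return 12;
        case 3: return 13;
        case 4: return 14;
        case 5: return 15;
        case 6: return 16;
        case 7: return 17;
        default: return -1;
        }
}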

@ -479,6 +479,7 @@ CONFIG_CRYPTO_RSA=y
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_ADIANTUM=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_AES_NI_INTEL=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_CRYPTO_DEV_VIRTIO=y

@ -116,6 +116,110 @@ static __initconst const u64 amd_hw_cache_event_ids
},
};
static __initconst const u64 amd_hw_cache_event_ids_f17h
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
[C(RESULT_MISS)] = 0xc860, /* L2$ access from DC Miss */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
[C(RESULT_MISS)] = 0,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
[C(RESULT_MISS)] = 0x0081, /* Instruction cache misses */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
[C(RESULT_MISS)] = 0xf045, /* L2 DTLB misses (PT walks) */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
[C(RESULT_MISS)] = 0xff85, /* L1 ITLB misses, L2 misses */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
[C(RESULT_MISS)] = 0x00c3, /* Retired Mispredicted BI */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
};
/*
* AMD Performance Monitor K7 and later, up to and including Family 16h:
*/
@ -861,9 +965,10 @@ __init int amd_pmu_init(void)
x86_pmu.amd_nb_constraints = 0;
}
/* Events are common for all AMDs */
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
if (boot_cpu_data.x86 >= 0x17)
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
else
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
return 0;
}

@ -148,6 +148,11 @@ static struct severity {
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
KERNEL
),
MCESEV(
PANIC, "Instruction fetch error in kernel",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
KERNEL
),
#endif
MCESEV(
PANIC, "Action required: unknown MCACOD",

@ -916,14 +916,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
mm = alloc->vma_vm_mm;
if (!mmget_not_zero(mm))
goto err_mmget;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
vma = binder_alloc_get_vma(alloc);
if (vma) {
if (!mmget_not_zero(alloc->vma_vm_mm))
goto err_mmget;
mm = alloc->vma_vm_mm;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
}
list_lru_isolate(lru, item);
spin_unlock(lock);
@ -934,10 +933,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
zap_page_range(vma, page_addr, PAGE_SIZE);
trace_binder_unmap_user_end(alloc, index);
up_write(&mm->mmap_sem);
mmput(mm);
}
up_write(&mm->mmap_sem);
mmput(mm);
trace_binder_unmap_kernel_start(alloc, index);

@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
/* Per the spec, only slot type and drawer type ODD can be supported */
static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
{
char buf[16];
char *buf;
unsigned int ret;
struct rm_feature_desc *desc = (void *)(buf + 8);
struct rm_feature_desc *desc;
struct ata_taskfile tf;
static const char cdb[] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
0, sizeof(buf),
0, 16,
0, 0, 0,
};
buf = kzalloc(16, GFP_KERNEL);
if (!buf)
return ODD_MECH_TYPE_UNSUPPORTED;
desc = (void *)(buf + 8);
ata_tf_init(dev, &tf);
tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
tf.protocol = ATAPI_PROT_PIO;
tf.lbam = sizeof(buf);
tf.lbam = 16;
ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
buf, sizeof(buf), 0);
if (ret)
buf, 16, 0);
if (ret) {
kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
}
if (be16_to_cpu(desc->feature_code) != 3)
if (be16_to_cpu(desc->feature_code) != 3) {
kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
}
if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
kfree(buf);
return ODD_MECH_TYPE_SLOT;
else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
} else if (desc->mech_type == 1 && desc->load == 0 &&
desc->eject == 1) {
kfree(buf);
return ODD_MECH_TYPE_DRAWER;
else
} else {
kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
}
}
/* Test if ODD is zero power ready by sense code */

@ -82,7 +82,6 @@
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);
static DEFINE_MUTEX(loop_ctl_mutex);
static int max_part;
static int part_shift;
@ -1019,7 +1018,7 @@ static int loop_clr_fd(struct loop_device *lo)
*/
if (atomic_read(&lo->lo_refcnt) > 1) {
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
@ -1071,12 +1070,12 @@ static int loop_clr_fd(struct loop_device *lo)
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
loop_unprepare_queue(lo);
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
/*
* Need not hold loop_ctl_mutex to fput backing file.
* Calling fput holding loop_ctl_mutex triggers a circular
* Need not hold lo_ctl_mutex to fput backing file.
* Calling fput holding lo_ctl_mutex triggers a circular
* lock dependency possibility warning as fput can take
* bd_mutex which is usually taken before loop_ctl_mutex.
* bd_mutex which is usually taken before lo_ctl_mutex.
*/
fput(filp);
return 0;
@ -1195,7 +1194,7 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
int ret;
if (lo->lo_state != Lo_bound) {
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
return -ENXIO;
}
@ -1214,10 +1213,10 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
lo->lo_encrypt_key_size);
}
/* Drop loop_ctl_mutex while we call into the filesystem. */
/* Drop lo_ctl_mutex while we call into the filesystem. */
path = lo->lo_backing_file->f_path;
path_get(&path);
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
if (!ret) {
info->lo_device = huge_encode_dev(stat.dev);
@ -1309,7 +1308,7 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
int err;
if (!arg) {
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
return -EINVAL;
}
err = loop_get_status(lo, &info64);
@ -1327,7 +1326,7 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
int err;
if (!arg) {
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
return -EINVAL;
}
err = loop_get_status(lo, &info64);
@ -1402,7 +1401,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
struct loop_device *lo = bdev->bd_disk->private_data;
int err;
mutex_lock_nested(&loop_ctl_mutex, 1);
mutex_lock_nested(&lo->lo_ctl_mutex, 1);
switch (cmd) {
case LOOP_SET_FD:
err = loop_set_fd(lo, mode, bdev, arg);
@ -1411,7 +1410,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
err = loop_change_fd(lo, bdev, arg);
break;
case LOOP_CLR_FD:
/* loop_clr_fd would have unlocked loop_ctl_mutex on success */
/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
err = loop_clr_fd(lo);
if (!err)
goto out_unlocked;
@ -1424,7 +1423,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
break;
case LOOP_GET_STATUS:
err = loop_get_status_old(lo, (struct loop_info __user *) arg);
/* loop_get_status() unlocks loop_ctl_mutex */
/* loop_get_status() unlocks lo_ctl_mutex */
goto out_unlocked;
case LOOP_SET_STATUS64:
err = -EPERM;
@ -1434,7 +1433,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
break;
case LOOP_GET_STATUS64:
err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
/* loop_get_status() unlocks loop_ctl_mutex */
/* loop_get_status() unlocks lo_ctl_mutex */
goto out_unlocked;
case LOOP_SET_CAPACITY:
err = -EPERM;
@ -1454,7 +1453,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
}
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked:
return err;
@ -1571,7 +1570,7 @@ loop_get_status_compat(struct loop_device *lo,
int err;
if (!arg) {
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
return -EINVAL;
}
err = loop_get_status(lo, &info64);
@ -1588,16 +1587,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
switch(cmd) {
case LOOP_SET_STATUS:
mutex_lock(&loop_ctl_mutex);
mutex_lock(&lo->lo_ctl_mutex);
err = loop_set_status_compat(
lo, (const struct compat_loop_info __user *) arg);
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
break;
case LOOP_GET_STATUS:
mutex_lock(&loop_ctl_mutex);
mutex_lock(&lo->lo_ctl_mutex);
err = loop_get_status_compat(
lo, (struct compat_loop_info __user *) arg);
/* loop_get_status() unlocks loop_ctl_mutex */
/* loop_get_status() unlocks lo_ctl_mutex */
break;
case LOOP_SET_CAPACITY:
case LOOP_CLR_FD:
@ -1641,7 +1640,7 @@ static void __lo_release(struct loop_device *lo)
if (atomic_dec_return(&lo->lo_refcnt))
return;
mutex_lock(&loop_ctl_mutex);
mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
/*
* In autoclear mode, stop the loop thread
@ -1659,7 +1658,7 @@ static void __lo_release(struct loop_device *lo)
blk_mq_unfreeze_queue(lo->lo_queue);
}
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
}
static void lo_release(struct gendisk *disk, fmode_t mode)
@ -1705,10 +1704,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
struct loop_device *lo = ptr;
struct loop_func_table *xfer = data;
mutex_lock(&loop_ctl_mutex);
mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_encryption == xfer)
loop_release_xfer(lo);
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
@ -1881,6 +1880,7 @@ static int loop_add(struct loop_device **l, int i)
if (!part_shift)
disk->flags |= GENHD_FL_NO_PART_SCAN;
disk->flags |= GENHD_FL_EXT_DEVT;
mutex_init(&lo->lo_ctl_mutex);
atomic_set(&lo->lo_refcnt, 0);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
@ -1993,19 +1993,19 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
ret = loop_lookup(&lo, parm);
if (ret < 0)
break;
mutex_lock(&loop_ctl_mutex);
mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_state != Lo_unbound) {
ret = -EBUSY;
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
break;
}
if (atomic_read(&lo->lo_refcnt) > 0) {
ret = -EBUSY;
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
break;
}
lo->lo_disk->private_data = NULL;
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
loop_remove(lo);
break;

@ -54,6 +54,7 @@ struct loop_device {
spinlock_t lo_lock;
int lo_state;
struct mutex lo_ctl_mutex;
struct kthread_worker worker;
struct task_struct *worker_task;
bool use_dio;

@ -1063,6 +1063,8 @@ static int ace_setup(struct ace_device *ace)
return 0;
err_read:
/* prevent double queue cleanup */
ace->gd->queue = NULL;
put_disk(ace->gd);
err_alloc_disk:
blk_cleanup_queue(ace->queue);

@ -782,18 +782,18 @@ struct zram_work {
struct zram *zram;
unsigned long entry;
struct bio *bio;
struct bio_vec bvec;
};
#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
struct bio_vec bvec;
struct zram_work *zw = container_of(work, struct zram_work, work);
struct zram *zram = zw->zram;
unsigned long entry = zw->entry;
struct bio *bio = zw->bio;
read_from_bdev_async(zram, &bvec, entry, bio);
read_from_bdev_async(zram, &zw->bvec, entry, bio);
}
/*
@ -806,6 +806,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
{
struct zram_work work;
work.bvec = *bvec;
work.zram = zram;
work.entry = entry;
work.bio = bio;
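The zram fix above is an instance of a general rule: anything a deferred handler needs must live in the work container, filled in before queueing, rather than in a local variable of the handler. A generic sketch of the pattern with hypothetical names:

#include <linux/bvec.h>
#include <linux/workqueue.h>

struct io_work {
        struct work_struct work;
        struct bio_vec bvec;    /* copied in by the submitter, as above */
};

static void io_work_fn(struct work_struct *work)
{
        struct io_work *w = container_of(work, struct io_work, work);

        /* ... operate on w->bvec, which the submitter filled in before
         * queueing -- never on an uninitialized local copy ... */
        (void)w;
}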

@ -2893,6 +2893,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
return 0;
}
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
0, "OOB Wake-on-BT", data);
if (ret) {
@ -2907,7 +2908,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
}
data->oob_wake_irq = irq;
disable_irq(irq);
bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
return 0;
}

@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = {
};
static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
void __iomem *base,
const struct pmc_clk_data *pmc_data,
const char **parent_names,
int num_parents)
{
@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
init.num_parents = num_parents;
pclk->hw.init = &init;
pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
spin_lock_init(&pclk->lock);
/*
* On some systems, the pmc_plt_clocks already enabled by the
* firmware are being marked as critical to avoid them being
* gated by the clock framework.
*/
if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
init.flags |= CLK_IS_CRITICAL;
ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
if (ret) {
pclk = ERR_PTR(ret);
@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev)
return PTR_ERR(parent_names);
for (i = 0; i < PMC_CLK_NUM; i++) {
data->clks[i] = plt_clk_register(pdev, i, pmc_data->base,
data->clks[i] = plt_clk_register(pdev, i, pmc_data,
parent_names, data->nparents);
if (IS_ERR(data->clks[i])) {
err = PTR_ERR(data->clks[i]);

@ -1332,6 +1332,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
enum dma_status status;
unsigned long flags;
unsigned int residue;
bool cyclic;
status = dma_cookie_status(chan, cookie, txstate);
if (status == DMA_COMPLETE || !txstate)
@ -1339,10 +1340,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&rchan->lock, flags);
residue = rcar_dmac_chan_get_residue(rchan, cookie);
cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
spin_unlock_irqrestore(&rchan->lock, flags);
/* if there's no residue, the cookie is complete */
if (!residue)
if (!residue && !cyclic)
return DMA_COMPLETE;
dma_set_residue(txstate, residue);

@ -861,6 +861,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->offset_timer =
devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
if (!gpio->offset_timer)
return -ENOMEM;
return aspeed_gpio_setup_irqs(gpio, pdev);
}

@ -499,7 +499,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
of_node_get(chip->of_node);
return of_gpiochip_scan_gpios(chip);
status = of_gpiochip_scan_gpios(chip);
if (status) {
of_node_put(chip->of_node);
gpiochip_remove_pin_ranges(chip);
}
return status;
}
void of_gpiochip_remove(struct gpio_chip *chip)

@ -326,8 +326,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
unsigned long conn_configured, conn_seq, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
unsigned long conn_configured, conn_seq;
int i, j;
bool *save_enabled;
bool fallback = true, ret = true;
@ -345,9 +345,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
drm_modeset_backoff(&ctx);
memcpy(save_enabled, enabled, count);
conn_seq = GENMASK(count - 1, 0);
mask = GENMASK(count - 1, 0);
conn_configured = 0;
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@ -360,8 +361,7 @@ retry:
if (conn_configured & BIT(i))
continue;
/* First pass, only consider tiled connectors */
if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
if (conn_seq == 0 && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
@ -465,10 +465,8 @@ retry:
conn_configured |= BIT(i);
}
if (conn_configured != conn_seq) { /* repeat until no more are found */
conn_seq = conn_configured;
if ((conn_configured & mask) != mask && conn_configured != conn_seq)
goto retry;
}
/*
* If the BIOS didn't enable everything it could, fall back to have the

@ -277,10 +277,12 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
ret = drm_dev_register(drm, 0);
if (ret)
goto free_drm;
goto uninstall_irq;
return 0;
uninstall_irq:
drm_irq_uninstall(drm);
free_drm:
drm_dev_unref(drm);
@ -294,10 +296,11 @@ static int meson_drv_bind(struct device *dev)
static void meson_drv_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct meson_drm *priv = drm->dev_private;
struct meson_drm *priv = dev_get_drvdata(dev);
struct drm_device *drm = priv->drm;
drm_dev_unregister(drm);
drm_irq_uninstall(drm);
drm_kms_helper_poll_fini(drm);
drm_fbdev_cma_fini(priv->fbdev);
drm_mode_config_cleanup(drm);

@ -867,7 +867,7 @@ static void
vc4_crtc_reset(struct drm_crtc *crtc)
{
if (crtc->state)
__drm_atomic_helper_crtc_destroy_state(crtc->state);
vc4_crtc_destroy_state(crtc, crtc->state);
crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
if (crtc->state)

@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
seq_printf(f, "\n\n");
/* dump parsed data and input mappings */
if (down_interruptible(&hdev->driver_input_lock))
return 0;
hid_dump_device(hdev, f);
seq_printf(f, "\n");
hid_dump_input_mapping(hdev, f);
up(&hdev->driver_input_lock);
return 0;
}

@ -973,6 +973,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x1b8: map_key_clear(KEY_VIDEO); break;
case 0x1bc: map_key_clear(KEY_MESSENGER); break;
case 0x1bd: map_key_clear(KEY_INFO); break;
case 0x1cb: map_key_clear(KEY_ASSISTANT); break;
case 0x201: map_key_clear(KEY_NEW); break;
case 0x202: map_key_clear(KEY_OPEN); break;
case 0x203: map_key_clear(KEY_CLOSE); break;

@ -1907,6 +1907,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
kfree(data);
return -ENOMEM;
}
data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
if (!data->wq) {
kfree(data->effect_ids);
kfree(data);
return -ENOMEM;
}
data->hidpp = hidpp;
data->feature_index = feature_index;
data->version = version;
@ -1951,7 +1958,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
/* ignore boost value at response.fap.params[2] */
/* init the hardware command queue */
data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
atomic_set(&data->workqueue_size, 0);
/* initialize with zero autocenter to get wheel in usable state */

@ -624,7 +624,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
othdev->output.port = -1;
othdev->output.active = false;
gth->output[port].output = NULL;
for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
if (gth->master[master] == port)
gth->master[master] = -1;
spin_unlock(&gth->gth_lock);

@ -340,7 +340,7 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev,
STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0);
dnf_delay = setup->dnf * i2cclk;
sdadel_min = setup->fall_time - i2c_specs[setup->speed].hddat_min -
sdadel_min = i2c_specs[setup->speed].hddat_min + setup->fall_time -
af_delay_min - (setup->dnf + 3) * i2cclk;
sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time -

@ -715,16 +715,20 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
agent->device->name,
agent->port_num);
if (ret)
return ret;
goto free_security;
agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
ret = register_lsm_notifier(&agent->lsm_nb);
if (ret)
return ret;
goto free_security;
agent->smp_allowed = true;
agent->lsm_nb_reg = true;
return 0;
free_security:
security_ib_free_security(agent->security);
return ret;
}
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
@ -732,9 +736,10 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
if (!rdma_protocol_ib(agent->device, agent->port_num))
return;
security_ib_free_security(agent->security);
if (agent->lsm_nb_reg)
unregister_lsm_notifier(&agent->lsm_nb);
security_ib_free_security(agent->security);
}
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)

@ -766,8 +766,8 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
}
EXPORT_SYMBOL(ib_open_qp);
static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
struct ib_qp_init_attr *qp_init_attr)
static struct ib_qp *create_xrc_qp(struct ib_qp *qp,
struct ib_qp_init_attr *qp_init_attr)
{
struct ib_qp *real_qp = qp;
@ -782,10 +782,10 @@ static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
qp_init_attr->qp_context);
if (!IS_ERR(qp))
__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
else
real_qp->device->destroy_qp(real_qp);
if (IS_ERR(qp))
return qp;
__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
return qp;
}
@ -816,10 +816,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
return qp;
ret = ib_create_qp_security(qp, device);
if (ret) {
ib_destroy_qp(qp);
return ERR_PTR(ret);
}
if (ret)
goto err;
qp->device = device;
qp->real_qp = qp;
@ -834,8 +832,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
INIT_LIST_HEAD(&qp->sig_mrs);
qp->port = 0;
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
return ib_create_xrc_qp(qp, qp_init_attr);
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
struct ib_qp *xrc_qp = create_xrc_qp(qp, qp_init_attr);
if (IS_ERR(xrc_qp)) {
ret = PTR_ERR(xrc_qp);
goto err;
}
return xrc_qp;
}
qp->event_handler = qp_init_attr->event_handler;
qp->qp_context = qp_init_attr->qp_context;
@ -863,11 +868,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (qp_init_attr->cap.max_rdma_ctxs) {
ret = rdma_rw_init_mrs(qp, qp_init_attr);
if (ret) {
pr_err("failed to init MR pool ret= %d\n", ret);
ib_destroy_qp(qp);
return ERR_PTR(ret);
}
if (ret)
goto err;
}
/*
@ -880,6 +882,11 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
device->attrs.max_sge_rd);
return qp;
err:
ib_destroy_qp(qp);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_create_qp);

@ -611,11 +611,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
if (unlikely(mapped_segs == mr->mr.max_segs))
return -ENOMEM;
if (mr->mr.length == 0) {
mr->mr.user_base = addr;
mr->mr.iova = addr;
}
m = mapped_segs / RVT_SEGSZ;
n = mapped_segs % RVT_SEGSZ;
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@ -633,17 +628,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
* @sg_nents: number of entries in sg
* @sg_offset: offset in bytes into sg
*
* Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
*
* Return: number of sg elements mapped to the memory region
*/
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rvt_mr *mr = to_imr(ibmr);
int ret;
mr->mr.length = 0;
mr->mr.page_shift = PAGE_SHIFT;
return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
rvt_set_page);
ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
mr->mr.user_base = ibmr->iova;
mr->mr.iova = ibmr->iova;
mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
mr->mr.length = (size_t)ibmr->length;
return ret;
}
/**
@ -674,6 +676,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
ibmr->rkey = key;
mr->mr.lkey = key;
mr->mr.access_flags = access;
mr->mr.iova = ibmr->iova;
atomic_set(&mr->mr.lkey_invalid, 0);
return 0;

@ -2381,8 +2381,19 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
srpt_queue_response(cmd);
}
/*
* This function is called for aborted commands if no response is sent to the
* initiator. Make sure that the credits freed by aborting a command are
* returned to the initiator the next time a response is sent by incrementing
* ch->req_lim_delta.
*/
static void srpt_aborted_task(struct se_cmd *cmd)
{
struct srpt_send_ioctx *ioctx = container_of(cmd,
struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
atomic_inc(&ch->req_lim_delta);
}
static int srpt_queue_status(struct se_cmd *cmd)

@ -156,6 +156,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return error;
}
pdata->input = input;
platform_set_drvdata(pdev, pdata);
error = devm_request_irq(&pdev->dev, pdata->irq,
imx_snvs_pwrkey_interrupt,
0, pdev->name, pdev);
@ -171,9 +174,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return error;
}
pdata->input = input;
platform_set_drvdata(pdev, pdata);
device_init_wakeup(&pdev->dev, pdata->wakeup);
return 0;

@ -1239,7 +1239,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
}
rc = f11_write_control_regs(fn, &f11->sens_query,
&f11->dev_controls, fn->fd.query_base_addr);
&f11->dev_controls, fn->fd.control_base_addr);
if (rc)
dev_warn(&fn->dev, "Failed to write control registers\n");

@ -111,27 +111,29 @@ struct stmfts_data {
bool running;
};
static void stmfts_brightness_set(struct led_classdev *led_cdev,
static int stmfts_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct stmfts_data *sdata = container_of(led_cdev,
struct stmfts_data, led_cdev);
int err;
if (value == sdata->led_status || !sdata->ledvdd)
return;
if (!value) {
regulator_disable(sdata->ledvdd);
} else {
err = regulator_enable(sdata->ledvdd);
if (err)
dev_warn(&sdata->client->dev,
"failed to disable ledvdd regulator: %d\n",
err);
if (value != sdata->led_status && sdata->ledvdd) {
if (!value) {
regulator_disable(sdata->ledvdd);
} else {
err = regulator_enable(sdata->ledvdd);
if (err) {
dev_warn(&sdata->client->dev,
"failed to disable ledvdd regulator: %d\n",
err);
return err;
}
}
sdata->led_status = value;
}
sdata->led_status = value;
return 0;
}
static enum led_brightness stmfts_brightness_get(struct led_classdev *led_cdev)
@ -613,7 +615,7 @@ static int stmfts_enable_led(struct stmfts_data *sdata)
sdata->led_cdev.name = STMFTS_DEV_NAME;
sdata->led_cdev.max_brightness = LED_ON;
sdata->led_cdev.brightness = LED_OFF;
sdata->led_cdev.brightness_set = stmfts_brightness_set;
sdata->led_cdev.brightness_set_blocking = stmfts_brightness_set;
sdata->led_cdev.brightness_get = stmfts_brightness_get;
err = devm_led_classdev_register(&sdata->client->dev, &sdata->led_cdev);

@ -3127,21 +3127,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
int type, prot = 0;
size_t length;
int prot = 0;
if (devid < entry->devid_start || devid > entry->devid_end)
continue;
type = IOMMU_RESV_DIRECT;
length = entry->address_end - entry->address_start;
if (entry->prot & IOMMU_PROT_IR)
prot |= IOMMU_READ;
if (entry->prot & IOMMU_PROT_IW)
prot |= IOMMU_WRITE;
if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
/* Exclusion range */
type = IOMMU_RESV_RESERVED;
region = iommu_alloc_resv_region(entry->address_start,
length, prot,
IOMMU_RESV_DIRECT);
length, prot, type);
if (!region) {
pr_err("Out of memory allocating dm-regions for %s\n",
dev_name(dev));

@ -1980,6 +1980,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
if (e == NULL)
return -ENOMEM;
if (m->flags & IVMD_FLAG_EXCL_RANGE)
init_exclusion_range(m);
switch (m->type) {
default:
kfree(e);
@ -2026,9 +2029,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
while (p < end) {
m = (struct ivmd_header *)p;
if (m->flags & IVMD_FLAG_EXCL_RANGE)
init_exclusion_range(m);
else if (m->flags & IVMD_FLAG_UNITY_MAP)
if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
init_unity_map_range(m);
p += m->length;

@ -369,6 +369,8 @@
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02
#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2)
/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB 24
#define IOMMU_CAP_NPCACHE 26

@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int devid;
const struct of_device_id *of_id;
struct pca9532_data *data = i2c_get_clientdata(client);
struct pca9532_platform_data *pca9532_pdata =
dev_get_platdata(&client->dev);
@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
devid = (int)(uintptr_t)of_match_device(
of_pca9532_leds_match, &client->dev)->data;
of_id = of_match_device(of_pca9532_leds_match,
&client->dev);
if (unlikely(!of_id))
return -EINVAL;
devid = (int)(uintptr_t) of_id->data;
} else {
devid = id->driver_data;
}

@ -2917,17 +2917,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
ic->sectors_per_block = val >> SECTOR_SHIFT;
} else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
"Invalid internal_hash argument");
if (r)
goto bad;
} else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
"Invalid journal_crypt argument");
if (r)
goto bad;
} else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
"Invalid journal_mac argument");
if (r)
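The memcmp()-to-strncmp() switch matters because memcmp() always compares the full prefix length and may read past opt_string's terminating NUL when the option is shorter, while strncmp() stops comparing at the first NUL. A minimal userspace illustration (not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char opt[] = "int";       /* shorter than "internal_hash:" */

        /* strncmp stops at opt's NUL byte, so this is well-defined... */
        printf("%d\n",
               strncmp(opt, "internal_hash:",
                       strlen("internal_hash:")) == 0);
        /* ...whereas memcmp(opt, "internal_hash:", 14) would read past
         * the end of the 4-byte buffer 'opt'. */
        return 0;
}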

@ -158,10 +158,10 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define REG_GFIX 0x69 /* Fix gain control */
#define REG_DBLV 0x6b /* PLL control an debugging */
#define DBLV_BYPASS 0x00 /* Bypass PLL */
#define DBLV_X4 0x01 /* clock x4 */
#define DBLV_X6 0x10 /* clock x6 */
#define DBLV_X8 0x11 /* clock x8 */
#define DBLV_BYPASS 0x0a /* Bypass PLL */
#define DBLV_X4 0x4a /* clock x4 */
#define DBLV_X6 0x8a /* clock x6 */
#define DBLV_X8 0xca /* clock x8 */
#define REG_REG76 0x76 /* OV's name */
#define R76_BLKPCOR 0x80 /* Black pixel correction enable */
@ -837,7 +837,7 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd,
if (ret < 0)
return ret;
return ov7670_write(sd, REG_DBLV, DBLV_X4);
return 0;
}
static void ov7670_get_framerate_legacy(struct v4l2_subdev *sd,
@ -1601,11 +1601,7 @@ static int ov7670_probe(struct i2c_client *client,
if (config->clock_speed)
info->clock_speed = config->clock_speed;
/*
* It should be allowed for ov7670 too when it is migrated to
* the new frame rate formula.
*/
if (config->pll_bypass && id->driver_data != MODEL_OV7670)
if (config->pll_bypass)
info->pll_bypass = true;
if (config->pclk_hb_disable)

@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
return sprintf(buf, "%pM\n", slave->perm_hwaddr);
return sprintf(buf, "%*phC\n",
slave->dev->addr_len,
slave->perm_hwaddr);
}
static SLAVE_ATTR_RO(perm_hwaddr);

@ -130,6 +130,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
(fs->m_ext.vlan_etype || fs->m_ext.data[1]))
return -EINVAL;
if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
return -EINVAL;
if (fs->location != RX_CLS_LOC_ANY &&
test_bit(fs->location, priv->cfp.used))
return -EBUSY;
@ -330,6 +333,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
int ret;
u32 reg;
if (loc >= CFP_NUM_RULES)
return -EINVAL;
/* Refuse deletion of unused rules, and the default reserved rule */
if (!test_bit(loc, priv->cfp.used) || loc == 0)
return -EINVAL;

@ -6768,8 +6768,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
skip_uc:
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
if (rc && vnic->mc_list_count) {
netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
rc);
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
}
if (rc)
netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
rc);
return rc;
@ -8234,6 +8241,7 @@ init_err_cleanup_tc:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
bnxt_cleanup_pci(bp);

@ -2817,14 +2817,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
*hclk = devm_clk_get(&pdev->dev, "hclk");
}
if (IS_ERR(*pclk)) {
if (IS_ERR_OR_NULL(*pclk)) {
err = PTR_ERR(*pclk);
if (!err)
err = -ENODEV;
dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
return err;
}
if (IS_ERR(*hclk)) {
if (IS_ERR_OR_NULL(*hclk)) {
err = PTR_ERR(*hclk);
if (!err)
err = -ENODEV;
dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
return err;
}

@ -150,7 +150,6 @@ out_buffer_fail:
/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
hnae_free_buffers(ring);
dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
ring->desc_num * sizeof(ring->desc[0]),
ring_to_dma_dir(ring));
@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
if (is_rx_ring(ring))
hnae_free_buffers(ring);
hnae_free_desc(ring);
kfree(ring->desc_cb);
ring->desc_cb = NULL;

@ -2743,6 +2743,17 @@ int hns_dsaf_get_regs_count(void)
return DSAF_DUMP_REGS_NUM;
}
static int hns_dsaf_get_port_id(u8 port)
{
if (port < DSAF_SERVICE_NW_NUM)
return port;
if (port >= DSAF_BASE_INNER_PORT_NUM)
return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
return -EINVAL;
}
static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
{
struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@ -2808,23 +2819,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
memset(&temp_key, 0x0, sizeof(temp_key));
mask_entry.addr[0] = 0x01;
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
port, mask_entry.addr);
0xf, mask_entry.addr);
tbl_tcam_mcast.tbl_mcast_item_vld = 1;
tbl_tcam_mcast.tbl_mcast_old_en = 0;
if (port < DSAF_SERVICE_NW_NUM) {
mskid = port;
} else if (port >= DSAF_BASE_INNER_PORT_NUM) {
mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
} else {
/* set MAC port to handle multicast */
mskid = hns_dsaf_get_port_id(port);
if (mskid == -EINVAL) {
dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
dsaf_dev->ae_dev.name, port,
mask_key.high.val, mask_key.low.val);
return;
}
dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
mskid % 32, 1);
/* set pool bit map to handle multicast */
mskid = hns_dsaf_get_port_id(port_num);
if (mskid == -EINVAL) {
dev_err(dsaf_dev->dev,
"%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
dsaf_dev->ae_dev.name, port_num,
mask_key.high.val, mask_key.low.val);
return;
}
dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
mskid % 32, 1);
memcpy(&temp_key, &mask_key, sizeof(mask_key));
hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
(struct dsaf_tbl_tcam_data *)(&mask_key),

@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
}
/**

@@ -29,9 +29,6 @@
#define SERVICE_TIMER_HZ (1 * HZ)
#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64
#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
ring->stats.tx_pkts++;
ring->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
@@ -1099,6 +1094,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
/* issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ring->next_to_clean]);
}
/* update tx ring statistics. */
ring->stats.tx_pkts += pkts;
ring->stats.tx_bytes += bytes;
NETIF_TX_UNLOCK(ring);
@@ -2269,7 +2267,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2282,7 +2280,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}

@@ -3176,6 +3176,7 @@ static ssize_t ehea_probe_port(struct device *dev,
if (ehea_add_adapter_mr(adapter)) {
pr_err("creating MR failed\n");
of_node_put(eth_dn);
return -EIO;
}

@@ -58,6 +58,8 @@ static int __init fm10k_init_module(void)
/* create driver workqueue */
fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
fm10k_driver_name);
if (!fm10k_workqueue)
return -ENOMEM;
fm10k_dbg_init();
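
The fm10k hunk adds the missing NULL check on alloc_workqueue(); carrying a NULL workqueue forward would oops on first use. The same init-path rule as a trivial sketch, with malloc() standing in for the allocator:

    #include <stdio.h>
    #include <stdlib.h>

    #define ENOMEM_SKETCH 12

    static void *workqueue;

    static int init_module_sketch(void)
    {
        workqueue = malloc(64);        /* stand-in for alloc_workqueue() */
        if (!workqueue)
            return -ENOMEM_SKETCH;     /* fail init, don't limp on */
        return 0;
    }

    int main(void)
    {
        int rc = init_module_sketch();

        printf("init: %d\n", rc);
        free(workqueue);
        return 0;
    }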

@@ -214,6 +214,8 @@
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
#define E1000_CTRL_RST 0x04000000 /* Global reset */

@@ -7934,9 +7934,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl, status;
u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
int retval = 0;
#endif
bool wake;
rtnl_lock();
netif_device_detach(netdev);
@@ -7949,14 +7947,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
igb_clear_interrupt_scheme(adapter);
rtnl_unlock();
#ifdef CONFIG_PM
if (!runtime) {
retval = pci_save_state(pdev);
if (retval)
return retval;
}
#endif
status = rd32(E1000_STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -7973,10 +7963,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
ctrl = rd32(E1000_CTRL);
/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC;
wr32(E1000_CTRL, ctrl);
@@ -7990,12 +7976,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
wr32(E1000_WUFC, 0);
}
*enable_wake = wufc || adapter->en_mng_pt;
if (!*enable_wake)
wake = wufc || adapter->en_mng_pt;
if (!wake)
igb_power_down_link(adapter);
else
igb_power_up_link(adapter);
if (enable_wake)
*enable_wake = wake;
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -8038,22 +8027,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
static int __maybe_unused igb_suspend(struct device *dev)
{
int retval;
bool wake;
struct pci_dev *pdev = to_pci_dev(dev);
retval = __igb_shutdown(pdev, &wake, 0);
if (retval)
return retval;
if (wake) {
pci_prepare_to_sleep(pdev);
} else {
pci_wake_from_d3(pdev, false);
pci_set_power_state(pdev, PCI_D3hot);
}
return 0;
return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
static int __maybe_unused igb_resume(struct device *dev)
@@ -8124,22 +8098,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
static int __maybe_unused igb_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int retval;
bool wake;
retval = __igb_shutdown(pdev, &wake, 1);
if (retval)
return retval;
if (wake) {
pci_prepare_to_sleep(pdev);
} else {
pci_wake_from_d3(pdev, false);
pci_set_power_state(pdev, PCI_D3hot);
}
return 0;
return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}
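
The igb hunks make enable_wake an optional out-parameter: __igb_shutdown() decides the wake state itself, powers the link accordingly, and reports the decision only when the caller supplied a destination, which lets the suspend paths pass NULL and drop their duplicated D3 handling. A plain-C sketch of the pattern, all names hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    static int core_shutdown(bool *enable_wake, bool runtime)
    {
        bool wake = !runtime;          /* stand-in for the real decision */

        puts(wake ? "power up link" : "power down link");
        if (enable_wake)               /* report only when asked */
            *enable_wake = wake;
        return 0;
    }

    int main(void)
    {
        bool wake;

        core_shutdown(&wake, false);   /* shutdown path wants the answer */
        core_shutdown(NULL, true);     /* runtime suspend does not */
        printf("wake=%d\n", wake);
        return 0;
    }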
static int __maybe_unused igb_runtime_resume(struct device *dev)

@@ -1622,7 +1622,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
break;
case MLX5_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
break;
default:
netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",

@@ -79,8 +79,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@@ -108,8 +107,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

@@ -392,10 +392,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
i2c_addr = MLX5_I2C_ADDR_LOW;
if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
i2c_addr = MLX5_I2C_ADDR_HIGH;
offset -= MLX5_EEPROM_PAGE_LENGTH;
}
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, module, module_num);
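
Taken together, the two mlx5 EEPROM hunks restrict SFP module reads to the single low page: the advertised eeprom_len drops from the full SFF-8472 size to MLX5_EEPROM_PAGE_LENGTH, and the high-page I2C addressing goes away. A standalone sketch of the length clamp visible above; the 256-byte page size is an assumption here:

    #include <stdio.h>

    #define EEPROM_PAGE_LEN 256u   /* assumed MLX5_EEPROM_PAGE_LENGTH */

    static unsigned int clamp_read(unsigned int offset, unsigned int size)
    {
        if (offset + size > EEPROM_PAGE_LEN)   /* would cross the page end */
            size -= offset + size - EEPROM_PAGE_LEN;
        return size;
    }

    int main(void)
    {
        printf("%u\n", clamp_read(250, 16));   /* 6: truncated at boundary */
        printf("%u\n", clamp_read(0, 64));     /* 64: fits in the page */
        return 0;
    }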

@@ -2521,11 +2521,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
if (err)
return err;
mlxsw_sp_port->link.autoneg = autoneg;
if (!netif_running(dev))
return 0;
mlxsw_sp_port->link.autoneg = autoneg;
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

@@ -526,9 +526,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
/* set dma read address */
ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
/* start the packet dma process, and set auto-dequeue rx */
ks8851_wrreg16(ks, KS_RXQCR,
ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
/* start DMA access */
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
if (rxlen > 4) {
unsigned int rxalign;
@@ -559,7 +558,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
}
}
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
/* end DMA access and dequeue packet */
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
}
}
@@ -776,6 +776,15 @@ static void ks8851_tx_work(struct work_struct *work)
static int ks8851_net_open(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
int ret;
ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
dev->name, ks);
if (ret < 0) {
netdev_err(dev, "failed to get irq\n");
return ret;
}
/* lock the card, even if we may not actually be doing anything
* else at the moment */
@@ -840,6 +849,7 @@ static int ks8851_net_open(struct net_device *dev)
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
mutex_unlock(&ks->lock);
mii_check_link(&ks->mii);
return 0;
}
@@ -890,6 +900,8 @@ static int ks8851_net_stop(struct net_device *dev)
dev_kfree_skb(txb);
}
free_irq(dev->irq, ks);
return 0;
}
@@ -1499,6 +1511,7 @@ static int ks8851_probe(struct spi_device *spi)
spi_set_drvdata(spi, ks);
netif_carrier_off(ks->netdev);
ndev->if_port = IF_PORT_100BASET;
ndev->netdev_ops = &ks8851_netdev_ops;
ndev->irq = spi->irq;
@@ -1520,14 +1533,6 @@ static int ks8851_probe(struct spi_device *spi)
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
ndev->name, ks);
if (ret < 0) {
dev_err(&spi->dev, "failed to get irq\n");
goto err_irq;
}
ret = register_netdev(ndev);
if (ret) {
dev_err(&spi->dev, "failed to register network device\n");
@@ -1540,14 +1545,10 @@ static int ks8851_probe(struct spi_device *spi)
return 0;
err_netdev:
free_irq(ndev->irq, ks);
err_irq:
err_id:
if (gpio_is_valid(gpio))
gpio_set_value(gpio, 0);
err_id:
regulator_disable(ks->vdd_reg);
err_reg:
regulator_disable(ks->vdd_io);
@@ -1565,7 +1566,6 @@ static int ks8851_remove(struct spi_device *spi)
dev_info(&spi->dev, "remove\n");
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
if (gpio_is_valid(priv->gpio))
gpio_set_value(priv->gpio, 0);
regulator_disable(priv->vdd_reg);
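
The ks8851 hunks move IRQ ownership from probe()/remove() into open()/stop(), so no interrupt can fire against a half-initialised device and the line is held only while the interface is up. A toy sketch of that acquire-in-open, release-in-stop lifecycle; request_irq()/free_irq() are simulated:

    #include <stdio.h>

    static int irq_claimed;

    static int request_irq_sim(void) { irq_claimed = 1; return 0; }
    static void free_irq_sim(void)   { irq_claimed = 0; }

    static int net_open(void)
    {
        if (request_irq_sim() < 0)    /* first step of open */
            return -1;
        puts("interface up, irq live");
        return 0;
    }

    static void net_stop(void)
    {
        puts("interface down");
        free_irq_sim();               /* released with the interface */
    }

    int main(void)
    {
        net_open();
        net_stop();
        printf("irq_claimed=%d\n", irq_claimed);
        return 0;
    }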

@@ -1047,6 +1047,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
if (!skb)
break;
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
adapter->ahw->diag_cnt = 0;

@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(rdes0 & RDES0_OWN))
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
stats->rx_length_errors++;
return discard_frame;
}
if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
x->rx_desc++;
@@ -231,9 +236,10 @@
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
!!(rdes0 & RDES0_FRAME_TYPE),
!!(rdes0 & ERDES0_RX_MAC_ADDR));
if (likely(ret == good_frame))
ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
!!(rdes0 & RDES0_FRAME_TYPE),
!!(rdes0 & ERDES0_RX_MAC_ADDR));
if (unlikely(rdes0 & RDES0_DRIBBLING))
x->dribbling_bit++;

@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
pr_warn("%s: Oversized frame spanned multiple buffers\n",
__func__);
stats->rx_length_errors++;
return discard_frame;
}

@@ -2582,8 +2582,6 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
stmmac_check_ether_addr(priv);
if (priv->hw->pcs != STMMAC_PCS_RGMII &&
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -3415,9 +3413,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* ignored
*/
if (frame_len > priv->dma_buf_sz) {
netdev_err(priv->dev,
"len %d larger than size (%d)\n",
frame_len, priv->dma_buf_sz);
if (net_ratelimit())
netdev_err(priv->dev,
"len %d larger than size (%d)\n",
frame_len, priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
break;
}
@@ -3475,9 +3474,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
} else {
skb = rx_q->rx_skbuff[entry];
if (unlikely(!skb)) {
netdev_err(priv->dev,
"%s: Inconsistent Rx chain\n",
priv->dev->name);
if (net_ratelimit())
netdev_err(priv->dev,
"%s: Inconsistent Rx chain\n",
priv->dev->name);
priv->dev->stats.rx_dropped++;
break;
}
@@ -4213,6 +4213,8 @@ int stmmac_dvr_probe(struct device *device,
if (ret)
goto error_hw_init;
stmmac_check_ether_addr(priv);
/* Configure real RX and TX queues */
netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);

@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
},
.driver_data = (void *)&galileo_stmmac_dmi_data,
},
/*
* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
* The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
* has only one pci network device while other asset tags are
* for IOT2040 which has two.
*/
{
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
@@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
{
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
"6ES7647-0AA00-1YA2"),
},
.driver_data = (void *)&iot2040_stmmac_dmi_data,
},

@@ -3538,12 +3538,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
if (ret)
if (ret) {
of_node_put(interfaces);
return ret;
}
ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
if (ret)
if (ret) {
of_node_put(interfaces);
return ret;
}
/* Create network interfaces */
INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);

@@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev)
ret = of_address_to_resource(np, 0, &dmares);
if (ret) {
dev_err(&pdev->dev, "unable to get DMA resource\n");
of_node_put(np);
goto free_netdev;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
ret = PTR_ERR(lp->dma_regs);
of_node_put(np);
goto free_netdev;
}
lp->rx_irq = irq_of_parse_and_map(np, 1);

@@ -1497,9 +1497,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)
static void marvell_get_strings(struct phy_device *phydev, u8 *data)
{
int count = marvell_get_sset_count(phydev);
int i;
for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
for (i = 0; i < count; i++) {
memcpy(data + i * ETH_GSTRING_LEN,
marvell_hw_stats[i].string, ETH_GSTRING_LEN);
}
@@ -1536,9 +1537,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
static void marvell_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
int count = marvell_get_sset_count(phydev);
int i;
for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
for (i = 0; i < count; i++)
data[i] = marvell_get_stat(phydev, i);
}
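
Both marvell loops now run to marvell_get_sset_count() instead of ARRAY_SIZE(marvell_hw_stats). ethtool sizes its string and value buffers from get_sset_count(), so walking the full array overruns those buffers whenever the PHY supports only a subset. A standalone illustration with invented counts:

    #include <stdio.h>
    #include <string.h>

    #define ETH_GSTRING_LEN 32

    static const char *all_stats[] = { "phy_receive_errors", "phy_idle_errors" };

    int main(void)
    {
        int count = 1;                   /* get_sset_count(): 1 of 2 supported */
        char buf[1 * ETH_GSTRING_LEN];   /* ethtool sizes this from count */
        int i;

        for (i = 0; i < count; i++)      /* bounded by count, not ARRAY_SIZE */
            strncpy(buf + i * ETH_GSTRING_LEN, all_stats[i], ETH_GSTRING_LEN);
        printf("%s\n", buf);
        return 0;
    }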

@@ -153,7 +153,7 @@ out_fail:
void
slhc_free(struct slcompress *comp)
{
if ( comp == NULLSLCOMPR )
if ( IS_ERR_OR_NULL(comp) )
return;
if ( comp->tstate != NULLSLSTATE )

@@ -1157,6 +1157,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return -EINVAL;
}
if (netdev_has_upper_dev(dev, port_dev)) {
netdev_err(dev, "Device %s is already an upper device of the team interface\n",
portname);
return -EBUSY;
}
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",

@@ -148,6 +148,7 @@ struct ipheth_device {
u8 bulk_in;
u8 bulk_out;
struct delayed_work carrier_work;
bool confirmed_pairing;
};
static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags);
@@ -259,7 +260,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += len;
dev->confirmed_pairing = true;
netif_rx(skb);
ipheth_rx_submit(dev, GFP_ATOMIC);
}
@@ -280,14 +281,24 @@ static void ipheth_sndbulk_callback(struct urb *urb)
dev_err(&dev->intf->dev, "%s: urb status: %d\n",
__func__, status);
netif_wake_queue(dev->net);
if (status == 0)
netif_wake_queue(dev->net);
else
// on URB error, trigger immediate poll
schedule_delayed_work(&dev->carrier_work, 0);
}
static int ipheth_carrier_set(struct ipheth_device *dev)
{
struct usb_device *udev = dev->udev;
struct usb_device *udev;
int retval;
if (!dev)
return 0;
if (!dev->confirmed_pairing)
return 0;
udev = dev->udev;
retval = usb_control_msg(udev,
usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
IPHETH_CMD_CARRIER_CHECK, /* request */
@@ -302,11 +313,14 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
return retval;
}
if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON)
if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
netif_carrier_on(dev->net);
else
if (dev->tx_urb->status != -EINPROGRESS)
netif_wake_queue(dev->net);
} else {
netif_carrier_off(dev->net);
netif_stop_queue(dev->net);
}
return 0;
}
@@ -386,7 +400,6 @@ static int ipheth_open(struct net_device *net)
return retval;
schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
netif_start_queue(net);
return retval;
}
@@ -489,7 +502,7 @@ static int ipheth_probe(struct usb_interface *intf,
dev->udev = udev;
dev->net = netdev;
dev->intf = intf;
dev->confirmed_pairing = false;
/* Set up endpoints */
hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
if (hintf == NULL) {
@@ -540,7 +553,9 @@ static int ipheth_probe(struct usb_interface *intf,
retval = -EIO;
goto err_register_netdev;
}
// carrier down and transmit queues stopped until packet from device
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
return 0;

@@ -746,6 +746,15 @@ bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
return __nvmet_host_allowed(subsys, hostnqn);
}
static void nvmet_fatal_error_handler(struct work_struct *work)
{
struct nvmet_ctrl *ctrl =
container_of(work, struct nvmet_ctrl, fatal_err_work);
pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
ctrl->ops->delete_ctrl(ctrl);
}
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
@@ -785,6 +794,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -887,21 +897,11 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
kref_put(&ctrl->ref, nvmet_ctrl_free);
}
static void nvmet_fatal_error_handler(struct work_struct *work)
{
struct nvmet_ctrl *ctrl =
container_of(work, struct nvmet_ctrl, fatal_err_work);
pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
ctrl->ops->delete_ctrl(ctrl);
}
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
mutex_lock(&ctrl->lock);
if (!(ctrl->csts & NVME_CSTS_CFS)) {
ctrl->csts |= NVME_CSTS_CFS;
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
schedule_work(&ctrl->fatal_err_work);
}
mutex_unlock(&ctrl->lock);
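
The nvmet hunks set up fatal_err_work once, at controller allocation; re-running INIT_WORK() from nvmet_ctrl_fatal_error() could clobber a work item that is already queued. A plain-C sketch of the resulting order, init once and then only schedule behind the CFS flag (a function pointer stands in for the workqueue API):

    #include <stdbool.h>
    #include <stdio.h>

    struct ctrl {
        bool cfs;                         /* fatal error already raised */
        void (*fatal_work)(struct ctrl *);
    };

    static void fatal_handler(struct ctrl *c)
    {
        (void)c;
        puts("delete controller");
    }

    static void ctrl_alloc(struct ctrl *c)
    {
        c->cfs = false;
        c->fatal_work = fatal_handler;    /* INIT_WORK(): once, here */
    }

    static void ctrl_fatal_error(struct ctrl *c)
    {
        if (!c->cfs) {                    /* arm only the first time */
            c->cfs = true;
            c->fatal_work(c);             /* stand-in for schedule_work() */
        }
    }

    int main(void)
    {
        struct ctrl c;

        ctrl_alloc(&c);
        ctrl_fatal_error(&c);             /* runs the handler */
        ctrl_fatal_error(&c);             /* no-op: already raised */
        return 0;
    }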

@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
@@ -421,11 +422,27 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
}
#endif /* CONFIG_DEBUG_FS */
/*
* Some systems need one or more of their pmc_plt_clks to be
* marked as critical.
*/
static const struct dmi_system_id critclk_systems[] __initconst = {
{
.ident = "MPL CEC1x",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
},
},
{ /*sentinel*/ }
};
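
At clock setup the table above is consulted through dmi_first_match(), and a hit marks the platform clocks critical. A standalone sketch of the same sentinel-terminated lookup; plain string comparison stands in for DMI matching:

    #include <stdio.h>
    #include <string.h>

    struct quirk { const char *ident; const char *board; };  /* simplified */

    static const struct quirk quirks[] = {
        { "MPL CEC1x", "CEC10 Family" },
        { 0 }                               /* sentinel */
    };

    static const struct quirk *first_match(const char *board)
    {
        const struct quirk *q;

        for (q = quirks; q->ident; q++)
            if (!strcmp(q->board, board))
                return q;
        return NULL;
    }

    int main(void)
    {
        const struct quirk *d = first_match("CEC10 Family");

        if (d)
            printf("%s critclks quirk enabled\n", d->ident);
        return 0;
    }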
static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
const struct pmc_data *pmc_data)
{
struct platform_device *clkdev;
struct pmc_clk_data *clk_data;
const struct dmi_system_id *d = dmi_first_match(critclk_systems);
clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
@@ -433,6 +450,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
clk_data->base = pmc_regmap; /* offset is added by client */
clk_data->clks = pmc_data->clks;
if (d) {
clk_data->critical = true;
pr_info("%s critclks quirk enabled\n", d->ident);
}
clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom",
PLATFORM_DEVID_NONE,

@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
da9063_data_to_tm(data, &rtc->alarm_time, rtc);
rtc->rtc_sync = false;
/*
* TODO: some models have alarms on a minute boundary but still support
* real hardware interrupts. Add this once the core supports it.
*/
if (config->rtc_data_start != RTC_SEC)
rtc->rtc_dev->uie_unsupported = 1;
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
da9063_alarm_event,

@@ -462,7 +462,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
{
unsigned int byte;
int value = 0xff; /* return 0xff for ignored values */
int value = -1; /* return -1 for ignored values */
byte = readb(rtc->regbase + reg_off);
if (byte & AR_ENB) {

@@ -3022,12 +3022,14 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
hash_init(card->ip_htable);
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l3_create_device_attributes(&gdev->dev);
if (rc)
return rc;
}
hash_init(card->ip_htable);
hash_init(card->ip_mc_htable);
card->options.layer2 = 0;
card->info.hwtrap = 0;

@@ -240,10 +240,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
list_for_each_entry(port, &adapter->port_list, list) {
if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
zfcp_fc_test_link(port);
if (!port->d_id)
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
"fcrscn1");
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
@@ -251,6 +247,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
struct zfcp_adapter *adapter = fsf_req->adapter;
struct fc_els_rscn *head;
struct fc_els_rscn_page *page;
u16 i;
@@ -264,6 +261,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
no_entries = be16_to_cpu(head->rscn_plen) /
sizeof(struct fc_els_rscn_page);
if (no_entries > 1) {
/* handle failed ports */
unsigned long flags;
struct zfcp_port *port;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list) {
if (port->d_id)
continue;
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
"fcrscn1");
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
page++;

@@ -3207,6 +3207,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
return -EINVAL;
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
qla_conn->qla_ep = ep->dd_data;

@@ -248,6 +248,7 @@ static struct {
{"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */

@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"NETAPP", "INF-01-00", "rdac", },
{"LSI", "INF-01-00", "rdac", },
{"ENGENIO", "INF-01-00", "rdac", },
{"LENOVO", "DE_Series", "rdac", },
{NULL, NULL, NULL },
};

@@ -658,13 +658,22 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
static void handle_multichannel_storage(struct hv_device *device, int max_chns)
{
struct storvsc_device *stor_device;
int num_cpus = num_online_cpus();
int num_sc;
struct storvsc_cmd_request *request;
struct vstor_packet *vstor_packet;
int ret, t;
num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
/*
* If the number of CPUs is artificially restricted, such as
* with maxcpus=1 on the kernel boot line, Hyper-V could offer
* sub-channels >= the number of CPUs. These sub-channels
* should not be created. The primary channel is already created
* and assigned to one CPU, so check against # CPUs - 1.
*/
num_sc = min((int)(num_online_cpus() - 1), max_chns);
if (!num_sc)
return;
stor_device = get_out_stor_device(device);
if (!stor_device)
return;
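
The storvsc comment carries the whole fix: the primary channel already occupies one CPU, so at most num_online_cpus() - 1 sub-channels are useful, and with maxcpus=1 none should be created at all. A quick check of that arithmetic:

    #include <stdio.h>

    static int sub_channels(int online_cpus, int max_chns)
    {
        int num_sc = online_cpus - 1;

        if (num_sc > max_chns)
            num_sc = max_chns;         /* min(cpus - 1, offered channels) */
        return num_sc;
    }

    int main(void)
    {
        printf("%d\n", sub_channels(1, 4));   /* 0: maxcpus=1, none created */
        printf("%d\n", sub_channels(8, 4));   /* 4: limited by the offer */
        printf("%d\n", sub_channels(4, 8));   /* 3: limited by CPUs - 1 */
        return 0;
    }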

@@ -47,6 +47,8 @@
#define ADT7516_MSB_AIN3 0xA
#define ADT7516_MSB_AIN4 0xB
#define ADT7316_DA_DATA_BASE 0x10
#define ADT7316_DA_10_BIT_LSB_SHIFT 6
#define ADT7316_DA_12_BIT_LSB_SHIFT 4
#define ADT7316_DA_MSB_DATA_REGS 4
#define ADT7316_LSB_DAC_A 0x10
#define ADT7316_MSB_DAC_A 0x11
@@ -1086,7 +1088,7 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK);
if (data & 0x1)
ldac_config |= ADT7516_DAC_AB_IN_VREF;
else if (data & 0x2)
if (data & 0x2)
ldac_config |= ADT7516_DAC_CD_IN_VREF;
} else {
ret = kstrtou8(buf, 16, &data);
@@ -1408,7 +1410,7 @@ static IIO_DEVICE_ATTR(ex_analog_temp_offset, 0644,
static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
int channel, char *buf)
{
u16 data;
u16 data = 0;
u8 msb, lsb, offset;
int ret;
@@ -1433,7 +1435,11 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
if (ret)
return -EIO;
data = (msb << offset) + (lsb & ((1 << offset) - 1));
if (chip->dac_bits == 12)
data = lsb >> ADT7316_DA_12_BIT_LSB_SHIFT;
else if (chip->dac_bits == 10)
data = lsb >> ADT7316_DA_10_BIT_LSB_SHIFT;
data |= msb << offset;
return sprintf(buf, "%d\n", data);
}
@@ -1441,7 +1447,7 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
int channel, const char *buf, size_t len)
{
u8 msb, lsb, offset;
u8 msb, lsb, lsb_reg, offset;
u16 data;
int ret;
@@ -1459,9 +1465,13 @@
return -EINVAL;
if (chip->dac_bits > 8) {
lsb = data & (1 << offset);
lsb = data & ((1 << offset) - 1);
if (chip->dac_bits == 12)
lsb_reg = lsb << ADT7316_DA_12_BIT_LSB_SHIFT;
else
lsb_reg = lsb << ADT7316_DA_10_BIT_LSB_SHIFT;
ret = chip->bus.write(chip->bus.client,
ADT7316_DA_DATA_BASE + channel * 2, lsb);
ADT7316_DA_DATA_BASE + channel * 2, lsb_reg);
if (ret)
return -EIO;
}
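
The adt7316 DAC fix is pure bit placement: for a 12-bit DAC the low four data bits sit in the top nibble of the LSB register (shift 4; shift 6 for 10-bit parts), and the old mask and shift never put them there. A worked round-trip using the shift constants added above:

    #include <stdio.h>

    #define DA_12_BIT_LSB_SHIFT 4
    #define OFFSET_12BIT        4   /* data bits carried by the LSB register */

    int main(void)
    {
        unsigned int data    = 0xABC;                             /* 12-bit value */
        unsigned int lsb     = data & ((1u << OFFSET_12BIT) - 1); /* 0xC  */
        unsigned int lsb_reg = lsb << DA_12_BIT_LSB_SHIFT;        /* 0xC0 */
        unsigned int msb_reg = data >> OFFSET_12BIT;              /* 0xAB */
        unsigned int readback =
            (lsb_reg >> DA_12_BIT_LSB_SHIFT) | (msb_reg << OFFSET_12BIT);

        printf("%#x -> lsb_reg %#x, msb_reg %#x -> %#x\n",
               data, lsb_reg, msb_reg, readback);
        return 0;
    }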

@@ -188,7 +188,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
rtw_alloc_hwxmits(padapter);
res = rtw_alloc_hwxmits(padapter);
if (res == _FAIL)
goto exit;
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
for (i = 0; i < 4; i++)
@@ -1573,7 +1575,7 @@ exit:
return res;
}
void rtw_alloc_hwxmits(struct adapter *padapter)
s32 rtw_alloc_hwxmits(struct adapter *padapter)
{
struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1582,6 +1584,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
sizeof(struct hw_xmit), GFP_KERNEL);
if (!pxmitpriv->hwxmits)
return _FAIL;
hwxmits = pxmitpriv->hwxmits;
@@ -1589,6 +1593,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
return _SUCCESS;
}
void rtw_free_hwxmits(struct adapter *padapter)

@@ -342,7 +342,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
void rtw_alloc_hwxmits(struct adapter *padapter);
s32 rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);

@@ -159,17 +159,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
u32 val;
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
r8712_free_cmd_obj(pcmd);
return H2C_SUCCESS;
}

Some files were not shown because too many files have changed in this diff.