This is the 4.14.74 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlu1WDwACgkQONu9yGCS
 aT5MLA//e/eO6gdZG8eNNEhi3ywYzSYXskdh8HPFe04KZnE5jYsOpc9PLxqu/s3t
 KMWWpKRPnZ/rlPfs5tifJ0cOw/S73ckHkmPbM9ch/OwVKmQj71SRYnRF7WEe7Hmv
 yoTi92RkpaHB4SzwIOxUsP0TpQAJHfSmWWiRxQAzYAE98pQR/zCxo+/bD7y9mAA1
 uohcdQ5bwOeVeFL4A/1EkkDKSj/SUhoSggVCjxXDB2qIzI2cl6F/v9lPbT2jChFb
 0XdZ6/KEjI+v0wE1Olw489n4OlKwbCqIvUqhKCnLiLV7n8IyjLUWmLR22mmoXOT3
 NZkQn+NRtiedDudyKZBewRhKO7HzyXT+K1ZJcHH+c460nLp3T18LnkQ23YGL6BiU
 5NA+P7ecJJwmao/cjW/Zv0yWVNxpwzwTC8tYgfvJkf8t29DLqe56ApIUP3sHXzED
 lbtPD6crJl5tbSqrCNQmbASJEClgiyHmbwwBDxVHG4SlV7zwcgdXF9BQ+4ddAWLA
 665xFP2CmaIaHV2FKm+o+JDHKOqB/qJXfPGerMnHUzbD0wYWB66/0gQUVt0eChwA
 W7KVAMC8v9E5Vu4mJ0zRqp62lCgY3qH4IYkEo8cSEpm8qCpI5vwSduHlRDz94zQQ
 kw87KkHYqL3rsY+rRWzP0LCY3fT1cuqflYvzAgbTPkPr9qdqJOM=
 =MqqS
 -----END PGP SIGNATURE-----

Merge 4.14.74 into android-4.14-p

Changes in 4.14.74
	crypto: skcipher - Fix -Wstringop-truncation warnings
	iio: adc: ina2xx: avoid kthread_stop() with stale task_struct
	tsl2550: fix lux1_input error in low light
	vmci: type promotion bug in qp_host_get_user_memory()
	x86/numa_emulation: Fix emulated-to-physical node mapping
	staging: rts5208: fix missing error check on call to rtsx_write_register
	power: supply: axp288_charger: Fix initial constant_charge_current value
	misc: sram: enable clock before registering regions
	serial: sh-sci: Stop RX FIFO timer during port shutdown
	uwb: hwa-rc: fix memory leak at probe
	power: vexpress: fix corruption in notifier registration
	iommu/amd: make sure TLB to be flushed before IOVA freed
	Bluetooth: Add a new Realtek 8723DE ID 0bda:b009
	USB: serial: kobil_sct: fix modem-status error handling
	6lowpan: iphc: reset mac_header after decompress to fix panic
	iommu/msm: Don't call iommu_device_{,un}link from atomic context
	s390/mm: correct allocate_pgste proc_handler callback
	power: remove possible deadlock when unregistering power_supply
	md-cluster: clear another node's suspend_area after the copy is finished
	RDMA/bnxt_re: Fix a couple off by one bugs
	RDMA/i40w: Hold read semaphore while looking after VMA
	IB/core: type promotion bug in rdma_rw_init_one_mr()
	media: exynos4-is: Prevent NULL pointer dereference in __isp_video_try_fmt()
	IB/mlx4: Test port number before querying type.
	powerpc/kdump: Handle crashkernel memory reservation failure
	media: fsl-viu: fix error handling in viu_of_probe()
	media: staging/imx: fill vb2_v4l2_buffer field entry
	x86/tsc: Add missing header to tsc_msr.c
	ARM: hwmod: RTC: Don't assume lock/unlock will be called with irq enabled
	x86/entry/64: Add two more instruction suffixes
	ARM: dts: ls1021a: Add missing cooling device properties for CPUs
	scsi: target/iscsi: Make iscsit_ta_authentication() respect the output buffer size
	scsi: klist: Make it safe to use klists in atomic context
	scsi: ibmvscsi: Improve strings handling
	scsi: target: Avoid that EXTENDED COPY commands trigger lock inversion
	usb: wusbcore: security: cast sizeof to int for comparison
	ath10k: sdio: use same endpoint id for all packets in a bundle
	ath10k: sdio: set skb len for all rx packets
	powerpc/powernv/ioda2: Reduce upper limit for DMA window size
	s390/sysinfo: add missing #ifdef CONFIG_PROC_FS
	alarmtimer: Prevent overflow for relative nanosleep
	s390/dasd: correct numa_node in dasd_alloc_queue
	s390/scm_blk: correct numa_node in scm_blk_dev_setup
	s390/extmem: fix gcc 8 stringop-overflow warning
	mtd: rawnand: atmel: add module param to avoid using dma
	iio: accel: adxl345: convert address field usage in iio_chan_spec
	posix-timers: Make forward callback return s64
	posix-timers: Sanitize overrun handling
	ALSA: snd-aoa: add of_node_put() in error path
	media: s3c-camif: ignore -ENOIOCTLCMD from v4l2_subdev_call for s_power
	media: soc_camera: ov772x: correct setting of banding filter
	media: omap3isp: zero-initialize the isp cam_xclk{a,b} initial data
	staging: android: ashmem: Fix mmap size validation
	drivers/tty: add error handling for pcmcia_loop_config
	media: tm6000: add error handling for dvb_register_adapter
	ALSA: hda: Add AZX_DCAPS_PM_RUNTIME for AMD Raven Ridge
	net: phy: xgmiitorgmii: Check read_status results
	ath10k: protect ath10k_htt_rx_ring_free with rx_ring.lock
	net: phy: xgmiitorgmii: Check phy_driver ready before accessing
	drm/sun4i: Fix releasing node when enumerating endpoints
	ath10k: transmit queued frames after processing rx packets
	rndis_wlan: potential buffer overflow in rndis_wlan_auth_indication()
	brcmsmac: fix wrap around in conversion from constant to s16
	wlcore: Add missing PM call for wlcore_cmd_wait_for_event_or_timeout()
	ARM: mvebu: declare asm symbols as character arrays in pmsu.c
	arm: dts: mediatek: Add missing cooling device properties for CPUs
	HID: hid-ntrig: add error handling for sysfs_create_group
	MIPS: boot: fix build rule of vmlinux.its.S
	perf/x86/intel/lbr: Fix incomplete LBR call stack
	scsi: bnx2i: add error handling for ioremap_nocache
	iomap: complete partial direct I/O writes synchronously
	scsi: megaraid_sas: Update controller info during resume
	EDAC, i7core: Fix memleaks and use-after-free on probe and remove
	ASoC: dapm: Fix potential DAI widget pointer deref when linking DAIs
	module: exclude SHN_UNDEF symbols from kallsyms api
	gpio: Fix wrong rounding in gpio-menz127
	nfsd: fix corrupted reply to badly ordered compound
	EDAC: Fix memleak in module init error path
	fs/lock: skip lock owner pid translation in case we are in init_pid_ns
	Input: xen-kbdfront - fix multi-touch XenStore node's locations
	iio: 104-quad-8: Fix off-by-one error in register selection
	ARM: dts: dra7: fix DCAN node addresses
	floppy: Do not copy a kernel pointer to user memory in FDGETPRM ioctl
	x86/mm: Expand static page table for fixmap space
	tty: serial: lpuart: avoid leaking struct tty_struct
	serial: cpm_uart: return immediately from console poll
	intel_th: Fix device removal logic
	spi: tegra20-slink: explicitly enable/disable clock
	spi: sh-msiof: Fix invalid SPI use during system suspend
	spi: sh-msiof: Fix handling of write value for SISTR register
	spi: rspi: Fix invalid SPI use during system suspend
	spi: rspi: Fix interrupted DMA transfers
	regulator: fix crash caused by null driver data
	USB: fix error handling in usb_driver_claim_interface()
	USB: handle NULL config in usb_find_alt_setting()
	usb: musb: dsps: do not disable CPPI41 irq in driver teardown
	slub: make ->cpu_partial unsigned int
	media: uvcvideo: Support realtek's UVC 1.5 device
	USB: usbdevfs: sanitize flags more
	USB: usbdevfs: restore warning for nonsensical flags
	Revert "usb: cdc-wdm: Fix a sleep-in-atomic-context bug in service_outstanding_interrupt()"
	USB: remove LPM management from usb_driver_claim_interface()
	Input: elantech - enable middle button of touchpad on ThinkPad P72
	IB/srp: Avoid that sg_reset -d ${srp_device} triggers an infinite loop
	IB/hfi1: Fix SL array bounds check
	IB/hfi1: Invalid user input can result in crash
	IB/hfi1: Fix context recovery when PBC has an UnsupportedVL
	RDMA/uverbs: Atomically flush and mark closed the comp event queue
	ovl: hash non-dir by lower inode for fsnotify
	drm/i915: Remove vma from object on destroy, not close
	serial: imx: restore handshaking irq for imx1
	arm64: KVM: Tighten guest core register access from userspace
	qed: Wait for ready indication before rereading the shmem
	qed: Wait for MCP halt and resume commands to take place
	qed: Prevent a possible deadlock during driver load and unload
	qed: Avoid sending mailbox commands when MFW is not responsive
	thermal: of-thermal: disable passive polling when thermal zone is disabled
	isofs: reject hardware sector size > 2048 bytes
	tls: possible hang when do_tcp_sendpages hits sndbuf is full case
	bpf: sockmap: write_space events need to be passed to TCP handler
	net: hns: fix length and page_offset overflow when CONFIG_ARM64_64K_PAGES
	net: hns: fix skb->truesize underestimation
	e1000: check on netif_running() before calling e1000_up()
	e1000: ensure to free old tx/rx rings in set_ringparam()
	crypto: cavium/nitrox - fix for command corruption in queue full case with backlog submissions.
	hwmon: (ina2xx) fix sysfs shunt resistor read access
	hwmon: (adt7475) Make adt7475_read_word() return errors
	Revert "ARM: dts: imx7d: Invert legacy PCI irq mapping"
	drm/amdgpu: Enable/disable gfx PG feature in rlc safe mode
	drm/amdgpu: Update power state at the end of smu hw_init.
	ata: ftide010: Add a quirk for SQ201
	nvme-fcloop: Fix dropped LS's to removed target port
	ARM: dts: omap4-droid4: Fix emmc errors seen on some devices
	arm/arm64: smccc-1.1: Make return values unsigned long
	arm/arm64: smccc-1.1: Handle function result as parameters
	i2c: i801: Allow ACPI AML access I/O ports not reserved for SMBus
	x86/pti: Fix section mismatch warning/error
	arm64: KVM: Sanitize PSTATE.M when being set from userspace
	media: v4l: event: Prevent freeing event subscriptions while accessed
	Linux 4.14.74

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 91ff1d1bc3

 Documentation/hwmon/ina2xx | 2
 Makefile | 2
 arch/arm/boot/dts/dra7.dtsi | 4
 arch/arm/boot/dts/imx7d.dtsi | 12
 arch/arm/boot/dts/ls1021a.dtsi | 1
 arch/arm/boot/dts/mt7623.dtsi | 3
 arch/arm/boot/dts/omap4-droid4-xt894.dts | 2
 arch/arm/mach-mvebu/pmsu.c | 6
 arch/arm/mach-omap2/omap_hwmod_reset.c | 12
 arch/arm64/include/asm/kvm_emulate.h | 5
 arch/arm64/kvm/guest.c | 55
 arch/mips/boot/Makefile | 6
 arch/powerpc/kernel/machine_kexec.c | 7
 arch/powerpc/platforms/powernv/pci-ioda.c | 2
 arch/s390/kernel/sysinfo.c | 4
 arch/s390/mm/extmem.c | 4
 arch/s390/mm/pgalloc.c | 2
 arch/x86/entry/entry_64.S | 4
 arch/x86/events/intel/lbr.c | 32
 arch/x86/events/perf_event.h | 1
 arch/x86/include/asm/fixmap.h | 10
 arch/x86/include/asm/pgtable_64.h | 3
 arch/x86/kernel/head64.c | 4
 arch/x86/kernel/head_64.S | 16
 arch/x86/kernel/tsc_msr.c | 1
 arch/x86/mm/numa_emulation.c | 2
 arch/x86/mm/pgtable.c | 9
 arch/x86/mm/pti.c | 2
 arch/x86/xen/mmu_pv.c | 8
 crypto/ablkcipher.c | 2
 crypto/blkcipher.c | 1
 drivers/ata/pata_ftide010.c | 27
 drivers/block/floppy.c | 3
 drivers/bluetooth/btusb.c | 1
 drivers/crypto/cavium/nitrox/nitrox_dev.h | 3
 drivers/crypto/cavium/nitrox/nitrox_lib.c | 1
 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 57
 drivers/edac/edac_mc_sysfs.c | 6
 drivers/edac/i7core_edac.c | 22
 drivers/gpio/gpio-menz127.c | 4
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 11
 drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 4
 drivers/gpu/drm/amd/amdgpu/si_dpm.c | 3
 drivers/gpu/drm/i915/i915_gem.c | 3
 drivers/gpu/drm/i915/i915_vma.c | 4
 drivers/gpu/drm/sun4i/sun4i_drv.c | 3
 drivers/hid/hid-ntrig.c | 2
 drivers/hwmon/adt7475.c | 14
 drivers/hwmon/ina2xx.c | 13
 drivers/hwtracing/intel_th/core.c | 3
 drivers/i2c/busses/i2c-i801.c | 9
 drivers/iio/accel/adxl345_core.c | 21
 drivers/iio/adc/ina2xx-adc.c | 17
 drivers/iio/counter/104-quad-8.c | 2
 drivers/infiniband/core/rw.c | 2
 drivers/infiniband/core/uverbs_main.c | 1
 drivers/infiniband/hw/bnxt_re/qplib_sp.c | 4
 drivers/infiniband/hw/hfi1/pio.c | 9
 drivers/infiniband/hw/hfi1/user_sdma.c | 2
 drivers/infiniband/hw/hfi1/verbs.c | 8
 drivers/infiniband/hw/i40iw/i40iw_verbs.c | 2
 drivers/infiniband/hw/mlx4/qp.c | 2
 drivers/infiniband/ulp/srp/ib_srp.c | 6
 drivers/input/misc/xen-kbdfront.c | 8
 drivers/input/mouse/elantech.c | 2
 drivers/iommu/amd_iommu.c | 2
 drivers/iommu/msm_iommu.c | 16
 drivers/md/md-cluster.c | 19
 drivers/media/i2c/soc_camera/ov772x.c | 2
 drivers/media/platform/exynos4-is/fimc-isp-video.c | 11
 drivers/media/platform/fsl-viu.c | 38
 drivers/media/platform/omap3isp/isp.c | 2
 drivers/media/platform/s3c-camif/camif-capture.c | 2
 drivers/media/usb/tm6000/tm6000-dvb.c | 5
 drivers/media/usb/uvc/uvc_video.c | 24
 drivers/media/v4l2-core/v4l2-event.c | 38
 drivers/media/v4l2-core/v4l2-fh.c | 2
 drivers/misc/sram.c | 13
 drivers/misc/tsl2550.c | 2
 drivers/misc/vmw_vmci/vmci_queue_pair.c | 4
 drivers/mtd/nand/atmel/nand-controller.c | 7
 drivers/net/ethernet/hisilicon/hns/hnae.h | 6
 drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2
 drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 7
 drivers/net/ethernet/qlogic/qed/qed_mcp.c | 187
 drivers/net/ethernet/qlogic/qed/qed_mcp.h | 27
 drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 2
 drivers/net/phy/xilinx_gmii2rgmii.c | 10
 drivers/net/wireless/ath/ath10k/htt_rx.c | 5
 drivers/net/wireless/ath/ath10k/mac.c | 1
 drivers/net/wireless/ath/ath10k/sdio.c | 9
 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c | 2
 drivers/net/wireless/rndis_wlan.c | 2
 drivers/net/wireless/ti/wlcore/cmd.c | 6
 drivers/nvme/target/fcloop.c | 3
 drivers/power/reset/vexpress-poweroff.c | 12
 drivers/power/supply/axp288_charger.c | 2
 drivers/power/supply/power_supply_core.c | 11
 drivers/regulator/core.c | 2
 drivers/s390/block/dasd.c | 1
 (some files were not shown because too many files have changed in this diff)

@ -32,7 +32,7 @@ Supported chips:
Datasheet: Publicly available at the Texas Instruments website
http://www.ti.com/
Author: Lothar Felten <l-felten@ti.com>
Author: Lothar Felten <lothar.felten@gmail.com>
Description
-----------

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 73
SUBLEVEL = 74
EXTRAVERSION =
NAME = Petit Gorille

@ -1818,7 +1818,7 @@
};
};
dcan1: can@481cc000 {
dcan1: can@4ae3c000 {
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan1";
reg = <0x4ae3c000 0x2000>;
@ -1828,7 +1828,7 @@
status = "disabled";
};
dcan2: can@481d0000 {
dcan2: can@48480000 {
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan2";
reg = <0x48480000 0x2000>;

@ -144,10 +144,14 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
/*
* Reference manual lists pci irqs incorrectly
* Real hardware ordering is same as imx6: D+MSI, C, B, A
*/
interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
<&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
<&clks IMX7D_PCIE_PHY_ROOT_CLK>;

@ -84,6 +84,7 @@
device_type = "cpu";
reg = <0xf01>;
clocks = <&clockgen 1 0>;
#cooling-cells = <2>;
};
};

@ -98,6 +98,7 @@
compatible = "arm,cortex-a7";
reg = <0x1>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
clock-frequency = <1300000000>;
};
@ -106,6 +107,7 @@
compatible = "arm,cortex-a7";
reg = <0x2>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
clock-frequency = <1300000000>;
};
@ -114,6 +116,7 @@
compatible = "arm,cortex-a7";
reg = <0x3>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
clock-frequency = <1300000000>;
};
};

@ -314,7 +314,7 @@
&mmc2 {
vmmc-supply = <&vsdio>;
bus-width = <8>;
non-removable;
ti,non-removable;
};
&mmc3 {

@ -116,8 +116,8 @@ void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
}
extern unsigned char mvebu_boot_wa_start;
extern unsigned char mvebu_boot_wa_end;
extern unsigned char mvebu_boot_wa_start[];
extern unsigned char mvebu_boot_wa_end[];
/*
* This function sets up the boot address workaround needed for SMP
@ -130,7 +130,7 @@ int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
phys_addr_t resume_addr_reg)
{
void __iomem *sram_virt_base;
u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;
u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;
mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,

@ -92,11 +92,13 @@ static void omap_rtc_wait_not_busy(struct omap_hwmod *oh)
*/
void omap_hwmod_rtc_unlock(struct omap_hwmod *oh)
{
local_irq_disable();
unsigned long flags;
local_irq_save(flags);
omap_rtc_wait_not_busy(oh);
omap_hwmod_write(OMAP_RTC_KICK0_VALUE, oh, OMAP_RTC_KICK0_REG);
omap_hwmod_write(OMAP_RTC_KICK1_VALUE, oh, OMAP_RTC_KICK1_REG);
local_irq_enable();
local_irq_restore(flags);
}
/**
@ -110,9 +112,11 @@ void omap_hwmod_rtc_unlock(struct omap_hwmod *oh)
*/
void omap_hwmod_rtc_lock(struct omap_hwmod *oh)
{
local_irq_disable();
unsigned long flags;
local_irq_save(flags);
omap_rtc_wait_not_busy(oh);
omap_hwmod_write(0x0, oh, OMAP_RTC_KICK0_REG);
omap_hwmod_write(0x0, oh, OMAP_RTC_KICK1_REG);
local_irq_enable();
local_irq_restore(flags);
}
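
The change above replaces local_irq_disable()/local_irq_enable() with the save/restore pair, which keeps whatever interrupt state the caller had instead of unconditionally re-enabling IRQs. A minimal kernel-style sketch of why that matters (the function names here are invented, not taken from the patch):

#include <linux/irqflags.h>

/* Invented helper: it must not turn IRQs back on behind its caller. */
static void kick_rtc_registers(void)
{
    unsigned long flags;

    local_irq_save(flags);      /* IRQs off, previous state kept in flags */
    /* ... write the kick registers ... */
    local_irq_restore(flags);   /* the caller's IRQ state comes back */
}

static void caller_running_with_irqs_off(void)
{
    unsigned long flags;

    local_irq_save(flags);
    kick_rtc_registers();       /* safe: IRQs stay off throughout */
    local_irq_restore(flags);
}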

@ -42,6 +42,11 @@ void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.hcr_el2 & HCR_RW);
}
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
static int validate_core_offset(const struct kvm_one_reg *reg)
{
u64 off = core_reg_offset_from_id(reg->id);
int size;
switch (off) {
case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
KVM_REG_ARM_CORE_REG(regs.regs[30]):
case KVM_REG_ARM_CORE_REG(regs.sp):
case KVM_REG_ARM_CORE_REG(regs.pc):
case KVM_REG_ARM_CORE_REG(regs.pstate):
case KVM_REG_ARM_CORE_REG(sp_el1):
case KVM_REG_ARM_CORE_REG(elr_el1):
case KVM_REG_ARM_CORE_REG(spsr[0]) ...
KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
size = sizeof(__u64);
break;
case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
size = sizeof(__uint128_t);
break;
case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
size = sizeof(__u32);
break;
default:
return -EINVAL;
}
if (KVM_REG_SIZE(reg->id) == size &&
IS_ALIGNED(off, size / sizeof(__u32)))
return 0;
return -EINVAL;
}
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/*
@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
if (validate_core_offset(reg))
return -EINVAL;
if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
return -EFAULT;
@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
if (validate_core_offset(reg))
return -EINVAL;
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
return -EINVAL;
@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
u64 mode = (*(u64 *)valp) & COMPAT_PSR_MODE_MASK;
switch (mode) {
case COMPAT_PSR_MODE_USR:
if (!system_supports_32bit_el0())
return -EINVAL;
break;
case COMPAT_PSR_MODE_FIQ:
case COMPAT_PSR_MODE_IRQ:
case COMPAT_PSR_MODE_SVC:
case COMPAT_PSR_MODE_ABT:
case COMPAT_PSR_MODE_UND:
if (!vcpu_el1_is_32bit(vcpu))
return -EINVAL;
break;
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
if (vcpu_el1_is_32bit(vcpu))
return -EINVAL;
break;
default:
err = -EINVAL;
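
The new validate_core_offset() above rejects user-supplied register IDs whose size or alignment does not match the register they name. The same shape in a standalone, runnable form (the register-file layout and all names are invented for illustration):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define REGFILE_WORDS 64    /* invented: register file size in u32 words */

/* An access is valid only if its size is a whole number of u32 words,
 * the offset is aligned to the access size, and the access does not
 * run off the end of the register file. */
static bool core_offset_ok(uint64_t off, uint64_t size)
{
    uint64_t words = size / sizeof(uint32_t);

    if (size == 0 || size % sizeof(uint32_t))
        return false;
    if (off % words)
        return false;
    return off + words <= REGFILE_WORDS;
}

int main(void)
{
    assert(core_offset_ok(0, 8));   /* u64 register at word 0 */
    assert(!core_offset_ok(1, 8));  /* misaligned u64 access */
    assert(!core_offset_ok(64, 4)); /* past the end of the file */
    return 0;
}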

@ -118,10 +118,12 @@ ifeq ($(ADDR_BITS),64)
itb_addr_cells = 2
endif
targets += vmlinux.its.S
quiet_cmd_its_cat = CAT $@
cmd_its_cat = cat $^ >$@
cmd_its_cat = cat $(filter-out $(PHONY), $^) >$@
$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS))
$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)) FORCE
$(call if_changed,its_cat)
quiet_cmd_cpp_its_S = ITS $@

@ -186,7 +186,12 @@ void __init reserve_crashkernel(void)
(unsigned long)(crashk_res.start >> 20),
(unsigned long)(memblock_phys_mem_size() >> 20));
memblock_reserve(crashk_res.start, crash_size);
if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
memblock_reserve(crashk_res.start, crash_size)) {
pr_err("Failed to reserve memory for crashkernel!\n");
crashk_res.start = crashk_res.end = 0;
return;
}
}
int overlaps_crashkernel(unsigned long start, unsigned long size)

@ -2787,7 +2787,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
level_shift = entries_shift + 3;
level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
if ((level_shift - 3) * levels + page_shift >= 60)
if ((level_shift - 3) * levels + page_shift >= 55)
return -EINVAL;
/* Allocate TCE table */

@ -59,6 +59,8 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
}
EXPORT_SYMBOL(stsi);
#ifdef CONFIG_PROC_FS
static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
{
switch (encoding) {
@ -311,6 +313,8 @@ static int __init sysinfo_create_proc(void)
}
device_initcall(sysinfo_create_proc);
#endif /* CONFIG_PROC_FS */
/*
* Service levels interface.
*/

@ -80,7 +80,7 @@ struct qin64 {
struct dcss_segment {
struct list_head list;
char dcss_name[8];
char res_name[15];
char res_name[16];
unsigned long start_addr;
unsigned long end;
atomic_t ref_count;
@ -433,7 +433,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
memcpy(&seg->res_name, seg->dcss_name, 8);
EBCASC(seg->res_name, 8);
seg->res_name[8] = '\0';
strncat(seg->res_name, " (DCSS)", 7);
strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
seg->res->name = seg->res_name;
rc = seg->vm_segtype;
if (rc == SEG_TYPE_SC ||

@ -27,7 +27,7 @@ static struct ctl_table page_table_sysctl[] = {
.data = &page_table_allocate_pgste,
.maxlen = sizeof(int),
.mode = S_IRUGO | S_IWUSR,
.proc_handler = proc_dointvec,
.proc_handler = proc_dointvec_minmax,
.extra1 = &page_table_allocate_pgste_min,
.extra2 = &page_table_allocate_pgste_max,
},
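
proc_dointvec ignores the .extra1/.extra2 bounds; only proc_dointvec_minmax enforces them, which is the entire fix above. A kernel-style sketch of a 0/1-clamped sysctl (all names invented, not from the patch):

#include <linux/sysctl.h>

static int example_knob;
static int example_knob_min;        /* 0 */
static int example_knob_max = 1;

static struct ctl_table example_table[] = {
    {
        .procname     = "example_knob",
        .data         = &example_knob,
        .maxlen       = sizeof(int),
        .mode         = 0644,
        /* the _minmax variant is what makes extra1/extra2 effective */
        .proc_handler = proc_dointvec_minmax,
        .extra1       = &example_knob_min,
        .extra2       = &example_knob_max,
    },
    { }
};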

@ -88,7 +88,7 @@ END(native_usergs_sysret64)
.endm
.macro TRACE_IRQS_IRETQ_DEBUG
bt $9, EFLAGS(%rsp) /* interrupts off? */
btl $9, EFLAGS(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON_DEBUG
1:
@ -630,7 +630,7 @@ retint_kernel:
#ifdef CONFIG_PREEMPT
/* Interrupts are off */
/* Check if we need preemption */
bt $9, EFLAGS(%rsp) /* were interrupts off? */
btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
jnz 1f

@ -346,7 +346,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
mask = x86_pmu.lbr_nr - 1;
tos = task_ctx->tos;
for (i = 0; i < tos; i++) {
for (i = 0; i < task_ctx->valid_lbrs; i++) {
lbr_idx = (tos - i) & mask;
wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
wrlbr_to (lbr_idx, task_ctx->lbr_to[i]);
@ -354,6 +354,15 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
for (; i < x86_pmu.lbr_nr; i++) {
lbr_idx = (tos - i) & mask;
wrlbr_from(lbr_idx, 0);
wrlbr_to(lbr_idx, 0);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
}
wrmsrl(x86_pmu.lbr_tos, tos);
task_ctx->lbr_stack_state = LBR_NONE;
}
@ -361,7 +370,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
unsigned lbr_idx, mask;
u64 tos;
u64 tos, from;
int i;
if (task_ctx->lbr_callstack_users == 0) {
@ -371,13 +380,17 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
mask = x86_pmu.lbr_nr - 1;
tos = intel_pmu_lbr_tos();
for (i = 0; i < tos; i++) {
for (i = 0; i < x86_pmu.lbr_nr; i++) {
lbr_idx = (tos - i) & mask;
task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
from = rdlbr_from(lbr_idx);
if (!from)
break;
task_ctx->lbr_from[i] = from;
task_ctx->lbr_to[i] = rdlbr_to(lbr_idx);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
task_ctx->valid_lbrs = i;
task_ctx->tos = tos;
task_ctx->lbr_stack_state = LBR_VALID;
}
@ -531,7 +544,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
*/
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
bool need_info = false;
bool need_info = false, call_stack = false;
unsigned long mask = x86_pmu.lbr_nr - 1;
int lbr_format = x86_pmu.intel_cap.lbr_format;
u64 tos = intel_pmu_lbr_tos();
@ -542,7 +555,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
if (cpuc->lbr_sel) {
need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
if (cpuc->lbr_sel->config & LBR_CALL_STACK)
num = tos;
call_stack = true;
}
for (i = 0; i < num; i++) {
@ -555,6 +568,13 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
from = rdlbr_from(lbr_idx);
to = rdlbr_to(lbr_idx);
/*
* Read LBR call stack entries
* until invalid entry (0s) is detected.
*/
if (call_stack && !from)
break;
if (lbr_format == LBR_FORMAT_INFO && need_info) {
u64 info;

@ -646,6 +646,7 @@ struct x86_perf_task_context {
u64 lbr_to[MAX_LBR_ENTRIES];
u64 lbr_info[MAX_LBR_ENTRIES];
int tos;
int valid_lbrs;
int lbr_callstack_users;
int lbr_stack_state;
};

@ -14,6 +14,16 @@
#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H
/*
* Exposed to assembly code for setting up initial page tables. Cannot be
* calculated in assembly code (fixmap entries are an enum), but is sanity
* checked in the actual fixmap C code to make sure that the fixmap is
* covered fully.
*/
#define FIXMAP_PMD_NUM 2
/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
#define FIXMAP_PMD_TOP 507
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>

@ -14,6 +14,7 @@
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <asm/fixmap.h>
extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
@ -22,7 +23,7 @@ extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
extern pgd_t init_top_pgt[];
#define swapper_pg_dir init_top_pgt

@ -31,6 +31,7 @@
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>
/*
* Manage page tables very early on.
@ -93,7 +94,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
pud[511] += load_delta;
pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
pmd[506] += load_delta;
for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
pmd[i] += load_delta;
/*
* Set up the identity mapping for the switchover. These

@ -24,6 +24,7 @@
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@ -438,13 +439,20 @@ NEXT_PAGE(level2_kernel_pgt)
KERNEL_IMAGE_SIZE/PMD_SIZE)
NEXT_PAGE(level2_fixmap_pgt)
.fill 506,8,0
.quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
.fill 5,8,0
.fill (512 - 4 - FIXMAP_PMD_NUM),8,0
pgtno = 0
.rept (FIXMAP_PMD_NUM)
.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
+ _PAGE_TABLE_NOENC;
pgtno = pgtno + 1
.endr
/* 6 MB reserved space + a 2MB hole */
.fill 4,8,0
NEXT_PAGE(level1_fixmap_pgt)
.rept (FIXMAP_PMD_NUM)
.fill 512,8,0
.endr
#undef PMDS

@ -12,6 +12,7 @@
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/param.h>
#include <asm/tsc.h>
#define MAX_NUM_FREQS 9

@ -61,7 +61,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
eb->nid = nid;
if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
emu_nid_to_phys[nid] = nid;
emu_nid_to_phys[nid] = pb->nid;
pb->start += size;
if (pb->start >= pb->end) {

@ -573,6 +573,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
unsigned long address = __fix_to_virt(idx);
#ifdef CONFIG_X86_64
/*
* Ensure that the static initial page tables are covering the
* fixmap completely.
*/
BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
(FIXMAP_PMD_NUM * PTRS_PER_PTE));
#endif
if (idx >= __end_of_fixed_addresses) {
BUG();
return;

@ -224,7 +224,7 @@ static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
*
* Returns a pointer to a PTE on success, or NULL on failure.
*/
static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
pmd_t *pmd;

@ -1879,7 +1879,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
/* L3_k[511][506] -> level1_fixmap_pgt */
/* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
convert_pfn_mfn(level2_fixmap_pgt);
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
@ -1924,7 +1924,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
for (i = 0; i < FIXMAP_PMD_NUM; i++) {
set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
PAGE_KERNEL_RO);
}
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,

@ -368,6 +368,7 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@ -442,6 +443,7 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
sizeof(rblkcipher.geniv));
rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;

@ -511,6 +511,7 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
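
All three report hunks above add the same one-liner because strncpy() writes no NUL terminator when the source string fills the destination, and the structure is then copied to userspace. A runnable illustration of the pitfall and the fix:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char name[8];

    /* "blkcipher" is longer than 8: strncpy() fills all 8 bytes
     * and leaves the buffer unterminated. */
    strncpy(name, "blkcipher", sizeof(name));
    name[sizeof(name) - 1] = '\0';  /* the fix: terminate explicitly */
    printf("%s\n", name);           /* now safe to treat as a string */
    return 0;
}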

@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = {
.qc_issue = ftide010_qc_issue,
};
static struct ata_port_info ftide010_port_info[] = {
{
.flags = ATA_FLAG_SLAVE_POSS,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.pio_mask = ATA_PIO4,
.port_ops = &pata_ftide010_port_ops,
},
static struct ata_port_info ftide010_port_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.pio_mask = ATA_PIO4,
.port_ops = &pata_ftide010_port_ops,
};
#if IS_ENABLED(CONFIG_SATA_GEMINI)
@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap)
}
static int pata_ftide010_gemini_init(struct ftide010 *ftide,
struct ata_port_info *pi,
bool is_ata1)
{
struct device *dev = ftide->dev;
@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
/* Flag port as SATA-capable */
if (gemini_sata_bridge_enabled(sg, is_ata1))
ftide010_port_info[0].flags |= ATA_FLAG_SATA;
pi->flags |= ATA_FLAG_SATA;
/* This device has broken DMA, only PIO works */
if (of_machine_is_compatible("itian,sq201")) {
pi->mwdma_mask = 0;
pi->udma_mask = 0;
}
/*
* We assume that a simple 40-wire cable is used in the PATA mode.
@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
}
#else
static int pata_ftide010_gemini_init(struct ftide010 *ftide,
struct ata_port_info *pi,
bool is_ata1)
{
return -ENOTSUPP;
@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct ata_port_info pi = ftide010_port_info[0];
struct ata_port_info pi = ftide010_port_info;
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ftide010 *ftide;
struct resource *res;
@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
* are ATA0. This will also set up the cable types.
*/
ret = pata_ftide010_gemini_init(ftide,
&pi,
(res->start == 0x63400000));
if (ret)
goto err_dis_clk;

@ -3462,6 +3462,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
(struct floppy_struct **)&outparam);
if (ret)
return ret;
memcpy(&inparam.g, outparam,
offsetof(struct floppy_struct, name));
outparam = &inparam.g;
break;
case FDMSGON:
UDP->flags |= FTD_MSG;

@ -375,6 +375,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8723DE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */

@ -35,6 +35,7 @@ struct nitrox_cmdq {
/* requests in backlog queues */
atomic_t backlog_count;
int write_idx;
/* command size 32B/64B */
u8 instr_size;
u8 qno;
@ -87,7 +88,7 @@ struct nitrox_bh {
struct bh_data *slc;
};
/* NITROX-5 driver state */
/* NITROX-V driver state */
#define NITROX_UCODE_LOADED 0
#define NITROX_READY 1

@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
cmdq->qsize = (qsize + PKT_IN_ALIGN);
cmdq->write_idx = 0;
spin_lock_init(&cmdq->response_lock);
spin_lock_init(&cmdq->cmdq_lock);

@ -43,6 +43,16 @@
* Invalid flag options in AES-CCM IV.
*/
static inline int incr_index(int index, int count, int max)
{
if ((index + count) >= max)
index = index + count - max;
else
index += count;
return index;
}
/**
* dma_free_sglist - unmap and free the sg lists.
* @ndev: N5 device
@ -427,30 +437,29 @@ static void post_se_instr(struct nitrox_softreq *sr,
struct nitrox_cmdq *cmdq)
{
struct nitrox_device *ndev = sr->ndev;
union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
u64 offset;
int idx;
u8 *ent;
spin_lock_bh(&cmdq->cmdq_lock);
/* get the next write offset */
offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
idx = cmdq->write_idx;
/* copy the instruction */
ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
ent = cmdq->head + (idx * cmdq->instr_size);
memcpy(ent, &sr->instr, cmdq->instr_size);
/* flush the command queue updates */
dma_wmb();
sr->tstamp = jiffies;
atomic_set(&sr->status, REQ_POSTED);
response_list_add(sr, cmdq);
sr->tstamp = jiffies;
/* flush the command queue updates */
dma_wmb();
/* Ring doorbell with count 1 */
writeq(1, cmdq->dbell_csr_addr);
/* orders the doorbell rings */
mmiowb();
cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
spin_unlock_bh(&cmdq->cmdq_lock);
}
@ -460,6 +469,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
struct nitrox_softreq *sr, *tmp;
int ret = 0;
if (!atomic_read(&cmdq->backlog_count))
return 0;
spin_lock_bh(&cmdq->backlog_lock);
list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@ -467,7 +479,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
/* submit until space available */
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
ret = -EBUSY;
ret = -ENOSPC;
break;
}
/* delete from backlog list */
@ -492,23 +504,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
struct nitrox_cmdq *cmdq = sr->cmdq;
struct nitrox_device *ndev = sr->ndev;
int ret = -EBUSY;
/* try to post backlog requests */
post_backlog_cmds(cmdq);
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EAGAIN;
return -ENOSPC;
/* add to backlog list */
backlog_list_add(sr, cmdq);
} else {
ret = post_backlog_cmds(cmdq);
if (ret) {
backlog_list_add(sr, cmdq);
return ret;
}
post_se_instr(sr, cmdq);
ret = -EINPROGRESS;
return -EBUSY;
}
return ret;
post_se_instr(sr, cmdq);
return -EINPROGRESS;
}
/**
@ -625,11 +634,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
*/
sr->instr.fdata[0] = *((u64 *)&req->gph);
sr->instr.fdata[1] = 0;
/* flush the soft_req changes before posting the cmd */
wmb();
ret = nitrox_enqueue_request(sr);
if (ret == -EAGAIN)
if (ret == -ENOSPC)
goto send_fail;
return ret;
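
Instead of reading the doorbell CSR back to find the next free slot, the fixed driver keeps a software write index and wraps it with incr_index(). The wrap arithmetic, extracted into a runnable form:

#include <assert.h>

/* Same arithmetic as the driver's incr_index(): advance a ring index
 * by count, wrapping at max without a modulo operation. */
static int incr_index(int index, int count, int max)
{
    if (index + count >= max)
        return index + count - max;
    return index + count;
}

int main(void)
{
    int idx = 0;
    const int qlen = 8;

    for (int i = 0; i < 20; i++)
        idx = incr_index(idx, 1, qlen);
    assert(idx == 20 % qlen);   /* the index wrapped twice */
    return 0;
}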

@ -1097,14 +1097,14 @@ int __init edac_mc_sysfs_init(void)
err = device_add(mci_pdev);
if (err < 0)
goto out_dev_free;
goto out_put_device;
edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
return 0;
out_dev_free:
kfree(mci_pdev);
out_put_device:
put_device(mci_pdev);
out:
return err;
}

@ -1177,15 +1177,14 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
rc = device_add(pvt->addrmatch_dev);
if (rc < 0)
return rc;
goto err_put_addrmatch;
if (!pvt->is_registered) {
pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
GFP_KERNEL);
if (!pvt->chancounts_dev) {
put_device(pvt->addrmatch_dev);
device_del(pvt->addrmatch_dev);
return -ENOMEM;
rc = -ENOMEM;
goto err_del_addrmatch;
}
pvt->chancounts_dev->type = &all_channel_counts_type;
@ -1199,9 +1198,18 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
rc = device_add(pvt->chancounts_dev);
if (rc < 0)
return rc;
goto err_put_chancounts;
}
return 0;
err_put_chancounts:
put_device(pvt->chancounts_dev);
err_del_addrmatch:
device_del(pvt->addrmatch_dev);
err_put_addrmatch:
put_device(pvt->addrmatch_dev);
return rc;
}
static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
@ -1211,11 +1219,11 @@ static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
edac_dbg(1, "\n");
if (!pvt->is_registered) {
put_device(pvt->chancounts_dev);
device_del(pvt->chancounts_dev);
put_device(pvt->chancounts_dev);
}
put_device(pvt->addrmatch_dev);
device_del(pvt->addrmatch_dev);
put_device(pvt->addrmatch_dev);
}
/****************************************************************************
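
The reordered error paths above follow the driver-core rule that a device which has been initialized may only be freed via put_device(), and one that was successfully added must be removed with device_del() before the final put. A sketch of that pattern (everything here is invented, not i7core code):

#include <linux/device.h>
#include <linux/slab.h>

static void child_release(struct device *dev)
{
    kfree(dev);     /* the only place the memory is actually freed */
}

static int add_child(struct device *parent, struct device **out)
{
    struct device *child;
    int err;

    child = kzalloc(sizeof(*child), GFP_KERNEL);
    if (!child)
        return -ENOMEM;

    device_initialize(child);
    child->parent = parent;
    child->release = child_release;

    err = device_add(child);
    if (err) {
        put_device(child);  /* never kfree() once initialized */
        return err;
    }
    *out = child;
    return 0;
}

static void remove_child(struct device *child)
{
    device_del(child);  /* undo device_add() */
    put_device(child);  /* drop the last reference */
}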

@ -56,9 +56,9 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
rnd = fls(debounce) - 1;
if (rnd && (debounce & BIT(rnd - 1)))
debounce = round_up(debounce, MEN_Z127_DB_MIN_US);
debounce = roundup(debounce, MEN_Z127_DB_MIN_US);
else
debounce = round_down(debounce, MEN_Z127_DB_MIN_US);
debounce = rounddown(debounce, MEN_Z127_DB_MIN_US);
if (debounce > MEN_Z127_DB_MAX_US)
debounce = MEN_Z127_DB_MAX_US;

@ -5479,6 +5479,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
adev->gfx.rlc.funcs->enter_safe_mode(adev);
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
@ -5527,7 +5532,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
default:
break;
}
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
}

@ -1352,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
return ret;
}
kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
if (adev->irq.installed &&
amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
@ -3054,7 +3052,7 @@ static int kv_dpm_hw_init(void *handle)
else
adev->pm.dpm_enabled = true;
mutex_unlock(&adev->pm.mutex);
amdgpu_pm_compute_clocks(adev);
return ret;
}

@ -6884,7 +6884,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
si_thermal_start_thermal_controller(adev);
ni_update_current_ps(adev, boot_ps);
return 0;
}
@ -7758,7 +7757,7 @@ static int si_dpm_hw_init(void *handle)
else
adev->pm.dpm_enabled = true;
mutex_unlock(&adev->pm.mutex);
amdgpu_pm_compute_clocks(adev);
return ret;
}

@ -3608,7 +3608,8 @@ restart:
return -EBUSY;
}
if (i915_gem_valid_gtt_space(vma, cache_level))
if (!i915_vma_is_closed(vma) &&
i915_gem_valid_gtt_space(vma, cache_level))
continue;
ret = i915_vma_unbind(vma);

@ -430,6 +430,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
u64 start, end;
int ret;
GEM_BUG_ON(i915_vma_is_closed(vma));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@ -590,7 +591,9 @@ static void i915_vma_destroy(struct i915_vma *vma)
GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
list_del(&vma->obj_link);
list_del(&vma->vm_link);
if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
@ -602,7 +605,6 @@ void i915_vma_close(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_closed(vma));
vma->flags |= I915_VMA_CLOSED;
list_del(&vma->obj_link);
rb_erase(&vma->obj_node, &vma->obj->vma_tree);
if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))

@ -241,7 +241,6 @@ static int sun4i_drv_add_endpoints(struct device *dev,
remote = of_graph_get_remote_port_parent(ep);
if (!remote) {
DRM_DEBUG_DRIVER("Error retrieving the output node\n");
of_node_put(remote);
continue;
}
@ -255,11 +254,13 @@ static int sun4i_drv_add_endpoints(struct device *dev,
if (of_graph_parse_endpoint(ep, &endpoint)) {
DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
of_node_put(remote);
continue;
}
if (!endpoint.id) {
DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
of_node_put(remote);
continue;
}
}

@ -955,6 +955,8 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = sysfs_create_group(&hdev->dev.kobj,
&ntrig_attribute_group);
if (ret)
hid_err(hdev, "cannot create sysfs group\n");
return 0;
err_free:

@ -303,14 +303,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
return clamp_val(reg, 0, 1023) & (0xff << 2);
}
static u16 adt7475_read_word(struct i2c_client *client, int reg)
static int adt7475_read_word(struct i2c_client *client, int reg)
{
u16 val;
int val1, val2;
val = i2c_smbus_read_byte_data(client, reg);
val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
val1 = i2c_smbus_read_byte_data(client, reg);
if (val1 < 0)
return val1;
val2 = i2c_smbus_read_byte_data(client, reg + 1);
if (val2 < 0)
return val2;
return val;
return val1 | (val2 << 8);
}
static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)

@ -17,7 +17,7 @@
* Bi-directional Current/Power Monitor with I2C Interface
* Datasheet: http://www.ti.com/product/ina230
*
* Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
* Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
* Thanks to Jan Volkering
*
* This program is free software; you can redistribute it and/or modify
@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
return 0;
}
static ssize_t ina2xx_show_shunt(struct device *dev,
struct device_attribute *da,
char *buf)
{
struct ina2xx_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
}
static ssize_t ina2xx_store_shunt(struct device *dev,
struct device_attribute *da,
const char *buf, size_t count)
@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
ina2xx_show_value, ina2xx_store_shunt,
ina2xx_show_shunt, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */

@ -147,7 +147,8 @@ static int intel_th_remove(struct device *dev)
th->thdev[i] = NULL;
}
th->num_thdevs = lowest;
if (lowest >= 0)
th->num_thdevs = lowest;
}
if (thdrv->attr_group)

@ -1416,6 +1416,13 @@ static void i801_add_tco(struct i801_priv *priv)
}
#ifdef CONFIG_ACPI
static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
acpi_physical_address address)
{
return address >= priv->smba &&
address <= pci_resource_end(priv->pci_dev, SMBBAR);
}
static acpi_status
i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
u64 *value, void *handler_context, void *region_context)
@ -1431,7 +1438,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
*/
mutex_lock(&priv->acpi_lock);
if (!priv->acpi_reserved) {
if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
priv->acpi_reserved = true;
dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");

@ -21,6 +21,8 @@
#define ADXL345_REG_DATAX0 0x32
#define ADXL345_REG_DATAY0 0x34
#define ADXL345_REG_DATAZ0 0x36
#define ADXL345_REG_DATA_AXIS(index) \
(ADXL345_REG_DATAX0 + (index) * sizeof(__le16))
#define ADXL345_POWER_CTL_MEASURE BIT(3)
#define ADXL345_POWER_CTL_STANDBY 0x00
@ -47,19 +49,19 @@ struct adxl345_data {
u8 data_range;
};
#define ADXL345_CHANNEL(reg, axis) { \
#define ADXL345_CHANNEL(index, axis) { \
.type = IIO_ACCEL, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
.address = reg, \
.address = index, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
}
static const struct iio_chan_spec adxl345_channels[] = {
ADXL345_CHANNEL(ADXL345_REG_DATAX0, X),
ADXL345_CHANNEL(ADXL345_REG_DATAY0, Y),
ADXL345_CHANNEL(ADXL345_REG_DATAZ0, Z),
ADXL345_CHANNEL(0, X),
ADXL345_CHANNEL(1, Y),
ADXL345_CHANNEL(2, Z),
};
static int adxl345_read_raw(struct iio_dev *indio_dev,
@ -67,7 +69,7 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct adxl345_data *data = iio_priv(indio_dev);
__le16 regval;
__le16 accel;
int ret;
switch (mask) {
@ -77,12 +79,13 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
* ADXL345_REG_DATA(X0/Y0/Z0) contain the least significant byte
* and ADXL345_REG_DATA(X0/Y0/Z0) + 1 the most significant byte
*/
ret = regmap_bulk_read(data->regmap, chan->address, &regval,
sizeof(regval));
ret = regmap_bulk_read(data->regmap,
ADXL345_REG_DATA_AXIS(chan->address),
&accel, sizeof(accel));
if (ret < 0)
return ret;
*val = sign_extend32(le16_to_cpu(regval), 12);
*val = sign_extend32(le16_to_cpu(accel), 12);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;

@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/sched/task.h>
#include <linux/util_macros.h>
#include <linux/platform_data/ina2xx.h>
@ -701,6 +702,7 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
{
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
unsigned int sampling_us = SAMPLING_PERIOD(chip);
struct task_struct *task;
dev_dbg(&indio_dev->dev, "Enabling buffer w/ scan_mask %02x, freq = %d, avg =%u\n",
(unsigned int)(*indio_dev->active_scan_mask),
@ -710,11 +712,17 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
dev_dbg(&indio_dev->dev, "Async readout mode: %d\n",
chip->allow_async_readout);
chip->task = kthread_run(ina2xx_capture_thread, (void *)indio_dev,
"%s:%d-%uus", indio_dev->name, indio_dev->id,
sampling_us);
task = kthread_create(ina2xx_capture_thread, (void *)indio_dev,
"%s:%d-%uus", indio_dev->name, indio_dev->id,
sampling_us);
if (IS_ERR(task))
return PTR_ERR(task);
get_task_struct(task);
wake_up_process(task);
chip->task = task;
return PTR_ERR_OR_ZERO(chip->task);
return 0;
}
static int ina2xx_buffer_disable(struct iio_dev *indio_dev)
@ -723,6 +731,7 @@ static int ina2xx_buffer_disable(struct iio_dev *indio_dev)
if (chip->task) {
kthread_stop(chip->task);
put_task_struct(chip->task);
chip->task = NULL;
}

@ -138,7 +138,7 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
outb(val >> (8 * i), base_offset);
/* Reset Borrow, Carry, Compare, and Sign flags */
outb(0x02, base_offset + 1);
outb(0x04, base_offset + 1);
/* Reset Error flag */
outb(0x06, base_offset + 1);

@ -87,7 +87,7 @@ static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
}
ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
if (ret < nents) {
if (ret < 0 || ret < nents) {
ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
return -EINVAL;
}
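
ib_map_mr_sg() returns an int while nents is unsigned, so in the old "ret < nents" test a negative return was converted to a huge unsigned value and the failure slipped through, hence the added "ret < 0". A runnable illustration of the promotion:

#include <stdio.h>

int main(void)
{
    int ret = -22;          /* e.g. -EINVAL from a helper */
    unsigned int nents = 4;

    /* The usual arithmetic conversions turn ret into a huge unsigned
     * value here, so the error case compares as "big enough". */
    if (ret < nents)
        printf("error caught\n");
    else
        printf("error missed: ret promoted to %u\n", (unsigned int)ret);

    /* The fix: test the signed error case explicitly first. */
    if (ret < 0 || (unsigned int)ret < nents)
        printf("error caught with the explicit ret < 0 check\n");
    return 0;
}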

@ -424,6 +424,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
list_del(&entry->obj_list);
kfree(entry);
}
file->ev_queue.is_closed = 1;
spin_unlock_irq(&file->ev_queue.lock);
uverbs_close_fd(filp);

@ -156,7 +156,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid)
{
if (index > sgid_tbl->max) {
if (index >= sgid_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: Index %d exceeded SGID table max (%d)",
index, sgid_tbl->max);
@ -361,7 +361,7 @@ int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
*pkey = 0xFFFF;
return 0;
}
if (index > pkey_tbl->max) {
if (index >= pkey_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: Index %d exceeded PKEY table max (%d)",
index, pkey_tbl->max);

@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
unsigned long flags;
int write = 1; /* write sendctrl back */
int flush = 0; /* re-read sendctrl to make sure it is flushed */
int i;
spin_lock_irqsave(&dd->sendctrl_lock, flags);
@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
reg |= SEND_CTRL_SEND_ENABLE_SMASK;
/* Fall through */
case PSC_DATA_VL_ENABLE:
mask = 0;
for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
if (!dd->vld[i].mtu)
mask |= BIT_ULL(i);
/* Disallow sending on VLs not enabled */
mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
SEND_CTRL_UNSUPPORTED_VL_SHIFT;
mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
SEND_CTRL_UNSUPPORTED_VL_SHIFT;
reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
break;
case PSC_GLOBAL_DISABLE:

@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
if (++req->iov_idx == req->data_iovs) {
ret = -EFAULT;
goto free_txreq;
goto free_tx;
}
iovec = &req->iovs[req->iov_idx];
WARN_ON(iovec->offset);

@ -1573,6 +1573,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
struct hfi1_pportdata *ppd;
struct hfi1_devdata *dd;
u8 sc5;
u8 sl;
if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
@ -1581,8 +1582,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
/* test the mapping for validity */
ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
ppd = ppd_from_ibp(ibp);
sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
dd = dd_from_ppd(ppd);
sl = rdma_ah_get_sl(ah_attr);
if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
return -EINVAL;
sc5 = ibp->sl_to_sc[sl];
if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
return -EINVAL;
return 0;

@ -1408,6 +1408,7 @@ static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
struct vm_area_struct *vma;
struct hstate *h;
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
if (vma && is_vm_hugetlb_page(vma)) {
h = hstate_vma(vma);
@ -1416,6 +1417,7 @@ static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
iwmr->page_msk = huge_page_mask(h);
}
}
up_read(&current->mm->mmap_sem);
}
/**

@ -4014,9 +4014,9 @@ static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev,
u8 port_num = path->sched_queue & 0x40 ? 2 : 1;
memset(ah_attr, 0, sizeof(*ah_attr));
ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);
if (port_num == 0 || port_num > dev->caps.num_ports)
return;
ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE)
rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) |

@ -2669,7 +2669,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
int i;
int i, j;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@ -2683,8 +2683,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
for (i = 0; i < target->req_ring_size; ++i) {
struct srp_request *req = &ch->req_ring[i];
for (j = 0; j < target->req_ring_size; ++j) {
struct srp_request *req = &ch->req_ring[j];
srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
}
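
The bug fixed above reused i for both the channel loop and the per-request loop, so the inner loop kept rewriting the outer counter; with an outer bound larger than the inner one, the outer loop never terminates. The fix is simply a second index:

#include <stdio.h>

int main(void)
{
    int i, j, passes = 0;

    /* Buggy shape (do not do this):
     *     for (i = 0; i < outer; i++)
     *         for (i = 0; i < inner; i++) ...
     * Every pass ends with i == inner, so if outer > inner + 1 the
     * outer condition never becomes false. Use a second index: */
    for (i = 0; i < 3; i++) {
        for (j = 0; j < 4; j++)
            ;       /* per-request work */
        passes++;
    }
    printf("outer passes: %d\n", passes);   /* prints 3, as intended */
    return 0;
}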

@ -229,7 +229,7 @@ static int xenkbd_probe(struct xenbus_device *dev,
}
}
touch = xenbus_read_unsigned(dev->nodename,
touch = xenbus_read_unsigned(dev->otherend,
XENKBD_FIELD_FEAT_MTOUCH, 0);
if (touch) {
ret = xenbus_write(XBT_NIL, dev->nodename,
@ -304,13 +304,13 @@ static int xenkbd_probe(struct xenbus_device *dev,
if (!mtouch)
goto error_nomem;
num_cont = xenbus_read_unsigned(info->xbdev->nodename,
num_cont = xenbus_read_unsigned(info->xbdev->otherend,
XENKBD_FIELD_MT_NUM_CONTACTS,
1);
width = xenbus_read_unsigned(info->xbdev->nodename,
width = xenbus_read_unsigned(info->xbdev->otherend,
XENKBD_FIELD_MT_WIDTH,
XENFB_WIDTH);
height = xenbus_read_unsigned(info->xbdev->nodename,
height = xenbus_read_unsigned(info->xbdev->otherend,
XENKBD_FIELD_MT_HEIGHT,
XENFB_HEIGHT);

@ -1180,6 +1180,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
static const char * const middle_button_pnp_ids[] = {
"LEN2131", /* ThinkPad P52 w/ NFC */
"LEN2132", /* ThinkPad P52 */
"LEN2133", /* ThinkPad P72 w/ NFC */
"LEN2134", /* ThinkPad P72 */
NULL
};

@ -2400,9 +2400,9 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
}
if (amd_iommu_unmap_flush) {
dma_ops_free_iova(dma_dom, dma_addr, pages);
domain_flush_tlb(&dma_dom->domain);
domain_flush_complete(&dma_dom->domain);
dma_ops_free_iova(dma_dom, dma_addr, pages);
} else {
pages = __roundup_pow_of_two(pages);
queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0);
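
Moving dma_ops_free_iova() after the flush enforces the rule that stale IOMMU translations must be gone before an IOVA range can be handed out again. The ordering, reduced to runnable stubs (all names invented):

#include <stdio.h>

static void tlb_flush_range(unsigned long iova, unsigned long pages)
{
    printf("flush IOVA %#lx (%lu pages)\n", iova, pages);
}

static void tlb_flush_wait(void)
{
    printf("wait for the flush to complete\n");
}

static void iova_free(unsigned long iova, unsigned long pages)
{
    printf("free IOVA %#lx (%lu pages)\n", iova, pages);
}

int main(void)
{
    /* Fixed ordering: flush and wait *before* freeing. Freeing first
     * would let a concurrent allocation reuse the range while stale
     * TLB entries still point at it. */
    tlb_flush_range(0x100000, 4);
    tlb_flush_wait();
    iova_free(0x100000, 4);
    return 0;
}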

@ -395,20 +395,15 @@ static int msm_iommu_add_device(struct device *dev)
struct msm_iommu_dev *iommu;
struct iommu_group *group;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
iommu = find_iommu_for_dev(dev);
spin_unlock_irqrestore(&msm_iommu_lock, flags);
if (iommu)
iommu_device_link(&iommu->iommu, dev);
else
ret = -ENODEV;
spin_unlock_irqrestore(&msm_iommu_lock, flags);
if (ret)
return ret;
return -ENODEV;
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
@ -425,13 +420,12 @@ static void msm_iommu_remove_device(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&msm_iommu_lock, flags);
iommu = find_iommu_for_dev(dev);
spin_unlock_irqrestore(&msm_iommu_lock, flags);
if (iommu)
iommu_device_unlink(&iommu->iommu, dev);
spin_unlock_irqrestore(&msm_iommu_lock, flags);
iommu_group_remove_device(dev);
}

@ -304,15 +304,6 @@ static void recover_bitmaps(struct md_thread *thread)
while (cinfo->recovery_map) {
slot = fls64((u64)cinfo->recovery_map) - 1;
/* Clear suspend_area associated with the bitmap */
spin_lock_irq(&cinfo->suspend_lock);
list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
if (slot == s->slot) {
list_del(&s->list);
kfree(s);
}
spin_unlock_irq(&cinfo->suspend_lock);
snprintf(str, 64, "bitmap%04d", slot);
bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres) {
@ -331,6 +322,16 @@ static void recover_bitmaps(struct md_thread *thread)
pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
goto clear_bit;
}
/* Clear suspend_area associated with the bitmap */
spin_lock_irq(&cinfo->suspend_lock);
list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
if (slot == s->slot) {
list_del(&s->list);
kfree(s);
}
spin_unlock_irq(&cinfo->suspend_lock);
if (hi > 0) {
if (lo < mddev->recovery_cp)
mddev->recovery_cp = lo;

@ -834,7 +834,7 @@ static int ov772x_set_params(struct ov772x_priv *priv,
* set COM8
*/
if (priv->band_filter) {
ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 1);
ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
if (!ret)
ret = ov772x_mask_set(client, BDBASE,
0xff, 256 - priv->band_filter);

@ -384,12 +384,17 @@ static void __isp_video_try_fmt(struct fimc_isp *isp,
struct v4l2_pix_format_mplane *pixm,
const struct fimc_fmt **fmt)
{
*fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
const struct fimc_fmt *__fmt;
__fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
if (fmt)
*fmt = __fmt;
pixm->colorspace = V4L2_COLORSPACE_SRGB;
pixm->field = V4L2_FIELD_NONE;
pixm->num_planes = (*fmt)->memplanes;
pixm->pixelformat = (*fmt)->fourcc;
pixm->num_planes = __fmt->memplanes;
pixm->pixelformat = __fmt->fourcc;
/*
* TODO: double check with the documentation these width/height
* constraints are correct.
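The fix works on a local __fmt and dereferences the caller's fmt pointer only when it is non-NULL. A small standalone sketch of this optional out-parameter pattern (hypothetical types and names):

#include <stdio.h>
#include <stddef.h>

struct fmt { unsigned fourcc; unsigned memplanes; };
static const struct fmt default_fmt = { 0x56595559, 1 };

/* The out-pointer may be NULL: work through a local, publish only
 * when the caller asked for the result. */
static void try_fmt(unsigned *num_planes, const struct fmt **fmt)
{
    const struct fmt *__fmt = &default_fmt;   /* lookup result */

    if (fmt)
        *fmt = __fmt;
    *num_planes = __fmt->memplanes;   /* derived from the local */
}

int main(void)
{
    unsigned planes;

    try_fmt(&planes, NULL);   /* safe even without the out-pointer */
    printf("planes=%u\n", planes);
    return 0;
}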

@@ -1417,7 +1417,7 @@ static int viu_of_probe(struct platform_device *op)
sizeof(struct viu_reg), DRV_NAME)) {
dev_err(&op->dev, "Error while requesting mem region\n");
ret = -EBUSY;
goto err;
goto err_irq;
}
/* remap registers */
@@ -1425,7 +1425,7 @@ static int viu_of_probe(struct platform_device *op)
if (!viu_regs) {
dev_err(&op->dev, "Can't map register set\n");
ret = -ENOMEM;
goto err;
goto err_irq;
}
/* Prepare our private structure */
@@ -1433,7 +1433,7 @@ static int viu_of_probe(struct platform_device *op)
if (!viu_dev) {
dev_err(&op->dev, "Can't allocate private structure\n");
ret = -ENOMEM;
goto err;
goto err_irq;
}
viu_dev->vr = viu_regs;
@@ -1449,16 +1449,21 @@ static int viu_of_probe(struct platform_device *op)
ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
if (ret < 0) {
dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
goto err;
goto err_irq;
}
ad = i2c_get_adapter(0);
if (!ad) {
ret = -EFAULT;
dev_err(&op->dev, "couldn't get i2c adapter\n");
goto err_v4l2;
}
v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
if (viu_dev->hdl.error) {
ret = viu_dev->hdl.error;
dev_err(&op->dev, "couldn't register control\n");
goto err_vdev;
goto err_i2c;
}
/* This control handler will inherit the control(s) from the
sub-device(s). */
@@ -1475,7 +1480,7 @@ static int viu_of_probe(struct platform_device *op)
vdev = video_device_alloc();
if (vdev == NULL) {
ret = -ENOMEM;
goto err_vdev;
goto err_hdl;
}
*vdev = viu_template;
@@ -1496,7 +1501,7 @@ static int viu_of_probe(struct platform_device *op)
ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
video_device_release(viu_dev->vdev);
goto err_vdev;
goto err_unlock;
}
/* enable VIU clock */
@@ -1504,12 +1509,12 @@ static int viu_of_probe(struct platform_device *op)
if (IS_ERR(clk)) {
dev_err(&op->dev, "failed to lookup the clock!\n");
ret = PTR_ERR(clk);
goto err_clk;
goto err_vdev;
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(&op->dev, "failed to enable the clock!\n");
goto err_clk;
goto err_vdev;
}
viu_dev->clk = clk;
@@ -1520,7 +1525,7 @@ static int viu_of_probe(struct platform_device *op)
if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
dev_err(&op->dev, "Request VIU IRQ failed.\n");
ret = -ENODEV;
goto err_irq;
goto err_clk;
}
mutex_unlock(&viu_dev->lock);
@@ -1528,16 +1533,19 @@ static int viu_of_probe(struct platform_device *op)
dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
return ret;
err_irq:
clk_disable_unprepare(viu_dev->clk);
err_clk:
video_unregister_device(viu_dev->vdev);
clk_disable_unprepare(viu_dev->clk);
err_vdev:
v4l2_ctrl_handler_free(&viu_dev->hdl);
video_unregister_device(viu_dev->vdev);
err_unlock:
mutex_unlock(&viu_dev->lock);
err_hdl:
v4l2_ctrl_handler_free(&viu_dev->hdl);
err_i2c:
i2c_put_adapter(ad);
err_v4l2:
v4l2_device_unregister(&viu_dev->v4l2_dev);
err:
err_irq:
irq_dispose_mapping(viu_irq);
return ret;
}
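The relabelled error path now unwinds in exactly the reverse order of acquisition, and each goto target releases only what was set up before the failing step. A compact standalone sketch of the pattern with stand-in resources (here the grabs always succeed, so the unwind is shown but not exercised):

#include <stdio.h>

static int grab(const char *what)     { printf("grab %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int probe(void)
{
    int ret = -1;

    if (grab("irq mapping"))
        goto err;
    if (grab("v4l2 device"))
        goto err_irq;
    if (grab("i2c adapter"))
        goto err_v4l2;
    return 0;

err_v4l2:
    release("v4l2 device");
err_irq:
    release("irq mapping");
err:
    return ret;
}

int main(void) { return probe(); }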

@@ -305,7 +305,7 @@ static struct clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data)
static int isp_xclk_init(struct isp_device *isp)
{
struct device_node *np = isp->dev->of_node;
struct clk_init_data init;
struct clk_init_data init = { 0 };
unsigned int i;
for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)

@@ -117,6 +117,8 @@ static int sensor_set_power(struct camif_dev *camif, int on)
if (camif->sensor.power_count == !on)
err = v4l2_subdev_call(sensor->sd, core, s_power, on);
if (err == -ENOIOCTLCMD)
err = 0;
if (!err)
sensor->power_count += on ? 1 : -1;

@@ -267,6 +267,11 @@ static int register_dvb(struct tm6000_core *dev)
ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
THIS_MODULE, &dev->udev->dev, adapter_nr);
if (ret < 0) {
pr_err("tm6000: couldn't register the adapter!\n");
goto err;
}
dvb->adapter.priv = dev;
if (dvb->frontend) {

@@ -163,14 +163,27 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
}
}
static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
{
/*
* Return the size of the video probe and commit controls, which depends
* on the protocol version.
*/
if (stream->dev->uvc_version < 0x0110)
return 26;
else if (stream->dev->uvc_version < 0x0150)
return 34;
else
return 48;
}
static int uvc_get_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl, int probe, __u8 query)
{
__u16 size = uvc_video_ctrl_size(stream);
__u8 *data;
__u16 size;
int ret;
size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
query == UVC_GET_DEF)
return -EIO;
@@ -225,7 +238,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);
if (size == 34) {
if (size >= 34) {
ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
ctrl->bmFramingInfo = data[30];
ctrl->bPreferedVersion = data[31];
@@ -254,11 +267,10 @@ out:
static int uvc_set_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl, int probe)
{
__u16 size = uvc_video_ctrl_size(stream);
__u8 *data;
__u16 size;
int ret;
size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
data = kzalloc(size, GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -275,7 +287,7 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream,
put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);
if (size == 34) {
if (size >= 34) {
put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
data[30] = ctrl->bmFramingInfo;
data[31] = ctrl->bPreferedVersion;
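The new uvc_video_ctrl_size() helper centralizes the version-dependent layout (26 bytes before UVC 1.1, 34 up to 1.5, 48 from 1.5 on), and the parsers switch from `size == 34` to `size >= 34` so the UVC 1.1 fields are still handled on 1.5 devices. A user-space restatement of the sizing rule:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Mirrors the idea of uvc_video_ctrl_size(): the probe/commit control
 * grew with the spec, so size the buffer from the negotiated version. */
static size_t ctrl_size(uint16_t bcd_version)
{
    if (bcd_version < 0x0110)
        return 26;   /* UVC 1.0 */
    if (bcd_version < 0x0150)
        return 34;   /* UVC 1.1 */
    return 48;       /* UVC 1.5 */
}

int main(void)
{
    /* ">= 34" (not "== 34") keeps the 1.1 fields readable on 1.5: */
    size_t size = ctrl_size(0x0150);

    if (size >= 34)
        printf("parse dwClockFrequency at offset 26 (size=%zu)\n", size);
    return 0;
}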

@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
if (sev == NULL)
return;
/*
* If the event has been added to the fh->subscribed list, but its
* add op has not completed yet elems will be 0, treat this as
* not being subscribed.
*/
if (!sev->elems)
return;
/* Increase event sequence number on fh. */
fh->sequence++;
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
struct v4l2_subscribed_event *sev, *found_ev;
unsigned long flags;
unsigned i;
int ret = 0;
if (sub->type == V4L2_EVENT_ALL)
return -EINVAL;
@@ -226,31 +219,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
sev->flags = sub->flags;
sev->fh = fh;
sev->ops = ops;
sev->elems = elems;
mutex_lock(&fh->subscribe_lock);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
if (!found_ev)
list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (found_ev) {
/* Already listening */
kvfree(sev);
return 0; /* Already listening */
goto out_unlock;
}
if (sev->ops && sev->ops->add) {
int ret = sev->ops->add(sev, elems);
ret = sev->ops->add(sev, elems);
if (ret) {
sev->ops = NULL;
v4l2_event_unsubscribe(fh, sub);
return ret;
kvfree(sev);
goto out_unlock;
}
}
/* Mark as ready for use */
sev->elems = elems;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
return 0;
out_unlock:
mutex_unlock(&fh->subscribe_lock);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
@@ -289,6 +287,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
return 0;
}
mutex_lock(&fh->subscribe_lock);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -306,6 +306,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
if (sev && sev->ops && sev->ops->del)
sev->ops->del(sev);
mutex_unlock(&fh->subscribe_lock);
kvfree(sev);
return 0;
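The reworked subscribe path takes the new fh->subscribe_lock mutex, runs the potentially sleeping ->add() callback before publication, and only then links the element into the subscribed list under the spinlock. A hedged pthread sketch of that publish-last shape (names are stand-ins; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t subscribe_lock = PTHREAD_MUTEX_INITIALIZER;
static void *subscribed;              /* stands in for fh->subscribed */

static int add_op(void) { return 0; } /* may sleep, may fail */

static int subscribe(void)
{
    void *sev = malloc(16);
    int ret = 0;

    if (!sev)
        return -1;

    pthread_mutex_lock(&subscribe_lock);
    if (subscribed) {                 /* already listening */
        free(sev);
        goto out;
    }
    ret = add_op();                   /* runs before publication ... */
    if (ret) {
        free(sev);
        goto out;
    }
    subscribed = sev;                 /* ... so readers never observe a
                                       * half-initialized element */
out:
    pthread_mutex_unlock(&subscribe_lock);
    return ret;
}

int main(void)
{
    printf("subscribe: %d\n", subscribe());
    return 0;
}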

@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
INIT_LIST_HEAD(&fh->available);
INIT_LIST_HEAD(&fh->subscribed);
fh->sequence = -1;
mutex_init(&fh->subscribe_lock);
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
return;
v4l_disable_media_source(fh->vdev);
v4l2_event_unsubscribe_all(fh);
mutex_destroy(&fh->subscribe_lock);
fh->vdev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);

@@ -391,23 +391,23 @@ static int sram_probe(struct platform_device *pdev)
if (IS_ERR(sram->pool))
return PTR_ERR(sram->pool);
ret = sram_reserve_regions(sram, res);
if (ret)
return ret;
sram->clk = devm_clk_get(sram->dev, NULL);
if (IS_ERR(sram->clk))
sram->clk = NULL;
else
clk_prepare_enable(sram->clk);
ret = sram_reserve_regions(sram, res);
if (ret)
goto err_disable_clk;
platform_set_drvdata(pdev, sram);
init_func = of_device_get_match_data(&pdev->dev);
if (init_func) {
ret = init_func();
if (ret)
goto err_disable_clk;
goto err_free_partitions;
}
dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
@@ -415,10 +415,11 @@ static int sram_probe(struct platform_device *pdev)
return 0;
err_free_partitions:
sram_free_partitions(sram);
err_disable_clk:
if (sram->clk)
clk_disable_unprepare(sram->clk);
sram_free_partitions(sram);
return ret;
}

@@ -177,7 +177,7 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
} else
lux = 0;
else
return -EAGAIN;
return 0;
/* LUX range check */
return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;

@@ -755,7 +755,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
retval = get_user_pages_fast((uintptr_t) produce_uva,
produce_q->kernel_if->num_pages, 1,
produce_q->kernel_if->u.h.header_page);
if (retval < produce_q->kernel_if->num_pages) {
if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
retval);
qp_release_pages(produce_q->kernel_if->u.h.header_page,
@@ -767,7 +767,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
retval = get_user_pages_fast((uintptr_t) consume_uva,
consume_q->kernel_if->num_pages, 1,
consume_q->kernel_if->u.h.header_page);
if (retval < consume_q->kernel_if->num_pages) {
if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
retval);
qp_release_pages(consume_q->kernel_if->u.h.header_page,

@@ -129,6 +129,11 @@
#define DEFAULT_TIMEOUT_MS 1000
#define MIN_DMA_LEN 128
static bool atmel_nand_avoid_dma __read_mostly;
MODULE_PARM_DESC(avoiddma, "Avoid using DMA");
module_param_named(avoiddma, atmel_nand_avoid_dma, bool, 0400);
enum atmel_nand_rb_type {
ATMEL_NAND_NO_RB,
ATMEL_NAND_NATIVE_RB,
@@ -1975,7 +1980,7 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
return ret;
}
if (nc->caps->has_dma) {
if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
dma_cap_mask_t mask;
dma_cap_zero(mask);
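For reference, the general shape of such a read-mostly boolean module parameter, as a standalone kernel-module sketch (an illustrative demo module, not the Atmel driver itself; perms 0400 make the value visible under /sys/module/.../parameters but read-only):

#include <linux/module.h>

static bool avoid_dma __read_mostly;
module_param(avoid_dma, bool, 0400);
MODULE_PARM_DESC(avoid_dma, "Fall back to PIO instead of DMA");

static int __init demo_init(void)
{
    pr_info("avoid_dma=%d\n", avoid_dma);
    return 0;
}
module_init(demo_init);

static void __exit demo_exit(void) { }
module_exit(demo_exit);

MODULE_LICENSE("GPL");

With the real driver, passing avoiddma=1 when loading the module makes the atmel_nand_controller_init() branch above skip the DMA setup entirely.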

@@ -220,10 +220,10 @@ struct hnae_desc_cb {
/* priv data for the desc, e.g. skb when use with ip stack*/
void *priv;
u16 page_offset;
u16 reuse_flag;
u32 page_offset;
u32 length; /* length of the buffer */
u16 length; /* length of the buffer */
u16 reuse_flag;
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;

@@ -530,7 +530,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
}
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize - pull_len);
size - pull_len, truesize);
/* avoid re-using remote pages, flag default unreuse */
if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))

@@ -644,14 +644,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
adapter->tx_ring = tx_old;
e1000_free_all_rx_resources(adapter);
e1000_free_all_tx_resources(adapter);
kfree(tx_old);
kfree(rx_old);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
err = e1000_up(adapter);
if (err)
goto err_setup;
}
kfree(tx_old);
kfree(rx_old);
clear_bit(__E1000_RESETTING, &adapter->flags);
return 0;
@@ -664,7 +664,8 @@ err_setup_rx:
err_alloc_rx:
kfree(txdr);
err_alloc_tx:
e1000_up(adapter);
if (netif_running(adapter->netdev))
e1000_up(adapter);
err_setup:
clear_bit(__E1000_RESETTING, &adapter->flags);
return err;

@@ -47,7 +47,7 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define CHIP_MCP_RESP_ITER_US 10
#define QED_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
@@ -182,18 +182,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
return 0;
}
/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define QED_MCP_SHMEM_RDY_MAX_RETRIES 20
#define QED_MCP_SHMEM_RDY_ITER_MS 50
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info = p_hwfn->mcp_info;
u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
u32 drv_mb_offsize, mfw_mb_offsize;
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
if (!p_info->public_base)
return 0;
if (!p_info->public_base) {
DP_NOTICE(p_hwfn,
"The address of the MCP scratch-pad is not configured\n");
return -EINVAL;
}
p_info->public_base |= GRCBASE_MCP;
/* Get the MFW MB address and number of supported messages */
mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
PUBLIC_MFW_MB));
p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
p_info->mfw_mb_addr +
offsetof(struct public_mfw_mb,
sup_msgs));
/* The driver can notify that there was an MCP reset, and might read the
* SHMEM values before the MFW has completed initializing them.
* To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
* data ready indication.
*/
while (!p_info->mfw_mb_length && --cnt) {
msleep(msec);
p_info->mfw_mb_length =
(u16)qed_rd(p_hwfn, p_ptt,
p_info->mfw_mb_addr +
offsetof(struct public_mfw_mb, sup_msgs));
}
if (!cnt) {
DP_NOTICE(p_hwfn,
"Failed to get the SHMEM ready notification after %d msec\n",
QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
return -EBUSY;
}
/* Calculate the driver and MFW mailbox address */
drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -203,13 +242,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
"drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
/* Set the MFW MB address */
mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
PUBLIC_MFW_MB));
p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
/* Get the current driver mailbox sequence before sending
* the first command
*/
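The added loop is a bounded poll: re-read the sup_msgs word with a sleep between attempts, and give up with -EBUSY once the retry budget (20 x 50 ms, the "maximal 1 sec" from the comment) is spent. The same shape in a standalone sketch, with a stub in place of qed_rd():

#include <stdio.h>
#include <unistd.h>

#define RDY_MAX_RETRIES 20
#define RDY_ITER_MS     50

static unsigned read_ready_word(void) { return 1; /* stub for qed_rd() */ }

int main(void)
{
    unsigned cnt = RDY_MAX_RETRIES;
    unsigned val = read_ready_word();

    while (!val && --cnt) {
        usleep(RDY_ITER_MS * 1000);   /* ~msleep() between attempts */
        val = read_ready_word();
    }
    if (!cnt) {
        fprintf(stderr, "no ready indication after %d msec\n",
                RDY_MAX_RETRIES * RDY_ITER_MS);
        return 1;
    }
    printf("ready, %u retries left\n", cnt);
    return 0;
}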
@@ -284,9 +316,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
int rc = 0;
if (p_hwfn->mcp_info->b_block_cmd) {
DP_NOTICE(p_hwfn,
"The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
return -EBUSY;
}
/* Ensure that only a single thread is accessing the mailbox */
spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
@@ -412,14 +450,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
(p_mb_params->cmd | seq_num), p_mb_params->param);
}
static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
{
p_hwfn->mcp_info->b_block_cmd = block_cmd;
DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
block_cmd ? "Block" : "Unblock");
}
static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
u32 delay = QED_MCP_RESP_ITER_US;
cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
udelay(delay);
cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
udelay(delay);
cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
DP_NOTICE(p_hwfn,
"MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_mb_params *p_mb_params,
u32 max_retries, u32 delay)
u32 max_retries, u32 usecs)
{
u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
struct qed_mcp_cmd_elem *p_cmd_elem;
u32 cnt = 0;
u16 seq_num;
int rc = 0;
@@ -442,7 +507,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
goto err;
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
udelay(delay);
if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
msleep(msecs);
else
udelay(usecs);
} while (++cnt < max_retries);
if (cnt >= max_retries) {
@@ -471,7 +540,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
* The spinlock stays locked until the list element is removed.
*/
udelay(delay);
if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
msleep(msecs);
else
udelay(usecs);
spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
if (p_cmd_elem->b_is_completed)
@@ -490,11 +563,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param);
qed_mcp_print_cpu_info(p_hwfn, p_ptt);
spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
qed_mcp_cmd_set_blocking(p_hwfn, true);
return -EAGAIN;
}
@@ -506,7 +583,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
"MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
p_mb_params->mcp_resp,
p_mb_params->mcp_param,
(cnt * delay) / 1000, (cnt * delay) % 1000);
(cnt * usecs) / 1000, (cnt * usecs) % 1000);
/* Clear the sequence number from the MFW response */
p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -524,7 +601,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
{
size_t union_data_size = sizeof(union drv_union_data);
u32 max_retries = QED_DRV_MB_MAX_RETRIES;
u32 delay = CHIP_MCP_RESP_ITER_US;
u32 usecs = QED_MCP_RESP_ITER_US;
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -532,6 +609,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
return -EBUSY;
}
if (p_hwfn->mcp_info->b_block_cmd) {
DP_NOTICE(p_hwfn,
"The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param);
return -EBUSY;
}
if (p_mb_params->data_src_size > union_data_size ||
p_mb_params->data_dst_size > union_data_size) {
DP_ERR(p_hwfn,
@@ -541,8 +625,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
max_retries = DIV_ROUND_UP(max_retries, 1000);
usecs *= 1000;
}
return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
delay);
usecs);
}
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -731,6 +820,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
mb_params.data_src_size = sizeof(load_req);
mb_params.p_data_dst = &load_rsp;
mb_params.data_dst_size = sizeof(load_rsp);
mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
@@ -952,7 +1042,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 wol_param, mcp_resp, mcp_param;
struct qed_mcp_mb_params mb_params;
u32 wol_param;
switch (p_hwfn->cdev->wol_config) {
case QED_OV_WOL_DISABLED:
@@ -970,8 +1061,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
}
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
&mcp_resp, &mcp_param);
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
mb_params.param = wol_param;
mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1966,31 +2061,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
return rc;
}
/* A maximal 100 msec waiting time for the MCP to halt */
#define QED_MCP_HALT_SLEEP_MS 10
#define QED_MCP_HALT_MAX_RETRIES 10
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 resp = 0, param = 0;
u32 resp = 0, param = 0, cpu_state, cnt = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
&param);
if (rc)
if (rc) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
return rc;
}
return rc;
do {
msleep(QED_MCP_HALT_SLEEP_MS);
cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
break;
} while (++cnt < QED_MCP_HALT_MAX_RETRIES);
if (cnt == QED_MCP_HALT_MAX_RETRIES) {
DP_NOTICE(p_hwfn,
"Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
return -EBUSY;
}
qed_mcp_cmd_set_blocking(p_hwfn, true);
return 0;
}
#define QED_MCP_RESUME_SLEEP_MS 10
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 value, cpu_mode;
u32 cpu_mode, cpu_state;
qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
msleep(QED_MCP_RESUME_SLEEP_MS);
cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
DP_NOTICE(p_hwfn,
"Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
cpu_mode, cpu_state);
return -EBUSY;
}
qed_mcp_cmd_set_blocking(p_hwfn, false);
return 0;
}
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,

@@ -540,11 +540,14 @@ struct qed_mcp_info {
*/
spinlock_t cmd_lock;
/* Flag to indicate whether sending a MFW mailbox command is blocked */
bool b_block_cmd;
/* Spinlock used for syncing SW link-changes and link-changes
* originating from attention context.
*/
spinlock_t link_lock;
bool block_mb_sending;
u32 public_base;
u32 drv_mb_addr;
u32 mfw_mb_addr;
@@ -565,14 +568,20 @@ struct qed_mcp_info {
};
struct qed_mcp_mb_params {
u32 cmd;
u32 param;
void *p_data_src;
u8 data_src_size;
void *p_data_dst;
u8 data_dst_size;
u32 mcp_resp;
u32 mcp_param;
u32 cmd;
u32 param;
void *p_data_src;
void *p_data_dst;
u8 data_src_size;
u8 data_dst_size;
u32 mcp_resp;
u32 mcp_param;
u32 flags;
#define QED_MB_FLAG_CAN_SLEEP (0x1 << 0)
#define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1)
#define QED_MB_FLAGS_IS_SET(params, flag) \
({ typeof(params) __params = (params); \
(__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
};
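QED_MB_FLAGS_IS_SET relies on a GNU C statement expression plus typeof, so `params` is evaluated exactly once and a NULL pointer is tolerated. A user-space restatement of how it is meant to be used (GCC/Clang only; illustrative names):

#include <stdio.h>

#define MB_FLAG_CAN_SLEEP    (0x1 << 0)
#define MB_FLAG_AVOID_BLOCK  (0x1 << 1)

struct mb_params { unsigned flags; };

#define MB_FLAGS_IS_SET(params, flag)                         \
    ({ typeof(params) __params = (params);                    \
       (__params && (__params->flags & MB_FLAG_ ## flag)); })

int main(void)
{
    struct mb_params p = { .flags = MB_FLAG_CAN_SLEEP };

    printf("%d %d %d\n",
           MB_FLAGS_IS_SET(&p, CAN_SLEEP),                        /* 1 */
           MB_FLAGS_IS_SET(&p, AVOID_BLOCK),                      /* 0 */
           MB_FLAGS_IS_SET((struct mb_params *)NULL, CAN_SLEEP)); /* 0 */
    return 0;
}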
/**

@@ -554,8 +554,10 @@
0
#define MCP_REG_CPU_STATE \
0xe05004UL
#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10)
#define MCP_REG_CPU_EVENT_MASK \
0xe05008UL
#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL
#define PGLUE_B_REG_PF_BAR0_SIZE \
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \

@@ -40,8 +40,11 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
{
struct gmii2rgmii *priv = phydev->priv;
u16 val = 0;
int err;
priv->phy_drv->read_status(phydev);
err = priv->phy_drv->read_status(phydev);
if (err < 0)
return err;
val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
val &= ~XILINX_GMII2RGMII_SPEED_MASK;
@@ -81,6 +84,11 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
return -EPROBE_DEFER;
}
if (!priv->phy_dev->drv) {
dev_info(dev, "Attached phy not ready\n");
return -EPROBE_DEFER;
}
priv->addr = mdiodev->addr;
priv->phy_drv = priv->phy_dev->drv;
memcpy(&priv->conv_phy_drv, priv->phy_dev->drv,

@@ -215,11 +215,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
spin_unlock_bh(&htt->rx_ring.lock);
if (ret)
ath10k_htt_rx_ring_free(htt);
spin_unlock_bh(&htt->rx_ring.lock);
return ret;
}
@@ -231,7 +232,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
skb_queue_purge(&htt->rx_in_ord_compl_q);
skb_queue_purge(&htt->tx_fetch_ind_q);
spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_ring_free(htt);
spin_unlock_bh(&htt->rx_ring.lock);
dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size *

@@ -4015,6 +4015,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
rcu_read_unlock();
spin_unlock_bh(&ar->txqs_lock);
}
EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
/************/
/* Scanning */

@@ -30,6 +30,7 @@
#include "debug.h"
#include "hif.h"
#include "htc.h"
#include "mac.h"
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"
@@ -396,6 +397,7 @@ static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
int ret;
payload_len = le16_to_cpu(htc_hdr->len);
skb->len = payload_len + sizeof(struct ath10k_htc_hdr);
if (trailer_present) {
trailer = skb->data + sizeof(*htc_hdr) +
@@ -434,12 +436,14 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
enum ath10k_htc_ep_id id;
int ret, i, *n_lookahead_local;
u32 *lookaheads_local;
int lookahead_idx = 0;
for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
lookaheads_local = lookaheads;
n_lookahead_local = n_lookahead;
id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid;
id = ((struct ath10k_htc_hdr *)
&lookaheads[lookahead_idx++])->eid;
if (id >= ATH10K_HTC_EP_COUNT) {
ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
@@ -462,6 +466,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
/* Only read lookahead's from RX trailers
* for the last packet in a bundle.
*/
lookahead_idx--;
lookaheads_local = NULL;
n_lookahead_local = NULL;
}
@@ -1342,6 +1347,8 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
break;
} while (time_before(jiffies, timeout) && !done);
ath10k_mac_tx_push_pending(ar);
sdio_claim_host(ar_sdio->func);
if (ret && ret != -ECANCELED)

@@ -213,7 +213,7 @@ static const s16 log_table[] = {
30498,
31267,
32024,
32768
32767
};
#define LOG_TABLE_SIZE 32 /* log_table size */
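The table is declared as s16, and 32768 is one past S16_MAX, so the old final entry wrapped to -32768 instead of saturating at full scale; 32767 is the intended value. A two-line demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int16_t bad  = (int16_t)32768;  /* out of range: wraps to -32768
                                     * on common two's-complement ABIs */
    int16_t good = 32767;           /* INT16_MAX, the intended value */

    printf("bad=%d good=%d\n", bad, good);
    return 0;
}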

@@ -2928,6 +2928,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,
while (buflen >= sizeof(*auth_req)) {
auth_req = (void *)buf;
if (buflen < le32_to_cpu(auth_req->length))
return;
type = "unknown";
flags = le32_to_cpu(auth_req->flags);
pairwise_error = false;

@@ -35,6 +35,7 @@
#include "wl12xx_80211.h"
#include "cmd.h"
#include "event.h"
#include "ps.h"
#include "tx.h"
#include "hw_ops.h"
@@ -191,6 +192,10 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
return ret;
do {
if (time_after(jiffies, timeout_time)) {
wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
@@ -222,6 +227,7 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
} while (!event);
out:
wl1271_ps_elp_sleep(wl);
kfree(events_vector);
return ret;
}

@@ -300,7 +300,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work)
struct fcloop_tport *tport = tls_req->tport;
struct nvmefc_ls_req *lsreq = tls_req->lsreq;
if (tport->remoteport)
if (!tport || tport->remoteport)
lsreq->done(lsreq, tls_req->status);
}
@@ -318,6 +318,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
if (!rport->targetport) {
tls_req->status = -ECONNREFUSED;
tls_req->tport = NULL;
schedule_work(&tls_req->work);
return ret;
}

@@ -35,6 +35,7 @@ static void vexpress_reset_do(struct device *dev, const char *what)
}
static struct device *vexpress_power_off_device;
static atomic_t vexpress_restart_nb_refcnt = ATOMIC_INIT(0);
static void vexpress_power_off(void)
{
@@ -99,10 +100,13 @@ static int _vexpress_register_restart_handler(struct device *dev)
int err;
vexpress_restart_device = dev;
err = register_restart_handler(&vexpress_restart_nb);
if (err) {
dev_err(dev, "cannot register restart handler (err=%d)\n", err);
return err;
if (atomic_inc_return(&vexpress_restart_nb_refcnt) == 1) {
err = register_restart_handler(&vexpress_restart_nb);
if (err) {
dev_err(dev, "cannot register restart handler (err=%d)\n", err);
atomic_dec(&vexpress_restart_nb_refcnt);
return err;
}
}
device_create_file(dev, &dev_attr_active);
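The refcount guard makes registration idempotent: only the transition from zero actually registers the shared notifier, and a failed registration undoes the increment. A standalone sketch of this register-once pattern using C11 atomics (stand-in names):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcnt;

static int register_handler(void) { return 0; /* stub */ }

static int get_restart_handler(void)
{
    /* atomic_fetch_add() returns the previous value, so exactly one
     * caller sees 0 and performs the real registration. */
    if (atomic_fetch_add(&refcnt, 1) == 0) {
        int err = register_handler();
        if (err) {
            atomic_fetch_sub(&refcnt, 1);  /* undo on failure */
            return err;
        }
    }
    return 0;
}

int main(void)
{
    printf("%d %d\n", get_restart_handler(), get_restart_handler());
    return 0;
}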

@@ -771,7 +771,7 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
}
/* Determine charge current limit */
cc = (ret & CHRG_CCCV_CC_MASK) >> CHRG_CCCV_CC_BIT_POS;
cc = (val & CHRG_CCCV_CC_MASK) >> CHRG_CCCV_CC_BIT_POS;
cc = (cc * CHRG_CCCV_CC_LSB_RES) + CHRG_CCCV_CC_OFFSET;
info->cc = cc;

@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/err.h>
@@ -139,8 +140,13 @@ static void power_supply_deferred_register_work(struct work_struct *work)
struct power_supply *psy = container_of(work, struct power_supply,
deferred_register_work.work);
if (psy->dev.parent)
mutex_lock(&psy->dev.parent->mutex);
if (psy->dev.parent) {
while (!mutex_trylock(&psy->dev.parent->mutex)) {
if (psy->removing)
return;
msleep(10);
}
}
power_supply_changed(psy);
@@ -1071,6 +1077,7 @@ EXPORT_SYMBOL_GPL(devm_power_supply_register_no_ws);
void power_supply_unregister(struct power_supply *psy)
{
WARN_ON(atomic_dec_return(&psy->use_cnt));
psy->removing = true;
cancel_work_sync(&psy->changed_work);
cancel_delayed_work_sync(&psy->deferred_register_work);
sysfs_remove_link(&psy->dev.kobj, "powers");
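The deferred worker now polls with mutex_trylock() and bails out once psy->removing is set, so it can never block forever on the parent mutex that the unregister path may effectively be waiting behind. A pthread sketch of the shape (illustrative names; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t parent_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile int removing;

static void deferred_work(void)
{
    /* Trylock in a loop instead of blocking: teardown can make us
     * give up instead of deadlocking against the unregister path. */
    while (pthread_mutex_trylock(&parent_lock) != 0) {
        if (removing)
            return;          /* teardown in progress: bail out */
        usleep(10 * 1000);   /* ~msleep(10) */
    }
    /* ... do the deferred registration ... */
    pthread_mutex_unlock(&parent_lock);
}

int main(void)
{
    deferred_work();
    puts("done");
    return 0;
}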

@@ -4115,13 +4115,13 @@ regulator_register(const struct regulator_desc *regulator_desc,
!rdev->desc->fixed_uV)
rdev->is_switch = true;
dev_set_drvdata(&rdev->dev, rdev);
ret = device_register(&rdev->dev);
if (ret != 0) {
put_device(&rdev->dev);
goto unset_supplies;
}
dev_set_drvdata(&rdev->dev, rdev);
rdev_init_debugfs(rdev);
/* try to resolve regulators supply since a new one was registered */
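Two rules are at work in this hunk, sketched below under the usual driver-core assumptions: drvdata must be set before device_register() makes the device (and its attributes) visible, and a failed device_register() still requires put_device(), not kfree(), because the embedded kobject already holds a reference. A kernel-style sketch with a hypothetical helper, not the regulator code itself:

#include <linux/device.h>

static int register_visible_dev(struct device *dev, void *priv)
{
    int ret;

    dev_set_drvdata(dev, priv);    /* ready before anyone can see it */

    ret = device_register(dev);
    if (ret) {
        put_device(dev);           /* drops the ref; ->release() frees */
        return ret;
    }
    return 0;
}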

@@ -3190,6 +3190,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
block->tag_set.numa_node = NUMA_NO_NODE;
rc = blk_mq_alloc_tag_set(&block->tag_set);
if (rc)
