This is the 4.14.53 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAls7QPEACgkQONu9yGCS
 aT5Zuw//UYR0Hahnjiv61N2NCo5cH+uSOc0XjR/a8iTBHVa5lN459dmrKVUDJKyS
 JrIJjwsaUL5H/VHN/XrdRUQMqo38osQ395t+sVCzVaouaJ0nYlEaxVexI0E87mpk
 zsd7qF0HfgGxOEEVfCcxlwKDzgstSNMP3KWprTZZ/5V04NjPlOXPsNOnKj6PWKTI
 4XCp7OrVQhL5zFQKm0kPok9CHrunjjYpF0pgftKblhdB/RPi0E/XbpLrW5hDxOvY
 MxnzKWKHsbEzV6PJKFNmEvFc4D3/Dm3mDG9aI7fL4FbnSBxkxKrzkAX8HP163Lc1
 cNiwhqo4v2IsfVvuJcV9+toVsg+UHcmPETd02hfhIBnN7lCo56+IBoo2FTsV9BRy
 AIWtwzpBj52j0gXTHhORYRhQqa6Jd/N7+9Aay40avWs8NI1tokOGfgifLoJlbXqE
 spfMZdK1ihiUNav2PmY7WklPlN4OeGGcMKvt0bJ4IY2nprI/oeKEUvAkwC5CVRo+
 w/Qvgp94vJDALWRA7e0dUR2cQMN0Y9ELLCy08KgdzRDTUY5f0xVw9Qz0Swx1Zxgk
 DwD+nxscEzr4n0wKtcLkkt2wu9sS/eUeAAHKFqNKRtHQvgqx0oymgow35pw4XHjt
 04sXUemWUXzR73T55HC960vWBrpu67HbNAyGqlCbiATX63euEDY=
 =YCfp
 -----END PGP SIGNATURE-----

Merge 4.14.53 into android-4.14

Changes in 4.14.53
	x86/spectre_v1: Disable compiler optimizations over array_index_mask_nospec()
	x86/xen: Add call of speculative_store_bypass_ht_init() to PV paths
	x86/mce: Improve error message when kernel cannot recover
	x86/mce: Check for alternate indication of machine check recovery on Skylake
	x86/mce: Fix incorrect "Machine check from unknown source" message
	x86/mce: Do not overwrite MCi_STATUS in mce_no_way_out()
	x86: Call fixup_exception() before notify_die() in math_error()
	m68k/mm: Adjust VM area to be unmapped by gap size for __iounmap()
	m68k/mac: Fix SWIM memory resource end address
	serial: sh-sci: Use spin_{try}lock_irqsave instead of open coding version
	signal/xtensa: Consistenly use SIGBUS in do_unaligned_user
	PM / Domains: Fix error path during attach in genpd
	PM / core: Fix supplier device runtime PM usage counter imbalance
	PM / OPP: Update voltage in case freq == old_freq
	usb: do not reset if a low-speed or full-speed device timed out
	1wire: family module autoload fails because of upper/lower case mismatch.
	ASoC: dapm: delete dapm_kcontrol_data paths list before freeing it
	ASoC: cs35l35: Add use_single_rw to regmap config
	ASoC: cirrus: i2s: Fix LRCLK configuration
	ASoC: cirrus: i2s: Fix {TX|RX}LinCtrlData setup
	thermal: bcm2835: Stop using printk format %pCr
	clk: renesas: cpg-mssr: Stop using printk format %pCr
	lib/vsprintf: Remove atomic-unsafe support for %pCr
	ftrace/selftest: Have the reset_trigger code be a bit more careful
	mips: ftrace: fix static function graph tracing
	branch-check: fix long->int truncation when profiling branches
	ipmi:bt: Set the timeout before doing a capabilities check
	Bluetooth: hci_qca: Avoid missing rampatch failure with userspace fw loader
	printk: fix possible reuse of va_list variable
	fuse: fix congested state leak on aborted connections
	fuse: atomic_o_trunc should truncate pagecache
	fuse: don't keep dead fuse_conn at fuse_fill_super().
	fuse: fix control dir setup and teardown
	powerpc/mm/hash: Add missing isync prior to kernel stack SLB switch
	powerpc/ptrace: Fix setting 512B aligned breakpoints with PTRACE_SET_DEBUGREG
	powerpc/perf: Fix memory allocation for core-imc based on num_possible_cpus()
	powerpc/ptrace: Fix enforcement of DAWR constraints
	powerpc/powernv/ioda2: Remove redundant free of TCE pages
	powerpc/powernv: copy/paste - Mask SO bit in CR
	powerpc/powernv/cpuidle: Init all present cpus for deep states
	cpuidle: powernv: Fix promotion from snooze if next state disabled
	powerpc/fadump: Unregister fadump on kexec down path.
	soc: rockchip: power-domain: Fix wrong value when power up pd with writemask
	cxl: Disable prefault_mode in Radix mode
	ARM: 8764/1: kgdb: fix NUMREGBYTES so that gdb_regs[] is the correct size
	ARM: dts: Fix SPI node for Arria10
	ARM: dts: socfpga: Fix NAND controller node compatible
	ARM: dts: socfpga: Fix NAND controller clock supply
	ARM: dts: socfpga: Fix NAND controller node compatible for Arria10
	arm64: Fix syscall restarting around signal suppressed by tracer
	arm64: kpti: Use early_param for kpti= command-line option
	arm64: mm: Ensure writes to swapper are ordered wrt subsequent cache maintenance
	ARM64: dts: meson: disable sd-uhs modes on the libretech-cc
	of: overlay: validate offset from property fixups
	of: unittest: for strings, account for trailing \0 in property length field
	of: platform: stop accessing invalid dev in of_platform_device_destroy
	tpm: fix use after free in tpm2_load_context()
	tpm: fix race condition in tpm_common_write()
	IB/qib: Fix DMA api warning with debug kernel
	IB/{hfi1, qib}: Add handling of kernel restart
	IB/mlx4: Mark user MR as writable if actual virtual memory is writable
	IB/core: Make testing MR flags for writability a static inline function
	IB/mlx5: Fetch soft WQE's on fatal error state
	IB/isert: Fix for lib/dma_debug check_sync warning
	IB/isert: fix T10-pi check mask setting
	IB/hfi1: Fix fault injection init/exit issues
	IB/hfi1: Reorder incorrect send context disable
	IB/hfi1: Optimize kthread pointer locking when queuing CQ entries
	IB/hfi1: Fix user context tail allocation for DMA_RTAIL
	RDMA/mlx4: Discard unknown SQP work requests
	xprtrdma: Return -ENOBUFS when no pages are available
	mtd: cfi_cmdset_0002: Change write buffer to check correct value
	mtd: cfi_cmdset_0002: Use right chip in do_ppb_xxlock()
	mtd: cfi_cmdset_0002: fix SEGV unlocking multiple chips
	mtd: cfi_cmdset_0002: Fix unlocking requests crossing a chip boudary
	mtd: cfi_cmdset_0002: Avoid walking all chips when unlocking.
	MIPS: BCM47XX: Enable 74K Core ExternalSync for PCIe erratum
	PCI: hv: Make sure the bus domain is really unique
	PCI: Add ACS quirk for Intel 7th & 8th Gen mobile
	PCI: Add ACS quirk for Intel 300 series
	PCI: pciehp: Clear Presence Detect and Data Link Layer Status Changed on resume
	auxdisplay: fix broken menu
	pinctrl: samsung: Correct EINTG banks order
	pinctrl: devicetree: Fix pctldev pointer overwrite
	cpufreq: intel_pstate: Fix scaling max/min limits with Turbo 3.0
	MIPS: io: Add barrier after register read in inX()
	time: Make sure jiffies_to_msecs() preserves non-zero time periods
	irqchip/gic-v3-its: Don't bind LPI to unavailable NUMA node
	X.509: unpack RSA signatureValue field from BIT STRING
	Btrfs: fix return value on rename exchange failure
	iio: adc: ad7791: remove sample freq sysfs attributes
	iio: sca3000: Fix an error handling path in 'sca3000_probe()'
	mm: fix __gup_device_huge vs unmap
	scsi: hpsa: disable device during shutdown
	scsi: qla2xxx: Fix setting lower transfer speed if GPSC fails
	scsi: qla2xxx: Mask off Scope bits in retry delay
	scsi: zfcp: fix missing SCSI trace for result of eh_host_reset_handler
	scsi: zfcp: fix missing SCSI trace for retry of abort / scsi_eh TMF
	scsi: zfcp: fix misleading REC trigger trace where erp_action setup failed
	scsi: zfcp: fix missing REC trigger trace on terminate_rport_io early return
	scsi: zfcp: fix missing REC trigger trace on terminate_rport_io for ERP_FAILED
	scsi: zfcp: fix missing REC trigger trace for all objects in ERP_FAILED
	scsi: zfcp: fix missing REC trigger trace on enqueue without ERP thread
	linvdimm, pmem: Preserve read-only setting for pmem devices
	clk: at91: PLL recalc_rate() now using cached MUL and DIV values
	rtc: sun6i: Fix bit_idx value for clk_register_gate
	md: fix two problems with setting the "re-add" device state.
	rpmsg: smd: do not use mananged resources for endpoints and channels
	ubi: fastmap: Cancel work upon detach
	ubi: fastmap: Correctly handle interrupted erasures in EBA
	UBIFS: Fix potential integer overflow in allocation
	backlight: as3711_bl: Fix Device Tree node lookup
	backlight: max8925_bl: Fix Device Tree node lookup
	backlight: tps65217_bl: Fix Device Tree node lookup
	mfd: intel-lpss: Program REMAP register in PIO mode
	mfd: intel-lpss: Fix Intel Cannon Lake LPSS I2C input clock
	arm: dts: mt7623: fix invalid memory node being generated
	perf tools: Fix symbol and object code resolution for vdso32 and vdsox32
	perf intel-pt: Fix sync_switch INTEL_PT_SS_NOT_TRACING
	perf intel-pt: Fix decoding to accept CBR between FUP and corresponding TIP
	perf intel-pt: Fix MTC timing after overflow
	perf intel-pt: Fix "Unexpected indirect branch" error
	perf intel-pt: Fix packet decoding of CYC packets
	perf vendor events: Add Goldmont Plus V1 event file
	perf/x86/intel/uncore: Add event constraint for BDX PCU
	media: vsp1: Release buffers for each video node
	media: v4l2-compat-ioctl32: prevent go past max size
	media: cx231xx: Add support for AverMedia DVD EZMaker 7
	media: dvb_frontend: fix locking issues at dvb_frontend_get_event()
	nfsd: restrict rd_maxcount to svc_max_payload in nfsd_encode_readdir
	NFSv4: Fix possible 1-byte stack overflow in nfs_idmap_read_and_verify_message
	NFSv4: Revert commit 5f83d86cf5 ("NFSv4.x: Fix wraparound issues..")
	NFSv4: Fix a typo in nfs41_sequence_process
	video: uvesafb: Fix integer overflow in allocation
	ACPI / LPSS: Add missing prv_offset setting for byt/cht PWM devices
	Input: elan_i2c - add ELAN0618 (Lenovo v330 15IKB) ACPI ID
	pwm: lpss: platform: Save/restore the ctrl register over a suspend/resume
	rbd: flush rbd_dev->watch_dwork after watch is unregistered
	mm/ksm.c: ignore STABLE_FLAG of rmap_item->address in rmap_walk_ksm()
	mm: fix devmem_is_allowed() for sub-page System RAM intersections
	xen: Remove unnecessary BUG_ON from __unbind_from_irq()
	udf: Detect incorrect directory size
	Input: xpad - fix GPD Win 2 controller name
	Input: elan_i2c_smbus - fix more potential stack buffer overflows
	Input: elantech - enable middle button of touchpads on ThinkPad P52
	Input: elantech - fix V4 report decoding for module with middle key
	ALSA: timer: Fix UBSAN warning at SNDRV_TIMER_IOCTL_NEXT_DEVICE ioctl
	ALSA: hda/realtek - Fix pop noise on Lenovo P50 & co
	ALSA: hda/realtek - Add a quirk for FSC ESPRIMO U9210
	ALSA: hda/realtek - Fix the problem of two front mics on more machines
	slub: fix failure when we delete and create a slab cache
	block: Fix transfer when chunk sectors exceeds max
	block: Fix cloning of requests with a special payload
	x86/efi: Fix efi_call_phys_epilog() with CONFIG_X86_5LEVEL=y
	dm zoned: avoid triggering reclaim from inside dmz_map()
	dm thin: handle running out of data space vs concurrent discard
	xhci: Fix use-after-free in xhci_free_virt_device
	Linux 4.14.53

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 57c28741d0
 Documentation/ABI/testing/sysfs-class-cxl | 4
 Documentation/printk-formats.txt | 3
 Makefile | 2
 arch/arm/boot/dts/mt7623.dtsi | 3
 arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts | 1
 arch/arm/boot/dts/mt7623n-rfb.dtsi | 1
 arch/arm/boot/dts/socfpga.dtsi | 4
 arch/arm/boot/dts/socfpga_arria10.dtsi | 5
 arch/arm/include/asm/kgdb.h | 2
 arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts | 3
 arch/arm64/kernel/cpufeature.c | 2
 arch/arm64/kernel/signal.c | 5
 arch/arm64/mm/proc.S | 5
 arch/m68k/mac/config.c | 2
 arch/m68k/mm/kmap.c | 3
 arch/mips/bcm47xx/setup.c | 6
 arch/mips/include/asm/io.h | 2
 arch/mips/include/asm/mipsregs.h | 3
 arch/mips/kernel/mcount.S | 27
 arch/powerpc/kernel/entry_64.S | 1
 arch/powerpc/kernel/fadump.c | 3
 arch/powerpc/kernel/hw_breakpoint.c | 4
 arch/powerpc/kernel/ptrace.c | 1
 arch/powerpc/perf/imc-pmu.c | 4
 arch/powerpc/platforms/powernv/copy-paste.h | 3
 arch/powerpc/platforms/powernv/idle.c | 4
 arch/powerpc/platforms/powernv/pci-ioda.c | 1
 arch/x86/events/intel/uncore_snbep.c | 8
 arch/x86/include/asm/barrier.h | 2
 arch/x86/kernel/cpu/mcheck/mce-severity.c | 5
 arch/x86/kernel/cpu/mcheck/mce.c | 44
 arch/x86/kernel/quirks.c | 11
 arch/x86/kernel/traps.c | 14
 arch/x86/mm/init.c | 4
 arch/x86/platform/efi/efi_64.c | 4
 arch/x86/xen/smp_pv.c | 5
 arch/xtensa/kernel/traps.c | 2
 block/blk-core.c | 4
 crypto/asymmetric_keys/x509_cert_parser.c | 9
 drivers/acpi/acpi_lpss.c | 2
 drivers/auxdisplay/Kconfig | 10
 drivers/base/core.c | 15
 drivers/base/power/domain.c | 3
 drivers/base/power/opp/core.c | 2
 drivers/block/rbd.c | 2
 drivers/bluetooth/hci_qca.c | 6
 drivers/char/ipmi/ipmi_bt_sm.c | 3
 drivers/char/tpm/tpm-dev-common.c | 40
 drivers/char/tpm/tpm-dev.h | 2
 drivers/char/tpm/tpm2-space.c | 3
 drivers/clk/at91/clk-pll.c | 13
 drivers/clk/renesas/renesas-cpg-mssr.c | 9
 drivers/cpufreq/intel_pstate.c | 27
 drivers/cpuidle/cpuidle-powernv.c | 32
 drivers/iio/accel/sca3000.c | 9
 drivers/iio/adc/ad7791.c | 49
 drivers/infiniband/core/umem.c | 11
 drivers/infiniband/hw/hfi1/chip.c | 8
 drivers/infiniband/hw/hfi1/debugfs.c | 8
 drivers/infiniband/hw/hfi1/file_ops.c | 4
 drivers/infiniband/hw/hfi1/hfi.h | 1
 drivers/infiniband/hw/hfi1/init.c | 22
 drivers/infiniband/hw/hfi1/pio.c | 44
 drivers/infiniband/hw/mlx4/mad.c | 1
 drivers/infiniband/hw/mlx4/mr.c | 50
 drivers/infiniband/hw/mlx5/cq.c | 15
 drivers/infiniband/hw/qib/qib.h | 4
 drivers/infiniband/hw/qib/qib_file_ops.c | 10
 drivers/infiniband/hw/qib/qib_init.c | 13
 drivers/infiniband/hw/qib/qib_user_pages.c | 20
 drivers/infiniband/sw/rdmavt/cq.c | 31
 drivers/infiniband/ulp/isert/ib_isert.c | 28
 drivers/input/joystick/xpad.c | 2
 drivers/input/mouse/elan_i2c.h | 2
 drivers/input/mouse/elan_i2c_core.c | 3
 drivers/input/mouse/elan_i2c_smbus.c | 10
 drivers/input/mouse/elantech.c | 11
 drivers/irqchip/irq-gic-v3-its.c | 9
 drivers/md/dm-thin.c | 11
 drivers/md/dm-zoned-target.c | 2
 drivers/md/md.c | 4
 drivers/media/dvb-core/dvb_frontend.c | 23
 drivers/media/platform/vsp1/vsp1_video.c | 21
 drivers/media/usb/cx231xx/cx231xx-cards.c | 3
 drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 2
 drivers/mfd/intel-lpss-pci.c | 25
 drivers/mfd/intel-lpss.c | 4
 drivers/misc/cxl/sysfs.c | 16
 drivers/mtd/chips/cfi_cmdset_0002.c | 21
 drivers/mtd/ubi/build.c | 3
 drivers/mtd/ubi/eba.c | 90
 drivers/mtd/ubi/wl.c | 4
 drivers/nvdimm/bus.c | 14
 drivers/of/platform.c | 5
 drivers/of/resolver.c | 5
 drivers/of/unittest.c | 8
 drivers/pci/host/pci-hyperv.c | 11
 drivers/pci/hotplug/pciehp.h | 2
 drivers/pci/hotplug/pciehp_core.c | 2
 drivers/pci/hotplug/pciehp_hpc.c | 13
 (some files were not shown because too many files changed in this diff)

@@ -69,7 +69,9 @@ Date: September 2014
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
Set the mode for prefaulting in segments into the segment table
when performing the START_WORK ioctl. Possible values:
when performing the START_WORK ioctl. Only applicable when
running under hashed page table mmu.
Possible values:
none: No prefaulting (default)
work_element_descriptor: Treat the work element
descriptor as an effective address and

@@ -397,11 +397,10 @@ struct clk
%pC pll1
%pCn pll1
%pCr 1560000000
For printing struct clk structures. ``%pC`` and ``%pCn`` print the name
(Common Clock Framework) or address (legacy clock framework) of the
structure; ``%pCr`` prints the current clock rate.
structure.
Passed by reference.

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 52
SUBLEVEL = 53
EXTRAVERSION =
NAME = Petit Gorille

@@ -22,11 +22,12 @@
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/reset/mt2701-resets.h>
#include <dt-bindings/thermal/thermal.h>
#include "skeleton64.dtsi"
/ {
compatible = "mediatek,mt7623";
interrupt-parent = <&sysirq>;
#address-cells = <2>;
#size-cells = <2>;
cpu_opp_table: opp_table {
compatible = "operating-points-v2";

@@ -100,6 +100,7 @@
};
memory@80000000 {
device_type = "memory";
reg = <0 0x80000000 0 0x40000000>;
};
};

@@ -47,6 +47,7 @@
};
memory@80000000 {
device_type = "memory";
reg = <0 0x80000000 0 0x40000000>;
};

@@ -744,13 +744,13 @@
nand0: nand@ff900000 {
#address-cells = <0x1>;
#size-cells = <0x1>;
compatible = "denali,denali-nand-dt";
compatible = "altr,socfpga-denali-nand";
reg = <0xff900000 0x100000>,
<0xffb80000 0x10000>;
reg-names = "nand_data", "denali_reg";
interrupts = <0x0 0x90 0x4>;
dma-mask = <0xffffffff>;
clocks = <&nand_clk>;
clocks = <&nand_x_clk>;
status = "disabled";
};

@@ -593,8 +593,7 @@
#size-cells = <0>;
reg = <0xffda5000 0x100>;
interrupts = <0 102 4>;
num-chipselect = <4>;
bus-num = <0>;
num-cs = <4>;
/*32bit_access;*/
tx-dma-channel = <&pdma 16>;
rx-dma-channel = <&pdma 17>;
@@ -633,7 +632,7 @@
nand: nand@ffb90000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand";
compatible = "altr,socfpga-denali-nand";
reg = <0xffb90000 0x72000>,
<0xffb80000 0x10000>;
reg-names = "nand_data", "denali_reg";

@@ -77,7 +77,7 @@ extern int kgdb_fault_expected;
#define KGDB_MAX_NO_CPUS 1
#define BUFMAX 400
#define NUMREGBYTES (DBG_MAX_REG_NUM << 2)
#define NUMREGBYTES (GDB_MAX_REGS << 2)
#define NUMCRITREGBYTES (32 << 2)
#define _R0 0

@@ -205,9 +205,6 @@
bus-width = <4>;
cap-sd-highspeed;
sd-uhs-sdr12;
sd-uhs-sdr25;
sd-uhs-sdr50;
max-frequency = <100000000>;
disable-wp;

@@ -877,7 +877,7 @@ static int __init parse_kpti(char *str)
__kpti_forced = enabled ? 1 : -1;
return 0;
}
__setup("kpti=", parse_kpti);
early_param("kpti", parse_kpti);
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
static const struct arm64_cpu_capabilities arm64_features[] = {

@@ -676,11 +676,12 @@ static void do_signal(struct pt_regs *regs)
unsigned long continue_addr = 0, restart_addr = 0;
int retval = 0;
struct ksignal ksig;
bool syscall = in_syscall(regs);
/*
* If we were from a system call, check for system call restarting...
*/
if (in_syscall(regs)) {
if (syscall) {
continue_addr = regs->pc;
restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
retval = regs->regs[0];
@@ -732,7 +733,7 @@ static void do_signal(struct pt_regs *regs)
* Handle restarting a different system call. As above, if a debugger
* has chosen to restart at a different PC, ignore the restart.
*/
if (in_syscall(regs) && regs->pc == restart_addr) {
if (syscall && regs->pc == restart_addr) {
if (retval == -ERESTART_RESTARTBLOCK)
setup_restart_syscall(regs);
user_rewind_single_step(current);

@@ -196,8 +196,9 @@ ENDPROC(idmap_cpu_replace_ttbr1)
.macro __idmap_kpti_put_pgtable_ent_ng, type
orr \type, \type, #PTE_NG // Same bit for blocks and pages
str \type, [cur_\()\type\()p] // Update the entry and ensure it
dc civac, cur_\()\type\()p // is visible to all CPUs.
str \type, [cur_\()\type\()p] // Update the entry and ensure
dmb sy // that it is visible to all
dc civac, cur_\()\type\()p // CPUs.
.endm
/*

@@ -1017,7 +1017,7 @@ int __init mac_platform_init(void)
struct resource swim_rsrc = {
.flags = IORESOURCE_MEM,
.start = (resource_size_t)swim_base,
.end = (resource_size_t)swim_base + 0x2000,
.end = (resource_size_t)swim_base + 0x1FFF,
};
platform_device_register_simple("swim", -1, &swim_rsrc, 1);

@@ -89,7 +89,8 @@ static inline void free_io_area(void *addr)
for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
__iounmap(tmp->addr, tmp->size);
/* remove gap added in get_io_area() */
__iounmap(tmp->addr, tmp->size - IO_SIZE);
kfree(tmp);
return;
}

@@ -212,6 +212,12 @@ static int __init bcm47xx_cpu_fixes(void)
*/
if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
cpu_wait = NULL;
/*
* BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
* Enable ExternalSync for sync instruction to take effect
*/
set_c0_config7(MIPS_CONF7_ES);
break;
#endif
}

@@ -414,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
__val = *__addr; \
slow; \
\
/* prevent prefetching of coherent DMA data prematurely */ \
rmb(); \
return pfx##ioswab##bwlq(__addr, __val); \
}

@@ -680,6 +680,8 @@
#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
/* ExternalSync */
#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
@@ -2745,6 +2747,7 @@ __BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
__BUILD_SET_C0(config7)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)

@@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra)
EXPORT_SYMBOL(_mcount)
PTR_LA t1, ftrace_stub
PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
bne t1, t2, static_trace
beq t1, t2, fgraph_trace
nop
MCOUNT_SAVE_REGS
move a0, ra /* arg1: self return address */
jalr t2 /* (1) call *ftrace_trace_function */
move a1, AT /* arg2: parent's return address */
MCOUNT_RESTORE_REGS
fgraph_trace:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
PTR_LA t1, ftrace_stub
PTR_L t3, ftrace_graph_return
bne t1, t3, ftrace_graph_caller
nop
@@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount)
bne t1, t3, ftrace_graph_caller
nop
#endif
b ftrace_stub
#ifdef CONFIG_32BIT
addiu sp, sp, 8
#else
nop
#endif
static_trace:
MCOUNT_SAVE_REGS
move a0, ra /* arg1: self return address */
jalr t2 /* (1) call *ftrace_trace_function */
move a1, AT /* arg2: parent's return address */
MCOUNT_RESTORE_REGS
#ifdef CONFIG_32BIT
addiu sp, sp, 8
#endif
.globl ftrace_stub
ftrace_stub:
RETURN_BACK

@@ -597,6 +597,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
* actually hit this code path.
*/
isync
slbie r6
slbie r6 /* Workaround POWER5 < DD2.1 issue */
slbmte r7,r0

@@ -1155,6 +1155,9 @@ void fadump_cleanup(void)
init_fadump_mem_struct(&fdm,
be64_to_cpu(fdm_active->cpu_state_data.destination_address));
fadump_invalidate_dump(&fdm);
} else if (fw_dump.dump_registered) {
/* Un-register Firmware-assisted dump if it was registered. */
fadump_unregister_dump(&fdm);
}
}

@@ -175,8 +175,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
if (cpu_has_feature(CPU_FTR_DAWR)) {
length_max = 512 ; /* 64 doublewords */
/* DAWR region can't cross 512 boundary */
if ((bp->attr.bp_addr >> 10) !=
((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
if ((bp->attr.bp_addr >> 9) !=
((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
return -EINVAL;
}
if (info->len >
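
The DAWR check above encodes "must not cross a 512-byte boundary" as a shift by 9 (512 = 2^9): two byte addresses fall in the same 512-byte window exactly when they agree in all bits above bit 8. The old shift by 10 compared 1024-byte windows and wrongly accepted some breakpoints that do cross a 512-byte boundary. A tiny illustration in user-space C (hypothetical helper, not kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    /* True when [addr, addr + len - 1] stays inside one 512-byte window. */
    static bool fits_one_512b_window(uint64_t addr, uint64_t len)
    {
            return (addr >> 9) == ((addr + len - 1) >> 9);
    }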

@@ -2362,6 +2362,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
attr.bp_addr = hw_brk.address;
attr.bp_len = 8;
arch_bp_generic_fields(hw_brk.type,
&attr.bp_type);

@@ -1131,7 +1131,7 @@ static int init_nest_pmu_ref(void)
static void cleanup_all_core_imc_memory(void)
{
int i, nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
struct imc_mem_info *ptr = core_imc_pmu->mem_info;
int size = core_imc_pmu->counter_mem_size;
@@ -1239,7 +1239,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
if (!pmu_ptr->pmu.name)
return -ENOMEM;
nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
GFP_KERNEL);

@@ -42,5 +42,6 @@ static inline int vas_paste(void *paste_address, int offset)
: "b" (offset), "b" (paste_address)
: "memory", "cr0");
return (cr >> CR0_SHIFT) & CR0_MASK;
/* We mask with 0xE to ignore SO */
return (cr >> CR0_SHIFT) & 0xE;
}

@@ -78,7 +78,7 @@ static int pnv_save_sprs_for_deep_states(void)
uint64_t msr_val = MSR_IDLE;
uint64_t psscr_val = pnv_deepest_stop_psscr_val;
for_each_possible_cpu(cpu) {
for_each_present_cpu(cpu) {
uint64_t pir = get_hard_smp_processor_id(cpu);
uint64_t hsprg0_val = (uint64_t)&paca[cpu];
@@ -741,7 +741,7 @@ static int __init pnv_init_idle_states(void)
int cpu;
pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
for_each_possible_cpu(cpu) {
for_each_present_cpu(cpu) {
int base_cpu = cpu_first_thread_sibling(cpu);
int idx = cpu_thread_in_core(cpu);
int i;

@@ -3591,7 +3591,6 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
WARN_ON(pe->table_group.group);
}
pnv_pci_ioda2_table_free_pages(tbl);
iommu_tce_table_put(tbl);
}

@@ -3035,11 +3035,19 @@ static struct intel_uncore_type *bdx_msr_uncores[] = {
NULL,
};
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
EVENT_CONSTRAINT(0x80, 0xe, 0x80),
EVENT_CONSTRAINT_END
};
void bdx_uncore_cpu_init(void)
{
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
static struct intel_uncore_type bdx_uncore_ha = {

@@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
{
unsigned long mask;
asm ("cmp %1,%2; sbb %0,%0;"
asm volatile ("cmp %1,%2; sbb %0,%0;"
:"=r" (mask)
:"g"(size),"r" (index)
:"cc");
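
For context on the change above: without "volatile", the compiler may treat the asm as a pure function of its inputs and fold, move, or rewrite it, which can reintroduce a mispredictable branch; "asm volatile" forces the cmp/sbb pair to be emitted as written so it clamps a speculatively out-of-bounds index. A minimal user-space sketch of the same pattern, assuming x86-64 and a GNU-compatible compiler (mask_nospec and clamp_index are illustrative names, not kernel API):

    #include <stddef.h>

    /* cmp/sbb yields an all-ones mask when index < size, else zero,
     * with no conditional branch the CPU could mispredict. */
    static inline unsigned long mask_nospec(unsigned long index,
                                            unsigned long size)
    {
            unsigned long mask;

            asm volatile ("cmp %1,%2; sbb %0,%0"
                          : "=r" (mask)
                          : "g" (size), "r" (index)
                          : "cc");
            return mask;
    }

    static size_t clamp_index(size_t index, size_t size)
    {
            return index & mask_nospec(index, size); /* 0 if out of bounds */
    }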

@@ -143,6 +143,11 @@ static struct severity {
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
USER
),
MCESEV(
PANIC, "Data load in unrecoverable area of kernel",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
KERNEL
),
#endif
MCESEV(
PANIC, "Action required: unknown MCACOD",

@@ -760,23 +760,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
struct pt_regs *regs)
{
int i, ret = 0;
char *tmp;
int i;
for (i = 0; i < mca_cfg.banks; i++) {
m->status = mce_rdmsrl(msr_ops.status(i));
if (m->status & MCI_STATUS_VAL) {
__set_bit(i, validp);
if (quirk_no_way_out)
quirk_no_way_out(i, m, regs);
}
if (!(m->status & MCI_STATUS_VAL))
continue;
__set_bit(i, validp);
if (quirk_no_way_out)
quirk_no_way_out(i, m, regs);
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
mce_read_aux(m, i);
*msg = tmp;
ret = 1;
return 1;
}
}
return ret;
return 0;
}
/*
@@ -1205,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code)
lmce = m.mcgstatus & MCG_STATUS_LMCES;
/*
* Local machine check may already know that we have to panic.
* Broadcast machine check begins rendezvous in mce_start()
* Go through all banks in exclusion of the other CPUs. This way we
* don't report duplicated events on shared banks because the first one
* to see it will clear it. If this is a Local MCE, then no need to
* perform rendezvous.
* to see it will clear it.
*/
if (!lmce)
if (lmce) {
if (no_way_out)
mce_panic("Fatal local machine check", &m, msg);
} else {
order = mce_start(&no_way_out);
}
for (i = 0; i < cfg->banks; i++) {
__clear_bit(i, toclear);
@@ -1287,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
no_way_out = worst >= MCE_PANIC_SEVERITY;
} else {
/*
* Local MCE skipped calling mce_reign()
* If we found a fatal error, we need to panic here.
* If there was a fatal machine check we should have
* already called mce_panic earlier in this function.
* Since we re-read the banks, we might have found
* something new. Check again to see if we found a
* fatal error. We call "mce_severity()" again to
* make sure we have the right "msg".
*/
if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
mce_panic("Machine check from unknown source",
NULL, NULL);
if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
mce_severity(&m, cfg->tolerant, &msg, true);
mce_panic("Local fatal machine check!", &m, msg);
}
}
/*

@@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
/* Skylake */
static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
{
u32 capid0;
u32 capid0, capid5;
pci_read_config_dword(pdev, 0x84, &capid0);
pci_read_config_dword(pdev, 0x98, &capid5);
if ((capid0 & 0xc0) == 0xc0)
/*
* CAPID0{7:6} indicate whether this is an advanced RAS SKU
* CAPID5{8:5} indicate that various NVDIMM usage modes are
* enabled, so memory machine check recovery is also enabled.
*/
if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
static_branch_inc(&mcsafe_key);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);

@@ -828,16 +828,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
"simd exception";
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
return;
cond_local_irq_enable(regs);
if (!user_mode(regs)) {
if (!fixup_exception(regs, trapnr)) {
task->thread.error_code = error_code;
task->thread.trap_nr = trapnr;
if (fixup_exception(regs, trapnr))
return;
task->thread.error_code = error_code;
task->thread.trap_nr = trapnr;
if (notify_die(DIE_TRAP, str, regs, error_code,
trapnr, SIGFPE) != NOTIFY_STOP)
die(str, regs, error_code);
}
return;
}

@@ -706,7 +706,9 @@ void __init init_mem_mapping(void)
*/
int devmem_is_allowed(unsigned long pagenr)
{
if (page_is_ram(pagenr)) {
if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
!= REGION_DISJOINT) {
/*
* For disallowed memory regions in the low 1MB range,
* request that the page be shown as all zeros.

@@ -166,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
if (!(pgd_val(*pgd) & _PAGE_PRESENT))
if (!pgd_present(*pgd))
continue;
for (i = 0; i < PTRS_PER_P4D; i++) {
p4d = p4d_offset(pgd,
pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
if (!(p4d_val(*p4d) & _PAGE_PRESENT))
if (!p4d_present(*p4d))
continue;
pud = (pud_t *)p4d_page_vaddr(*p4d);

@@ -32,6 +32,7 @@
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>
#include <asm/spec-ctrl.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>
@@ -70,6 +71,8 @@ static void cpu_bringup(void)
cpu_data(cpu).x86_max_cores = 1;
set_cpu_sibling_map(cpu);
speculative_store_bypass_ht_init();
xen_setup_cpu_clockevents();
notify_cpu_starting(cpu);
@@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
}
set_cpu_sibling_map(0);
speculative_store_bypass_ht_init();
xen_pmu_init(0);
if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))

@@ -336,7 +336,7 @@ do_unaligned_user (struct pt_regs *regs)
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void *) regs->excvaddr;
force_sig_info(SIGSEGV, &info, current);
force_sig_info(SIGBUS, &info, current);
}
#endif

@@ -3150,6 +3150,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
dst->cpu = src->cpu;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
dst->special_vec = src->special_vec;
}
dst->nr_phys_segments = src->nr_phys_segments;
dst->ioprio = src->ioprio;
dst->extra_len = src->extra_len;

@@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen,
return -EINVAL;
}
if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
/* Discard the BIT STRING metadata */
if (vlen < 1 || *(const u8 *)value != 0)
return -EBADMSG;
value++;
vlen--;
}
ctx->cert->raw_sig = value;
ctx->cert->raw_sig_size = vlen;
return 0;

@@ -229,11 +229,13 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = {
static const struct lpss_device_desc byt_pwm_dev_desc = {
.flags = LPSS_SAVE_CTX,
.prv_offset = 0x800,
.setup = byt_pwm_setup,
};
static const struct lpss_device_desc bsw_pwm_dev_desc = {
.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
.prv_offset = 0x800,
.setup = bsw_pwm_setup,
};

@@ -14,9 +14,6 @@ menuconfig AUXDISPLAY
If you say N, all options in this submenu will be skipped and disabled.
config CHARLCD
tristate "Character LCD core support" if COMPILE_TEST
if AUXDISPLAY
config HD44780
@@ -157,8 +154,6 @@ config HT16K33
Say yes here to add support for Holtek HT16K33, RAM mapping 16*8
LED controller driver with keyscan.
endif # AUXDISPLAY
config ARM_CHARLCD
bool "ARM Ltd. Character LCD Driver"
depends on PLAT_VERSATILE
@@ -169,6 +164,8 @@ config ARM_CHARLCD
line and the Linux version on the second line, but that's
still useful.
endif # AUXDISPLAY
config PANEL
tristate "Parallel port LCD/Keypad Panel support"
depends on PARPORT
@@ -448,3 +445,6 @@ config PANEL_BOOT_MESSAGE
printf()-formatted message is valid with newline and escape codes.
endif # PANEL
config CHARLCD
tristate "Character LCD core support" if COMPILE_TEST

@@ -217,6 +217,13 @@ struct device_link *device_link_add(struct device *consumer,
link->rpm_active = true;
}
pm_runtime_new_link(consumer);
/*
* If the link is being added by the consumer driver at probe
* time, balance the decrementation of the supplier's runtime PM
* usage counter after consumer probe in driver_probe_device().
*/
if (consumer->links.status == DL_DEV_PROBING)
pm_runtime_get_noresume(supplier);
}
get_device(supplier);
link->supplier = supplier;
@@ -235,12 +242,12 @@ struct device_link *device_link_add(struct device *consumer,
switch (consumer->links.status) {
case DL_DEV_PROBING:
/*
* Balance the decrementation of the supplier's
* runtime PM usage counter after consumer probe
* in driver_probe_device().
* Some callers expect the link creation during
* consumer driver probe to resume the supplier
* even without DL_FLAG_RPM_ACTIVE.
*/
if (flags & DL_FLAG_PM_RUNTIME)
pm_runtime_get_sync(supplier);
pm_runtime_resume(supplier);
link->status = DL_STATE_CONSUMER_PROBE;
break;

@@ -2162,6 +2162,9 @@ int genpd_dev_pm_attach(struct device *dev)
genpd_lock(pd);
ret = genpd_power_on(pd, 0);
genpd_unlock(pd);
if (ret)
genpd_remove_device(pd, dev);
out:
return ret ? -EPROBE_DEFER : 0;
}

@@ -552,7 +552,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
}
/* Scaling up? Scale voltage before frequency */
if (freq > old_freq) {
if (freq >= old_freq) {
ret = _set_opp_voltage(dev, reg, new_supply);
if (ret)
goto restore_voltage;

@@ -3841,7 +3841,6 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev)
{
dout("%s rbd_dev %p\n", __func__, rbd_dev);
cancel_delayed_work_sync(&rbd_dev->watch_dwork);
cancel_work_sync(&rbd_dev->acquired_lock_work);
cancel_work_sync(&rbd_dev->released_lock_work);
cancel_delayed_work_sync(&rbd_dev->lock_dwork);
@@ -3859,6 +3858,7 @@ static void rbd_unregister_watch(struct rbd_device *rbd_dev)
rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
mutex_unlock(&rbd_dev->watch_mutex);
cancel_delayed_work_sync(&rbd_dev->watch_dwork);
ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}

@@ -936,6 +936,12 @@ static int qca_setup(struct hci_uart *hu)
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
ret = 0;
} else if (ret == -EAGAIN) {
/*
* Userspace firmware loader will return -EAGAIN in case no
* patch/nvm-config is found, so run with original fw/config.
*/
ret = 0;
}
/* Setup bdaddr */

@@ -522,11 +522,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
BT_CONTROL(BT_H_BUSY);
bt->timeout = bt->BT_CAP_req2rsp;
/* Read BT capabilities if it hasn't been done yet */
if (!bt->BT_CAP_outreqs)
BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
SI_SM_CALL_WITHOUT_DELAY);
bt->timeout = bt->BT_CAP_req2rsp;
BT_SI_SM_RETURN(SI_SM_IDLE);
case BT_STATE_XACTION_START:

@@ -37,7 +37,7 @@ static void timeout_work(struct work_struct *work)
struct file_priv *priv = container_of(work, struct file_priv, work);
mutex_lock(&priv->buffer_mutex);
atomic_set(&priv->data_pending, 0);
priv->data_pending = 0;
memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
mutex_unlock(&priv->buffer_mutex);
}
@@ -46,7 +46,6 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
struct file_priv *priv)
{
priv->chip = chip;
atomic_set(&priv->data_pending, 0);
mutex_init(&priv->buffer_mutex);
setup_timer(&priv->user_read_timer, user_reader_timeout,
(unsigned long)priv);
@@ -59,29 +58,24 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
struct file_priv *priv = file->private_data;
ssize_t ret_size;
ssize_t orig_ret_size;
ssize_t ret_size = 0;
int rc;
del_singleshot_timer_sync(&priv->user_read_timer);
flush_work(&priv->work);
ret_size = atomic_read(&priv->data_pending);
if (ret_size > 0) { /* relay data */
orig_ret_size = ret_size;
if (size < ret_size)
ret_size = size;
mutex_lock(&priv->buffer_mutex);
mutex_lock(&priv->buffer_mutex);
if (priv->data_pending) {
ret_size = min_t(ssize_t, size, priv->data_pending);
rc = copy_to_user(buf, priv->data_buffer, ret_size);
memset(priv->data_buffer, 0, orig_ret_size);
memset(priv->data_buffer, 0, priv->data_pending);
if (rc)
ret_size = -EFAULT;
mutex_unlock(&priv->buffer_mutex);
priv->data_pending = 0;
}
atomic_set(&priv->data_pending, 0);
mutex_unlock(&priv->buffer_mutex);
return ret_size;
}
@@ -92,17 +86,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
size_t in_size = size;
ssize_t out_size;
if (in_size > TPM_BUFSIZE)
return -E2BIG;
mutex_lock(&priv->buffer_mutex);
/* Cannot perform a write until the read has cleared either via
* tpm_read or a user_read_timer timeout. This also prevents split
* buffered writes from blocking here.
*/
if (atomic_read(&priv->data_pending) != 0)
if (priv->data_pending != 0) {
mutex_unlock(&priv->buffer_mutex);
return -EBUSY;
if (in_size > TPM_BUFSIZE)
return -E2BIG;
mutex_lock(&priv->buffer_mutex);
}
if (copy_from_user
(priv->data_buffer, (void __user *) buf, in_size)) {
@@ -133,7 +129,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
return out_size;
}
atomic_set(&priv->data_pending, out_size);
priv->data_pending = out_size;
mutex_unlock(&priv->buffer_mutex);
/* Set a timeout by which the reader must come claim the result */
@@ -150,5 +146,5 @@ void tpm_common_release(struct file *file, struct file_priv *priv)
del_singleshot_timer_sync(&priv->user_read_timer);
flush_work(&priv->work);
file->private_data = NULL;
atomic_set(&priv->data_pending, 0);
priv->data_pending = 0;
}

@@ -8,7 +8,7 @@ struct file_priv {
struct tpm_chip *chip;
/* Data passed to and from the tpm via the read/write calls */
atomic_t data_pending;
size_t data_pending;
struct mutex buffer_mutex;
struct timer_list user_read_timer; /* user needs to claim result */
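
The tpm-dev hunks above close a check-then-act race: data_pending was an atomic_t checked outside buffer_mutex, so two concurrent writers could both observe it as zero before either stored its result. With every access now made under buffer_mutex, a plain size_t suffices. A condensed user-space sketch of the fixed pattern (illustrative names, assuming POSIX threads rather than the kernel's mutex API):

    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;
    static size_t data_pending;     /* protected by buffer_mutex */

    /* The emptiness check and the store form one critical section,
     * so a second writer cannot slip in between them. */
    static int write_result(size_t out_size)
    {
            int ret = 0;

            pthread_mutex_lock(&buffer_mutex);
            if (data_pending != 0)
                    ret = -EBUSY;   /* reader has not claimed the last result */
            else
                    data_pending = out_size;
            pthread_mutex_unlock(&buffer_mutex);
            return ret;
    }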

@@ -102,8 +102,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
* TPM_RC_REFERENCE_H0 means the session has been
* flushed outside the space
*/
rc = -ENOENT;
*handle = 0;
tpm_buf_destroy(&tbuf);
return -ENOENT;
} else if (rc > 0) {
dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
__func__, rc);

@@ -132,19 +132,8 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pll *pll = to_clk_pll(hw);
unsigned int pllr;
u16 mul;
u8 div;
regmap_read(pll->regmap, PLL_REG(pll->id), &pllr);
div = PLL_DIV(pllr);
mul = PLL_MUL(pllr, pll->layout);
if (!div || !mul)
return 0;
return (parent_rate / div) * (mul + 1);
return (parent_rate / pll->div) * (pll->mul + 1);
}
static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,

@@ -248,8 +248,9 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
PTR_ERR(clk));
else
dev_dbg(dev, "clock (%u, %u) is %pC at %pCr Hz\n",
clkspec->args[0], clkspec->args[1], clk, clk);
dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
clkspec->args[0], clkspec->args[1], clk,
clk_get_rate(clk));
return clk;
}
@@ -314,7 +315,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
if (IS_ERR_OR_NULL(clk))
goto fail;
dev_dbg(dev, "Core clock %pC at %pCr Hz\n", clk, clk);
dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
priv->clks[id] = clk;
return;
@@ -380,7 +381,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
if (IS_ERR(clk))
goto fail;
dev_dbg(dev, "Module clock %pC at %pCr Hz\n", clk, clk);
dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
priv->clks[id] = clk;
return;

@@ -285,6 +285,7 @@ struct pstate_funcs {
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
@@ -1371,7 +1372,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu->pstate.scaling = pstate_funcs.get_scaling();
cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
if (hwp_active && !hwp_mode_bdw) {
unsigned int phy_max, current_max;
intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
} else {
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
}
if (pstate_funcs.get_aperf_mperf_shift)
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -2261,28 +2270,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */
#define INTEL_PSTATE_HWP_BROADWELL 0x01
#define ICPU_HWP(model, hwp_mode) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
ICPU_HWP(X86_MODEL_ANY, 0),
{}
};
static int __init intel_pstate_init(void)
{
const struct x86_cpu_id *id;
int rc;
if (no_load)
return -ENODEV;
if (x86_match_cpu(hwp_support_ids)) {
id = x86_match_cpu(hwp_support_ids);
if (id) {
copy_cpu_funcs(&core_funcs);
if (!no_hwp) {
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
goto hwp_cpu_matched;
}
} else {
const struct x86_cpu_id *id;
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id)
return -ENODEV;

@@ -43,9 +43,31 @@ struct stop_psscr_table {
static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX] __read_mostly;
static u64 snooze_timeout __read_mostly;
static u64 default_snooze_timeout __read_mostly;
static bool snooze_timeout_en __read_mostly;
static u64 get_snooze_timeout(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
int i;
if (unlikely(!snooze_timeout_en))
return default_snooze_timeout;
for (i = index + 1; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
struct cpuidle_state_usage *su = &dev->states_usage[i];
if (s->disabled || su->disable)
continue;
return s->target_residency * tb_ticks_per_usec;
}
return default_snooze_timeout;
}
static int snooze_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
@@ -56,7 +78,7 @@ static int snooze_loop(struct cpuidle_device *dev,
local_irq_enable();
snooze_exit_time = get_tb() + snooze_timeout;
snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
ppc64_runlatch_off();
HMT_very_low();
while (!need_resched()) {
@@ -463,11 +485,9 @@ static int powernv_idle_probe(void)
cpuidle_state_table = powernv_states;
/* Device tree can indicate more idle states */
max_idle_state = powernv_add_idle_states();
if (max_idle_state > 1) {
default_snooze_timeout = TICK_USEC * tb_ticks_per_usec;
if (max_idle_state > 1)
snooze_timeout_en = true;
snooze_timeout = powernv_states[1].target_residency *
tb_ticks_per_usec;
}
} else
return -ENODEV;

@@ -1277,7 +1277,7 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev)
{
struct iio_buffer *buffer;
buffer = iio_kfifo_allocate();
buffer = devm_iio_kfifo_allocate(&indio_dev->dev);
if (!buffer)
return -ENOMEM;
@@ -1287,11 +1287,6 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev)
return 0;
}
static void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
{
iio_kfifo_free(indio_dev->buffer);
}
static inline
int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
{
@@ -1547,8 +1542,6 @@ static int sca3000_remove(struct spi_device *spi)
if (spi->irq)
free_irq(spi->irq, indio_dev);
sca3000_unconfigure_ring(indio_dev);
return 0;
}

@@ -244,58 +244,9 @@ static int ad7791_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
static const char * const ad7791_sample_freq_avail[] = {
[AD7791_FILTER_RATE_120] = "120",
[AD7791_FILTER_RATE_100] = "100",
[AD7791_FILTER_RATE_33_3] = "33.3",
[AD7791_FILTER_RATE_20] = "20",
[AD7791_FILTER_RATE_16_6] = "16.6",
[AD7791_FILTER_RATE_16_7] = "16.7",
[AD7791_FILTER_RATE_13_3] = "13.3",
[AD7791_FILTER_RATE_9_5] = "9.5",
};
static ssize_t ad7791_read_frequency(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7791_state *st = iio_priv(indio_dev);
unsigned int rate = st->filter & AD7791_FILTER_RATE_MASK;
return sprintf(buf, "%s\n", ad7791_sample_freq_avail[rate]);
}
static ssize_t ad7791_write_frequency(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7791_state *st = iio_priv(indio_dev);
int i, ret;
i = sysfs_match_string(ad7791_sample_freq_avail, buf);
if (i < 0)
return i;
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
st->filter &= ~AD7791_FILTER_RATE_MASK;
st->filter |= i;
ad_sd_write_reg(&st->sd, AD7791_REG_FILTER, sizeof(st->filter),
st->filter);
iio_device_release_direct_mode(indio_dev);
return len;
}
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ad7791_read_frequency,
ad7791_write_frequency);
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("120 100 33.3 20 16.7 16.6 13.3 9.5");
static struct attribute *ad7791_attributes[] = {
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
NULL
};

@@ -119,16 +119,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
/*
* We ask for writable memory if any of the following
* access flags are set. "Local write" and "remote write"
* obviously require write access. "Remote atomic" can do
* things like fetch and add, which will modify memory, and
* "MW bind" can change permissions by binding a window.
*/
umem->writable = !!(access &
(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
umem->writable = ib_access_writable(access);
if (access & IB_ACCESS_ON_DEMAND) {
ret = ib_umem_odp_get(context, umem, access);
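
ib_access_writable(), referenced above, comes from the companion patch "IB/core: Make testing MR flags for writability a static inline function" in this same release; per the deleted comment, local write, remote write, remote atomic and MW bind all imply the pages may be modified. Its shape is roughly the following sketch (kernel context assumed, IB_ACCESS_* flags from include/rdma/ib_verbs.h; not the verbatim definition):

    /* Any of these access flags means the HCA may write the memory. */
    static inline bool ib_access_writable(int access_flags)
    {
            return access_flags &
                    (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
                     IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
    }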

@@ -6829,7 +6829,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
}
rcvmask = HFI1_RCVCTRL_CTXT_ENB;
/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
rcvmask |= rcd->rcvhdrtail_kvaddr ?
HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
hfi1_rcvctrl(dd, rcvmask, rcd);
hfi1_rcd_put(rcd);
@@ -8341,7 +8341,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
u32 tail;
int present;
if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
if (!rcd->rcvhdrtail_kvaddr)
present = (rcd->seq_cnt ==
rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
else /* is RDMA rtail */
@@ -11813,7 +11813,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
/* reset the tail and hdr addresses, and sequence count */
write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
rcd->rcvhdrq_dma);
if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
if (rcd->rcvhdrtail_kvaddr)
write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
rcd->rcvhdrqtailaddr_dma);
rcd->seq_cnt = 1;
@@ -11893,7 +11893,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
/* See comment on RcvCtxtCtrl.TailUpd above */

@@ -1179,7 +1179,8 @@ DEBUGFS_FILE_OPS(fault_stats);
static void fault_exit_opcode_debugfs(struct hfi1_ibdev *ibd)
{
debugfs_remove_recursive(ibd->fault_opcode->dir);
if (ibd->fault_opcode)
debugfs_remove_recursive(ibd->fault_opcode->dir);
kfree(ibd->fault_opcode);
ibd->fault_opcode = NULL;
}
@@ -1207,6 +1208,7 @@ static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd)
&ibd->fault_opcode->attr);
if (IS_ERR(ibd->fault_opcode->dir)) {
kfree(ibd->fault_opcode);
ibd->fault_opcode = NULL;
return -ENOENT;
}
@@ -1230,7 +1232,8 @@ fail:
static void fault_exit_packet_debugfs(struct hfi1_ibdev *ibd)
{
debugfs_remove_recursive(ibd->fault_packet->dir);
if (ibd->fault_packet)
debugfs_remove_recursive(ibd->fault_packet->dir);
kfree(ibd->fault_packet);
ibd->fault_packet = NULL;
}
@@ -1256,6 +1259,7 @@ static int fault_init_packet_debugfs(struct hfi1_ibdev *ibd)
&ibd->fault_opcode->attr);
if (IS_ERR(ibd->fault_packet->dir)) {
kfree(ibd->fault_packet);
ibd->fault_packet = NULL;
return -ENOENT;
}

@@ -622,7 +622,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
ret = -EINVAL;
goto done;
}
if (flags & VM_WRITE) {
if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) {
ret = -EPERM;
goto done;
}
@@ -807,8 +807,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
* checks to default and disable the send context.
*/
if (uctxt->sc) {
set_pio_integrity(uctxt->sc);
sc_disable(uctxt->sc);
set_pio_integrity(uctxt->sc);
}
hfi1_free_ctxt_rcv_groups(uctxt);

@@ -1851,6 +1851,7 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
#define HFI1_HAS_SDMA_TIMEOUT 0x8
#define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */
#define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */
#define HFI1_SHUTDOWN 0x100 /* device is shutting down */
/* IB dword length mask in PBC (lower 11 bits); same for all chips */
#define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)

@@ -1029,6 +1029,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
unsigned pidx;
int i;
if (dd->flags & HFI1_SHUTDOWN)
return;
dd->flags |= HFI1_SHUTDOWN;
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
@@ -1353,6 +1357,7 @@ void hfi1_disable_after_error(struct hfi1_devdata *dd)
static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
static void shutdown_one(struct pci_dev *);
#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "
@@ -1369,6 +1374,7 @@ static struct pci_driver hfi1_pci_driver = {
.name = DRIVER_NAME,
.probe = init_one,
.remove = remove_one,
.shutdown = shutdown_one,
.id_table = hfi1_pci_tbl,
.err_handler = &hfi1_pci_err_handler,
};
@@ -1780,6 +1786,13 @@ static void remove_one(struct pci_dev *pdev)
postinit_cleanup(dd);
}
static void shutdown_one(struct pci_dev *pdev)
{
struct hfi1_devdata *dd = pci_get_drvdata(pdev);
shutdown_device(dd);
}
/**
* hfi1_create_rcvhdrq - create a receive header queue
* @dd: the hfi1_ib device
@@ -1795,7 +1808,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
u64 reg;
if (!rcd->rcvhdrq) {
dma_addr_t dma_hdrqtail;
gfp_t gfp_flags;
/*
@@ -1821,13 +1833,13 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
goto bail;
}
if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
&dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
gfp_flags);
&dd->pcidev->dev, PAGE_SIZE,
&rcd->rcvhdrqtailaddr_dma, gfp_flags);
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
}
rcd->rcvhdrq_size = amt;

@@ -50,8 +50,6 @@
#include "qp.h"
#include "trace.h"
#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
#define SC(name) SEND_CTXT_##name
/*
* Send Context functions
@@ -977,15 +975,40 @@ void sc_disable(struct send_context *sc)
}
/* return SendEgressCtxtStatus.PacketOccupancy */
#define packet_occupancy(r) \
(((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
>> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
static u64 packet_occupancy(u64 reg)
{
return (reg &
SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
>> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
}
/* is egress halted on the context? */
#define egress_halted(r) \
((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
static bool egress_halted(u64 reg)
{
return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
}
/* wait for packet egress, optionally pause for credit return */
/* is the send context halted? */
static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
{
return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
SC(STATUS_CTXT_HALTED_SMASK));
}
/**
* sc_wait_for_packet_egress
* @sc: valid send context
* @pause: wait for credit return
*
* Wait for packet egress, optionally pause for credit return
*
* Egress halt and Context halt are not necessarily the same thing, so
* check for both.
*
* NOTE: The context halt bit may not be set immediately. Because of this,
* it is necessary to check the SW SFC_HALTED bit (set in the IRQ) and the HW
* context bit to determine if the context is halted.
*/
static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
{
struct hfi1_devdata *dd = sc->dd;
@@ -997,8 +1020,9 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
reg_prev = reg;
reg = read_csr(dd, sc->hw_context * 8 +
SEND_EGRESS_CTXT_STATUS);
/* done if egress is stopped */
if (egress_halted(reg))
/* done if any halt bits, SW or HW are set */
if (sc->flags & SCF_HALTED ||
is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
break;
reg = packet_occupancy(reg);
if (reg == 0)

@@ -1934,7 +1934,6 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
"buf:%lld\n", wc.wr_id);
break;
default:
BUG_ON(1);
break;
}
} else {

@@ -131,6 +131,40 @@ out:
return err;
}
static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
u64 length, u64 virt_addr,
int access_flags)
{
/*
* Force registering the memory as writable if the underlying pages
* are writable. This is so rereg can change the access permissions
* from readable to writable without having to run through ib_umem_get
* again
*/
if (!ib_access_writable(access_flags)) {
struct vm_area_struct *vma;
down_read(&current->mm->mmap_sem);
/*
* FIXME: Ideally this would iterate over all the vmas that
* cover the memory, but for now it requires a single vma to
* entirely cover the MR to support RO mappings.
*/
vma = find_vma(current->mm, start);
if (vma && vma->vm_end >= start + length &&
vma->vm_start <= start) {
if (vma->vm_flags & VM_WRITE)
access_flags |= IB_ACCESS_LOCAL_WRITE;
} else {
access_flags |= IB_ACCESS_LOCAL_WRITE;
}
up_read(&current->mm->mmap_sem);
}
return ib_umem_get(context, start, length, access_flags, 0);
}
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
@@ -145,10 +179,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
/* Force registering the memory as writable. */
/* Used for memory re-registeration. HCA protects the access */
mr->umem = ib_umem_get(pd->uobject->context, start, length,
access_flags | IB_ACCESS_LOCAL_WRITE, 0);
mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
virt_addr, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
@@ -215,6 +247,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
}
if (flags & IB_MR_REREG_ACCESS) {
if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
return -EPERM;
err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
convert_access(mr_access_flags));
@@ -228,10 +263,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
ib_umem_release(mmr->umem);
mmr->umem = ib_umem_get(mr->uobject->context, start, length,
mr_access_flags |
IB_ACCESS_LOCAL_WRITE,
0);
mmr->umem =
mlx4_get_umem_mr(mr->uobject->context, start, length,
virt_addr, mr_access_flags);
if (IS_ERR(mmr->umem)) {
err = PTR_ERR(mmr->umem);
/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */

@@ -646,7 +646,7 @@ repoll:
}
static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
struct ib_wc *wc)
struct ib_wc *wc, bool is_fatal_err)
{
struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
struct mlx5_ib_wc *soft_wc, *next;
@@ -659,6 +659,10 @@ static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
cq->mcq.cqn);
if (unlikely(is_fatal_err)) {
soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
}
wc[npolled++] = soft_wc->wc;
list_del(&soft_wc->list);
kfree(soft_wc);
@@ -679,12 +683,17 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
spin_lock_irqsave(&cq->lock, flags);
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
/* make sure no soft wqe's are waiting */
if (unlikely(!list_empty(&cq->wc_list)))
soft_polled = poll_soft_wc(cq, num_entries, wc, true);
mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
wc + soft_polled, &npolled);
goto out;
}
if (unlikely(!list_empty(&cq->wc_list)))
soft_polled = poll_soft_wc(cq, num_entries, wc);
soft_polled = poll_soft_wc(cq, num_entries, wc, false);
for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))

@@ -1250,6 +1250,7 @@ static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
#define QIB_BADINTR 0x8000 /* severe interrupt problems */
#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
#define QIB_SHUTDOWN 0x40000 /* device is shutting down */
/*
* values for ppd->lflags (_ib_port_ related flags)
@@ -1448,8 +1449,7 @@ u64 qib_sps_ints(void);
/*
* dma_addr wrappers - all 0's invalid for hw
*/
dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
size_t, int);
int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
const char *qib_get_unit_name(int unit);
const char *qib_get_card_name(struct rvt_dev_info *rdi);
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);

@@ -364,6 +364,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
goto done;
}
for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
dma_addr_t daddr;
for (; ntids--; tid++) {
if (tid == tidcnt)
tid = 0;
@@ -380,12 +382,14 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
ret = -ENOMEM;
break;
}
ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
if (ret)
break;
tidlist[i] = tid + tidoff;
/* we "know" system pages and TID pages are same size */
dd->pageshadow[ctxttid + tid] = pagep[i];
dd->physshadow[ctxttid + tid] =
qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
dd->physshadow[ctxttid + tid] = daddr;
/*
* don't need atomic or it's overhead
*/

@@ -850,6 +850,10 @@ static void qib_shutdown_device(struct qib_devdata *dd)
struct qib_pportdata *ppd;
unsigned pidx;
if (dd->flags & QIB_SHUTDOWN)
return;
dd->flags |= QIB_SHUTDOWN;
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
@@ -1189,6 +1193,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
static void qib_remove_one(struct pci_dev *);
static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
static void qib_shutdown_one(struct pci_dev *);
#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "
@@ -1206,6 +1211,7 @@ static struct pci_driver qib_driver = {
.name = QIB_DRV_NAME,
.probe = qib_init_one,
.remove = qib_remove_one,
.shutdown = qib_shutdown_one,
.id_table = qib_pci_tbl,
.err_handler = &qib_pci_err_handler,
};
@@ -1556,6 +1562,13 @@ static void qib_remove_one(struct pci_dev *pdev)
qib_postinit_cleanup(dd);
}
static void qib_shutdown_one(struct pci_dev *pdev)
{
struct qib_devdata *dd = pci_get_drvdata(pdev);
qib_shutdown_device(dd);
}
/**
* qib_create_rcvhdrq - create a receive header queue
* @dd: the qlogic_ib device

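The qib changes above wire a PCI .shutdown hook into the existing device-shutdown path and make that path idempotent with a one-way QIB_SHUTDOWN flag, since it can now be reached from both device removal and system shutdown. A rough sketch of the guard pattern (assuming flag updates are serialized by the caller, as in the driver):

/* Sketch: a one-way flag makes a teardown path safe to enter twice. */
static void example_shutdown(struct qib_devdata *dd)
{
	if (dd->flags & QIB_SHUTDOWN)
		return;			/* already torn down once */
	dd->flags |= QIB_SHUTDOWN;
	/* ... quiesce ports, stop DMA, mask interrupts ... */
}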
@@ -99,23 +99,27 @@ bail:
*
* I'm sure we won't be so lucky with other iommu's, so FIXME.
*/
dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
unsigned long offset, size_t size, int direction)
int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
{
dma_addr_t phys;
phys = pci_map_page(hwdev, page, offset, size, direction);
phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(hwdev, phys))
return -ENOMEM;
if (phys == 0) {
pci_unmap_page(hwdev, phys, size, direction);
phys = pci_map_page(hwdev, page, offset, size, direction);
if (!phys) {
pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(hwdev, phys))
return -ENOMEM;
/*
* FIXME: If we get 0 again, we should keep this page,
* map another, then free the 0 page.
*/
}
return phys;
*daddr = phys;
return 0;
}
/**

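qib_map_page() now reports failure through pci_dma_mapping_error() instead of returning a possibly invalid bus address, so the TID-update caller can unwind a failed DMA mapping. The same check expressed with the generic DMA API, as an illustrative sketch (hypothetical helper, not the driver's code):

/* Sketch: never hand an unchecked bus address to the hardware. */
static int map_one_page(struct device *dev, struct page *page,
			dma_addr_t *daddr)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE,
				       DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	*daddr = addr;
	return 0;
}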
@@ -121,17 +121,20 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
if (cq->notify == IB_CQ_NEXT_COMP ||
(cq->notify == IB_CQ_SOLICITED &&
(solicited || entry->status != IB_WC_SUCCESS))) {
struct kthread_worker *worker;
/*
* This will cause send_complete() to be called in
* another thread.
*/
spin_lock(&cq->rdi->n_cqs_lock);
if (likely(cq->rdi->worker)) {
rcu_read_lock();
worker = rcu_dereference(cq->rdi->worker);
if (likely(worker)) {
cq->notify = RVT_CQ_NONE;
cq->triggered++;
kthread_queue_work(cq->rdi->worker, &cq->comptask);
kthread_queue_work(worker, &cq->comptask);
}
spin_unlock(&cq->rdi->n_cqs_lock);
rcu_read_unlock();
}
spin_unlock_irqrestore(&cq->lock, flags);
@@ -513,7 +516,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
int cpu;
struct kthread_worker *worker;
if (rdi->worker)
if (rcu_access_pointer(rdi->worker))
return 0;
spin_lock_init(&rdi->n_cqs_lock);
@@ -525,7 +528,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
return PTR_ERR(worker);
set_user_nice(worker->task, MIN_NICE);
rdi->worker = worker;
RCU_INIT_POINTER(rdi->worker, worker);
return 0;
}
@@ -537,15 +540,19 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
{
struct kthread_worker *worker;
/* block future queuing from send_complete() */
spin_lock_irq(&rdi->n_cqs_lock);
worker = rdi->worker;
if (!rcu_access_pointer(rdi->worker))
return;
spin_lock(&rdi->n_cqs_lock);
worker = rcu_dereference_protected(rdi->worker,
lockdep_is_held(&rdi->n_cqs_lock));
if (!worker) {
spin_unlock_irq(&rdi->n_cqs_lock);
spin_unlock(&rdi->n_cqs_lock);
return;
}
rdi->worker = NULL;
spin_unlock_irq(&rdi->n_cqs_lock);
RCU_INIT_POINTER(rdi->worker, NULL);
spin_unlock(&rdi->n_cqs_lock);
synchronize_rcu();
kthread_destroy_worker(worker);
}

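The rdmavt fix above converts rdi->worker into an RCU-protected pointer: the completion path reads it under rcu_read_lock() instead of taking n_cqs_lock, and teardown retires it with synchronize_rcu() before destroying the kthread worker. Condensed to the generic publish/retire pattern (illustrative names; a sketch, not the driver's code):

static struct kthread_worker __rcu *worker;
static DEFINE_SPINLOCK(lock);	/* serializes updaters of `worker` */

/* Sketch: retire an RCU-published worker pointer safely. */
static void retire_worker(void)
{
	struct kthread_worker *w;

	spin_lock(&lock);
	w = rcu_dereference_protected(worker, lockdep_is_held(&lock));
	RCU_INIT_POINTER(worker, NULL);
	spin_unlock(&lock);
	if (!w)
		return;
	synchronize_rcu();	/* readers that saw the old pointer finish */
	kthread_destroy_worker(w);
}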
@@ -885,15 +885,9 @@ isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_des
}
static void
isert_create_send_desc(struct isert_conn *isert_conn,
struct isert_cmd *isert_cmd,
struct iser_tx_desc *tx_desc)
__isert_create_send_desc(struct isert_device *device,
struct iser_tx_desc *tx_desc)
{
struct isert_device *device = isert_conn->device;
struct ib_device *ib_dev = device->ib_device;
ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
tx_desc->iser_header.flags = ISCSI_CTRL;
@@ -906,6 +900,20 @@ isert_create_send_desc(struct isert_conn *isert_conn,
}
}
static void
isert_create_send_desc(struct isert_conn *isert_conn,
struct isert_cmd *isert_cmd,
struct iser_tx_desc *tx_desc)
{
struct isert_device *device = isert_conn->device;
struct ib_device *ib_dev = device->ib_device;
ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
__isert_create_send_desc(device, tx_desc);
}
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
struct iser_tx_desc *tx_desc)
@@ -993,7 +1001,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
int ret;
isert_create_send_desc(isert_conn, NULL, tx_desc);
__isert_create_send_desc(device, tx_desc);
memcpy(&tx_desc->iscsi_header, &login->rsp[0],
sizeof(struct iscsi_hdr));
@@ -2108,7 +2116,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
sig_attrs->check_mask =
(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
(se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG ? 0x30 : 0) |
(se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
return 0;
}

@@ -126,7 +126,7 @@ static const struct xpad_device {
u8 mapping;
u8 xtype;
} xpad_device[] = {
{ 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },

@@ -27,6 +27,8 @@
#define ETP_DISABLE_POWER 0x0001
#define ETP_PRESSURE_OFFSET 25
#define ETP_CALIBRATE_MAX_LEN 3
/* IAP Firmware handling */
#define ETP_PRODUCT_ID_FORMAT_STRING "%d.0"
#define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"

@@ -610,7 +610,7 @@ static ssize_t calibrate_store(struct device *dev,
int tries = 20;
int retval;
int error;
u8 val[3];
u8 val[ETP_CALIBRATE_MAX_LEN];
retval = mutex_lock_interruptible(&data->sysfs_mutex);
if (retval)
@@ -1261,6 +1261,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN060C", 0 },
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
{ "ELAN0618", 0 },
{ "ELAN1000", 0 },
{ }
};

@@ -56,7 +56,7 @@
static int elan_smbus_initialize(struct i2c_client *client)
{
u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
int len, error;
/* Get hello packet */
@@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
{
int error;
u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
error = i2c_smbus_read_block_data(client,
ETP_SMBUS_CALIBRATE_QUERY, val);
ETP_SMBUS_CALIBRATE_QUERY, buf);
if (error < 0)
return error;
memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
return 0;
}
@@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
{
int len;
BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
len = i2c_smbus_read_block_data(client,
ETP_SMBUS_PACKET_QUERY,
&report[ETP_SMBUS_REPORT_OFFSET]);

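The elan_i2c fixes above all stem from the same contract: i2c_smbus_read_block_data() may store up to I2C_SMBUS_BLOCK_MAX (32) bytes into the buffer it is given, no matter how few bytes the caller actually wants. A sketch of the bounce-buffer idiom the driver now uses (hypothetical helper):

/* Sketch: read a short result through a full-size SMBus block buffer. */
static int read_small_result(struct i2c_client *client, u8 cmd,
			     u8 *out, size_t out_len)
{
	u8 buf[I2C_SMBUS_BLOCK_MAX] = { 0 };
	int error;

	if (WARN_ON(out_len > sizeof(buf)))
		return -EINVAL;
	error = i2c_smbus_read_block_data(client, cmd, buf);
	if (error < 0)
		return error;
	memcpy(out, buf, out_len);	/* copy only what the caller asked for */
	return 0;
}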
@@ -804,7 +804,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
else if (ic_version == 7 && etd->samples[1] == 0x2A)
sanity_check = ((packet[3] & 0x1c) == 0x10);
else
sanity_check = ((packet[0] & 0x0c) == 0x04 &&
sanity_check = ((packet[0] & 0x08) == 0x00 &&
(packet[3] & 0x1c) == 0x10);
if (!sanity_check)
@@ -1177,6 +1177,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
{ }
};
static const char * const middle_button_pnp_ids[] = {
"LEN2131", /* ThinkPad P52 w/ NFC */
"LEN2132", /* ThinkPad P52 */
NULL
};
/*
* Set the appropriate event bits for the input subsystem
*/
@@ -1196,7 +1202,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
__clear_bit(EV_REL, dev->evbit);
__set_bit(BTN_LEFT, dev->keybit);
if (dmi_check_system(elantech_dmi_has_middle_button))
if (dmi_check_system(elantech_dmi_has_middle_button) ||
psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
__set_bit(BTN_MIDDLE, dev->keybit);
__set_bit(BTN_RIGHT, dev->keybit);

@@ -2221,7 +2221,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
cpu_mask = cpumask_of_node(its_dev->its->numa_node);
/* Bind the LPI to the first possible CPU */
cpu = cpumask_first(cpu_mask);
cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
if (cpu >= nr_cpu_ids) {
if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
return;
cpu = cpumask_first(cpu_online_mask);
}
its_dev->event_map.col_map[event] = cpu;
irq_data_update_effective_affinity(d, cpumask_of(cpu));

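The GIC ITS fix above keeps an LPI from being bound to an offline CPU: cpumask_first_and() returns nr_cpu_ids or more when the NUMA-local mask and the online mask do not intersect, and the code then falls back to the first online CPU (except under the Cavium erratum workaround). As a standalone sketch:

/* Sketch: prefer a NUMA-local CPU, but never pick an offline one. */
static int pick_target_cpu(const struct cpumask *node_mask)
{
	int cpu = cpumask_first_and(node_mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)		/* node has no online CPU */
		cpu = cpumask_first(cpu_online_mask);
	return cpu;
}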
@@ -1380,6 +1380,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
static void requeue_bios(struct pool *pool);
static void check_for_space(struct pool *pool)
{
int r;
@@ -1392,8 +1394,10 @@ static void check_for_space(struct pool *pool)
if (r)
return;
if (nr_free)
if (nr_free) {
set_pool_mode(pool, PM_WRITE);
requeue_bios(pool);
}
}
/*
@@ -1470,7 +1474,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
r = dm_pool_alloc_data_block(pool->pmd, result);
if (r) {
metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
if (r == -ENOSPC)
set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
else
metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
return r;
}

@@ -788,7 +788,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Chunk BIO work */
mutex_init(&dmz->chunk_lock);
INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
0, dev->name);
if (!dmz->chunk_wq) {

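dm-zoned populates this radix tree from its BIO handling path, so tree-node allocations must not recurse into the block layer under memory pressure; switching the tree's gfp mask to GFP_NOIO guarantees that. A sketch of a reclaim-safe insertion under the same assumption (hypothetical helper; the caller is assumed to hold whatever lock protects the tree):

/* Sketch: preload with GFP_NOIO so insertion cannot re-enter the I/O path. */
static int track_chunk(struct radix_tree_root *tree, unsigned long chunk,
		       void *cw)
{
	int ret = radix_tree_preload(GFP_NOIO);

	if (ret)
		return ret;
	ret = radix_tree_insert(tree, chunk, cw);
	radix_tree_preload_end();
	return ret;
}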
@@ -2823,7 +2823,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
}
} else if (cmd_match(buf, "re-add")) {
if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
rdev->saved_raid_disk >= 0) {
/* clear_bit is performed _after_ all the devices
* have their local Faulty bit cleared. If any writes
* happen in the meantime in the local node, they
@@ -8594,6 +8595,7 @@ static int remove_and_add_spares(struct mddev *mddev,
if (mddev->pers->hot_remove_disk(
mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->saved_raid_disk = rdev->raid_disk;
rdev->raid_disk = -1;
removed++;
}

@@ -275,8 +275,20 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe,
wake_up_interruptible (&events->wait_queue);
}
static int dvb_frontend_test_event(struct dvb_frontend_private *fepriv,
struct dvb_fe_events *events)
{
int ret;
up(&fepriv->sem);
ret = events->eventw != events->eventr;
down(&fepriv->sem);
return ret;
}
static int dvb_frontend_get_event(struct dvb_frontend *fe,
struct dvb_frontend_event *event, int flags)
struct dvb_frontend_event *event, int flags)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dvb_fe_events *events = &fepriv->events;
@@ -294,13 +306,8 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
if (flags & O_NONBLOCK)
return -EWOULDBLOCK;
up(&fepriv->sem);
ret = wait_event_interruptible (events->wait_queue,
events->eventw != events->eventr);
if (down_interruptible (&fepriv->sem))
return -ERESTARTSYS;
ret = wait_event_interruptible(events->wait_queue,
dvb_frontend_test_event(fepriv, events));
if (ret < 0)
return ret;

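The dvb_frontend rework fixes a semaphore imbalance: if down_interruptible() failed after the old wait, a later ioctl could up() a semaphore it never took. Now fepriv->sem stays held across the wait and only the condition helper drops and re-takes it, so every exit path leaves the count unchanged. The idiom in isolation (illustrative names, a sketch only):

/* Sketch: a wait_event_interruptible() condition that briefly releases
 * the semaphore around the check, keeping its state balanced on all paths. */
static int ring_nonempty(struct semaphore *sem, const int *w, const int *r)
{
	int ret;

	up(sem);		/* let other callers make progress */
	ret = (*w != *r);
	down(sem);
	return ret;
}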
@@ -849,9 +849,8 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
return 0;
}
static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
static void vsp1_video_release_buffers(struct vsp1_video *video)
{
struct vsp1_video *video = pipe->output->video;
struct vsp1_vb2_buffer *buffer;
unsigned long flags;
@@ -861,12 +860,18 @@ static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
INIT_LIST_HEAD(&video->irqqueue);
spin_unlock_irqrestore(&video->irqlock, flags);
}
static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
{
lockdep_assert_held(&pipe->lock);
/* Release our partition table allocation */
mutex_lock(&pipe->lock);
kfree(pipe->part_table);
pipe->part_table = NULL;
mutex_unlock(&pipe->lock);
vsp1_dl_list_put(pipe->dl);
pipe->dl = NULL;
}
static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -881,8 +886,9 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
if (pipe->stream_count == pipe->num_inputs) {
ret = vsp1_video_setup_pipeline(pipe);
if (ret < 0) {
mutex_unlock(&pipe->lock);
vsp1_video_release_buffers(video);
vsp1_video_cleanup_pipeline(pipe);
mutex_unlock(&pipe->lock);
return ret;
}
@@ -932,13 +938,12 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
if (ret == -ETIMEDOUT)
dev_err(video->vsp1->dev, "pipeline stop timeout\n");
vsp1_dl_list_put(pipe->dl);
pipe->dl = NULL;
vsp1_video_cleanup_pipeline(pipe);
}
mutex_unlock(&pipe->lock);
media_pipeline_stop(&video->video.entity);
vsp1_video_cleanup_pipeline(pipe);
vsp1_video_release_buffers(video);
vsp1_video_pipeline_put(pipe);
}

@@ -918,6 +918,9 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_CNXT_RDE_250},
{USB_DEVICE(0x0572, 0x58A0),
.driver_info = CX231XX_BOARD_CNXT_RDU_250},
/* AverMedia DVD EZMaker 7 */
{USB_DEVICE(0x07ca, 0xc039),
.driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER},
{USB_DEVICE(0x2040, 0xb110),
.driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL},
{USB_DEVICE(0x2040, 0xb111),

@@ -871,7 +871,7 @@ static int put_v4l2_ext_controls32(struct file *file,
get_user(kcontrols, &kp->controls))
return -EFAULT;
if (!count)
if (!count || count > (U32_MAX/sizeof(*ucontrols)))
return 0;
if (get_user(p, &up->controls))
return -EFAULT;

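In the compat path above, count is user-controlled and later multiplied by sizeof(*ucontrols) to size a copy; the added bound makes that multiplication impossible to wrap. The guard as a standalone sketch:

/* Sketch: validate a user-supplied element count before it feeds an
 * allocation or copy size. */
static bool count_fits(u32 count, size_t elem_size)
{
	return count != 0 && count <= U32_MAX / elem_size;
}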
@@ -124,6 +124,11 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
.properties = apl_i2c_properties,
};
static const struct intel_lpss_platform_info cnl_i2c_info = {
.clk_rate = 216000000,
.properties = spt_i2c_properties,
};
static const struct pci_device_id intel_lpss_pci_ids[] = {
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
@@ -207,13 +212,13 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&cnl_i2c_info },
/* SPT-H */
{ PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
@@ -240,10 +245,10 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&cnl_i2c_info },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);

@@ -275,11 +275,11 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
intel_lpss_deassert_reset(lpss);
intel_lpss_set_remap_addr(lpss);
if (!intel_lpss_has_idma(lpss))
return;
intel_lpss_set_remap_addr(lpss);
/* Make sure that SPI multiblock DMA transfers are re-enabled */
if (lpss->type == LPSS_DEV_SPI)
writel(value, lpss->priv + LPSS_PRIV_SSP_REG);

@@ -331,12 +331,20 @@ static ssize_t prefault_mode_store(struct device *device,
struct cxl_afu *afu = to_cxl_afu(device);
enum prefault_modes mode = -1;
if (!strncmp(buf, "work_element_descriptor", 23))
mode = CXL_PREFAULT_WED;
if (!strncmp(buf, "all", 3))
mode = CXL_PREFAULT_ALL;
if (!strncmp(buf, "none", 4))
mode = CXL_PREFAULT_NONE;
else {
if (!radix_enabled()) {
/* only allowed when not in radix mode */
if (!strncmp(buf, "work_element_descriptor", 23))
mode = CXL_PREFAULT_WED;
if (!strncmp(buf, "all", 3))
mode = CXL_PREFAULT_ALL;
} else {
dev_err(device, "Cannot prefault with radix enabled\n");
}
}
if (mode == -1)
return -EINVAL;

@@ -1880,7 +1880,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
if (time_after(jiffies, timeo) && !chip_ready(map, adr))
break;
if (chip_ready(map, adr)) {
if (chip_good(map, adr, datum)) {
xip_enable(map, chip, adr);
goto op_done;
}
@@ -2535,7 +2535,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct ppb_lock {
struct flchip *chip;
loff_t offset;
unsigned long adr;
int locked;
};
@@ -2553,8 +2553,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
unsigned long timeo;
int ret;
adr += chip->start;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
@@ -2572,8 +2573,8 @@
if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
chip->state = FL_LOCKING;
map_write(map, CMD(0xA0), chip->start + adr);
map_write(map, CMD(0x00), chip->start + adr);
map_write(map, CMD(0xA0), adr);
map_write(map, CMD(0x00), adr);
} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
/*
* Unlocking of one specific sector is not supported, so we
@@ -2611,7 +2612,7 @@
map_write(map, CMD(0x00), chip->start);
chip->state = FL_READY;
put_chip(map, chip, adr + chip->start);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
@@ -2668,9 +2669,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
* sectors shall be unlocked, so let's keep their locking
* status at "unlocked" (locked=0) for the final re-locking.
*/
if ((adr < ofs) || (adr >= (ofs + len))) {
if ((offset < ofs) || (offset >= (ofs + len))) {
sect[sectors].chip = &cfi->chips[chipnum];
sect[sectors].offset = offset;
sect[sectors].adr = adr;
sect[sectors].locked = do_ppb_xxlock(
map, &cfi->chips[chipnum], adr, 0,
DO_XXLOCK_ONEBLOCK_GETLOCK);
@@ -2684,6 +2685,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
i++;
if (adr >> cfi->chipshift) {
if (offset >= (ofs + len))
break;
adr = 0;
chipnum++;
@@ -2714,7 +2717,7 @@
*/
for (i = 0; i < sectors; i++) {
if (sect[i].locked)
do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
DO_XXLOCK_ONEBLOCK_LOCK);
}

@@ -1082,6 +1082,9 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
#ifdef CONFIG_MTD_UBI_FASTMAP
cancel_work_sync(&ubi->fm_work);
#endif
ubi_debugfs_exit_dev(ubi);
uif_close(ubi);

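Together with the shutdown_work() hunk further below, the UBI change moves fastmap-work teardown from flush_work() to cancel_work_sync() on the detach path: flushing only waits for an already-queued instance, while cancel_work_sync() also removes a pending item without running it and waits for a running one, which is the right semantics once the device is going away. The ordering, sketched roughly:

/* Sketch: stop producers first, then cancel the work, then free state. */
static void detach_order_example(struct ubi_device *ubi)
{
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);	/* no new work is scheduled */
#ifdef CONFIG_MTD_UBI_FASTMAP
	cancel_work_sync(&ubi->fm_work);	/* the work cannot run anymore */
#endif
	/* ... only now free the structures the work touches ... */
}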
@@ -490,6 +490,82 @@ out_unlock:
return err;
}
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
* check_mapping - check and fixup a mapping
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
* @pnum: physical eraseblock number
*
* Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
* operations; if such an operation is interrupted, the mapping still looks
* good, but upon first read an ECC error is reported to the upper layer.
* Normally this is fixed during the full scan at attach time; for Fastmap
* we have to deal with it while reading.
* If the PEB behind a LEB shows this symptom we change the mapping to
* %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
*
* Returns 0 on success, negative error code in case of failure.
*/
static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
int *pnum)
{
int err;
struct ubi_vid_io_buf *vidb;
if (!ubi->fast_attach)
return 0;
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb)
return -ENOMEM;
err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
if (err > 0 && err != UBI_IO_BITFLIPS) {
int torture = 0;
switch (err) {
case UBI_IO_FF:
case UBI_IO_FF_BITFLIPS:
case UBI_IO_BAD_HDR:
case UBI_IO_BAD_HDR_EBADMSG:
break;
default:
ubi_assert(0);
}
if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
torture = 1;
down_read(&ubi->fm_eba_sem);
vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
up_read(&ubi->fm_eba_sem);
ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
*pnum = UBI_LEB_UNMAPPED;
} else if (err < 0) {
ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
*pnum, err);
goto out_free;
}
err = 0;
out_free:
ubi_free_vid_buf(vidb);
return err;
}
#else
static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
int *pnum)
{
return 0;
}
#endif
/**
* ubi_eba_read_leb - read data.
* @ubi: UBI device description object
@@ -522,7 +598,13 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
return err;
pnum = vol->eba_tbl->entries[lnum].pnum;
if (pnum < 0) {
if (pnum >= 0) {
err = check_mapping(ubi, vol, lnum, &pnum);
if (err < 0)
goto out_unlock;
}
if (pnum == UBI_LEB_UNMAPPED) {
/*
* The logical eraseblock is not mapped, fill the whole buffer
* with 0xFF bytes. The exception is static volumes for which
@@ -930,6 +1012,12 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
return err;
pnum = vol->eba_tbl->entries[lnum].pnum;
if (pnum >= 0) {
err = check_mapping(ubi, vol, lnum, &pnum);
if (err < 0)
goto out;
}
if (pnum >= 0) {
dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);

@@ -1505,6 +1505,7 @@ int ubi_thread(void *u)
}
dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
ubi->thread_enabled = 0;
return 0;
}
@@ -1514,9 +1515,6 @@
*/
static void shutdown_work(struct ubi_device *ubi)
{
#ifdef CONFIG_MTD_UBI_FASTMAP
flush_work(&ubi->fm_work);
#endif
while (!list_empty(&ubi->works)) {
struct ubi_work *wrk;

@@ -565,14 +565,18 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
{
struct device *dev = disk_to_dev(disk)->parent;
struct nd_region *nd_region = to_nd_region(dev->parent);
const char *pol = nd_region->ro ? "only" : "write";
int disk_ro = get_disk_ro(disk);
if (nd_region->ro == get_disk_ro(disk))
/*
* Upgrade to read-only if the region is read-only; preserve as
* read-only if the disk is already read-only.
*/
if (disk_ro || nd_region->ro == disk_ro)
return 0;
dev_info(dev, "%s read-%s, marking %s read-%s\n",
dev_name(&nd_region->dev), pol, disk->disk_name, pol);
set_disk_ro(disk, nd_region->ro);
dev_info(dev, "%s read-only, marking %s read-only\n",
dev_name(&nd_region->dev), disk->disk_name);
set_disk_ro(disk, 1);
return 0;

@@ -533,6 +533,9 @@ int of_platform_device_destroy(struct device *dev, void *data)
if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS))
device_for_each_child(dev, NULL, of_platform_device_destroy);
of_node_clear_flag(dev->of_node, OF_POPULATED);
of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
if (dev->bus == &platform_bus_type)
platform_device_unregister(to_platform_device(dev));
#ifdef CONFIG_ARM_AMBA
@@ -540,8 +543,6 @@ int of_platform_device_destroy(struct device *dev, void *data)
amba_device_unregister(to_amba_device(dev));
#endif
of_node_clear_flag(dev->of_node, OF_POPULATED);
of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
return 0;
}
EXPORT_SYMBOL_GPL(of_platform_device_destroy);

@@ -129,6 +129,11 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay,
goto err_fail;
}
if (offset < 0 || offset + sizeof(__be32) > prop->length) {
err = -EINVAL;
goto err_fail;
}
*(__be32 *)(prop->value + offset) = cpu_to_be32(phandle);
}

@@ -164,20 +164,20 @@ static void __init of_unittest_dynamic(void)
/* Add a new property - should pass*/
prop->name = "new-property";
prop->value = "new-property-data";
prop->length = strlen(prop->value);
prop->length = strlen(prop->value) + 1;
unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
/* Try to add an existing property - should fail */
prop++;
prop->name = "new-property";
prop->value = "new-property-data-should-fail";
prop->length = strlen(prop->value);
prop->length = strlen(prop->value) + 1;
unittest(of_add_property(np, prop) != 0,
"Adding an existing property should have failed\n");
/* Try to modify an existing property - should pass */
prop->value = "modify-property-data-should-pass";
prop->length = strlen(prop->value);
prop->length = strlen(prop->value) + 1;
unittest(of_update_property(np, prop) == 0,
"Updating an existing property should have passed\n");
@@ -185,7 +185,7 @@
prop++;
prop->name = "modify-property";
prop->value = "modify-missing-property-data-should-pass";
prop->length = strlen(prop->value);
prop->length = strlen(prop->value) + 1;
unittest(of_update_property(np, prop) == 0,
"Updating a missing property should have passed\n");

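All four unittest fixes add one byte for the string terminator: device-tree string property values include their trailing NUL, so a property's length must be strlen(value) + 1 or consumers reading the property run past the data. A sketch of a correctly sized string property (hypothetical helper):

/* Sketch: DT string properties carry the terminating '\0'. */
static void fill_string_prop(struct property *prop, char *name, char *value)
{
	prop->name = name;
	prop->value = value;
	prop->length = strlen(value) + 1;	/* include the NUL */
}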
@@ -1610,17 +1610,6 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
get_pcichild(hpdev, hv_pcidev_ref_childlist);
spin_lock_irqsave(&hbus->device_list_lock, flags);
/*
* When a device is being added to the bus, we set the PCI domain
* number to be the device serial number, which is non-zero and
* unique on the same VM. The serial numbers start with 1, and
* increase by 1 for each device. So device names that include this
* serial number can be shorter than names based on the bus instance UUID.
* Only the first device serial number is used for domain, so the
* domain number will not change after the first device is added.
*/
if (list_empty(&hbus->children))
hbus->sysdata.domain = desc->ser;
list_add_tail(&hpdev->list_entry, &hbus->children);
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
return hpdev;

@@ -134,7 +134,7 @@ struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
int pciehp_disable_slot(struct slot *p_slot);
void pcie_enable_notification(struct controller *ctrl);
void pcie_reenable_notification(struct controller *ctrl);
int pciehp_power_on_slot(struct slot *slot);
void pciehp_power_off_slot(struct slot *slot);
void pciehp_get_power_status(struct slot *slot, u8 *status);

@@ -297,7 +297,7 @@ static int pciehp_resume(struct pcie_device *dev)
ctrl = get_service_data(dev);
/* reinitialize the chipset's event detection logic */
pcie_enable_notification(ctrl);
pcie_reenable_notification(ctrl);
slot = ctrl->slot;

@@ -676,7 +676,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
return handled;
}
void pcie_enable_notification(struct controller *ctrl)
static void pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
@@ -714,6 +714,17 @@ void pcie_enable_notification(struct controller *ctrl)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
void pcie_reenable_notification(struct controller *ctrl)
{
/*
* Clear both Presence and Data Link Layer Changed to make sure
* those events still fire after we have re-enabled them.
*/
pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
pcie_enable_notification(ctrl);
}
static void pcie_disable_notification(struct controller *ctrl)
{
u16 mask;

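pciehp's resume path now clears any Presence Detect Changed and Data Link Layer State Changed events latched while notifications were off, before re-arming them. Slot Status bits are RW1C, so writing a 1 clears the latch; sketched in isolation:

/* Sketch: ack stale RW1C slot events so the next real change fires. */
static void clear_stale_slot_events(struct pci_dev *port)
{
	pcie_capability_write_word(port, PCI_EXP_SLTSTA,
				   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
}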