This is the 4.14.78 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlvK3eYACgkQONu9yGCS
 aT6VRxAAmEobgHTPIT6pg46RwrNch96qWRwHCRv9nT46KJfLyMwkXig/wY7YIdyg
 UnUptqRmZpoLhUB+sQm/G4jo0dWKl5PDDweP58MlVMw5AnhEeXiuH8NHdH7N1kzr
 M/8FWHbIj3wE6bhsj3oZniMpSeDhOU/PU6CTR04xw1pSUC3fsipxq39tuslrIUQd
 wcn9iQ4oGve3jV08teQnlDOUV8EvcRIV/t5eofkZRMPhq5QnLVX4h1sfPTt202bi
 ae5tHw0Q7JwEIDF8XzyghdFTEAQTyeIulUxFGbUnb72f/a17b46D4/vWfNslcur+
 YSgtatHtGXV2TG/OMneLctDaWqwt+3Tayhbsu1KSU4c06tNfSlfl3xT83deXe5Jg
 aaDizdxk8tZOD4aJ3LjhF/Okayog5nAy4xauIuYI2iXVs30zrjs/N20gPOBS14T7
 doX4khcD7Vs66+DAm9nxu9dOKsHJacNFw4moPgGJ3dFLy2kzAsS/6Pdq+N1+Ls3b
 jw2jHLsm3yD/wfGqENUuI0Yjnfy8pPj9s5uvt66r44AQba2diZw2TctPvVF34Gv3
 JBHzhWpMfV/x3r634ekk9XfYSoLNNc7QseDLYd7mGqaSwP7B3gAAuoHlfwCY/lmo
 OfKCs4qcVCE6QjjV/G7+VxLo5y/tNnAGFb57vmIVcvOMyVhVSOk=
 =GWX0
 -----END PGP SIGNATURE-----

Merge 4.14.78 into android-4.14-p

Changes in 4.14.78
	media: af9035: prevent buffer overflow on write
	batman-adv: Avoid probe ELP information leak
	batman-adv: Fix segfault when writing to throughput_override
	batman-adv: Fix segfault when writing to sysfs elp_interval
	batman-adv: Prevent duplicated gateway_node entry
	batman-adv: Prevent duplicated nc_node entry
	batman-adv: Prevent duplicated softif_vlan entry
	batman-adv: Prevent duplicated global TT entry
	batman-adv: Prevent duplicated tvlv handler
	batman-adv: fix backbone_gw refcount on queue_work() failure
	batman-adv: fix hardif_neigh refcount on queue_work() failure
	clocksource/drivers/ti-32k: Add CLOCK_SOURCE_SUSPEND_NONSTOP flag for non-am43 SoCs
	scsi: ibmvscsis: Fix a stringop-overflow warning
	scsi: ibmvscsis: Ensure partition name is properly NUL terminated
	intel_th: pci: Add Ice Lake PCH support
	Input: atakbd - fix Atari keymap
	Input: atakbd - fix Atari CapsLock behaviour
	net: emac: fix fixed-link setup for the RTL8363SB switch
	ravb: do not write 1 to reserved bits
	PCI: dwc: Fix scheduling while atomic issues
	drm: mali-dp: Call drm_crtc_vblank_reset on device init
	scsi: ipr: System hung while dlpar adding primary ipr adapter back
	scsi: sd: don't crash the host on invalid commands
	net/mlx4: Use cpumask_available for eq->affinity_mask
	clocksource/drivers/fttmr010: Fix set_next_event handler
	powerpc/tm: Fix userspace r13 corruption
	powerpc/tm: Avoid possible userspace r1 corruption on reclaim
	iommu/amd: Return devid as alias for ACPI HID devices
	powerpc/lib/feature-fixups: use raw_patch_instruction()
	Revert "vfs: fix freeze protection in mnt_want_write_file() for overlayfs"
	mremap: properly flush TLB before releasing the page
	ARC: build: Get rid of toolchain check
	ARC: build: Don't set CROSS_COMPILE in arch's Makefile
	HID: quirks: fix support for Apple Magic Keyboards
	drm/i915: Nuke the LVDS lid notifier
	staging: ccree: check DMA pool buf !NULL before free
	mm: disallow mappings that conflict for devm_memremap_pages()
	drm/i915/glk: Add Quirk for GLK NUC HDMI port issues.
	i2c: rcar: handle RXDMA HW behaviour on Gen3
	IB/hfi1: Fix destroy_qp hang after a link down
	Linux 4.14.78

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 0560ddf11a
 Makefile                                 |   2
 arch/arc/Makefile                        |  24
 arch/powerpc/include/asm/code-patching.h |   1
 arch/powerpc/kernel/tm.S                 |  20
 arch/powerpc/lib/code-patching.c         |   4
 arch/powerpc/lib/feature-fixups.c        |   8
 drivers/clocksource/timer-fttmr010.c     |  18
 drivers/clocksource/timer-ti-32k.c       |   3
 drivers/gpu/drm/arm/malidp_drv.c         |   1
 drivers/gpu/drm/i915/i915_drv.c          |  10
 drivers/gpu/drm/i915/i915_drv.h          |   9
 drivers/gpu/drm/i915/intel_ddi.c         |  13
 drivers/gpu/drm/i915/intel_display.c     |  21
 drivers/gpu/drm/i915/intel_drv.h         |   3
 drivers/gpu/drm/i915/intel_lvds.c        | 136
 drivers/hid/hid-core.c                   |   3
 drivers/hwtracing/intel_th/pci.c         |   5
 drivers/i2c/busses/i2c-rcar.c            |  54
 drivers/infiniband/hw/hfi1/chip.c        |   7
 drivers/infiniband/hw/hfi1/pio.c         |  42
 drivers/infiniband/hw/hfi1/pio.h         |   2
 drivers/input/keyboard/atakbd.c          |  74
 drivers/iommu/amd_iommu.c                |   6
 drivers/media/usb/dvb-usb-v2/af9035.c    |   6
 drivers/net/ethernet/ibm/emac/core.c     |  15
 drivers/net/ethernet/mellanox/mlx4/eq.c  |   3
 drivers/net/ethernet/renesas/ravb.h      |   5
 drivers/net/ethernet/renesas/ravb_main.c |  11
 drivers/net/ethernet/renesas/ravb_ptp.c  |   2
 drivers/pci/dwc/pcie-designware.c        |   8
 drivers/pci/dwc/pcie-designware.h        |   3
 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |   5
 drivers/scsi/ipr.c                       | 106
 drivers/scsi/ipr.h                       |   1
 drivers/scsi/sd.c                        |   3
 drivers/staging/ccree/ssi_buffer_mgr.c   |   3
 fs/namespace.c                           |   7
 include/linux/huge_mm.h                  |   2
 kernel/memremap.c                        |  18
 mm/huge_memory.c                         |  10
 mm/mremap.c                              |  30
 net/batman-adv/bat_v_elp.c               |  10
 net/batman-adv/bridge_loop_avoidance.c   |  10
 net/batman-adv/gateway_client.c          |  11
 net/batman-adv/network-coding.c          |  27
 net/batman-adv/soft-interface.c          |  25
 net/batman-adv/sysfs.c                   |  30
 net/batman-adv/translation-table.c       |   6
 net/batman-adv/tvlv.c                    |   8
 49 files changed

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 77
SUBLEVEL = 78
EXTRAVERSION =
NAME = Petit Gorille

@ -6,34 +6,12 @@
# published by the Free Software Foundation.
#
ifeq ($(CROSS_COMPILE),)
ifndef CONFIG_CPU_BIG_ENDIAN
CROSS_COMPILE := arc-linux-
else
CROSS_COMPILE := arceb-linux-
endif
endif
KBUILD_DEFCONFIG := nsim_700_defconfig
cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
ifdef CONFIG_ISA_ARCOMPACT
ifeq ($(is_700), 0)
$(error Toolchain not configured for ARCompact builds)
endif
endif
ifdef CONFIG_ISA_ARCV2
ifeq ($(is_700), 1)
$(error Toolchain not configured for ARCv2 builds)
endif
endif
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register definition, make sure it gets passed to every file
# We had a customer-reported bug where some code built in kernel was NOT using
@ -87,7 +65,7 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
ldflags-$(upto_gcc44) += -marclinux
LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
# Modules with short calls might break for calls into builtin-kernel
KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode

@ -31,6 +31,7 @@ unsigned int create_cond_branch(const unsigned int *addr,
unsigned long target, int flags);
int patch_branch(unsigned int *addr, unsigned long target, int flags);
int patch_instruction(unsigned int *addr, unsigned int instr);
int raw_patch_instruction(unsigned int *addr, unsigned int instr);
int instr_is_relative_branch(unsigned int instr);
int instr_is_relative_link_branch(unsigned int instr);

@ -167,13 +167,27 @@ _GLOBAL(tm_reclaim)
std r1, PACATMSCRATCH(r13)
ld r1, PACAR1(r13)
/* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
/*
* Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
* clobbered by an exception once we turn on MSR_RI below.
*/
ld r11, PACATMSCRATCH(r13)
std r11, GPR1(r1)
/*
* Store r13 away so we can free up the scratch SPR for the SLB fault
* handler (needed once we start accessing the thread_struct).
*/
GET_SCRATCH0(r11)
std r11, GPR13(r1)
/* Reset MSR RI so we can take SLB faults again */
li r11, MSR_RI
mtmsrd r11, 1
/* Store the PPR in r11 and reset to decent value */
mfspr r11, SPRN_PPR
HMT_MEDIUM
@ -198,11 +212,11 @@ _GLOBAL(tm_reclaim)
SAVE_GPR(8, r7) /* user r8 */
SAVE_GPR(9, r7) /* user r9 */
SAVE_GPR(10, r7) /* user r10 */
ld r3, PACATMSCRATCH(r13) /* user r1 */
ld r3, GPR1(r1) /* user r1 */
ld r4, GPR7(r1) /* user r7 */
ld r5, GPR11(r1) /* user r11 */
ld r6, GPR12(r1) /* user r12 */
GET_SCRATCH0(8) /* user r13 */
ld r8, GPR13(r1) /* user r13 */
std r3, GPR1(r7)
std r4, GPR7(r7)
std r5, GPR11(r7)

@ -39,7 +39,7 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
return 0;
}
static int raw_patch_instruction(unsigned int *addr, unsigned int instr)
int raw_patch_instruction(unsigned int *addr, unsigned int instr)
{
return __patch_instruction(addr, instr, addr);
}
@ -156,7 +156,7 @@ static int do_patch_instruction(unsigned int *addr, unsigned int instr)
* when text_poke_area is not ready, but we still need
* to allow patching. We just do the plain old patching
*/
if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
if (!this_cpu_read(text_poke_area))
return raw_patch_instruction(addr, instr);
local_irq_save(flags);

@ -63,7 +63,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
}
}
patch_instruction(dest, instr);
raw_patch_instruction(dest, instr);
return 0;
}
@ -92,7 +92,7 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
}
for (; dest < end; dest++)
patch_instruction(dest, PPC_INST_NOP);
raw_patch_instruction(dest, PPC_INST_NOP);
return 0;
}
@ -292,7 +292,7 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
for (; start < end; start++) {
dest = (void *)start + *start;
patch_instruction(dest, PPC_INST_LWSYNC);
raw_patch_instruction(dest, PPC_INST_LWSYNC);
}
}
@ -310,7 +310,7 @@ static void do_final_fixups(void)
length = (__end_interrupts - _stext) / sizeof(int);
while (length--) {
patch_instruction(dest, *src);
raw_patch_instruction(dest, *src);
src++;
dest++;
}

@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
cr &= ~fttmr010->t1_enable_val;
writel(cr, fttmr010->base + TIMER_CR);
/* Setup the match register forward/backward in time */
cr = readl(fttmr010->base + TIMER1_COUNT);
if (fttmr010->count_down)
cr -= cycles;
else
cr += cycles;
writel(cr, fttmr010->base + TIMER1_MATCH1);
if (fttmr010->count_down) {
/*
* ASPEED Timer Controller will load TIMER1_LOAD register
* into TIMER1_COUNT register when the timer is re-enabled.
*/
writel(cycles, fttmr010->base + TIMER1_LOAD);
} else {
/* Setup the match register forward in time */
cr = readl(fttmr010->base + TIMER1_COUNT);
writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
}
/* Start */
cr = readl(fttmr010->base + TIMER_CR);

@ -98,6 +98,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
return -ENXIO;
}
if (!of_machine_is_compatible("ti,am43"))
ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
ti_32k_timer.counter = ti_32k_timer.base;
/*

@ -617,6 +617,7 @@ static int malidp_bind(struct device *dev)
drm->irq_enabled = true;
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
drm_crtc_vblank_reset(&malidp->crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
goto vblank_fail;

@ -878,7 +878,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
@ -1505,11 +1504,6 @@ static int i915_drm_suspend(struct drm_device *dev)
pci_power_t opregion_target_state;
int error;
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_SUSPENDED;
mutex_unlock(&dev_priv->modeset_restore_lock);
disable_rpm_wakeref_asserts(dev_priv);
/* We do a lot of poking in a lot of registers, make sure they work
@ -1718,10 +1712,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
intel_opregion_notify_adapter(dev_priv, PCI_D0);
intel_autoenable_gt_powersave(dev_priv);

@ -1183,6 +1183,7 @@ enum intel_sbi_destination {
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
struct intel_fbdev;
struct intel_fbc_work;
@ -1614,12 +1615,6 @@ struct i915_gpu_error {
unsigned long test_irq_rings;
};
enum modeset_restore {
MODESET_ON_LID_OPEN,
MODESET_DONE,
MODESET_SUSPENDED,
};
#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
@ -2296,8 +2291,6 @@ struct drm_i915_private {
unsigned long quirks;
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
struct drm_atomic_state *modeset_restore_state;
struct drm_modeset_acquire_ctx reset_ctx;

@ -1526,15 +1526,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
I915_WRITE(reg, val);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
/* Quirk time at 100ms for reliable operation */
msleep(100);
}
}
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)

@ -5653,7 +5653,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
intel_ddi_disable_transcoder_func(old_crtc_state);
if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
@ -14286,6 +14286,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
DRM_INFO("Applying T12 delay quirk\n");
}
/*
* GeminiLake NUC HDMI outputs require additional off time;
* this allows the onboard retimer to correctly sync to the signal
*/
static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
DRM_INFO("Applying Increase DDI Disabled quirk\n");
}
struct intel_quirk {
int device;
int subsystem_vendor;
@ -14372,6 +14384,13 @@ static struct intel_quirk intel_quirks[] = {
/* Toshiba Satellite P50-C-18C */
{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
/* GeminiLake NUC */
{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
/* ASRock ITX */
{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
};
static void intel_init_quirks(struct drm_device *dev)

@ -1254,8 +1254,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
struct intel_encoder *

@ -44,8 +44,6 @@
/* Private structure for the integrated LVDS support */
struct intel_lvds_connector {
struct intel_connector base;
struct notifier_block lid_notifier;
};
struct intel_lvds_pps {
@ -440,26 +438,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return true;
}
/**
* Detect the LVDS connection.
*
* Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
* connected and closed means disconnected. We also send hotplug events as
* needed, using lid status notification from the input layer.
*/
static enum drm_connector_status
intel_lvds_detect(struct drm_connector *connector, bool force)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
enum drm_connector_status status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
status = intel_panel_detect(dev_priv);
if (status != connector_status_unknown)
return status;
return connector_status_connected;
}
@ -484,117 +465,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 1;
}
static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
{
DRM_INFO("Skipping forced modeset for %s\n", id->ident);
return 1;
}
/* The GPU hangs up on these systems if modeset is performed on LID open */
static const struct dmi_system_id intel_no_modeset_on_lid[] = {
{
.callback = intel_no_modeset_on_lid_dmi_callback,
.ident = "Toshiba Tecra A11",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
},
},
{ } /* terminating entry */
};
/*
* Lid events. Note the use of 'modeset':
* - we set it to MODESET_ON_LID_OPEN on lid close,
* and set it to MODESET_DONE on open
* - we use it as an "only once" bit (i.e. we ignore
* duplicate events where it was already properly set)
* - the suspend/resume paths will set it to
* MODESET_SUSPENDED and ignore the lid open event,
* because they restore the mode ("lid open").
*/
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
{
struct intel_lvds_connector *lvds_connector =
container_of(nb, struct intel_lvds_connector, lid_notifier);
struct drm_connector *connector = &lvds_connector->base.base;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
mutex_lock(&dev_priv->modeset_restore_lock);
if (dev_priv->modeset_restore == MODESET_SUSPENDED)
goto exit;
/*
* check and update the status of LVDS connector after receiving
* the LID notification event.
*/
connector->status = connector->funcs->detect(connector, false);
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
goto exit;
if (!acpi_lid_open()) {
/* do modeset on next lid open event */
dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
goto exit;
}
if (dev_priv->modeset_restore == MODESET_DONE)
goto exit;
/*
* Some old platforms' BIOSes love to wreak havoc while the lid is closed.
* We try to detect this here and undo any damage. The split for PCH
* platforms is rather conservative and a bit arbitrary, except that on
* those platforms VGA disabling requires actual legacy VGA I/O access,
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
if (!HAS_PCH_SPLIT(dev_priv))
intel_display_resume(dev);
dev_priv->modeset_restore = MODESET_DONE;
exit:
mutex_unlock(&dev_priv->modeset_restore_lock);
return NOTIFY_OK;
}
static int
intel_lvds_connector_register(struct drm_connector *connector)
{
struct intel_lvds_connector *lvds = to_lvds_connector(connector);
int ret;
ret = intel_connector_register(connector);
if (ret)
return ret;
lvds->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
DRM_DEBUG_KMS("lid notifier registration failed\n");
lvds->lid_notifier.notifier_call = NULL;
}
return 0;
}
static void
intel_lvds_connector_unregister(struct drm_connector *connector)
{
struct intel_lvds_connector *lvds = to_lvds_connector(connector);
if (lvds->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&lvds->lid_notifier);
intel_connector_unregister(connector);
}
/**
* intel_lvds_destroy - unregister and free LVDS structures
* @connector: connector to free
@ -627,8 +497,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_lvds_connector_register,
.early_unregister = intel_lvds_connector_unregister,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@ -1091,8 +961,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
* 2) check for VBT data
* 3) check to see if LVDS is already on
* if none of the above, no panel
* 4) make sure lid is open
* if closed, act like it's not there for now
*/
/*

@ -1964,6 +1964,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
#endif

@ -168,6 +168,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Ice Lake PCH */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{ 0 },
};

@ -32,6 +32,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
/* register offsets */
@ -111,8 +112,9 @@
#define ID_ARBLOST (1 << 3)
#define ID_NACK (1 << 4)
/* persistent flags */
#define ID_P_NO_RXDMA (1 << 30) /* HW forbids RXDMA sometimes */
#define ID_P_PM_BLOCKED (1 << 31)
#define ID_P_MASK ID_P_PM_BLOCKED
#define ID_P_MASK (ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
enum rcar_i2c_type {
I2C_RCAR_GEN1,
@ -140,6 +142,8 @@ struct rcar_i2c_priv {
struct dma_chan *dma_rx;
struct scatterlist sg;
enum dma_data_direction dma_direction;
struct reset_control *rstc;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@ -321,6 +325,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
sg_dma_len(&priv->sg), priv->dma_direction);
/* Gen3 can only do one RXDMA per transfer and we just completed it */
if (priv->devtype == I2C_RCAR_GEN3 &&
priv->dma_direction == DMA_FROM_DEVICE)
priv->flags |= ID_P_NO_RXDMA;
priv->dma_direction = DMA_NONE;
}
@ -358,8 +367,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
unsigned char *buf;
int len;
/* Do not use DMA if it's not available or for messages < 8 bytes */
if (IS_ERR(chan) || msg->len < 8)
/* Do various checks to see if DMA is feasible at all */
if (IS_ERR(chan) || msg->len < 8 ||
(read && priv->flags & ID_P_NO_RXDMA))
return;
if (read) {
@ -688,6 +698,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
}
}
/* I2C is a special case, we need to poll the status of a reset */
static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
{
int i, ret;
ret = reset_control_reset(priv->rstc);
if (ret)
return ret;
for (i = 0; i < LOOP_TIMEOUT; i++) {
ret = reset_control_status(priv->rstc);
if (ret == 0)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs,
int num)
@ -699,6 +728,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
pm_runtime_get_sync(dev);
/* Gen3 needs a reset before allowing RXDMA once */
if (priv->devtype == I2C_RCAR_GEN3) {
priv->flags |= ID_P_NO_RXDMA;
if (!IS_ERR(priv->rstc)) {
ret = rcar_i2c_do_reset(priv);
if (ret == 0)
priv->flags &= ~ID_P_NO_RXDMA;
}
}
rcar_i2c_init(priv);
ret = rcar_i2c_bus_barrier(priv);
@ -868,6 +907,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {
ret = reset_control_status(priv->rstc);
if (ret < 0)
priv->rstc = ERR_PTR(-ENOTSUPP);
}
}
/* Stay always active when multi-master to keep arbitration working */
if (of_property_read_bool(dev->of_node, "multi-master"))
priv->flags |= ID_P_PM_BLOCKED;

@ -6722,6 +6722,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
struct hfi1_devdata *dd = ppd->dd;
struct send_context *sc;
int i;
int sc_flags;
if (flags & FREEZE_SELF)
write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
@ -6732,11 +6733,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
/* notify all SDMA engines that they are going into a freeze */
sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
SCF_LINK_DOWN : 0);
/* do halt pre-handling on all enabled send contexts */
for (i = 0; i < dd->num_send_contexts; i++) {
sc = dd->send_contexts[i].sc;
if (sc && (sc->flags & SCF_ENABLED))
sc_stop(sc, SCF_FROZEN | SCF_HALTED);
sc_stop(sc, sc_flags);
}
/* Send context are frozen. Notify user space */
@ -10646,6 +10649,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
handle_linkup_change(dd, 1);
pio_kernel_linkup(dd);
ppd->host_link_state = HLS_UP_INIT;
break;
case HLS_UP_ARMED:

@ -942,20 +942,18 @@ void sc_free(struct send_context *sc)
void sc_disable(struct send_context *sc)
{
u64 reg;
unsigned long flags;
struct pio_buf *pbuf;
if (!sc)
return;
/* do all steps, even if already disabled */
spin_lock_irqsave(&sc->alloc_lock, flags);
spin_lock_irq(&sc->alloc_lock);
reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
sc->flags &= ~SCF_ENABLED;
sc_wait_for_packet_egress(sc, 1);
write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
spin_unlock_irqrestore(&sc->alloc_lock, flags);
/*
* Flush any waiters. Once the context is disabled,
@ -965,7 +963,7 @@ void sc_disable(struct send_context *sc)
* proceed with the flush.
*/
udelay(1);
spin_lock_irqsave(&sc->release_lock, flags);
spin_lock(&sc->release_lock);
if (sc->sr) { /* this context has a shadow ring */
while (sc->sr_tail != sc->sr_head) {
pbuf = &sc->sr[sc->sr_tail].pbuf;
@ -976,7 +974,8 @@ void sc_disable(struct send_context *sc)
sc->sr_tail = 0;
}
}
spin_unlock_irqrestore(&sc->release_lock, flags);
spin_unlock(&sc->release_lock);
spin_unlock_irq(&sc->alloc_lock);
}
/* return SendEgressCtxtStatus.PacketOccupancy */
@ -1199,11 +1198,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
sc = dd->send_contexts[i].sc;
if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
continue;
if (sc->flags & SCF_LINK_DOWN)
continue;
sc_enable(sc); /* will clear the sc frozen flag */
}
}
/**
* pio_kernel_linkup() - Re-enable send contexts after linkup event
* @dd: valid device data
*
* When the link goes down, the freeze path is taken. However, a link down
* event is different from a freeze because, if the send context is re-enabled,
* whoever is sending data will start sending again, which will hang
* any QP that is sending data.
*
* The freeze path now looks at the type of event that occurred and takes this
* path for a link down event.
*/
void pio_kernel_linkup(struct hfi1_devdata *dd)
{
struct send_context *sc;
int i;
for (i = 0; i < dd->num_send_contexts; i++) {
sc = dd->send_contexts[i].sc;
if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
continue;
sc_enable(sc); /* will clear the sc link down flag */
}
}
/*
* Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
* Returns:
@ -1403,11 +1430,10 @@ void sc_stop(struct send_context *sc, int flag)
{
unsigned long flags;
/* mark the context */
sc->flags |= flag;
/* stop buffer allocations */
spin_lock_irqsave(&sc->alloc_lock, flags);
/* mark the context */
sc->flags |= flag;
sc->flags &= ~SCF_ENABLED;
spin_unlock_irqrestore(&sc->alloc_lock, flags);
wake_up(&sc->halt_wait);

@ -145,6 +145,7 @@ struct send_context {
#define SCF_IN_FREE 0x02
#define SCF_HALTED 0x04
#define SCF_FROZEN 0x08
#define SCF_LINK_DOWN 0x10
struct send_context_info {
struct send_context *sc; /* allocated working context */
@ -312,6 +313,7 @@ void set_pio_integrity(struct send_context *sc);
void pio_reset_all(struct hfi1_devdata *dd);
void pio_freeze(struct hfi1_devdata *dd);
void pio_kernel_unfreeze(struct hfi1_devdata *dd);
void pio_kernel_linkup(struct hfi1_devdata *dd);
/* global PIO send control operations */
#define PSC_GLOBAL_ENABLE 0

@ -79,8 +79,7 @@ MODULE_LICENSE("GPL");
*/
static unsigned char atakbd_keycode[0x72] = { /* American layout */
[0] = KEY_GRAVE,
static unsigned char atakbd_keycode[0x73] = { /* American layout */
[1] = KEY_ESC,
[2] = KEY_1,
[3] = KEY_2,
@ -121,9 +120,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
[38] = KEY_L,
[39] = KEY_SEMICOLON,
[40] = KEY_APOSTROPHE,
[41] = KEY_BACKSLASH, /* FIXME, '#' */
[41] = KEY_GRAVE,
[42] = KEY_LEFTSHIFT,
[43] = KEY_GRAVE, /* FIXME: '~' */
[43] = KEY_BACKSLASH,
[44] = KEY_Z,
[45] = KEY_X,
[46] = KEY_C,
@ -149,45 +148,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
[66] = KEY_F8,
[67] = KEY_F9,
[68] = KEY_F10,
[69] = KEY_ESC,
[70] = KEY_DELETE,
[71] = KEY_KP7,
[72] = KEY_KP8,
[73] = KEY_KP9,
[71] = KEY_HOME,
[72] = KEY_UP,
[74] = KEY_KPMINUS,
[75] = KEY_KP4,
[76] = KEY_KP5,
[77] = KEY_KP6,
[75] = KEY_LEFT,
[77] = KEY_RIGHT,
[78] = KEY_KPPLUS,
[79] = KEY_KP1,
[80] = KEY_KP2,
[81] = KEY_KP3,
[82] = KEY_KP0,
[83] = KEY_KPDOT,
[90] = KEY_KPLEFTPAREN,
[91] = KEY_KPRIGHTPAREN,
[92] = KEY_KPASTERISK, /* FIXME */
[93] = KEY_KPASTERISK,
[94] = KEY_KPPLUS,
[95] = KEY_HELP,
[80] = KEY_DOWN,
[82] = KEY_INSERT,
[83] = KEY_DELETE,
[96] = KEY_102ND,
[97] = KEY_KPASTERISK, /* FIXME */
[98] = KEY_KPSLASH,
[97] = KEY_UNDO,
[98] = KEY_HELP,
[99] = KEY_KPLEFTPAREN,
[100] = KEY_KPRIGHTPAREN,
[101] = KEY_KPSLASH,
[102] = KEY_KPASTERISK,
[103] = KEY_UP,
[104] = KEY_KPASTERISK, /* FIXME */
[105] = KEY_LEFT,
[106] = KEY_RIGHT,
[107] = KEY_KPASTERISK, /* FIXME */
[108] = KEY_DOWN,
[109] = KEY_KPASTERISK, /* FIXME */
[110] = KEY_KPASTERISK, /* FIXME */
[111] = KEY_KPASTERISK, /* FIXME */
[112] = KEY_KPASTERISK, /* FIXME */
[113] = KEY_KPASTERISK /* FIXME */
[103] = KEY_KP7,
[104] = KEY_KP8,
[105] = KEY_KP9,
[106] = KEY_KP4,
[107] = KEY_KP5,
[108] = KEY_KP6,
[109] = KEY_KP1,
[110] = KEY_KP2,
[111] = KEY_KP3,
[112] = KEY_KP0,
[113] = KEY_KPDOT,
[114] = KEY_KPENTER,
};
static struct input_dev *atakbd_dev;
@ -195,21 +183,15 @@ static struct input_dev *atakbd_dev;
static void atakbd_interrupt(unsigned char scancode, char down)
{
if (scancode < 0x72) { /* scancodes < 0xf2 are keys */
if (scancode < 0x73) { /* scancodes < 0xf3 are keys */
// report raw events here?
scancode = atakbd_keycode[scancode];
if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
input_report_key(atakbd_dev, scancode, 1);
input_report_key(atakbd_dev, scancode, 0);
input_sync(atakbd_dev);
} else {
input_report_key(atakbd_dev, scancode, down);
input_sync(atakbd_dev);
}
} else /* scancodes >= 0xf2 are mouse data, most likely */
input_report_key(atakbd_dev, scancode, down);
input_sync(atakbd_dev);
} else /* scancodes >= 0xf3 are mouse data, most likely */
printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
return;

@ -253,7 +253,13 @@ static u16 get_alias(struct device *dev)
/* The callers make sure that get_device_id() does not fail here */
devid = get_device_id(dev);
/* For ACPI HID devices, we simply return the devid as such */
if (!dev_is_pci(dev))
return devid;
ivrs_alias = amd_iommu_alias_table[devid];
pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
if (ivrs_alias == pci_alias)

@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000;
ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
msg[0].len - 3);
ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
&msg[0].buf[3],
msg[0].len - 3)
: -EOPNOTSUPP;
} else {
/* I2C write */
u8 buf[MAX_XFER_SIZE];
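
The fix above boils down to validating msg[0].len before using it: for messages shorter than the 3 header bytes, msg[0].len - 3 underflows (the length is unsigned) and af9035_wr_regs() would read far past the buffer. A minimal userspace sketch of the same hazard; forward_payload() and write_regs() are illustrative stand-ins, not driver symbols:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the real register-write helper (af9035_wr_regs). */
static int write_regs(const uint8_t *payload, size_t n)
{
	printf("writing %zu byte(s)\n", n);
	return 0;
}

static int forward_payload(const uint8_t *buf, size_t len)
{
	const size_t hdr_len = 3;	/* header bytes before the payload */

	/*
	 * Without this check, len - hdr_len underflows for len < 3
	 * (size_t is unsigned) and write_regs() would read far past buf.
	 */
	if (len < hdr_len)
		return -EOPNOTSUPP;

	return write_regs(buf + hdr_len, len - hdr_len);
}

int main(void)
{
	const uint8_t msg[] = { 0x00, 0x01, 0x02, 0xaa, 0xbb };

	forward_payload(msg, sizeof(msg));	/* ok: 2-byte payload */
	forward_payload(msg, 2);		/* rejected, no underflow */
	return 0;
}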

@ -2671,12 +2671,17 @@ static int emac_init_phy(struct emac_instance *dev)
if (of_phy_is_fixed_link(np)) {
int res = emac_dt_mdio_probe(dev);
if (!res) {
res = of_phy_register_fixed_link(np);
if (res)
mdiobus_unregister(dev->mii_bus);
if (res)
return res;
res = of_phy_register_fixed_link(np);
dev->phy_dev = of_phy_find_device(np);
if (res || !dev->phy_dev) {
mdiobus_unregister(dev->mii_bus);
return res ? res : -EINVAL;
}
return res;
emac_adjust_link(dev->ndev);
put_device(&dev->phy_dev->mdio.dev);
}
return 0;
}

@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
struct mlx4_dev *dev = &priv->dev;
struct mlx4_eq *eq = &priv->eq_table.eq[vec];
if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
if (!cpumask_available(eq->affinity_mask) ||
cpumask_empty(eq->affinity_mask))
return;
hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
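
The one-line mlx4 change is about a config-dependent type: with CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a heap pointer that may be unallocated, while without it the type is an on-stack array, so a plain !eq->affinity_mask test is meaningless and trips -Waddress. cpumask_available() covers both cases. A sketch of the pattern; set_affinity_hint_safe() is a hypothetical helper, not mlx4 code:

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/printk.h>

/*
 * cpumask_available() compiles to a real NULL check when
 * CONFIG_CPUMASK_OFFSTACK=y and to constant-true otherwise, so the
 * same source works for both representations of cpumask_var_t.
 */
static void set_affinity_hint_safe(int irq, cpumask_var_t mask)
{
	if (!cpumask_available(mask) || cpumask_empty(mask))
		return;			/* nothing sensible to hint */

	if (irq_set_affinity_hint(irq, mask))
		pr_warn("irq %d: failed to set affinity hint\n", irq);
}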

@ -431,6 +431,7 @@ enum EIS_BIT {
EIS_CULF1 = 0x00000080,
EIS_TFFF = 0x00000100,
EIS_QFS = 0x00010000,
EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)),
};
/* RIC0 */
@ -475,6 +476,7 @@ enum RIS0_BIT {
RIS0_FRF15 = 0x00008000,
RIS0_FRF16 = 0x00010000,
RIS0_FRF17 = 0x00020000,
RIS0_RESERVED = GENMASK(31, 18),
};
/* RIC1 */
@ -531,6 +533,7 @@ enum RIS2_BIT {
RIS2_QFF16 = 0x00010000,
RIS2_QFF17 = 0x00020000,
RIS2_RFFF = 0x80000000,
RIS2_RESERVED = GENMASK(30, 18),
};
/* TIC */
@ -547,6 +550,7 @@ enum TIS_BIT {
TIS_FTF1 = 0x00000002, /* Undocumented? */
TIS_TFUF = 0x00000100,
TIS_TFWF = 0x00000200,
TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
};
/* ISS */
@ -620,6 +624,7 @@ enum GIC_BIT {
enum GIS_BIT {
GIS_PTCF = 0x00000001, /* Undocumented? */
GIS_PTMF = 0x00000004,
GIS_RESERVED = GENMASK(15, 10),
};
/* GIE (R-Car Gen3 only) */

@ -721,10 +721,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
u32 eis, ris2;
eis = ravb_read(ndev, EIS);
ravb_write(ndev, ~EIS_QFS, EIS);
ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
if (eis & EIS_QFS) {
ris2 = ravb_read(ndev, RIS2);
ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
RIS2);
/* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF0)
@ -777,7 +778,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
u32 tis = ravb_read(ndev, TIS);
if (tis & TIS_TFUF) {
ravb_write(ndev, ~TIS_TFUF, TIS);
ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
ravb_get_tx_tstamp(ndev);
return true;
}
@ -912,7 +913,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Processing RX Descriptor Ring */
if (ris0 & mask) {
/* Clear RX interrupt */
ravb_write(ndev, ~mask, RIS0);
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
if (ravb_rx(ndev, &quota, q))
goto out;
}
@ -920,7 +921,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
if (tis & mask) {
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
ravb_write(ndev, ~mask, TIS);
ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
mmiowb();

@ -319,7 +319,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
}
}
ravb_write(ndev, ~gis, GIS);
ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
}
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)

@ -138,7 +138,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
if (val & PCIE_ATU_ENABLE)
return;
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
mdelay(LINK_WAIT_IATU);
}
dev_err(pci->dev, "outbound iATU is not being enabled\n");
}
@ -181,7 +181,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
if (val & PCIE_ATU_ENABLE)
return;
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
mdelay(LINK_WAIT_IATU);
}
dev_err(pci->dev, "outbound iATU is not being enabled\n");
}
@ -239,7 +239,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
if (val & PCIE_ATU_ENABLE)
return 0;
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
mdelay(LINK_WAIT_IATU);
}
dev_err(pci->dev, "inbound iATU is not being enabled\n");
@ -285,7 +285,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
if (val & PCIE_ATU_ENABLE)
return 0;
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
mdelay(LINK_WAIT_IATU);
}
dev_err(pci->dev, "inbound iATU is not being enabled\n");
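
The dwc hunks swap usleep_range() for mdelay() because the iATU programming paths can run with a spinlock held or interrupts disabled, where sleeping is illegal and produces the "scheduling while atomic" splat named in the changelog. A sketch of the resulting poll-loop shape; read_ready() and the constants are placeholders, not the driver's symbols:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MAX_RETRIES	5
#define DELAY_MS	9

/* Stand-in for reading the enable bit back from hardware. */
static bool read_ready(void)
{
	return false;	/* poll the real status register here */
}

static int wait_ready_atomic_safe(void)
{
	int retries;

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		if (read_ready())
			return 0;
		/*
		 * usleep_range() calls the scheduler and must not be
		 * used with a spinlock held or IRQs off; mdelay()
		 * busy-waits, so it is safe (if wasteful) here.
		 */
		mdelay(DELAY_MS);
	}

	return -ETIMEDOUT;
}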

@ -28,8 +28,7 @@
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
#define LINK_WAIT_IATU_MIN 9000
#define LINK_WAIT_IATU_MAX 10000
#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_LINK_CONTROL 0x710

@ -3465,11 +3465,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
vscsi->dds.window[LOCAL].liobn,
vscsi->dds.window[REMOTE].liobn);
strcpy(vscsi->eye, "VSCSI ");
strncat(vscsi->eye, vdev->name, MAX_EYE);
snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
vscsi->dds.unit_id = vdev->unit_address;
strncpy(vscsi->dds.partition_name, partition_name,
strscpy(vscsi->dds.partition_name, partition_name,
sizeof(vscsi->dds.partition_name));
vscsi->dds.partition_num = partition_number;
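
Both ibmvscsis hunks are string-termination fixes: strncat(dst, src, MAX_EYE) bounds how many bytes are appended rather than the total size, and strncpy() leaves the destination unterminated when the source fills it, whereas snprintf() and strscpy() always terminate. A userspace sketch of the strncpy() pitfall and the snprintf() fix, with illustrative buffer names:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[8];
	const char *src = "ABCDEFGHIJ";	/* longer than the buffer */

	/* strncpy() fills all 8 bytes and leaves no terminating NUL */
	strncpy(name, src, sizeof(name));
	/* printf("%s", name) here would read past the end of name */

	/* snprintf() always terminates and reports truncation */
	if (snprintf(name, sizeof(name), "%s", src) >= (int)sizeof(name))
		fprintf(stderr, "truncated to \"%s\"\n", name);

	return 0;
}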

@ -3308,6 +3308,65 @@ static void ipr_release_dump(struct kref *kref)
LEAVE;
}
static void ipr_add_remove_thread(struct work_struct *work)
{
unsigned long lock_flags;
struct ipr_resource_entry *res;
struct scsi_device *sdev;
struct ipr_ioa_cfg *ioa_cfg =
container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
u8 bus, target, lun;
int did_work;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
restart:
do {
did_work = 0;
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return;
}
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
if (res->del_from_ml && res->sdev) {
did_work = 1;
sdev = res->sdev;
if (!scsi_device_get(sdev)) {
if (!res->add_to_ml)
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
else
res->del_from_ml = 0;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_remove_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
break;
}
}
} while (did_work);
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
if (res->add_to_ml) {
bus = res->bus;
target = res->target;
lun = res->lun;
res->add_to_ml = 0;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_add_device(ioa_cfg->host, bus, target, lun);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
goto restart;
}
}
ioa_cfg->scan_done = 1;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
LEAVE;
}
/**
* ipr_worker_thread - Worker thread
* @work: ioa config struct
@ -3322,13 +3381,9 @@ static void ipr_release_dump(struct kref *kref)
static void ipr_worker_thread(struct work_struct *work)
{
unsigned long lock_flags;
struct ipr_resource_entry *res;
struct scsi_device *sdev;
struct ipr_dump *dump;
struct ipr_ioa_cfg *ioa_cfg =
container_of(work, struct ipr_ioa_cfg, work_q);
u8 bus, target, lun;
int did_work;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@ -3366,49 +3421,9 @@ static void ipr_worker_thread(struct work_struct *work)
return;
}
restart:
do {
did_work = 0;
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return;
}
schedule_work(&ioa_cfg->scsi_add_work_q);
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
if (res->del_from_ml && res->sdev) {
did_work = 1;
sdev = res->sdev;
if (!scsi_device_get(sdev)) {
if (!res->add_to_ml)
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
else
res->del_from_ml = 0;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_remove_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
break;
}
}
} while (did_work);
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
if (res->add_to_ml) {
bus = res->bus;
target = res->target;
lun = res->lun;
res->add_to_ml = 0;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_add_device(ioa_cfg->host, bus, target, lun);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
goto restart;
}
}
ioa_cfg->scan_done = 1;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
LEAVE;
}
@ -9937,6 +9952,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
INIT_LIST_HEAD(&ioa_cfg->free_res_q);
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
init_waitqueue_head(&ioa_cfg->reset_wait_q);
init_waitqueue_head(&ioa_cfg->msi_wait_q);
init_waitqueue_head(&ioa_cfg->eeh_wait_q);

@ -1568,6 +1568,7 @@ struct ipr_ioa_cfg {
u8 saved_mode_page_len;
struct work_struct work_q;
struct work_struct scsi_add_work_q;
struct workqueue_struct *reset_work_q;
wait_queue_head_t reset_wait_q;
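
The ipr change is a work-splitting pattern: the slow scsi_add_device()/scsi_remove_device() scan moves out of the shared worker into its own work_struct, so reset handling is never stuck behind a long device scan. A minimal sketch of that shape; struct ctrl and the function names are placeholders, not ipr symbols:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Placeholder controller struct, not the real ipr_ioa_cfg. */
struct ctrl {
	struct work_struct work_q;		/* fast bookkeeping */
	struct work_struct scsi_add_work_q;	/* slow add/remove scan */
};

static void slow_scan(struct work_struct *work)
{
	/* long-running scsi_add_device()/scsi_remove_device() loop here */
}

static void fast_worker(struct work_struct *work)
{
	struct ctrl *c = container_of(work, struct ctrl, work_q);

	/* the fast path only kicks the scan and returns promptly */
	schedule_work(&c->scsi_add_work_q);
}

static void ctrl_init(struct ctrl *c)
{
	INIT_WORK(&c->work_q, fast_worker);
	INIT_WORK(&c->scsi_add_work_q, slow_scan);
}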

@ -1285,7 +1285,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_ZONE_RESET:
return sd_zbc_setup_reset_cmnd(cmd);
default:
BUG();
WARN_ON_ONCE(1);
return BLKPREP_KILL;
}
}

@ -492,7 +492,8 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
DMA_TO_DEVICE);
}
/* Release pool */
if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI &&
req_ctx->mlli_params.mlli_virt_addr) {
dma_pool_free(req_ctx->mlli_params.curr_pool,
req_ctx->mlli_params.mlli_virt_addr,
req_ctx->mlli_params.mlli_dma_addr);

@ -447,10 +447,10 @@ int mnt_want_write_file_path(struct file *file)
{
int ret;
sb_start_write(file_inode(file)->i_sb);
sb_start_write(file->f_path.mnt->mnt_sb);
ret = __mnt_want_write_file(file);
if (ret)
sb_end_write(file_inode(file)->i_sb);
sb_end_write(file->f_path.mnt->mnt_sb);
return ret;
}
@ -541,8 +541,7 @@ void __mnt_drop_write_file(struct file *file)
void mnt_drop_write_file_path(struct file *file)
{
__mnt_drop_write_file(file);
sb_end_write(file_inode(file)->i_sb);
mnt_drop_write(file->f_path.mnt);
}
void mnt_drop_write_file(struct file *file)

@ -42,7 +42,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);

@ -355,10 +355,27 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
struct dev_pagemap *pgmap;
struct page_map *page_map;
int error, nid, is_ram, i = 0;
struct dev_pagemap *conflict_pgmap;
align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
- align_start;
align_end = align_start + align_size - 1;
conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
if (conflict_pgmap) {
dev_WARN(dev, "Conflicting mapping in same section\n");
put_dev_pagemap(conflict_pgmap);
return ERR_PTR(-ENOMEM);
}
conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
if (conflict_pgmap) {
dev_WARN(dev, "Conflicting mapping in same section\n");
put_dev_pagemap(conflict_pgmap);
return ERR_PTR(-ENOMEM);
}
is_ram = region_intersects(align_start, align_size,
IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
@ -396,7 +413,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
mutex_lock(&pgmap_lock);
error = 0;
align_end = align_start + align_size - 1;
foreach_order_pgoff(res, order, pgoff) {
struct dev_pagemap *dup;

@ -1765,7 +1765,7 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
pmd_t *old_pmd, pmd_t *new_pmd)
{
spinlock_t *old_ptl, *new_ptl;
pmd_t pmd;
@ -1796,7 +1796,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
if (pmd_present(pmd) && pmd_dirty(pmd))
if (pmd_present(pmd))
force_flush = true;
VM_BUG_ON(!pmd_none(*new_pmd));
@ -1807,12 +1807,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
}
pmd = move_soft_dirty_pmd(pmd);
set_pmd_at(mm, new_addr, new_pmd, pmd);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
if (force_flush)
flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
else
*need_flush = true;
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
spin_unlock(old_ptl);
return true;
}

@ -115,7 +115,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
unsigned long old_addr, unsigned long old_end,
struct vm_area_struct *new_vma, pmd_t *new_pmd,
unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
unsigned long new_addr, bool need_rmap_locks)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *old_pte, *new_pte, pte;
@ -163,15 +163,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
pte = ptep_get_and_clear(mm, old_addr, old_pte);
/*
* If we are remapping a dirty PTE, make sure
* If we are remapping a valid PTE, make sure
* to flush TLB before we drop the PTL for the
* old PTE or we may race with page_mkclean().
* PTE.
*
* This check has to be done after we removed the
* old PTE from page tables or another thread may
* dirty it after the check and before the removal.
* NOTE! Both old and new PTL matter: the old one
* for racing with page_mkclean(), the new one to
* make sure the physical page stays valid until
* the TLB entry for the old mapping has been
* flushed.
*/
if (pte_present(pte) && pte_dirty(pte))
if (pte_present(pte))
force_flush = true;
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
pte = move_soft_dirty_pte(pte);
@ -179,13 +181,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
}
arch_leave_lazy_mmu_mode();
if (force_flush)
flush_tlb_range(vma, old_end - len, old_end);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
pte_unmap(new_pte - 1);
if (force_flush)
flush_tlb_range(vma, old_end - len, old_end);
else
*need_flush = true;
pte_unmap_unlock(old_pte - 1, old_ptl);
if (need_rmap_locks)
drop_rmap_locks(vma);
@ -200,7 +200,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
{
unsigned long extent, next, old_end;
pmd_t *old_pmd, *new_pmd;
bool need_flush = false;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
@ -231,8 +230,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (need_rmap_locks)
take_rmap_locks(vma);
moved = move_huge_pmd(vma, old_addr, new_addr,
old_end, old_pmd, new_pmd,
&need_flush);
old_end, old_pmd, new_pmd);
if (need_rmap_locks)
drop_rmap_locks(vma);
if (moved)
@ -250,10 +248,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (extent > LATENCY_LIMIT)
extent = LATENCY_LIMIT;
move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
new_pmd, new_addr, need_rmap_locks, &need_flush);
new_pmd, new_addr, need_rmap_locks);
}
if (need_flush)
flush_tlb_range(vma, old_end-len, old_addr);
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

@ -227,7 +227,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
* the packet to be exactly of that size to make the link
* throughput estimation effective.
*/
skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Sending unicast (probe) ELP packet on interface %s to %pM\n",
@ -254,6 +254,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
struct batadv_priv *bat_priv;
struct sk_buff *skb;
u32 elp_interval;
bool ret;
bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
@ -315,8 +316,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
* may sleep and that is not allowed in an rcu protected
* context. Therefore schedule a task for that.
*/
queue_work(batadv_event_workqueue,
&hardif_neigh->bat_v.metric_work);
ret = queue_work(batadv_event_workqueue,
&hardif_neigh->bat_v.metric_work);
if (!ret)
batadv_hardif_neigh_put(hardif_neigh);
}
rcu_read_unlock();
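
Both "refcount on queue_work() failure" fixes close the same leak: the work function is expected to drop a reference, but queue_work() returns false when the item is already pending, in which case the work function runs only once and one reference is never put. A minimal sketch of the corrected pattern; struct my_obj and its helpers are placeholders, not batman-adv symbols:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Placeholder object, not a batman-adv struct. */
struct my_obj {
	struct kref refcount;
	struct work_struct work;
};

static void my_obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_obj, refcount));
}

static void my_obj_put(struct my_obj *obj)
{
	kref_put(&obj->refcount, my_obj_release);
}

/*
 * The work function is expected to drop the reference taken here,
 * but only if it actually runs.  queue_work() returns false when
 * the item is already pending; no extra run happens, so the caller
 * must put the reference itself or it leaks.
 */
static void schedule_obj_work(struct my_obj *obj)
{
	kref_get(&obj->refcount);
	if (!queue_work(system_wq, &obj->work))
		my_obj_put(obj);
}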

@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
{
struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
bool ret;
ethhdr = eth_hdr(skb);
@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
if (unlikely(!backbone_gw))
return true;
queue_work(batadv_event_workqueue, &backbone_gw->report_work);
/* backbone_gw is unreferenced in the report work function */
ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
/* backbone_gw is unreferenced in the report work function
* if the queue_work() call was successful
*/
if (!ret)
batadv_backbone_gw_put(backbone_gw);
return true;
}

@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
@ -325,6 +326,9 @@ out:
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator announcing gateway capabilities
* @gateway: announced bandwidth information
*
* Has to be called with the appropriate locks being acquired
* (gw.list_lock).
*/
static void batadv_gw_node_add(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
@ -332,6 +336,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
{
struct batadv_gw_node *gw_node;
lockdep_assert_held(&bat_priv->gw.list_lock);
if (gateway->bandwidth_down == 0)
return;
@ -346,10 +352,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
spin_lock_bh(&bat_priv->gw.list_lock);
kref_get(&gw_node->refcount);
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
spin_unlock_bh(&bat_priv->gw.list_lock);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
@ -405,11 +409,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
{
struct batadv_gw_node *gw_node, *curr_gw = NULL;
spin_lock_bh(&bat_priv->gw.list_lock);
gw_node = batadv_gw_node_get(bat_priv, orig_node);
if (!gw_node) {
batadv_gw_node_add(bat_priv, orig_node, gateway);
spin_unlock_bh(&bat_priv->gw.list_lock);
goto out;
}
spin_unlock_bh(&bat_priv->gw.list_lock);
if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) &&
(gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)))

@ -850,16 +850,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
struct list_head *list;
/* Select ingoing or outgoing coding node */
if (in_coding) {
lock = &orig_neigh_node->in_coding_list_lock;
list = &orig_neigh_node->in_coding_list;
} else {
lock = &orig_neigh_node->out_coding_list_lock;
list = &orig_neigh_node->out_coding_list;
}
spin_lock_bh(lock);
/* Check if nc_node is already added */
nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
/* Node found */
if (nc_node)
return nc_node;
goto unlock;
nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
if (!nc_node)
return NULL;
goto unlock;
/* Initialize nc_node */
INIT_LIST_HEAD(&nc_node->list);
@ -868,22 +879,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
kref_get(&orig_neigh_node->refcount);
nc_node->orig_node = orig_neigh_node;
/* Select ingoing or outgoing coding node */
if (in_coding) {
lock = &orig_neigh_node->in_coding_list_lock;
list = &orig_neigh_node->in_coding_list;
} else {
lock = &orig_neigh_node->out_coding_list_lock;
list = &orig_neigh_node->out_coding_list;
}
batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
nc_node->addr, nc_node->orig_node->orig);
/* Add nc_node to orig_node */
spin_lock_bh(lock);
kref_get(&nc_node->refcount);
list_add_tail_rcu(&nc_node->list, list);
unlock:
spin_unlock_bh(lock);
return nc_node;

@ -566,15 +566,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
struct batadv_softif_vlan *vlan;
int err;
spin_lock_bh(&bat_priv->softif_vlan_list_lock);
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (vlan) {
batadv_softif_vlan_put(vlan);
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return -EEXIST;
}
vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
if (!vlan)
if (!vlan) {
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return -ENOMEM;
}
vlan->bat_priv = bat_priv;
vlan->vid = vid;
@ -582,17 +587,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
atomic_set(&vlan->ap_isolation, 0);
kref_get(&vlan->refcount);
hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
/* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
* sleeping behavior of the sysfs functions and the fs_reclaim lock
*/
err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
if (err) {
kfree(vlan);
/* ref for the function */
batadv_softif_vlan_put(vlan);
/* ref for the list */
batadv_softif_vlan_put(vlan);
return err;
}
spin_lock_bh(&bat_priv->softif_vlan_list_lock);
kref_get(&vlan->refcount);
hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
/* add a new TT local entry. This one will be marked with the NOPURGE
* flag
*/
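
The various "Prevent duplicated ... entry" patches in this release share one shape: lookup and insertion used to run under separate lock sections, so two concurrent writers could each miss the existing entry and both insert. The fix is to hold the list lock across lookup, allocation, and insertion. A sketch of that shape; struct entry and find_entry() are illustrative, not batman-adv symbols:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative entry type, not a batman-adv struct. */
struct entry {
	struct list_head list;
	int key;
};

static struct entry *find_entry(struct list_head *head, int key)
{
	struct entry *e;

	list_for_each_entry(e, head, list)
		if (e->key == key)
			return e;

	return NULL;
}

static struct entry *get_or_create_entry(struct list_head *head,
					 spinlock_t *lock, int key)
{
	struct entry *e;

	spin_lock_bh(lock);

	/* lookup under the same lock, so a racing writer sees our insert */
	e = find_entry(head, key);
	if (e)
		goto unlock;

	/* GFP_ATOMIC: we allocate while holding a spinlock */
	e = kzalloc(sizeof(*e), GFP_ATOMIC);
	if (!e)
		goto unlock;

	e->key = key;
	list_add_tail(&e->list, head);	/* insert before dropping the lock */
unlock:
	spin_unlock_bh(lock);
	return e;
}

The softif_vlan hunk above handles the extra wrinkle its in-diff comment describes: batadv_sysfs_add_vlan() can sleep and therefore must run outside the lock, so the entry is inserted while the lock is held and, if sysfs registration then fails, both the function's and the list's references are put.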

@ -186,7 +186,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
\
return __batadv_store_uint_attr(buff, count, _min, _max, \
_post_func, attr, \
&bat_priv->_var, net_dev); \
&bat_priv->_var, net_dev, \
NULL); \
}
#define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
@ -260,7 +261,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
\
length = __batadv_store_uint_attr(buff, count, _min, _max, \
_post_func, attr, \
&hard_iface->_var, net_dev); \
&hard_iface->_var, \
hard_iface->soft_iface, \
net_dev); \
\
batadv_hardif_put(hard_iface); \
return length; \
@ -354,10 +357,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
static int batadv_store_uint_attr(const char *buff, size_t count,
struct net_device *net_dev,
struct net_device *slave_dev,
const char *attr_name,
unsigned int min, unsigned int max,
atomic_t *attr)
{
char ifname[IFNAMSIZ + 3] = "";
unsigned long uint_val;
int ret;
@ -383,8 +388,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
if (atomic_read(attr) == uint_val)
return count;
batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
attr_name, atomic_read(attr), uint_val);
if (slave_dev)
snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
attr_name, ifname, atomic_read(attr), uint_val);
atomic_set(attr, uint_val);
return count;
@ -395,12 +403,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
void (*post_func)(struct net_device *),
const struct attribute *attr,
atomic_t *attr_store,
struct net_device *net_dev)
struct net_device *net_dev,
struct net_device *slave_dev)
{
int ret;
ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
attr_store);
ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
attr->name, min, max, attr_store);
if (post_func && ret)
post_func(net_dev);
@ -569,7 +578,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
batadv_post_gw_reselect, attr,
&bat_priv->gw.sel_class,
bat_priv->soft_iface);
bat_priv->soft_iface, NULL);
}
static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
@ -1078,8 +1087,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
if (old_tp_override == tp_override)
goto out;
batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
"throughput_override",
batadv_info(hard_iface->soft_iface,
"%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
"throughput_override", net_dev->name,
old_tp_override / 10, old_tp_override % 10,
tp_override / 10, tp_override % 10);

@ -1587,6 +1587,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
{
struct batadv_tt_orig_list_entry *orig_entry;
spin_lock_bh(&tt_global->list_lock);
orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
if (orig_entry) {
/* refresh the ttvn: the current value could be a bogus one that
@ -1609,11 +1611,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
orig_entry->flags = flags;
kref_init(&orig_entry->refcount);
spin_lock_bh(&tt_global->list_lock);
kref_get(&orig_entry->refcount);
hlist_add_head_rcu(&orig_entry->list,
&tt_global->orig_list);
spin_unlock_bh(&tt_global->list_lock);
atomic_inc(&tt_global->orig_list_count);
sync_flags:
@ -1621,6 +1621,8 @@ sync_flags:
out:
if (orig_entry)
batadv_tt_orig_list_entry_put(orig_entry);
spin_unlock_bh(&tt_global->list_lock);
}
/**

@ -528,15 +528,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
{
struct batadv_tvlv_handler *tvlv_handler;
spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
if (tvlv_handler) {
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
batadv_tvlv_handler_put(tvlv_handler);
return;
}
tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
if (!tvlv_handler)
if (!tvlv_handler) {
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
return;
}
tvlv_handler->ogm_handler = optr;
tvlv_handler->unicast_handler = uptr;
@ -546,7 +551,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
kref_init(&tvlv_handler->refcount);
INIT_HLIST_NODE(&tvlv_handler->list);
spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
kref_get(&tvlv_handler->refcount);
hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
