Merge android-4.14-p.61 (b7e55e8) into msm-4.14

* remotes/origin/tmp-b7e55e8:
  Linux 4.14.61
  scsi: sg: fix minor memory leak in error path
  drm/vc4: Reset ->{x, y}_scaling[1] when dealing with uniplanar formats
  crypto: padlock-aes - Fix Nano workaround data corruption
  RDMA/uverbs: Expand primary and alt AV port checks
  iwlwifi: add more card IDs for 9000 series
  userfaultfd: remove uffd flags from vma->vm_flags if UFFD_EVENT_FORK fails
  audit: fix potential null dereference 'context->module.name'
  kvm: x86: vmx: fix vpid leak
  x86/entry/64: Remove %ebx handling from error_entry/exit
  x86/apic: Future-proof the TSC_DEADLINE quirk for SKX
  virtio_balloon: fix another race between migration and ballooning
  net: socket: fix potential spectre v1 gadget in socketcall
  can: ems_usb: Fix memory leak on ems_usb_disconnect()
  squashfs: more metadata hardenings
  squashfs: more metadata hardening
  net/mlx5e: E-Switch, Initialize eswitch only if eswitch manager
  rxrpc: Fix user call ID check in rxrpc_service_prealloc_one
  net: stmmac: Fix WoL for PCI-based setups
  netlink: Fix spectre v1 gadget in netlink_create()
  net: dsa: Do not suspend/resume closed slave_dev
  ipv4: frags: handle possible skb truesize change
  inet: frag: enforce memory limits earlier
  bonding: avoid lockdep confusion in bond_get_stats()
  Linux 4.14.60
  tcp: add one more quick ack after after ECN events
  tcp: refactor tcp_ecn_check_ce to remove sk type cast
  tcp: do not aggressively quick ack after ECN events
  tcp: add max_quickacks param to tcp_incr_quickack and tcp_enter_quickack_mode
  tcp: do not force quickack when receiving out-of-order packets
  netlink: Don't shift with UB on nlk->ngroups
  netlink: Do not subscribe to non-existent groups
  xen-netfront: wait xenbus state change when load module manually
  tcp_bbr: fix bw probing to raise in-flight data for very small BDPs
  NET: stmmac: align DMA stuff to largest cache line length
  net: mdio-mux: bcm-iproc: fix wrong getter and setter pair
  net: lan78xx: fix rx handling before first packet is send
  net: fix amd-xgbe flow-control issue
  net: ena: Fix use of uninitialized DMA address bits field
  ipv4: remove BUG_ON() from fib_compute_spec_dst
  net: dsa: qca8k: Allow overwriting CPU port setting
  net: dsa: qca8k: Add QCA8334 binding documentation
  net: dsa: qca8k: Enable RXMAC when bringing up a port
  net: dsa: qca8k: Force CPU port to its highest bandwidth
  RDMA/uverbs: Protect from attempts to create flows on unsupported QP
  usb: gadget: udc: renesas_usb3: should remove debugfs
  ovl: Sync upper dirty data when syncing overlayfs
  PCI: xgene: Remove leftover pci_scan_child_bus() call
  PCI: pciehp: Assume NoCompl+ for Thunderbolt ports
  ext4: fix check to prevent initializing reserved inodes
  ext4: check for allocation block validity with block group locked
  ext4: fix inline data updates with checksums enabled
  squashfs: be more careful about metadata corruption
  random: mix rdrand with entropy sent in from userspace
  block: reset bi_iter.bi_done after splitting bio
  blkdev: __blkdev_direct_IO_simple: fix leak in error case
  block: bio_iov_iter_get_pages: fix size of last iovec
  drm/dp/mst: Fix off-by-one typo when dump payload table
  drm/atomic-helper: Drop plane->fb references only for drm_atomic_helper_shutdown()
  drm: Add DP PSR2 sink enable bit
  ASoC: topology: Add missing clock gating parameter when parsing hw_configs
  ASoC: topology: Fix bclk and fsync inversion in set_link_hw_format()
  media: si470x: fix __be16 annotations
  media: atomisp: compat32: fix __user annotations
  scsi: cxlflash: Avoid clobbering context control register value
  scsi: cxlflash: Synchronize reset and remove ops
  scsi: megaraid_sas: Increase timeout by 1 sec for non-RAID fastpath IOs
  scsi: scsi_dh: replace too broad "TP9" string with the exact models
  regulator: Don't return or expect -errno from of_map_mode()
  media: omap3isp: fix unbalanced dma_iommu_mapping
  crypto: authenc - don't leak pointers to authenc keys
  crypto: authencesn - don't leak pointers to authenc keys
  usb: hub: Don't wait for connect state at resume for powered-off ports
  microblaze: Fix simpleImage format generation
  soc: imx: gpcv2: Do not pass static memory as platform data
  serial: core: Make sure compiler barfs for 16-byte earlycon names
  staging: lustre: ldlm: free resource when ldlm_lock_create() fails.
  staging: lustre: llite: correct removexattr detection
  staging: vchiq_core: Fix missing semaphore release in error case
  audit: allow not equal op for audit by executable
  rsi: fix nommu_map_sg overflow kernel panic
  rsi: Fix 'invalid vdd' warning in mmc
  ipconfig: Correctly initialise ic_nameservers
  drm/gma500: fix psb_intel_lvds_mode_valid()'s return type
  igb: Fix queue selection on MAC filters on i210
  arm64: defconfig: Enable Rockchip io-domain driver
  nvme: lightnvm: add granby support
  memory: tegra: Apply interrupts mask per SoC
  memory: tegra: Do not handle spurious interrupts
  delayacct: Use raw_spinlocks
  stop_machine: Use raw spinlocks
  backlight: pwm_bl: Don't use GPIOF_* with gpiod_get_direction
  dt-bindings: net: meson-dwmac: new compatible name for AXG SoC
  net: hns3: Fixes the out of bounds access in hclge_map_tqp
  spi: meson-spicc: Fix error handling in meson_spicc_probe()
  dt-bindings: pinctrl: meson: add support for the Meson8m2 SoC
  mmc: pwrseq: Use kmalloc_array instead of stack VLA
  mmc: dw_mmc: update actual clock for mmc debugfs
  ALSA: hda/ca0132: fix build failure when a local macro is defined
  drm/atomic: Handling the case when setting old crtc for plane
  media: siano: get rid of __le32/__le16 cast warnings
  f2fs: avoid fsync() failure caused by EAGAIN in writepage()
  bpf: fix references to free_bpf_prog_info() in comments
  thermal: exynos: fix setting rising_threshold for Exynos5433
  staging: lustre: o2iblnd: Fix FastReg map/unmap for MLX5
  staging: lustre: o2iblnd: fix race at kiblnd_connect_peer
  scsi: qedf: Set the UNLOADING flag when removing a vport
  scsi: hisi_sas: config ATA de-reset as an constrained command for v3 hw
  scsi: megaraid: silence a static checker bug
  scsi: 3w-xxxx: fix a missing-check bug
  scsi: 3w-9xxx: fix a missing-check bug
  bnxt_en: Check unsupported speeds in bnxt_update_link() on PF only.
  perf: fix invalid bit in diagnostic entry
  s390/cpum_sf: Add data entry sizes to sampling trailer entry
  brcmfmac: Add support for bcm43364 wireless chipset
  mtd: rawnand: fsl_ifc: fix FSL NAND driver to read all ONFI parameter pages
  media: saa7164: Fix driver name in debug output
  media: media-device: fix ioctl function types
  ACPI / LPSS: Only call pwm_add_table() for Bay Trail PWM if PMIC HRV is 2
  libata: Fix command retry decision
  media: rcar_jpu: Add missing clk_disable_unprepare() on error in jpu_open()
  net: phy: phylink: Release link GPIO
  dma-iommu: Fix compilation when !CONFIG_IOMMU_DMA
  tty: Fix data race in tty_insert_flip_string_fixed_flag
  i40e: free the skb after clearing the bitlock
  nvmem: properly handle returned value nvmem_reg_read
  ARM: dts: sh73a0: Add missing interrupt-affinity to PMU node
  ARM: dts: emev2: Add missing interrupt-affinity to PMU node
  ARM: dts: stih407-pinctrl: Fix complain about IRQ_TYPE_NONE usage
  EDAC, altera: Fix ARM64 build warning
  HID: i2c-hid: check if device is there before really probing
  powerpc/embedded6xx/hlwd-pic: Prevent interrupts from being handled by Starlet
  drm/amdgpu: Remove VRAM from shared bo domains.
  drm/radeon: fix mode_valid's return type
  arm64: dts: renesas: salvator-common: use audio-graph-card for Sound
  HID: hid-plantronics: Re-resend Update to map button for PTT products
  arm64: cmpwait: Clear event register before arming exclusive monitor
  media: atomisp: ov2680: don't declare unused vars
  ALSA: usb-audio: Apply rate limit to warning messages in URB complete callback
  net: ethernet: ti: cpsw-phy-sel: check bus_find_device() ret value
  media: smiapp: fix timeout checking in smiapp_read_nvm
  ixgbevf: fix MAC address changes through ixgbevf_set_mac()
  md: fix NULL dereference of mddev->pers in remove_and_add_spares()
  md/raid1: add error handling of read error from FailFast device
  regulator: pfuze100: add .is_enable() for pfuze100_swb_regulator_ops
  ALSA: emu10k1: Rate-limit error messages about page errors
  rtc: tps65910: fix possible race condition
  rtc: vr41xx: fix possible race condition
  rtc: tps6586x: fix possible race condition
  Bluetooth: btusb: add ID for LiteOn 04ca:301a
  drm/nouveau/fifo/gk104-: poll for runlist update completion
  scsi: zfcp: assert that the ERP lock is held when tracing a recovery trigger
  scsi: ufs: fix exception event handling
  scsi: ufs: ufshcd: fix possible unclocked register access
  fscrypt: use unbound workqueue for decryption
  net: hns3: Fix the missing client list node initialization
  spi: Add missing pm_runtime_put_noidle() after failed get
  drivers/perf: arm-ccn: don't log to dmesg in event_init
  ima: based on policy verify firmware signatures (pre-allocated buffer)
  mwifiex: correct histogram data with appropriate index
  net: dsa: qca8k: Add support for QCA8334 switch
  PCI: pciehp: Request control of native hotplug only if supported
  bpf: powerpc64: pad function address loads with NOPs
  pinctrl: at91-pio4: add missing of_node_put
  powerpc/8xx: fix invalid register expression in head_8xx.S
  spi: sh-msiof: Fix setting SIRMDR1.SYNCAC to match SITMDR1.SYNCAC
  powerpc: Add __printf verification to prom_printf
  powerpc/powermac: Mark variable x as unused
  powerpc/powermac: Add missing prototype for note_bootable_part()
  powerpc/chrp/time: Make some functions static, add missing header include
  powerpc/32: Add a missing include header
  ath: Add regulatory mapping for Bahamas
  ath: Add regulatory mapping for Bermuda
  ath: Add regulatory mapping for Serbia
  ath: Add regulatory mapping for Tanzania
  ath: Add regulatory mapping for Uganda
  ath: Add regulatory mapping for APL2_FCCA
  ath: Add regulatory mapping for APL13_WORLD
  ath: Add regulatory mapping for ETSI8_WORLD
  ath: Add regulatory mapping for FCC3_ETSIC
  nvme-pci: Fix AER reset handling
  nvme-rdma: stop admin queue before freeing it
  PCI: Prevent sysfs disable of device while driver is attached
  PM / wakeup: Make s2idle_lock a RAW_SPINLOCK
  x86/microcode: Make the late update update_lock a raw lock for RT
  btrfs: qgroup: Finish rescan when hit the last leaf of extent tree
  btrfs: add barriers to btrfs_sync_log before log_commit_wait wakeups
  Btrfs: don't BUG_ON() in btrfs_truncate_inode_items()
  Btrfs: don't return ino to ino cache if inode item removal fails
  media: videobuf2-core: don't call memop 'finish' when queueing
  media: tw686x: Fix incorrect vb2_mem_ops GFP flags
  net: hns3: Fixes the init of the VALID BD info in the descriptor
  wlcore: sdio: check for valid platform device data before suspend
  mwifiex: handle race during mwifiex_usb_disconnect
  mfd: cros_ec: Fail early if we cannot identify the EC
  ASoC: dpcm: fix BE dai not hw_free and shutdown
  Bluetooth: btusb: Add a new Realtek 8723DE ID 2ff8:b011
  Bluetooth: hci_qca: Fix "Sleep inside atomic section" warning
  iwlwifi: pcie: fix race in Rx buffer allocator
  btrfs: balance dirty metadata pages in btrfs_finish_ordered_io
  PCI: Fix devm_pci_alloc_host_bridge() memory leak
  selftests: intel_pstate: return Kselftest Skip code for skipped tests
  selftests: memfd: return Kselftest Skip code for skipped tests
  selftests/intel_pstate: Improve test, minor fixes
  perf/x86/intel/uncore: Correct fixed counter index check for NHM
  perf/x86/intel/uncore: Correct fixed counter index check in generic code
  usbip: dynamically allocate idev by nports found in sysfs
  usbip: usbip_detach: Fix memory, udev context and udev leak
  block, bfq: remove wrong lock in bfq_requests_merged
  f2fs: fix race in between GC and atomic open
  f2fs: fix to detect failure of dquot_initialize
  f2fs: Fix deadlock in shutdown ioctl
  f2fs: fix to wait page writeback during revoking atomic write
  f2fs: fix to don't trigger writeback during recovery
  f2fs: fix error path of move_data_page
  disable loading f2fs module on PAGE_SIZE > 4KB
  pnfs: Don't release the sequence slot until we've processed layoutget on open
  netfilter: nf_tables: check msg_type before nft_trans_set(trans)
  lightnvm: pblk: warn in case of corrupted write buffer
  RDMA/mad: Convert BUG_ONs to error flows
  powerpc/64s: Fix compiler store ordering to SLB shadow area
  hvc_opal: don't set tb_ticks_per_usec in udbg_init_opal_common()
  powerpc/eeh: Fix use-after-release of EEH driver
  powerpc/64s: Add barrier_nospec
  powerpc/lib: Adjust .balign inside string functions for PPC32
  infiniband: fix a possible use-after-free bug
  e1000e: Ignore TSYNCRXCTL when getting I219 clock attributes
  ceph: fix alignment of rasize
  bpf, arm32: fix inconsistent naming about emit_a32_lsr_{r64,i64}
  printk: drop in_nmi check from printk_safe_flush_on_panic()
  watchdog: da9063: Fix updating timeout value
  irqchip/ls-scfg-msi: Map MSIs in the iommu
  netfilter: ipset: List timing out entries with "timeout 1" instead of zero
  netfilter: ipset: forbid family for hash:mac sets
  perf tools: Fix pmu events parsing rule
  rtc: ensure rtc_set_alarm fails when alarms are not supported
  mm/slub.c: add __printf verification to slab_err()
  mm: vmalloc: avoid racy handling of debugobjects in vunmap
  mm: /proc/pid/pagemap: hide swap entries from unprivileged users
  kernel/hung_task.c: show all hung tasks before panic
  vfio/type1: Fix task tracking for QEMU vCPU hotplug
  vfio/mdev: Check globally for duplicate devices
  vfio: platform: Fix reset module leak in error path
  nfsd: fix potential use-after-free in nfsd4_decode_getdeviceinfo
  NFSv4.1: Fix the client behaviour on NFS4ERR_SEQ_FALSE_RETRY
  ALSA: fm801: add error handling for snd_ctl_add
  ALSA: emu10k1: add error handling for snd_ctl_add
  skip LAYOUTRETURN if layout is invalid
  hv_netvsc: fix network namespace issues with VF support
  xen/netfront: raise max number of slots in xennet_get_responses()
  kcov: ensure irq code sees a valid area
  mlxsw: spectrum_switchdev: Fix port_vlan refcounting
  arm64: fix vmemmap BUILD_BUG_ON() triggering on !vmemmap setups
  tracing: Quiet gcc warning about maybe unused link variable
  tracing/kprobes: Fix trace_probe flags on enable_trace_kprobe() failure
  kthread, tracing: Don't expose half-written comm when creating kthreads
  tracing: Fix possible double free in event_enable_trigger_func()
  tracing: Fix double free of event_trigger_data
  delayacct: fix crash in delayacct_blkio_end() after delayacct init failure
  kvm, mm: account shadow page tables to kmemcg
  Input: elan_i2c - add another ACPI ID for Lenovo Ideapad 330-15AST
  Input: i8042 - add Lenovo LaVie Z to the i8042 reset list
  Input: elan_i2c - add ACPI ID for lenovo ideapad 330
  spi: spi-s3c64xx: Fix system resume support
  drivers/infiniband/ulp/srpt/ib_srpt.c: fix build with gcc-4.4.4
  IB/srpt: Fix an out-of-bounds stack access in srpt_zerolength_write()
  drivers/infiniband/core/verbs.c: fix build with gcc-4.4.4
  RDMA/core: Avoid that ib_drain_qp() triggers an out-of-bounds stack access
  i2c: core: decrease reference count of device node in i2c_unregister_device
  fork: unconditionally clear stack on fork
  Linux 4.14.59
  turn off -Wattribute-alias
  can: m_can.c: fix setup of CCCR register: clear CCCR NISO bit before checking can.ctrlmode
  can: peak_canfd: fix firmware < v3.3.0: limit allocation to 32-bit DMA addr only
  can: xilinx_can: fix RX overflow interrupt not being enabled
  can: xilinx_can: fix incorrect clear of non-processed interrupts
  can: xilinx_can: keep only 1-2 frames in TX FIFO to fix TX accounting
  can: xilinx_can: fix device dropping off bus on RX overrun
  can: xilinx_can: fix recovery from error states not being propagated
  can: xilinx_can: fix power management handling
  can: xilinx_can: fix RX loop if RXNEMP is asserted without RXOK
  driver core: Partially revert "driver core: correct device's shutdown order"
  usb: gadget: f_fs: Only return delayed status when len is 0
  usb: dwc2: Fix DMA alignment to start at allocated boundary
  usb: core: handle hub C_PORT_OVER_CURRENT condition
  usb: cdc_acm: Add quirk for Castles VEGA3000
  staging: speakup: fix wraparound in uaccess length check
  tcp: add tcp_ooo_try_coalesce() helper
  tcp: call tcp_drop() from tcp_data_queue_ofo()
  tcp: detect malicious patterns in tcp_collapse_ofo_queue()
  tcp: avoid collapses in tcp_prune_queue() if possible
  tcp: free batches of packets in tcp_prune_ofo_queue()
  tcp: do not delay ACK in DCTCP upon CE status change
  tcp: do not cancel delay-AcK on DCTCP special ACK
  tcp: helpers to send special DCTCP ack
  tcp: fix dctcp delayed ACK schedule
  vxlan: fix default fdb entry netlink notify ordering during netdev create
  vxlan: make netlink notify in vxlan_fdb_destroy optional
  vxlan: add new fdb alloc and create helpers
  rtnetlink: add rtnl_link_state check in rtnl_configure_link
  sock: fix sg page frag coalescing in sk_alloc_sg
  net: phy: consider PHY_IGNORE_INTERRUPT in phy_start_aneg_priv
  multicast: do not restore deleted record source filter mode to new one
  net/ipv6: Fix linklocal to global address with VRF
  net/mlx5e: Fix quota counting in aRFS expire flow
  net/mlx5e: Don't allow aRFS for encapsulated packets
  net/mlx5: Adjust clock overflow work period
  net: skb_segment() should not return NULL
  net/mlx4_core: Save the qpn from the input modifier in RST2INIT wrapper
  ip: in cmsg IP(V6)_ORIGDSTADDR call pskb_may_pull
  ip: hash fragments consistently
  bonding: set default miimon value for non-arp modes if not set
  drm/nouveau: Set DRIVER_ATOMIC cap earlier to fix debugfs
  drm/nouveau/drm/nouveau: Fix runtime PM leak in nv50_disp_atomic_commit()
  KVM: PPC: Check if IOMMU page is contained in the pinned physical page
  xen/PVH: Set up GS segment for stack canary
  MIPS: Fix off-by-one in pci_resource_to_user()
  MIPS: ath79: fix register address in ath79_ddr_wb_flush()
  Revert "cifs: Fix slab-out-of-bounds in send_set_info() on SMB2 ACE setting"
  ANDROID: verity: really fix android-verity Kconfig
  tcp: add tcp_ooo_try_coalesce() helper
  tcp: call tcp_drop() from tcp_data_queue_ofo()
  tcp: detect malicious patterns in tcp_collapse_ofo_queue()
  tcp: avoid collapses in tcp_prune_queue() if possible
  tcp: free batches of packets in tcp_prune_ofo_queue()
  x86_64_cuttlefish_defconfig: Enable android-verity
  x86_64_cuttlefish_defconfig: enable verity cert
  ANDROID: android-verity: Fix broken parameter handling.
  ANDROID: android-verity: Make it work with newer kernels
  ANDROID: android-verity: Add API to verify signature with builtin keys.
  ANDROID: verity: fix android-verity Kconfig dependencies
  Linux 4.14.58
  xhci: Fix perceived dead host due to runtime suspend race with event handler
  powerpc/powernv: Fix save/restore of SPRG3 on entry/exit from stop (idle)
  cxl_getfile(): fix double-iput() on alloc_file() failures
  alpha: fix osf_wait4() breakage
  net: usb: asix: replace mii_nway_restart in resume path
  ipv6: make DAD fail with enhanced DAD when nonce length differs
  net: systemport: Fix CRC forwarding check for SYSTEMPORT Lite
  net/mlx4_en: Don't reuse RX page when XDP is set
  hv_netvsc: Fix napi reschedule while receive completion is busy
  tg3: Add higher cpu clock for 5762.
  qmi_wwan: add support for Quectel EG91
  ptp: fix missing break in switch
  net: phy: fix flag masking in __set_phy_supported
  net/ipv4: Set oif in fib_compute_spec_dst
  skbuff: Unconditionally copy pfmemalloc in __skb_clone()
  net: Don't copy pfmemalloc flag in __copy_skb_header()
  net: diag: Don't double-free TCP_NEW_SYN_RECV sockets in tcp_abort
  lib/rhashtable: consider param->min_size when setting initial table size
  ipv6: ila: select CONFIG_DST_CACHE
  ipv6: fix useless rol32 call on hash
  ipv4: Return EINVAL when ping_group_range sysctl doesn't map to user ns
  gen_stats: Fix netlink stats dumping in the presence of padding
  drm/nouveau: Avoid looping through fake MST connectors
  drm/nouveau: Use drm_connector_list_iter_* for iterating connectors
  drm/i915: Fix hotplug irq ack on i965/g4x
  stop_machine: Disable preemption when waking two stopper threads
  vfio/spapr: Use IOMMU pageshift rather than pagesize
  vfio/pci: Fix potential Spectre v1
  cpufreq: intel_pstate: Register when ACPI PCCH is present
  mm/huge_memory.c: fix data loss when splitting a file pmd
  mm: memcg: fix use after free in mem_cgroup_iter()
  ARC: mm: allow mprotect to make stack mappings executable
  ARC: configs: Remove CONFIG_INITRAMFS_SOURCE from defconfigs
  ARC: Fix CONFIG_SWAP
  ARCv2: [plat-hsdk]: Save accl reg pair by default
  ALSA: hda: add mute led support for HP ProBook 455 G5
  ALSA: hda/realtek - Add Panasonic CF-SZ6 headset jack quirk
  ALSA: rawmidi: Change resized buffers atomically
  fat: fix memory allocation failure handling of match_strdup()
  x86/MCE: Remove min interval polling limitation
  x86/events/intel/ds: Fix bts_interrupt_threshold alignment
  x86/apm: Don't access __preempt_count with zeroed fs
  KVM/Eventfd: Avoid crash when assign and deassign specific eventfd in parallel.
  scsi: sd_zbc: Fix variable type and bogus comment
  ANDROID: uid_sys_stats: Replace tasklist lock with RCU in uid_cputime_show
  Linux 4.14.57
  string: drop __must_check from strscpy() and restore strscpy() usages in cgroup
  arm64: KVM: Add ARCH_WORKAROUND_2 discovery through ARCH_FEATURES_FUNC_ID
  arm64: KVM: Handle guest's ARCH_WORKAROUND_2 requests
  arm64: KVM: Add ARCH_WORKAROUND_2 support for guests
  arm64: KVM: Add HYP per-cpu accessors
  arm64: ssbd: Add prctl interface for per-thread mitigation
  arm64: ssbd: Introduce thread flag to control userspace mitigation
  arm64: ssbd: Restore mitigation status on CPU resume
  arm64: ssbd: Skip apply_ssbd if not using dynamic mitigation
  arm64: ssbd: Add global mitigation state accessor
  arm64: Add 'ssbd' command-line option
  arm64: Add ARCH_WORKAROUND_2 probing
  arm64: Add per-cpu infrastructure to call ARCH_WORKAROUND_2
  arm64: Call ARCH_WORKAROUND_2 on transitions between EL0 and EL1
  arm/arm64: smccc: Add SMCCC-specific return codes
  KVM: arm64: Avoid storing the vcpu pointer on the stack
  KVM: arm/arm64: Do not use kern_hyp_va() with kvm_vgic_global_state
  arm64: alternatives: Add dynamic patching feature
  KVM: arm64: Stop save/restoring host tpidr_el1 on VHE
  arm64: alternatives: use tpidr_el2 on VHE hosts
  KVM: arm64: Change hyp_panic()s dependency on tpidr_el2
  KVM: arm/arm64: Convert kvm_host_cpu_state to a static per-cpu allocation
  KVM: arm64: Store vcpu on the stack during __guest_enter()
  net/nfc: Avoid stalls when nfc_alloc_send_skb() returned NULL.
  rds: avoid unenecessary cong_update in loop transport
  bdi: Fix another oops in wb_workfn()
  netfilter: ipv6: nf_defrag: drop skb dst before queueing
  nsh: set mac len based on inner packet
  autofs: fix slab out of bounds read in getname_kernel()
  tls: Stricter error checking in zerocopy sendmsg path
  KEYS: DNS: fix parsing multiple options
  reiserfs: fix buffer overflow with long warning messages
  netfilter: ebtables: reject non-bridge targets
  PCI: hv: Disable/enable IRQs rather than BH in hv_compose_msi_msg()
  block: do not use interruptible wait anywhere
  mtd: rawnand: denali_dt: set clk_x_rate to 200 MHz unconditionally
  crypto: af_alg - Initialize sg_num_bytes in error code path
  clocksource: Initialize cs->wd_list
  media: rc: oops in ir_timer_keyup after device unplug
  xhci: Fix USB3 NULL pointer dereference at logical disconnect.
  net: lan78xx: Fix race in tx pending skb size calculation
  rtlwifi: rtl8821ae: fix firmware is not ready to run
  rtlwifi: Fix kernel Oops "Fw download fail!!"
  net: cxgb3_main: fix potential Spectre v1
  VSOCK: fix loopback on big-endian systems
  vhost_net: validate sock before trying to put its fd
  tcp: prevent bogus FRTO undos with non-SACK flows
  tcp: fix Fast Open key endianness
  strparser: Remove early eaten to fix full tcp receive buffer stall
  stmmac: fix DMA channel hang in half-duplex mode
  r8152: napi hangup fix after disconnect
  qmi_wwan: add support for the Dell Wireless 5821e module
  qed: Limit msix vectors in kdump kernel to the minimum required count.
  qed: Fix use of incorrect size in memcpy call.
  qed: Fix setting of incorrect eswitch mode.
  qede: Adverstise software timestamp caps when PHC is not available.
  net/tcp: Fix socket lookups with SO_BINDTODEVICE
  net: sungem: fix rx checksum support
  net_sched: blackhole: tell upper qdisc about dropped packets
  net/packet: fix use-after-free
  net: mvneta: fix the Rx desc DMA address in the Rx path
  net/mlx5: Fix wrong size allocation for QoS ETC TC regitster
  net/mlx5: Fix required capability for manipulating MPFS
  net/mlx5: Fix incorrect raw command length parsing
  net/mlx5: Fix command interface race in polling mode
  net/mlx5: E-Switch, Avoid setup attempt if not being e-switch manager
  net/mlx5e: Don't attempt to dereference the ppriv struct if not being eswitch manager
  net/mlx5e: Avoid dealing with vport representors if not being e-switch manager
  net: macb: Fix ptp time adjustment for large negative delta
  net: fix use-after-free in GRO with ESP
  net: dccp: switch rx_tstamp_last_feedback to monotonic clock
  net: dccp: avoid crash in ccid3_hc_rx_send_feedback()
  ixgbe: split XDP_TX tail and XDP_REDIRECT map flushing
  ipvlan: fix IFLA_MTU ignored on NEWLINK
  ipv6: sr: fix passing wrong flags to crypto_alloc_shash()
  hv_netvsc: split sub-channel setup into async and sync
  atm: zatm: Fix potential Spectre v1
  atm: Preserve value of skb->truesize when accounting to vcc
  alx: take rtnl before calling __alx_open from resume
  crypto: crypto4xx - fix crypto4xx_build_pdr, crypto4xx_build_sdr leak
  crypto: crypto4xx - remove bad list_del
  PCI: exynos: Fix a potential init_clk_resources NULL pointer dereference
  bcm63xx_enet: do not write to random DMA channel on BCM6345
  bcm63xx_enet: correct clock usage
  ocfs2: ip_alloc_sem should be taken in ocfs2_get_block()
  ocfs2: subsystem.su_mutex is required while accessing the item->ci_parent
  xprtrdma: Fix corner cases when handling device removal
  cpufreq / CPPC: Set platform specific transition_delay_us
  Btrfs: fix duplicate extents after fsync of file with prealloc extents
  x86/paravirt: Make native_save_fl() extern inline
  x86/asm: Add _ASM_ARG* constants for argument registers to <asm/asm.h>
  compiler-gcc.h: Add __attribute__((gnu_inline)) to all inline declarations
  ANDROID: Add hold functionality to schedtune CPU boost
  ANDROID: sched/rt: Add schedtune accounting to rt task enqueue/dequeue
  UPSTREAM: cpuidle: menu: Avoid selecting shallow states with stopped tick
  UPSTREAM: cpuidle: menu: Refine idle state selection for running tick
  UPSTREAM: sched: idle: Select idle state before stopping the tick
  BACKPORT: time: hrtimer: Introduce hrtimer_next_event_without()
  BACKPORT: time: tick-sched: Split tick_nohz_stop_sched_tick()
  UPSTREAM: cpuidle: Return nohz hint from cpuidle_select()
  UPSTREAM: jiffies: Introduce USER_TICK_USEC and redefine TICK_USEC
  UPSTREAM: sched: idle: Do not stop the tick before cpuidle_idle_call()
  BACKPORT: sched: idle: Do not stop the tick upfront in the idle loop
  BACKPORT: time: tick-sched: Reorganize idle tick management code
  ANDROID: sched/fair: fix a warning
  ANDROID: sched/walt: Fix compilation issue for x86_64
  ANDROID: mnt: Fix next_descendent
  ANDROID: sched/events: Introduce util_est trace events
  ANDROID: sched/fair: schedtune: update before schedutil
  FROMLIST: sched/fair: add support to tune PELT ramp/decay timings
  BACKPORT: sched/fair: Update util_est before updating schedutil
  BACKPORT: sched/fair: Update util_est only on util_avg updates
  BACKPORT: sched/fair: Use util_est in LB and WU paths
  BACKPORT: sched/fair: Add util_est on top of PELT
  ANDROID: sched/fair: Cleanup cpu_util{_wake}()
  ANDROID: sched: Update max cpu capacity in case of max frequency constraints
  ANDROID: arm: enable max frequency capping
  ANDROID: arm64: enable max frequency capping
  ANDROID: implement max frequency capping
  ANDROID: sched/fair: add arch scaling function for max frequency capping
  ANDROID: trace: Add WALT util signal to trace event sched_load_cfs_rq
  ANDROID: sched, trace: Remove trace event sched_load_avg_cpu
  ANDROID: Rename and move include/linux/sched_energy.h
  ANDROID: Adjust juno energy model
  ANDROID: Check equality of max cap state cap and cpu scale
  ANDROID: Move energy model init call into arch_topology driver
  ANDROID: Streamline sched_domain_energy_f functions
  ANDROID: Separate cpu_scale and energy model setup
  ANDROID: update_group_capacity for single cpu in cluster
  ANDROID: sched/fair: return idle CPU immediately for prefer_idle
  ANDROID: sched/fair: add idle state filter to prefer_idle case
  ANDROID: sched/fair: remove order from CPU selection
  ANDROID: sched/fair: unify spare capacity calculation
  ANDROID:sched/fair: prefer energy efficient CPUs for !prefer_idle tasks
  ANDROID: sched/fair: fix CPU selection for non latency sensitive tasks
  ANDROID: sched/fair: Also do misfit in overloaded groups
  ANDROID: sched/fair: Don't balance misfits if it would overload local group
  ANDROID: sched/fair: Attempt to improve throughput for asym cap systems
  FROMLIST: sched/fair: Don't move tasks to lower capacity cpus unless necessary
  FROMLIST: sched/core: Disable SD_PREFER_SIBLING on asymmetric cpu capacity domains
  FROMLIST: sched/core: Disable SD_ASYM_CPUCAPACITY for root_domains without asymmetry
  FROMLIST: sched/fair: Set rq->rd->overload when misfit
  FROMLIST: sched: Wrap rq->rd->overload accesses with READ/WRITE_ONCE
  FROMLIST: sched: Change root_domain->overload type to int
  FROMLIST: sched/fair: Change prefer_sibling type to bool
  FROMLIST: sched/fair: Consider misfit tasks when load-balancing
  FROMLIST: sched: Add sched_group per-cpu max capacity
  FROMLIST: sched/fair: Add group_misfit_task load-balance type
  FROMLIST: sched: Add static_key for asymmetric cpu capacity optimizations
  UPSTREAM: ANDROID: binder: change down_write to down_read
  UPSTREAM: ANDROID: binder: correct the cmd print for BINDER_WORK_RETURN_ERROR
  UPSTREAM: ANDROID: binder: remove 32-bit binder interface.
  UPSTREAM: android: binder: Use true and false for boolean values
  UPSTREAM: android: binder: Use octal permissions
  UPSTREAM: android: binder: Prefer __func__ to using hardcoded function name
  UPSTREAM: ANDROID: binder: make binder_alloc_new_buf_locked static and indent its arguments
  UPSTREAM: android: binder: Check for errors in binder_alloc_shrinker_init().

Conflicts:
	arch/arm64/Kconfig
	arch/arm64/include/asm/cpucaps.h
	arch/arm64/include/asm/cpufeature.h
	arch/arm64/include/asm/thread_info.h
	arch/arm64/kernel/cpu_errata.c
	arch/arm64/kernel/cpufeature.c
	arch/arm64/kernel/entry.S
	arch/arm64/kernel/ssbd.c
	drivers/base/arch_topology.c
	drivers/md/Kconfig
	drivers/scsi/ufs/ufshcd.c
	drivers/usb/gadget/function/f_fs.c
	include/trace/events/sched.h
	kernel/sched/cpufreq_schedutil.c
	kernel/sched/energy.c
	kernel/sched/fair.c
	kernel/sched/features.h
	kernel/sched/sched.h
	kernel/sched/topology.c
	kernel/sched/tune.c
	kernel/sched/walt.c
	kernel/sched/walt.h
	kernel/stop_machine.c
	kernel/time/tick-sched.c
	net/socket.c
	sound/core/rawmidi.c

Change-Id: Ia246711317930ecd55bb42565a04e6b4fdfc26d2
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
commit b2c8463039
 Documentation/devicetree/bindings/net/dsa/qca8k.txt | 23
 Documentation/devicetree/bindings/net/meson-dwmac.txt | 1
 Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt | 2
 Documentation/vfio-mediated-device.txt | 5
 Makefile | 3
 arch/alpha/kernel/osf_sys.c | 5
 arch/arc/Kconfig | 2
 arch/arc/configs/axs101_defconfig | 1
 arch/arc/configs/axs103_defconfig | 1
 arch/arc/configs/axs103_smp_defconfig | 1
 arch/arc/configs/haps_hs_defconfig | 1
 arch/arc/configs/haps_hs_smp_defconfig | 1
 arch/arc/configs/hsdk_defconfig | 1
 arch/arc/configs/nsim_700_defconfig | 1
 arch/arc/configs/nsim_hs_defconfig | 1
 arch/arc/configs/nsim_hs_smp_defconfig | 1
 arch/arc/configs/nsimosci_defconfig | 1
 arch/arc/configs/nsimosci_hs_defconfig | 1
 arch/arc/configs/nsimosci_hs_smp_defconfig | 1
 arch/arc/include/asm/page.h | 2
 arch/arc/include/asm/pgtable.h | 2
 arch/arc/plat-hsdk/Kconfig | 2
 arch/arm/boot/dts/emev2.dtsi | 5
 arch/arm/boot/dts/sh73a0.dtsi | 5
 arch/arm/boot/dts/stih407-pinctrl.dtsi | 10
 arch/arm/include/asm/kvm_host.h | 12
 arch/arm/include/asm/kvm_mmu.h | 12
 arch/arm/net/bpf_jit_32.c | 10
 arch/arm64/boot/dts/renesas/salvator-common.dtsi | 38
 arch/arm64/configs/defconfig | 2
 arch/arm64/include/asm/alternative.h | 43
 arch/arm64/include/asm/assembler.h | 8
 arch/arm64/include/asm/cmpxchg.h | 4
 arch/arm64/include/asm/kvm_asm.h | 41
 arch/arm64/include/asm/kvm_host.h | 43
 arch/arm64/include/asm/kvm_mmu.h | 44
 arch/arm64/include/asm/percpu.h | 11
 arch/arm64/include/asm/thread_info.h | 2
 arch/arm64/kernel/Makefile | 1
 arch/arm64/kernel/alternative.c | 52
 arch/arm64/kernel/asm-offsets.c | 2
 arch/arm64/kernel/cpu_errata.c | 2
 arch/arm64/kernel/cpufeature.c | 17
 arch/arm64/kernel/ssbd.c | 4
 arch/arm64/kvm/hyp-init.S | 4
 arch/arm64/kvm/hyp/entry.S | 12
 arch/arm64/kvm/hyp/hyp-entry.S | 62
 arch/arm64/kvm/hyp/switch.c | 64
 arch/arm64/kvm/hyp/sysreg-sr.c | 21
 arch/arm64/kvm/reset.c | 4
 arch/arm64/mm/init.c | 4
 arch/arm64/mm/proc.S | 8
 arch/microblaze/boot/Makefile | 10
 arch/mips/ath79/common.c | 2
 arch/mips/pci/pci.c | 2
 arch/powerpc/include/asm/barrier.h | 15
 arch/powerpc/include/asm/cache.h | 3
 arch/powerpc/include/asm/mmu_context.h | 4
 arch/powerpc/kernel/eeh_driver.c | 28
 arch/powerpc/kernel/head_8xx.S | 2
 arch/powerpc/kernel/idle_book3s.S | 2
 arch/powerpc/kernel/pci_32.c | 1
 arch/powerpc/kernel/prom_init.c | 114
 arch/powerpc/kvm/book3s_64_vio.c | 2
 arch/powerpc/kvm/book3s_64_vio_hv.c | 6
 arch/powerpc/lib/string.S | 7
 arch/powerpc/mm/mmu_context_iommu.c | 37
 arch/powerpc/mm/slb.c | 8
 arch/powerpc/net/bpf_jit_comp64.c | 34
 arch/powerpc/platforms/chrp/time.c | 6
 arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 5
 arch/powerpc/platforms/powermac/bootx_init.c | 4
 arch/powerpc/platforms/powermac/setup.c | 1
 arch/s390/include/asm/cpu_mf.h | 6
 arch/x86/configs/x86_64_cuttlefish_defconfig | 8
 arch/x86/entry/entry_64.S | 18
 arch/x86/events/intel/ds.c | 8
 arch/x86/events/intel/uncore.c | 2
 arch/x86/events/intel/uncore_nhmex.c | 2
 arch/x86/include/asm/apm.h | 6
 arch/x86/include/asm/asm.h | 59
 arch/x86/include/asm/irqflags.h | 2
 arch/x86/kernel/Makefile | 1
 arch/x86/kernel/apic/apic.c | 3
 arch/x86/kernel/apm_32.c | 5
 arch/x86/kernel/cpu/mcheck/mce.c | 3
 arch/x86/kernel/cpu/microcode/core.c | 6
 arch/x86/kernel/irqflags.S | 26
 arch/x86/kvm/mmu.c | 2
 arch/x86/kvm/vmx.c | 7
 arch/x86/xen/smp_pv.c | 1
 arch/x86/xen/xen-pvh.S | 26
 block/bfq-iosched.c | 2
 block/bio.c | 19
 block/blk-core.c | 9
 certs/system_keyring.c | 43
 crypto/af_alg.c | 4
 crypto/authenc.c | 1
 crypto/authencesn.c | 1
 drivers/acpi/acpi_lpss.c | 6
 (remaining files not shown: too many files changed in this diff)

@@ -2,7 +2,10 @@
 Required properties:
-- compatible: should be "qca,qca8337"
+- compatible: should be one of:
+    "qca,qca8334"
+    "qca,qca8337"
 - #size-cells: must be 0
 - #address-cells: must be 1
@@ -14,6 +17,20 @@ port and PHY id, each subnode describing a port needs to have a valid phandle
 referencing the internal PHY connected to it. The CPU port of this switch is
 always port 0.
+A CPU port node has the following optional node:
+- fixed-link            : Fixed-link subnode describing a link to a non-MDIO
+                          managed entity. See
+                          Documentation/devicetree/bindings/net/fixed-link.txt
+                          for details.
+For QCA8K the 'fixed-link' sub-node supports only the following properties:
+- 'speed' (integer, mandatory), to indicate the link speed. Accepted
+  values are 10, 100 and 1000
+- 'full-duplex' (boolean, optional), to indicate that full duplex is
+  used. When absent, half duplex is assumed.
 Example:
@@ -53,6 +70,10 @@ Example:
 				label = "cpu";
 				ethernet = <&gmac1>;
 				phy-mode = "rgmii";
+				fixed-link {
+					speed = 1000;
+					full-duplex;
+				};
 			};
 			port@1 {

@@ -10,6 +10,7 @@ Required properties on all platforms:
 			- "amlogic,meson6-dwmac"
 			- "amlogic,meson8b-dwmac"
 			- "amlogic,meson-gxbb-dwmac"
+			- "amlogic,meson-axg-dwmac"
 		Additionally "snps,dwmac" and any applicable more
 		detailed version number described in net/stmmac.txt
 		should be used.

@@ -3,8 +3,10 @@
 Required properties for the root node:
  - compatible: one of "amlogic,meson8-cbus-pinctrl"
 		      "amlogic,meson8b-cbus-pinctrl"
+		      "amlogic,meson8m2-cbus-pinctrl"
 		      "amlogic,meson8-aobus-pinctrl"
 		      "amlogic,meson8b-aobus-pinctrl"
+		      "amlogic,meson8m2-aobus-pinctrl"
 		      "amlogic,meson-gxbb-periphs-pinctrl"
 		      "amlogic,meson-gxbb-aobus-pinctrl"
 		      "amlogic,meson-gxl-periphs-pinctrl"

@@ -145,6 +145,11 @@ The functions in the mdev_parent_ops structure are as follows:
 * create: allocate basic resources in a driver for a mediated device
 * remove: free resources in a driver when a mediated device is destroyed
+  (Note that mdev-core provides no implicit serialization of create/remove
+   callbacks per mdev parent device, per mdev type, or any other categorization.
+   Vendor drivers are expected to be fully asynchronous in this respect or
+   provide their own internal resource protection.)
 The callbacks in the mdev_parent_ops structure are as follows:
 * open: open callback of mediated device

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 56
+SUBLEVEL = 61
 EXTRAVERSION =
 NAME = Petit Gorille
@@ -667,6 +667,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS	+= $(call cc-option,-Oz,-Os)

@@ -1183,13 +1183,10 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
 		struct rusage32 __user *, ur)
 {
-	unsigned int status = 0;
 	struct rusage r;
-	long err = kernel_wait4(pid, &status, options, &r);
+	long err = kernel_wait4(pid, ustatus, options, &r);
 	if (err <= 0)
 		return err;
-	if (put_user(status, ustatus))
-		return -EFAULT;
 	if (!ur)
 		return err;
 	if (put_tv32(&ur->ru_utime, &r.ru_utime))

@@ -408,7 +408,7 @@ config ARC_HAS_DIV_REM
 config ARC_HAS_ACCL_REGS
 	bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)"
-	default n
+	default y
 	help
 	  Depending on the configuration, CPU can contain accumulator reg-pair
 	  (also referred to as r58:r59). These can also be used by gcc as GPR so

@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set

@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set

@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set

@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EXPERT=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set

@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set

@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set

@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y

@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y

@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y

@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y

@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y

@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y

@@ -105,7 +105,7 @@ typedef pte_t * pgtable_t;
 #define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
 /* Default Permissions for stack/heaps pages (Non Executable) */
-#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 #define WANT_PAGE_VIRTUAL   1

@@ -379,7 +379,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 /* Decode a PTE containing swap "identifier "into constituents */
 #define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
-#define __swp_offset(pte_lookalike)	((pte_lookalike).val << 13)
+#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
 /* NOPs, to keep generic kernel happy */
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })

@@ -7,5 +7,7 @@
 menuconfig ARC_SOC_HSDK
 	bool "ARC HS Development Kit SOC"
+	depends on ISA_ARCV2
+	select ARC_HAS_ACCL_REGS
 	select CLK_HSDK
 	select RESET_HSDK

@@ -31,13 +31,13 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
-		cpu@0 {
+		cpu0: cpu@0 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0>;
 			clock-frequency = <533000000>;
 		};
-		cpu@1 {
+		cpu1: cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <1>;
@@ -57,6 +57,7 @@
 		compatible = "arm,cortex-a9-pmu";
 		interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>;
 	};
 	clocks@e0110000 {

@@ -22,7 +22,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
-		cpu@0 {
+		cpu0: cpu@0 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0>;
@@ -30,7 +30,7 @@
 			power-domains = <&pd_a2sl>;
 			next-level-cache = <&L2>;
 		};
-		cpu@1 {
+		cpu1: cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <1>;
@@ -89,6 +89,7 @@
 		compatible = "arm,cortex-a9-pmu";
 		interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>;
 	};
 	cmt1: timer@e6138000 {

@@ -52,7 +52,7 @@
 			st,syscfg = <&syscfg_sbc>;
 			reg = <0x0961f080 0x4>;
 			reg-names = "irqmux";
-			interrupts = <GIC_SPI 188 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "irqmux";
 			ranges = <0 0x09610000 0x6000>;
@@ -376,7 +376,7 @@
 			st,syscfg = <&syscfg_front>;
 			reg = <0x0920f080 0x4>;
 			reg-names = "irqmux";
-			interrupts = <GIC_SPI 189 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "irqmux";
 			ranges = <0 0x09200000 0x10000>;
@@ -936,7 +936,7 @@
 			st,syscfg = <&syscfg_front>;
 			reg = <0x0921f080 0x4>;
 			reg-names = "irqmux";
-			interrupts = <GIC_SPI 190 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "irqmux";
 			ranges = <0 0x09210000 0x10000>;
@@ -969,7 +969,7 @@
 			st,syscfg = <&syscfg_rear>;
 			reg = <0x0922f080 0x4>;
 			reg-names = "irqmux";
-			interrupts = <GIC_SPI 191 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "irqmux";
 			ranges = <0 0x09220000 0x6000>;
@@ -1164,7 +1164,7 @@
 			st,syscfg = <&syscfg_flash>;
 			reg = <0x0923f080 0x4>;
 			reg-names = "irqmux";
-			interrupts = <GIC_SPI 192 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "irqmux";
 			ranges = <0 0x09230000 0x3000>;

@@ -302,4 +302,16 @@ static inline bool kvm_arm_harden_branch_predictor(void)
 	return false;
 }
+#define KVM_SSBD_UNKNOWN		-1
+#define KVM_SSBD_FORCE_DISABLE		0
+#define KVM_SSBD_KERNEL			1
+#define KVM_SSBD_FORCE_ENABLE		2
+#define KVM_SSBD_MITIGATED		3
+
+static inline int kvm_arm_have_ssbd(void)
+{
+	/* No way to detect it yet, pretend it is not there. */
+	return KVM_SSBD_UNKNOWN;
+}
+
 #endif /* __ARM_KVM_HOST_H__ */

@@ -28,6 +28,13 @@
  */
 #define kern_hyp_va(kva)	(kva)
+/* Contrary to arm64, there is no need to generate a PC-relative address */
+#define hyp_symbol_addr(s)						\
+	({								\
+		typeof(s) *addr = &(s);					\
+		addr;							\
+	})
+
 /*
  * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
  */
@@ -247,6 +254,11 @@ static inline int kvm_map_vectors(void)
 	return 0;
 }
+static inline int hyp_map_aux_data(void)
+{
+	return 0;
+}
+
 #endif	/* !__ASSEMBLY__ */
 #endif /* __ARM_KVM_MMU_H__ */

@@ -718,7 +718,7 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
 }
 /* dst = dst >> src */
-static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
+static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
 				    bool sstk, struct jit_ctx *ctx) {
 	const u8 *tmp = bpf2a32[TMP_REG_1];
 	const u8 *tmp2 = bpf2a32[TMP_REG_2];
@@ -734,7 +734,7 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
 		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
 	}
-	/* Do LSH operation */
+	/* Do RSH operation */
 	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
 	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
 	emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
@@ -784,7 +784,7 @@ static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk,
 }
 /* dst = dst >> val */
-static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk,
+static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk,
 				    const u32 val, struct jit_ctx *ctx) {
 	const u8 *tmp = bpf2a32[TMP_REG_1];
 	const u8 *tmp2 = bpf2a32[TMP_REG_2];
@@ -1340,7 +1340,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	case BPF_ALU64 | BPF_RSH | BPF_K:
 		if (unlikely(imm > 63))
 			return -EINVAL;
-		emit_a32_lsr_i64(dst, dstk, imm, ctx);
+		emit_a32_rsh_i64(dst, dstk, imm, ctx);
 		break;
 	/* dst = dst << src */
 	case BPF_ALU64 | BPF_LSH | BPF_X:
@@ -1348,7 +1348,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		break;
 	/* dst = dst >> src */
 	case BPF_ALU64 | BPF_RSH | BPF_X:
-		emit_a32_lsr_r64(dst, src, dstk, sstk, ctx);
+		emit_a32_rsh_r64(dst, src, dstk, sstk, ctx);
 		break;
 	/* dst = dst >> src (signed) */
 	case BPF_ALU64 | BPF_ARSH | BPF_X:

@@ -93,20 +93,12 @@
 		regulator-always-on;
 	};
-	rsnd_ak4613: sound {
-		compatible = "simple-audio-card";
+	sound_card: sound {
+		compatible = "audio-graph-card";
-		simple-audio-card,format = "left_j";
-		simple-audio-card,bitclock-master = <&sndcpu>;
-		simple-audio-card,frame-master = <&sndcpu>;
 		label = "rcar-sound";
-		sndcpu: simple-audio-card,cpu {
-			sound-dai = <&rcar_sound>;
-		};
-		sndcodec: simple-audio-card,codec {
-			sound-dai = <&ak4613>;
-		};
+		dais = <&rsnd_port0>;
 	};
 	vbus0_usb2: regulator-vbus0-usb2 {
@@ -320,6 +312,12 @@
 	asahi-kasei,out4-single-end;
 	asahi-kasei,out5-single-end;
 	asahi-kasei,out6-single-end;
+	port {
+		ak4613_endpoint: endpoint {
+			remote-endpoint = <&rsnd_endpoint0>;
+		};
+	};
 };
 cs2000: clk_multiplier@4f {
@@ -538,10 +536,18 @@
 		 <&audio_clk_c>,
 		 <&cpg CPG_CORE CPG_AUDIO_CLK_I>;
-	rcar_sound,dai {
-		dai0 {
-			playback = <&ssi0 &src0 &dvc0>;
-			capture = <&ssi1 &src1 &dvc1>;
+	ports {
+		rsnd_port0: port@0 {
+			rsnd_endpoint0: endpoint {
+				remote-endpoint = <&ak4613_endpoint>;
+				dai-format = "left_j";
+				bitclock-master = <&rsnd_endpoint0>;
+				frame-master = <&rsnd_endpoint0>;
+				playback = <&ssi0 &src0 &dvc0>;
+				capture = <&ssi1 &src1 &dvc1>;
+			};
 		};
 	};
 };

@@ -304,6 +304,8 @@ CONFIG_GPIO_XGENE_SB=y
 CONFIG_GPIO_PCA953X=y
 CONFIG_GPIO_PCA953X_IRQ=y
 CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
 CONFIG_POWER_RESET_MSM=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y

@@ -5,6 +5,8 @@
 #include <asm/cpucaps.h>
 #include <asm/insn.h>
+#define ARM64_CB_PATCH ARM64_NCAPS
+
 #ifndef __ASSEMBLY__
 #include <linux/init.h>
@@ -12,6 +14,8 @@
 #include <linux/stddef.h>
 #include <linux/stringify.h>
+extern int alternatives_applied;
+
 struct alt_instr {
 	s32 orig_offset;	/* offset to original instruction */
 	s32 alt_offset;		/* offset to replacement instruction */
@@ -20,12 +24,19 @@ struct alt_instr {
 	u8  alt_len;		/* size of new instruction(s), <= orig_len */
 };
+typedef void (*alternative_cb_t)(struct alt_instr *alt,
+				 __le32 *origptr, __le32 *updptr, int nr_inst);
+
 void __init apply_alternatives_all(void);
 void apply_alternatives(void *start, size_t length);
-#define ALTINSTR_ENTRY(feature)						      \
+#define ALTINSTR_ENTRY(feature,cb)					      \
 	" .word 661b - .\n"				/* label           */ \
+	" .if " __stringify(cb) " == 0\n"				      \
 	" .word 663f - .\n"				/* new instruction */ \
+	" .else\n"							      \
+	" .word " __stringify(cb) "- .\n"		/* callback */	      \
+	" .endif\n"							      \
 	" .hword " __stringify(feature) "\n"		/* feature bit     */ \
 	" .byte 662b-661b\n"				/* source len      */ \
 	" .byte 664f-663f\n"				/* replacement len */
@@ -43,15 +54,18 @@ void apply_alternatives(void *start, size_t length);
  * but most assemblers die if insn1 or insn2 have a .inst. This should
  * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
  * containing commit 4e4d08cf7399b606 or c1baaddf8861).
+ *
+ * Alternatives with callbacks do not generate replacement instructions.
  */
-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)	\
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb)	\
 	".if "__stringify(cfg_enabled)" == 1\n"				\
 	"661:\n\t"							\
 	oldinstr "\n"							\
 	"662:\n"							\
 	".pushsection .altinstructions,\"a\"\n"				\
-	ALTINSTR_ENTRY(feature)						\
+	ALTINSTR_ENTRY(feature,cb)					\
 	".popsection\n"							\
+	" .if " __stringify(cb) " == 0\n"				\
 	".pushsection .altinstr_replacement, \"a\"\n"			\
 	"663:\n\t"							\
 	newinstr "\n"							\
@@ -59,11 +73,17 @@ void apply_alternatives(void *start, size_t length);
 	".popsection\n\t"						\
 	".org	. - (664b-663b) + (662b-661b)\n\t"			\
 	".org	. - (662b-661b) + (664b-663b)\n"			\
+	".else\n\t"							\
+	"663:\n\t"							\
+	"664:\n\t"							\
+	".endif\n"							\
 	".endif\n"
 #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)	\
-	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
+	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
+
+#define ALTERNATIVE_CB(oldinstr, cb) \
+	__ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
 #else
 #include <asm/assembler.h>
@@ -130,6 +150,14 @@ void apply_alternatives(void *start, size_t length);
 661:
 .endm
+.macro alternative_cb cb
+	.set .Lasm_alt_mode, 0
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
+	.popsection
+661:
+.endm
+
 /*
  * Provide the other half of the alternative code sequence.
  */
@@ -155,6 +183,13 @@ void apply_alternatives(void *start, size_t length);
 	.org	. - (662b-661b) + (664b-663b)
 .endm
+/*
+ * Callback-based alternative epilogue
+ */
+.macro alternative_cb_end
+662:
+.endm
+
 /*
  * Provides a trivial alternative or default sequence consisting solely
  * of NOPs. The number of NOPs is chosen automatically to match the

@@ -272,7 +272,11 @@ lr	.req	x30		// link register
 #else
 	adr_l	\dst, \sym
 #endif
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	\tmp, tpidr_el1
+alternative_else
+	mrs	\tmp, tpidr_el2
+alternative_endif
 	add	\dst, \dst, \tmp
 	.endm
@@ -283,7 +287,11 @@ lr	.req	x30		// link register
 	 */
 	.macro	ldr_this_cpu dst, sym, tmp
 	adr_l	\dst, \sym
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	\tmp, tpidr_el1
+alternative_else
+	mrs	\tmp, tpidr_el2
+alternative_endif
 	ldr	\dst, [\dst, \tmp]
 	.endm

@@ -229,7 +229,9 @@ static inline void __cmpwait_case_##name(volatile void *ptr,		\
 	unsigned long tmp;						\
 									\
 	asm volatile(							\
-	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n"			\
+	"	sevl\n"							\
+	"	wfe\n"							\
+	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n"			\
 	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
 	"	cbnz	%" #w "[tmp], 1f\n"				\
 	"	wfe\n"							\

@@ -33,6 +33,10 @@
 #define KVM_ARM64_DEBUG_DIRTY_SHIFT	0
 #define KVM_ARM64_DEBUG_DIRTY		(1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+#define	VCPU_WORKAROUND_2_FLAG_SHIFT	0
+#define	VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
+
+/* Translate a kernel address of @sym into its equivalent linear mapping */
 #define kvm_ksym_ref(sym)						\
 	({								\
 		void *val = &sym;					\
@@ -68,6 +72,43 @@ extern u32 __init_stage2_translation(void);
 extern void __qcom_hyp_sanitize_btac_predictors(void);
+/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+#define __hyp_this_cpu_ptr(sym)						\
+	({								\
+		void *__ptr = hyp_symbol_addr(sym);			\
+		__ptr += read_sysreg(tpidr_el2);			\
+		(typeof(&sym))__ptr;					\
+	 })
+
+#define __hyp_this_cpu_read(sym)					\
+	({								\
+		*__hyp_this_cpu_ptr(sym);				\
+	 })
+
 #else /* __ASSEMBLY__ */
+
+.macro hyp_adr_this_cpu reg, sym, tmp
+	adr_l	\reg, \sym
+	mrs	\tmp, tpidr_el2
+	add	\reg, \reg, \tmp
+.endm
+
+.macro hyp_ldr_this_cpu reg, sym, tmp
+	adr_l	\reg, \sym
+	mrs	\tmp, tpidr_el2
+	ldr	\reg, [\reg, \tmp]
+.endm
+
+.macro get_host_ctxt reg, tmp
+	hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+.endm
+
+.macro get_vcpu_ptr vcpu, ctxt
+	get_host_ctxt \ctxt, \vcpu
+	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+	kern_hyp_va	\vcpu
+.endm
+
 #endif
 #endif /* __ARM_KVM_ASM_H__ */

@@ -194,6 +194,8 @@ struct kvm_cpu_context {
 		u64 sys_regs[NR_SYS_REGS];
 		u32 copro[NR_COPRO_REGS];
 	};
+
+	struct kvm_vcpu *__hyp_running_vcpu;
 };
 typedef struct kvm_cpu_context kvm_cpu_context_t;
@@ -208,6 +210,9 @@ struct kvm_vcpu_arch {
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
+	/* State of various workarounds, see kvm_asm.h for bit assignment */
+	u64 workaround_flags;
+
 	/* Guest debug state */
 	u64 debug_flags;
@@ -348,10 +353,15 @@ int kvm_perf_teardown(void);
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+void __kvm_set_tpidr_el2(u64 tpidr_el2);
+DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+
 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 				       unsigned long hyp_stack_ptr,
 				       unsigned long vector_ptr)
 {
+	u64 tpidr_el2;
+
 	/*
 	 * Call initialization code, and switch to the full blown HYP code.
 	 * If the cpucaps haven't been finalized yet, something has gone very
@@ -360,6 +370,16 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 	 */
 	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
 	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
+
+	/*
+	 * Calculate the raw per-cpu offset without a translation from the
+	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
+	 * so that we can use adr_l to access per-cpu variables in EL2.
+	 */
+	tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
+		- (u64)kvm_ksym_ref(kvm_host_cpu_state);
+
+	kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
 }
 static inline void kvm_arch_hardware_unsetup(void) {}
@@ -392,4 +412,27 @@ static inline bool kvm_arm_harden_branch_predictor(void)
 	return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
 }
+#define KVM_SSBD_UNKNOWN		-1
+#define KVM_SSBD_FORCE_DISABLE		0
+#define KVM_SSBD_KERNEL			1
+#define KVM_SSBD_FORCE_ENABLE		2
+#define KVM_SSBD_MITIGATED		3
+
+static inline int kvm_arm_have_ssbd(void)
+{
+	switch (arm64_get_ssbd_state()) {
+	case ARM64_SSBD_FORCE_DISABLE:
+		return KVM_SSBD_FORCE_DISABLE;
+	case ARM64_SSBD_KERNEL:
+		return KVM_SSBD_KERNEL;
+	case ARM64_SSBD_FORCE_ENABLE:
+		return KVM_SSBD_FORCE_ENABLE;
+	case ARM64_SSBD_MITIGATED:
+		return KVM_SSBD_MITIGATED;
+	case ARM64_SSBD_UNKNOWN:
+	default:
+		return KVM_SSBD_UNKNOWN;
+	}
+}
+
 #endif /* __ARM64_KVM_HOST_H__ */

@ -130,6 +130,26 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
/*
* Obtain the PC-relative address of a kernel symbol
* s: symbol
*
* The goal of this macro is to return a symbol's address based on a
PC-relative computation, as opposed to loading the VA from a
* constant pool or something similar. This works well for HYP, as an
* absolute VA is guaranteed to be wrong. Only use this if trying to
* obtain the address of a symbol (i.e. not something you obtained by
* following a pointer).
*/
#define hyp_symbol_addr(s) \
({ \
typeof(s) *addr; \
asm("adrp %0, %1\n" \
"add %0, %0, :lo12:%1\n" \
: "=r" (addr) : "S" (&s)); \
addr; \
})
/*
* We currently only support a 40bit IPA.
*/
@ -363,5 +383,29 @@ static inline int kvm_map_vectors(void)
}
#endif
#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
static inline int hyp_map_aux_data(void)
{
int cpu, err;
for_each_possible_cpu(cpu) {
u64 *ptr;
ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
if (err)
return err;
}
return 0;
}
#else
static inline int hyp_map_aux_data(void)
{
return 0;
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
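For context on hyp_symbol_addr() above: at EL2 the kernel's VAs are not mapped, so a symbol's address must come from a PC-relative adrp/add pair rather than a constant pool. A minimal usage sketch (the variable and function names are hypothetical):

static int example_flag;

static void __hyp_text example_set_flag(void)
{
	/* adrp/add yields the EL2-reachable address of example_flag */
	int *p = hyp_symbol_addr(example_flag);

	*p = 1;
}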

@ -16,11 +16,15 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
#include <asm/alternative.h>
#include <asm/stack_pointer.h>
static inline void set_my_cpu_offset(unsigned long off)
{
asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
"msr tpidr_el2, %0",
ARM64_HAS_VIRT_HOST_EXTN)
:: "r" (off) : "memory");
}
static inline unsigned long __my_cpu_offset(void)
@ -31,7 +35,10 @@ static inline unsigned long __my_cpu_offset(void)
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
asm("mrs %0, tpidr_el1" : "=r" (off) :
asm(ALTERNATIVE("mrs %0, tpidr_el1",
"mrs %0, tpidr_el2",
ARM64_HAS_VIRT_HOST_EXTN)
: "=r" (off) :
"Q" (*(const unsigned long *)current_stack_pointer));
return off;

@ -92,8 +92,8 @@ void arch_setup_new_exec(void);
#define TIF_RESTORE_SIGMASK 20
#define TIF_SINGLESTEP 21
#define TIF_32BIT 22 /* 32bit process */
#define TIF_SSBD 23 /* Wants SSB mitigation */
#define TIF_MM_RELEASED 24
#define TIF_SSBD 25 /* Wants SSB mitigation */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)

@ -55,6 +55,7 @@ arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
ifeq ($(CONFIG_KVM),y)
arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o

@ -32,6 +32,8 @@
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
int alternatives_applied;
struct alt_region {
struct alt_instr *begin;
struct alt_instr *end;
@ -105,32 +107,53 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp
return insn;
}
static void patch_alternative(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
__le32 *replptr;
int i;
replptr = ALT_REPL_PTR(alt);
for (i = 0; i < nr_inst; i++) {
u32 insn;
insn = get_alt_insn(alt, origptr + i, replptr + i);
updptr[i] = cpu_to_le32(insn);
}
}
static void __apply_alternatives(void *alt_region, bool use_linear_alias)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
__le32 *origptr, *replptr, *updptr;
__le32 *origptr, *updptr;
alternative_cb_t alt_cb;
for (alt = region->begin; alt < region->end; alt++) {
u32 insn;
int i, nr_inst;
int nr_inst;
if (!cpus_have_cap(alt->cpufeature))
/* Use ARM64_CB_PATCH as an unconditional patch */
if (alt->cpufeature < ARM64_CB_PATCH &&
!cpus_have_cap(alt->cpufeature))
continue;
BUG_ON(alt->alt_len != alt->orig_len);
if (alt->cpufeature == ARM64_CB_PATCH)
BUG_ON(alt->alt_len != 0);
else
BUG_ON(alt->alt_len != alt->orig_len);
pr_info_once("patching kernel code\n");
origptr = ALT_ORIG_PTR(alt);
replptr = ALT_REPL_PTR(alt);
updptr = use_linear_alias ? lm_alias(origptr) : origptr;
nr_inst = alt->alt_len / sizeof(insn);
nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
for (i = 0; i < nr_inst; i++) {
insn = get_alt_insn(alt, origptr + i, replptr + i);
updptr[i] = cpu_to_le32(insn);
}
if (alt->cpufeature < ARM64_CB_PATCH)
alt_cb = patch_alternative;
else
alt_cb = ALT_REPL_PTR(alt);
alt_cb(alt, origptr, updptr, nr_inst);
flush_icache_range((uintptr_t)origptr,
(uintptr_t)(origptr + nr_inst));
@ -143,7 +166,6 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
*/
static int __apply_alternatives_multi_stop(void *unused)
{
static int patched = 0;
struct alt_region region = {
.begin = (struct alt_instr *)__alt_instructions,
.end = (struct alt_instr *)__alt_instructions_end,
@ -151,14 +173,14 @@ static int __apply_alternatives_multi_stop(void *unused)
/* We always have a CPU 0 at this point (__init) */
if (smp_processor_id()) {
while (!READ_ONCE(patched))
while (!READ_ONCE(alternatives_applied))
cpu_relax();
isb();
} else {
BUG_ON(patched);
BUG_ON(alternatives_applied);
__apply_alternatives(&region, true);
/* Barriers provided by the cache flushing */
WRITE_ONCE(patched, 1);
WRITE_ONCE(alternatives_applied, 1);
}
return 0;
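The rework above turns the patcher into a callback: entries below ARM64_CB_PATCH go through the default patch_alternative(), while an ARM64_CB_PATCH entry stores its own alternative_cb_t where the replacement instructions would be. A hedged sketch of such a callback (the function name is hypothetical; the signature matches patch_alternative() above):

static void example_patch_nops(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	/* overwrite the region with NOPs, e.g. when a workaround is unneeded */
	for (i = 0; i < nr_inst; i++)
		updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
}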

@ -131,11 +131,13 @@ int main(void)
BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
#endif
#ifdef CONFIG_CPU_PM
DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));

@ -362,7 +362,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
return required;
}
#endif /* CONFIG_ARM64_SSBD */
#endif /* CONFIG_ARM64_SSBD */
#define MIDR_RANGE(model, min, max) \
.def_scope = SCOPE_LOCAL_CPU, \

@ -958,6 +958,22 @@ static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
#endif
static int cpu_copy_el2regs(void *__unused)
{
/*
* Copy register values that aren't redirected by hardware.
*
* Before code patching, we only set tpidr_el1, so all CPUs need to copy
* this value to tpidr_el2 before we patch the code. Once we've done
* that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
* do anything here.
*/
if (!alternatives_applied)
write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
return 0;
}
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@ -1027,6 +1043,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_VIRT_HOST_EXTN,
.def_scope = SCOPE_SYSTEM,
.matches = runs_at_el2,
.enable = cpu_copy_el2regs,
},
{
.desc = "32-bit EL0 Support",

@ -4,6 +4,7 @@
*/
#include <linux/errno.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
@ -11,9 +12,7 @@
/*
* prctl interface for SSBD
* FIXME: Drop the below ifdefery once merged in 4.18.
*/
#ifdef PR_SPEC_STORE_BYPASS
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
int state = arm64_get_ssbd_state();
@ -107,4 +106,3 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
return -ENODEV;
}
}
#endif /* PR_SPEC_STORE_BYPASS */

@ -122,6 +122,10 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
kern_hyp_va x2
msr vbar_el2, x2
/* copy tpidr_el1 into tpidr_el2 for use by HYP */
mrs x1, tpidr_el1
msr tpidr_el2, x1
/* Hello, World! */
eret
ENDPROC(__kvm_hyp_init)

@ -62,9 +62,6 @@ ENTRY(__guest_enter)
// Store the host regs
save_callee_saved_regs x1
// Store the host_ctxt for use at exit time
str x1, [sp, #-16]!
add x18, x0, #VCPU_CONTEXT
// Restore guest regs x0-x17
@ -118,8 +115,7 @@ ENTRY(__guest_exit)
// Store the guest regs x19-x29, lr
save_callee_saved_regs x1
// Restore the host_ctxt from the stack
ldr x2, [sp], #16
get_host_ctxt x2, x3
// Now restore the host regs
restore_callee_saved_regs x2
@ -159,6 +155,10 @@ abort_guest_exit_end:
ENDPROC(__guest_exit)
ENTRY(__fpsimd_guest_restore)
// x0: esr
// x1: vcpu
// x2-x29,lr: vcpu regs
// vcpu x0-x1 on the stack
stp x2, x3, [sp, #-16]!
stp x4, lr, [sp, #-16]!
@ -173,7 +173,7 @@ alternative_else
alternative_endif
isb
mrs x3, tpidr_el2
mov x3, x1
ldr x0, [x3, #VCPU_HOST_CONTEXT]
kern_hyp_va x0

@ -57,13 +57,8 @@ ENDPROC(__vhe_hyp_call)
el1_sync: // Guest trapped into EL2
stp x0, x1, [sp, #-16]!
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs x1, esr_el2
alternative_else
mrs x1, esr_el1
alternative_endif
lsr x0, x1, #ESR_ELx_EC_SHIFT
mrs x0, esr_el2
lsr x0, x0, #ESR_ELx_EC_SHIFT
cmp x0, #ESR_ELx_EC_HVC64
ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
b.ne el1_trap
@ -111,14 +106,55 @@ el1_hvc_guest:
*/
ldr x1, [sp] // Guest's x0
eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
cbz w1, wa_epilogue
/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
ARM_SMCCC_ARCH_WORKAROUND_2)
cbnz w1, el1_trap
mov x0, x1
#ifdef CONFIG_ARM64_SSBD
alternative_cb arm64_enable_wa2_handling
b wa2_end
alternative_cb_end
get_vcpu_ptr x2, x0
ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
// Sanitize the argument and update the guest flags
ldr x1, [sp, #8] // Guest's x1
clz w1, w1 // Murphy's device:
lsr w1, w1, #5 // w1 = !!w1 without using
eor w1, w1, #1 // the flags...
bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
str x0, [x2, #VCPU_WORKAROUND_FLAGS]
/* Check that we actually need to perform the call */
hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
cbz x0, wa2_end
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
smc #0
/* Don't leak data from the SMC call */
mov x3, xzr
wa2_end:
mov x2, xzr
mov x1, xzr
#endif
wa_epilogue:
mov x0, xzr
add sp, sp, #16
eret
el1_trap:
get_vcpu_ptr x1, x0
mrs x0, esr_el2
lsr x0, x0, #ESR_ELx_EC_SHIFT
/*
* x0: ESR_EC
* x1: vcpu pointer
*/
/*
@ -132,19 +168,18 @@ alternative_if_not ARM64_HAS_NO_FPSIMD
b.eq __fpsimd_guest_restore
alternative_else_nop_endif
mrs x1, tpidr_el2
mov x0, #ARM_EXCEPTION_TRAP
b __guest_exit
el1_irq:
stp x0, x1, [sp, #-16]!
mrs x1, tpidr_el2
get_vcpu_ptr x1, x0
mov x0, #ARM_EXCEPTION_IRQ
b __guest_exit
el1_error:
stp x0, x1, [sp, #-16]!
mrs x1, tpidr_el2
get_vcpu_ptr x1, x0
mov x0, #ARM_EXCEPTION_EL1_SERROR
b __guest_exit
@ -179,6 +214,11 @@ ENTRY(__hyp_do_panic)
eret
ENDPROC(__hyp_do_panic)
ENTRY(__hyp_panic)
get_host_ctxt x0, x1
b hyp_panic
ENDPROC(__hyp_panic)
.macro invalid_vector label, target = __hyp_panic
.align 2
\label:
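The clz/lsr/eor sequence in el1_hvc_guest above normalizes the guest's x1 to 0 or 1 without touching the condition flags (hence "Murphy's device"). A C rendering of the same arithmetic, as a sketch:

/* clz w1, w1 ; lsr w1, w1, #5 ; eor w1, w1, #1  ==  !!w1 */
static unsigned int to_bool(unsigned int w1)
{
	unsigned int lz = w1 ? __builtin_clz(w1) : 32;	/* CLZ: 32 iff w1 == 0 */

	return (lz >> 5) ^ 1;	/* lz >> 5 is 1 only for lz == 32 */
}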

@ -15,6 +15,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>
@ -281,6 +282,39 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
write_sysreg_el2(*vcpu_pc(vcpu), elr);
}
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
if (!cpus_have_const_cap(ARM64_SSBD))
return false;
return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}
static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
/*
* The host runs with the workaround always present. If the
* guest wants it disabled, so be it...
*/
if (__needs_ssbd_off(vcpu) &&
__hyp_this_cpu_read(arm64_ssbd_callback_required))
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}
static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
/*
* If the guest has disabled the workaround, bring it back on.
*/
if (__needs_ssbd_off(vcpu) &&
__hyp_this_cpu_read(arm64_ssbd_callback_required))
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
@ -289,9 +323,9 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
u64 exit_code;
vcpu = kern_hyp_va(vcpu);
write_sysreg(vcpu, tpidr_el2);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
__sysreg_save_host_state(host_ctxt);
@ -311,6 +345,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__sysreg_restore_guest_state(guest_ctxt);
__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
__set_guest_arch_workaround_state(vcpu);
/* Jump in the fire! */
again:
exit_code = __guest_enter(vcpu, host_ctxt);
@ -367,6 +403,8 @@ again:
/* 0 falls through to be handled out of EL2 */
}
__set_host_arch_workaround_state(vcpu);
if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
u32 midr = read_cpuid_id();
@ -406,7 +444,8 @@ again:
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
struct kvm_vcpu *vcpu)
{
unsigned long str_va;
@ -420,35 +459,32 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
__hyp_do_panic(str_va,
spsr, elr,
read_sysreg(esr_el2), read_sysreg_el2(far),
read_sysreg(hpfar_el2), par,
(void *)read_sysreg(tpidr_el2));
read_sysreg(hpfar_el2), par, vcpu);
}
static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
struct kvm_vcpu *vcpu)
{
panic(__hyp_panic_string,
spsr, elr,
read_sysreg_el2(esr), read_sysreg_el2(far),
read_sysreg(hpfar_el2), par,
(void *)read_sysreg(tpidr_el2));
read_sysreg(hpfar_el2), par, vcpu);
}
static hyp_alternate_select(__hyp_call_panic,
__hyp_call_panic_nvhe, __hyp_call_panic_vhe,
ARM64_HAS_VIRT_HOST_EXTN);
void __hyp_text __noreturn __hyp_panic(void)
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
struct kvm_vcpu *vcpu = NULL;
u64 spsr = read_sysreg_el2(spsr);
u64 elr = read_sysreg_el2(elr);
u64 par = read_sysreg(par_el1);
if (read_sysreg(vttbr_el2)) {
struct kvm_vcpu *vcpu;
struct kvm_cpu_context *host_ctxt;
vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
vcpu = host_ctxt->__hyp_running_vcpu;
__timer_save_state(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
@ -456,7 +492,7 @@ void __hyp_text __noreturn __hyp_panic(void)
}
/* Call panic for real */
__hyp_call_panic()(spsr, elr, par);
__hyp_call_panic()(spsr, elr, par, vcpu);
unreachable();
}

@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
/*
* Non-VHE: Both host and guest must save everything.
*
* VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
* pstate, and guest must save everything.
* VHE: Host must save tpidr*_el0, actlr_el1, mdscr_el1, sp_el0,
* and guest must save everything.
*/
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
@ -36,11 +36,8 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
}
static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
@ -62,10 +59,13 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
}
static hyp_alternate_select(__sysreg_call_save_host_state,
@ -89,11 +89,8 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx
write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
}
static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
@ -115,10 +112,13 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
}
static hyp_alternate_select(__sysreg_call_restore_host_state,
@ -183,3 +183,8 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
}
void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
{
asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
}

@ -122,6 +122,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset PMU */
kvm_pmu_vcpu_reset(vcpu);
/* Default workaround setup is enabled (if supported) */
if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
/* Reset timer */
return kvm_timer_vcpu_reset(vcpu);
}

@ -721,11 +721,13 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
* Make sure we chose the upper bound of sizeof(struct page)
* correctly.
* correctly when sizing the VMEMMAP array.
*/
BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
#endif
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;

@ -116,7 +116,11 @@ ENTRY(cpu_do_suspend)
mrs x8, mdscr_el1
mrs x9, oslsr_el1
mrs x10, sctlr_el1
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs x11, tpidr_el1
alternative_else
mrs x11, tpidr_el2
alternative_endif
mrs x12, sp_el0
stp x2, x3, [x0]
stp x4, xzr, [x0, #16]
@ -162,7 +166,11 @@ ENTRY(cpu_do_resume)
msr mdscr_el1, x10
msr sctlr_el1, x12
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
msr tpidr_el1, x13
alternative_else
msr tpidr_el2, x13
alternative_endif
msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1

@ -22,17 +22,19 @@ $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
quiet_cmd_cp = CP $< $@$2
cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
quiet_cmd_strip = STRIP $@
quiet_cmd_strip = STRIP $< $@$2
cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \
-K _fdt_start vmlinux -o $@
-K _fdt_start $< -o $@$2
UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR)
UIMAGE_IN = $@
UIMAGE_OUT = $@.ub
$(obj)/simpleImage.%: vmlinux FORCE
$(call if_changed,cp,.unstrip)
$(call if_changed,objcopy)
$(call if_changed,uimage)
$(call if_changed,strip)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
$(call if_changed,strip,.strip)
@echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')'
clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb

@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
void ath79_ddr_wb_flush(u32 reg)
{
void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
/* Flush the DDR write buffer. */
__raw_writel(0x1, flush_reg);

@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
phys_addr_t size = resource_size(rsrc);
*start = fixup_bigphys_addr(rsrc->start, size);
*end = rsrc->start + size;
*end = rsrc->start + size - 1;
}

@ -76,6 +76,21 @@ do { \
___p1; \
})
#ifdef CONFIG_PPC_BOOK3S_64
/*
* Prevent execution of subsequent instructions until preceding branches have
* been fully resolved and are no longer executing speculatively.
*/
#define barrier_nospec_asm ori 31,31,0
// This also acts as a compiler barrier due to the memory clobber.
#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
#else /* !CONFIG_PPC_BOOK3S_64 */
#define barrier_nospec_asm
#define barrier_nospec()
#endif
#include <asm-generic/barrier.h>
#endif /* _ASM_POWERPC_BARRIER_H */
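barrier_nospec() is intended to sit between a user-controlled bounds check and the dependent load, closing the Spectre-v1 window. A hedged usage sketch (the table and index names are illustrative):

static int read_entry(const int *table, unsigned long idx, unsigned long nr)
{
	if (idx >= nr)
		return -EINVAL;

	/* no speculative load of table[idx] until the branch resolves */
	barrier_nospec();
	return table[idx];
}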

@ -9,11 +9,14 @@
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
#define L1_CACHE_SHIFT 4
#define MAX_COPY_PREFETCH 1
#define IFETCH_ALIGN_SHIFT 2
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT 6
#define MAX_COPY_PREFETCH 4
#define IFETCH_ALIGN_SHIFT 3
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH 4
#define IFETCH_ALIGN_SHIFT 3 /* 603 fetches 2 insn at a time */
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT 7
#else

@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned long *hpa);
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned long *hpa);
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif

@ -450,9 +450,11 @@ static void *eeh_add_virt_device(void *data, void *userdata)
driver = eeh_pcid_get(dev);
if (driver) {
eeh_pcid_put(dev);
if (driver->err_handler)
if (driver->err_handler) {
eeh_pcid_put(dev);
return NULL;
}
eeh_pcid_put(dev);
}
#ifdef CONFIG_PPC_POWERNV
@ -489,17 +491,19 @@ static void *eeh_rmv_device(void *data, void *userdata)
if (eeh_dev_removed(edev))
return NULL;
driver = eeh_pcid_get(dev);
if (driver) {
eeh_pcid_put(dev);
if (removed &&
eeh_pe_passed(edev->pe))
return NULL;
if (removed &&
driver->err_handler &&
driver->err_handler->error_detected &&
driver->err_handler->slot_reset)
if (removed) {
if (eeh_pe_passed(edev->pe))
return NULL;
driver = eeh_pcid_get(dev);
if (driver) {
if (driver->err_handler &&
driver->err_handler->error_detected &&
driver->err_handler->slot_reset) {
eeh_pcid_put(dev);
return NULL;
}
eeh_pcid_put(dev);
}
}
/* Remove it from PCI subsystem */

@ -958,7 +958,7 @@ start_here:
tovirt(r6,r6)
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r5, 0xf0(r0) /* Must match your Abatron config file */
stw r5, 0xf0(0) /* Must match your Abatron config file */
tophys(r5,r5)
stw r6, 0(r5)

@ -140,6 +140,8 @@ power9_restore_additional_sprs:
ld r4, STOP_MMCR2(r13)
mtspr SPRN_MMCR1, r3
mtspr SPRN_MMCR2, r4
ld r4, PACA_SPRG_VDSO(r13)
mtspr SPRN_SPRG3, r4
blr
/*

@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/of.h>

@ -334,6 +334,7 @@ static void __init prom_print_dec(unsigned long val)
call_prom("write", 3, 1, prom.stdout, buf+i, size);
}
__printf(1, 2)
static void __init prom_printf(const char *format, ...)
{
const char *p, *q, *s;
@ -1148,7 +1149,7 @@ static void __init prom_send_capabilities(void)
*/
cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
cores, NR_CPUS);
ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
@ -1230,7 +1231,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
if (align)
base = _ALIGN_UP(base, align);
prom_debug("alloc_up(%x, %x)\n", size, align);
prom_debug("%s(%lx, %lx)\n", __func__, size, align);
if (ram_top == 0)
prom_panic("alloc_up() called with mem not initialized\n");
@ -1241,7 +1242,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
for(; (base + size) <= alloc_top;
base = _ALIGN_UP(base + 0x100000, align)) {
prom_debug(" trying: 0x%x\n\r", base);
prom_debug(" trying: 0x%lx\n\r", base);
addr = (unsigned long)prom_claim(base, size, 0);
if (addr != PROM_ERROR && addr != 0)
break;
@ -1253,12 +1254,12 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
return 0;
alloc_bottom = addr + size;
prom_debug(" -> %x\n", addr);
prom_debug(" alloc_bottom : %x\n", alloc_bottom);
prom_debug(" alloc_top : %x\n", alloc_top);
prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
prom_debug(" rmo_top : %x\n", rmo_top);
prom_debug(" ram_top : %x\n", ram_top);
prom_debug(" -> %lx\n", addr);
prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
prom_debug(" alloc_top : %lx\n", alloc_top);
prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
prom_debug(" rmo_top : %lx\n", rmo_top);
prom_debug(" ram_top : %lx\n", ram_top);
return addr;
}
@ -1273,7 +1274,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
{
unsigned long base, addr = 0;
prom_debug("alloc_down(%x, %x, %s)\n", size, align,
prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
highmem ? "(high)" : "(low)");
if (ram_top == 0)
prom_panic("alloc_down() called with mem not initialized\n");
@ -1301,7 +1302,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
base = _ALIGN_DOWN(alloc_top - size, align);
for (; base > alloc_bottom;
base = _ALIGN_DOWN(base - 0x100000, align)) {
prom_debug(" trying: 0x%x\n\r", base);
prom_debug(" trying: 0x%lx\n\r", base);
addr = (unsigned long)prom_claim(base, size, 0);
if (addr != PROM_ERROR && addr != 0)
break;
@ -1312,12 +1313,12 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
alloc_top = addr;
bail:
prom_debug(" -> %x\n", addr);
prom_debug(" alloc_bottom : %x\n", alloc_bottom);
prom_debug(" alloc_top : %x\n", alloc_top);
prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
prom_debug(" rmo_top : %x\n", rmo_top);
prom_debug(" ram_top : %x\n", ram_top);
prom_debug(" -> %lx\n", addr);
prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
prom_debug(" alloc_top : %lx\n", alloc_top);
prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
prom_debug(" rmo_top : %lx\n", rmo_top);
prom_debug(" ram_top : %lx\n", ram_top);
return addr;
}
@ -1443,7 +1444,7 @@ static void __init prom_init_mem(void)
if (size == 0)
continue;
prom_debug(" %x %x\n", base, size);
prom_debug(" %lx %lx\n", base, size);
if (base == 0 && (of_platform & PLATFORM_LPAR))
rmo_top = size;
if ((base + size) > ram_top)
@ -1463,12 +1464,12 @@ static void __init prom_init_mem(void)
if (prom_memory_limit) {
if (prom_memory_limit <= alloc_bottom) {
prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
prom_memory_limit);
prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
prom_memory_limit);
prom_memory_limit = 0;
} else if (prom_memory_limit >= ram_top) {
prom_printf("Ignoring mem=%x >= ram_top.\n",
prom_memory_limit);
prom_printf("Ignoring mem=%lx >= ram_top.\n",
prom_memory_limit);
prom_memory_limit = 0;
} else {
ram_top = prom_memory_limit;
@ -1500,12 +1501,13 @@ static void __init prom_init_mem(void)
alloc_bottom = PAGE_ALIGN(prom_initrd_end);
prom_printf("memory layout at init:\n");
prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
prom_printf(" alloc_bottom : %x\n", alloc_bottom);
prom_printf(" alloc_top : %x\n", alloc_top);
prom_printf(" alloc_top_hi : %x\n", alloc_top_high);
prom_printf(" rmo_top : %x\n", rmo_top);
prom_printf(" ram_top : %x\n", ram_top);
prom_printf(" memory_limit : %lx (16 MB aligned)\n",
prom_memory_limit);
prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
prom_printf(" alloc_top : %lx\n", alloc_top);
prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
prom_printf(" rmo_top : %lx\n", rmo_top);
prom_printf(" ram_top : %lx\n", ram_top);
}
static void __init prom_close_stdin(void)
@ -1566,7 +1568,7 @@ static void __init prom_instantiate_opal(void)
return;
}
prom_printf("instantiating opal at 0x%x...", base);
prom_printf("instantiating opal at 0x%llx...", base);
if (call_prom_ret("call-method", 4, 3, rets,
ADDR("load-opal-runtime"),
@ -1582,10 +1584,10 @@ static void __init prom_instantiate_opal(void)
reserve_mem(base, size);
prom_debug("opal base = 0x%x\n", base);
prom_debug("opal align = 0x%x\n", align);
prom_debug("opal entry = 0x%x\n", entry);
prom_debug("opal size = 0x%x\n", (long)size);
prom_debug("opal base = 0x%llx\n", base);
prom_debug("opal align = 0x%llx\n", align);
prom_debug("opal entry = 0x%llx\n", entry);
prom_debug("opal size = 0x%llx\n", size);
prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
&base, sizeof(base));
@ -1662,7 +1664,7 @@ static void __init prom_instantiate_rtas(void)
prom_debug("rtas base = 0x%x\n", base);
prom_debug("rtas entry = 0x%x\n", entry);
prom_debug("rtas size = 0x%x\n", (long)size);
prom_debug("rtas size = 0x%x\n", size);
prom_debug("prom_instantiate_rtas: end...\n");
}
@ -1720,7 +1722,7 @@ static void __init prom_instantiate_sml(void)
if (base == 0)
prom_panic("Could not allocate memory for sml\n");
prom_printf("instantiating sml at 0x%x...", base);
prom_printf("instantiating sml at 0x%llx...", base);
memset((void *)base, 0, size);
@ -1739,8 +1741,8 @@ static void __init prom_instantiate_sml(void)
prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
&size, sizeof(size));
prom_debug("sml base = 0x%x\n", base);
prom_debug("sml size = 0x%x\n", (long)size);
prom_debug("sml base = 0x%llx\n", base);
prom_debug("sml size = 0x%x\n", size);
prom_debug("prom_instantiate_sml: end...\n");
}
@ -1841,7 +1843,7 @@ static void __init prom_initialize_tce_table(void)
prom_debug("TCE table: %s\n", path);
prom_debug("\tnode = 0x%x\n", node);
prom_debug("\tbase = 0x%x\n", base);
prom_debug("\tbase = 0x%llx\n", base);
prom_debug("\tsize = 0x%x\n", minsize);
/* Initialize the table to have a one-to-one mapping
@ -1928,12 +1930,12 @@ static void __init prom_hold_cpus(void)
}
prom_debug("prom_hold_cpus: start...\n");
prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
prom_debug(" 1) acknowledge = 0x%x\n",
prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
prom_debug(" 1) acknowledge = 0x%lx\n",
(unsigned long)acknowledge);
prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
/* Set the common spinloop variable, so all of the secondary cpus
* will block when they are awakened from their OF spinloop.
@ -1961,7 +1963,7 @@ static void __init prom_hold_cpus(void)
prom_getprop(node, "reg", &reg, sizeof(reg));
cpu_no = be32_to_cpu(reg);
prom_debug("cpu hw idx = %lu\n", cpu_no);
prom_debug("cpu hw idx = %u\n", cpu_no);
/* Init the acknowledge var which will be reset by
* the secondary cpu when it awakens from its OF
@ -1971,7 +1973,7 @@ static void __init prom_hold_cpus(void)
if (cpu_no != prom.cpu) {
/* Primary Thread of non-boot cpu or any thread */
prom_printf("starting cpu hw idx %lu... ", cpu_no);
prom_printf("starting cpu hw idx %u... ", cpu_no);
call_prom("start-cpu", 3, 0, node,
secondary_hold, cpu_no);
@ -1982,11 +1984,11 @@ static void __init prom_hold_cpus(void)
if (*acknowledge == cpu_no)
prom_printf("done\n");
else
prom_printf("failed: %x\n", *acknowledge);
prom_printf("failed: %lx\n", *acknowledge);
}
#ifdef CONFIG_SMP
else
prom_printf("boot cpu hw idx %lu\n", cpu_no);
prom_printf("boot cpu hw idx %u\n", cpu_no);
#endif /* CONFIG_SMP */
}
@ -2264,7 +2266,7 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
while ((*mem_start + needed) > *mem_end) {
unsigned long room, chunk;
prom_debug("Chunk exhausted, claiming more at %x...\n",
prom_debug("Chunk exhausted, claiming more at %lx...\n",
alloc_bottom);
room = alloc_top - alloc_bottom;
if (room > DEVTREE_CHUNK_SIZE)
@ -2490,7 +2492,7 @@ static void __init flatten_device_tree(void)
room = alloc_top - alloc_bottom - 0x4000;
if (room > DEVTREE_CHUNK_SIZE)
room = DEVTREE_CHUNK_SIZE;
prom_debug("starting device tree allocs at %x\n", alloc_bottom);
prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
/* Now try to claim that */
mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
@ -2553,7 +2555,7 @@ static void __init flatten_device_tree(void)
int i;
prom_printf("reserved memory map:\n");
for (i = 0; i < mem_reserve_cnt; i++)
prom_printf(" %x - %x\n",
prom_printf(" %llx - %llx\n",
be64_to_cpu(mem_reserve_map[i].base),
be64_to_cpu(mem_reserve_map[i].size));
}
@ -2563,9 +2565,9 @@ static void __init flatten_device_tree(void)
*/
mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
prom_printf("Device tree strings 0x%x -> 0x%x\n",
prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
dt_string_start, dt_string_end);
prom_printf("Device tree struct 0x%x -> 0x%x\n",
prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
dt_struct_start, dt_struct_end);
}
@ -2997,7 +2999,7 @@ static void __init prom_find_boot_cpu(void)
prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
prom.cpu = be32_to_cpu(rval);
prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
prom_debug("Booting CPU hw index = %d\n", prom.cpu);
}
static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
@ -3019,8 +3021,8 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
reserve_mem(prom_initrd_start,
prom_initrd_end - prom_initrd_start);
prom_debug("initrd_start=0x%x\n", prom_initrd_start);
prom_debug("initrd_end=0x%x\n", prom_initrd_end);
prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
}
#endif /* CONFIG_BLK_DEV_INITRD */
}
@ -3273,7 +3275,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/* Don't print anything after quiesce under OPAL, it crashes OFW */
if (of_platform != PLATFORM_OPAL) {
prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
prom_debug("->dt_header_start=0x%x\n", hdr);
prom_debug("->dt_header_start=0x%lx\n", hdr);
}
#ifdef CONFIG_PPC32

@ -433,7 +433,7 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
return H_TOO_HARD;
if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
return H_HARDWARE;
if (mm_iommu_mapped_inc(mem))

@ -262,7 +262,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
if (!mem)
return H_TOO_HARD;
if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
&hpa)))
return H_HARDWARE;
pua = (void *) vmalloc_to_phys(pua);
@ -431,7 +432,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
if (mem)
prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
IOMMU_PAGE_SHIFT_4K, &tces) == 0;
}
if (!prereg) {

@ -12,6 +12,7 @@
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/cache.h>
.text
@ -23,7 +24,7 @@ _GLOBAL(strncpy)
mtctr r5
addi r6,r3,-1
addi r4,r4,-1
.balign 16
.balign IFETCH_ALIGN_BYTES
1: lbzu r0,1(r4)
cmpwi 0,r0,0
stbu r0,1(r6)
@ -43,7 +44,7 @@ _GLOBAL(strncmp)
mtctr r5
addi r5,r3,-1
addi r4,r4,-1
.balign 16
.balign IFETCH_ALIGN_BYTES
1: lbzu r3,1(r5)
cmpwi 1,r3,0
lbzu r0,1(r4)
@ -77,7 +78,7 @@ _GLOBAL(memchr)
beq- 2f
mtctr r5
addi r3,r3,-1
.balign 16
.balign IFETCH_ALIGN_BYTES
1: lbzu r0,1(r3)
cmpw 0,r0,r4
bdnzf 2,1b

@ -19,6 +19,7 @@
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>
static DEFINE_MUTEX(mem_list_mutex);
@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
struct rcu_head rcu;
unsigned long used;
atomic64_t mapped;
unsigned int pageshift;
u64 ua; /* userspace address */
u64 entries; /* number of entries in hpas[] */
u64 *hpas; /* vmalloc'ed */
@ -126,6 +128,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
{
struct mm_iommu_table_group_mem_t *mem;
long i, j, ret = 0, locked_entries = 0;
unsigned int pageshift;
unsigned long flags;
struct page *page = NULL;
mutex_lock(&mem_list_mutex);
@ -160,6 +164,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
goto unlock_exit;
}
/*
* As a starting point for the maximum page size calculation, we use
* the natural alignment of @ua and @entries, which allows IOMMU pages
* smaller than huge pages but still bigger than PAGE_SIZE.
*/
mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
if (!mem->hpas) {
kfree(mem);
@ -200,6 +210,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
}
}
populate:
pageshift = PAGE_SHIFT;
if (PageCompound(page)) {
pte_t *pte;
struct page *head = compound_head(page);
unsigned int compshift = compound_order(head);
local_irq_save(flags); /* disables as well */
pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
local_irq_restore(flags);
/* Double check it is still the same pinned page */
if (pte && pte_page(*pte) == head &&
pageshift == compshift)
pageshift = max_t(unsigned int, pageshift,
PAGE_SHIFT);
}
mem->pageshift = min(mem->pageshift, pageshift);
mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
}
@ -350,7 +377,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
EXPORT_SYMBOL_GPL(mm_iommu_find);
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned long *hpa)
unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
const long entry = (ua - mem->ua) >> PAGE_SHIFT;
u64 *va = &mem->hpas[entry];
@ -358,6 +385,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
if (entry >= mem->entries)
return -EFAULT;
if (pageshift > mem->pageshift)
return -EFAULT;
*hpa = *va | (ua & ~PAGE_MASK);
return 0;
@ -365,7 +395,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned long *hpa)
unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
const long entry = (ua - mem->ua) >> PAGE_SHIFT;
void *va = &mem->hpas[entry];
@ -374,6 +404,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
if (entry >= mem->entries)
return -EFAULT;
if (pageshift > mem->pageshift)
return -EFAULT;
pa = (void *) vmalloc_to_phys(va);
if (!pa)
return -EFAULT;
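The __ffs() starting point above is simply the natural alignment of the region: the lowest set bit of ua | (entries << PAGE_SHIFT) bounds the largest power-of-two page that both the base address and the length permit. A worked sketch, assuming 4K base pages:

/* ua      = 0x20000000           (512 MB-aligned base)
 * entries = 0x4000               (0x4000 * 4K = 64 MB region)
 * ua | (entries << 12) = 0x24000000, __ffs() = 26
 * => IOMMU pages up to 1 << 26 = 64 MB satisfy both constraints
 */
unsigned int start_shift = __ffs(0x20000000UL | (0x4000UL << 12));	/* 26 */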

@ -62,14 +62,14 @@ static inline void slb_shadow_update(unsigned long ea, int ssize,
* updating it. No write barriers are needed here, provided
* we only update the current CPU's SLB shadow buffer.
*/
p->save_area[index].esid = 0;
p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags));
p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index));
WRITE_ONCE(p->save_area[index].esid, 0);
WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}
static inline void slb_shadow_clear(enum slb_index index)
{
get_slb_shadow()->save_area[index].esid = 0;
WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0);
}
static inline void create_shadowed_slbe(unsigned long ea, int ssize,

@ -203,25 +203,37 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
unsigned int i, ctx_idx = ctx->idx;
/* Load function address into r12 */
PPC_LI64(12, func);
/* For bpf-to-bpf function calls, the callee's address is unknown
* until the last extra pass. As seen above, we use PPC_LI64() to
* load the callee's address, but this may optimize the number of
* instructions required based on the nature of the address.
*
* Since we don't want the number of instructions emitted to change,
* we pad the optimized PPC_LI64() call with NOPs to guarantee that
* we always have a five-instruction sequence, which is the maximum
* that PPC_LI64() can emit.
*/
for (i = ctx->idx - ctx_idx; i < 5; i++)
PPC_NOP();
#ifdef PPC64_ELF_ABI_v1
/* func points to the function descriptor */
PPC_LI64(b2p[TMP_REG_2], func);
/* Load actual entry point from function descriptor */
PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
/* ... and move it to LR */
PPC_MTLR(b2p[TMP_REG_1]);
/*
* Load TOC from function descriptor at offset 8.
* We can clobber r2 since we get called through a
* function pointer (so caller will save/restore r2)
* and since we don't use a TOC ourself.
*/
PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
/* We can clobber r12 */
PPC_FUNC_ADDR(12, func);
PPC_MTLR(12);
PPC_BPF_LL(2, 12, 8);
/* Load actual entry point from function descriptor */
PPC_BPF_LL(12, 12, 0);
#endif
PPC_MTLR(12);
PPC_BLRL();
}

@ -28,6 +28,8 @@
#include <asm/sections.h>
#include <asm/time.h>
#include <platforms/chrp/chrp.h>
extern spinlock_t rtc_lock;
#define NVRAM_AS0 0x74
@ -63,7 +65,7 @@ long __init chrp_time_init(void)
return 0;
}
int chrp_cmos_clock_read(int addr)
static int chrp_cmos_clock_read(int addr)
{
if (nvram_as1 != 0)
outb(addr>>8, nvram_as1);
@ -71,7 +73,7 @@ int chrp_cmos_clock_read(int addr)
return (inb(nvram_data));
}
void chrp_cmos_clock_write(unsigned long val, int addr)
static void chrp_cmos_clock_write(unsigned long val, int addr)
{
if (nvram_as1 != 0)
outb(addr>>8, nvram_as1);

@ -35,6 +35,8 @@
*/
#define HW_BROADWAY_ICR 0x00
#define HW_BROADWAY_IMR 0x04
#define HW_STARLET_ICR 0x08
#define HW_STARLET_IMR 0x0c
/*
@ -74,6 +76,9 @@ static void hlwd_pic_unmask(struct irq_data *d)
void __iomem *io_base = irq_data_get_irq_chip_data(d);
setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
/* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. */
clrbits32(io_base + HW_STARLET_IMR, 1 << irq);
}

@ -468,7 +468,7 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
boot_infos_t *bi = (boot_infos_t *) r4;
unsigned long hdr;
unsigned long space;
unsigned long ptr, x;
unsigned long ptr;
char *model;
unsigned long offset = reloc_offset();
@ -562,6 +562,8 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
* MMU switched OFF, so this should not be useful anymore.
*/
if (bi->version < 4) {
unsigned long x __maybe_unused;
bootx_printf("Touching pages...\n");
/*

@ -352,6 +352,7 @@ static int pmac_late_init(void)
}
machine_late_initcall(powermac, pmac_late_init);
void note_bootable_part(dev_t dev, int part, int goodness);
/*
* This is __ref because we check for "initializing" before
* touching any of the __init sensitive things and "initializing"

@ -116,7 +116,7 @@ struct hws_basic_entry {
struct hws_diag_entry {
unsigned int def:16; /* 0-15 Data Entry Format */
unsigned int R:14; /* 16-19 and 20-30 reserved */
unsigned int R:15; /* 16-19 and 20-30 reserved */
unsigned int I:1; /* 31 entry valid or invalid */
u8 data[]; /* Machine-dependent sample data */
} __packed;
@ -132,7 +132,9 @@ struct hws_trailer_entry {
unsigned int f:1; /* 0 - Block Full Indicator */
unsigned int a:1; /* 1 - Alert request control */
unsigned int t:1; /* 2 - Timestamp format */
unsigned long long:61; /* 3 - 63: Reserved */
unsigned int :29; /* 3 - 31: Reserved */
unsigned int bsdes:16; /* 32-47: size of basic SDE */
unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */
};
unsigned long long flags; /* 0 - 63: All indicators */
};

@ -219,7 +219,9 @@ CONFIG_DM_MIRROR=y
CONFIG_DM_ZERO=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1
CONFIG_DM_VERITY_FEC=y
CONFIG_DM_ANDROID_VERITY=y
CONFIG_NETDEVICES=y
CONFIG_NETCONSOLE=y
CONFIG_NETCONSOLE_DYNAMIC=y
@ -447,6 +449,12 @@ CONFIG_SECURITY_PATH=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
CONFIG_CRYPTO_RSA=y
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
CONFIG_X509_CERTIFICATE_PARSER=y
CONFIG_SYSTEM_TRUSTED_KEYRING=y
CONFIG_SYSTEM_TRUSTED_KEYS="verity_dev_keys.x509"

@ -933,7 +933,7 @@ ENTRY(\sym)
call \do_sym
jmp error_exit /* %ebx: no swapgs flag */
jmp error_exit
.endif
END(\sym)
.endm
@ -1166,7 +1166,6 @@ END(paranoid_exit)
/*
* Save all registers in pt_regs, and switch GS if needed.
* Return: EBX=0: came from user mode; EBX=1: otherwise
*/
ENTRY(error_entry)
UNWIND_HINT_FUNC
@ -1213,7 +1212,6 @@ ENTRY(error_entry)
* for these here too.
*/
.Lerror_kernelspace:
incl %ebx
leaq native_irq_return_iret(%rip), %rcx
cmpq %rcx, RIP+8(%rsp)
je .Lerror_bad_iret
@ -1247,28 +1245,20 @@ ENTRY(error_entry)
/*
* Pretend that the exception came from user mode: set up pt_regs
* as if we faulted immediately after IRET and clear EBX so that
* error_exit knows that we will be returning to user mode.
* as if we faulted immediately after IRET.
*/
mov %rsp, %rdi
call fixup_bad_iret
mov %rax, %rsp
decl %ebx
jmp .Lerror_entry_from_usermode_after_swapgs
END(error_entry)
/*
* On entry, EBX is a "return to kernel mode" flag:
* 1: already in kernel mode, don't need SWAPGS
* 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
*/
ENTRY(error_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
testl %ebx, %ebx
jnz retint_kernel
testb $3, CS(%rsp)
jz retint_kernel
jmp retint_user
END(error_exit)
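The replacement for the %ebx flag reads the saved CS selector instead: its low two bits hold the privilege level, so testb $3, CS(%rsp) is zero exactly when the exception came from kernel mode, and nothing has to survive the intervening C calls. The same predicate in C, as a sketch:

/* equivalent of "testb $3, CS(%rsp); jz retint_kernel" */
static bool came_from_user_mode(const struct pt_regs *regs)
{
	return (regs->cs & 3) != 0;	/* low 2 bits of CS = CPL */
}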

@ -410,9 +410,11 @@ static int alloc_bts_buffer(int cpu)
ds->bts_buffer_base = (unsigned long) cea;
ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
ds->bts_index = ds->bts_buffer_base;
max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
ds->bts_absolute_maximum = ds->bts_buffer_base + max;
ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16);
max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
ds->bts_absolute_maximum = ds->bts_buffer_base +
max * BTS_RECORD_SIZE;
ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
(max / 16) * BTS_RECORD_SIZE;
return 0;
}
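The alloc_bts_buffer() change switches max from a byte count to a record count so the interrupt threshold stays record-aligned. A worked sketch, assuming BTS_BUFFER_SIZE = 65536 and BTS_RECORD_SIZE = 24:

/* old: max = 24 * (65536 / 24) = 65520 (bytes)
 *      threshold = base + 65520 - 65520 / 16
 *                = base + 61425          -> not a multiple of 24
 * new: max = 65536 / 24 = 2730 (records)
 *      threshold = base + 2730 * 24 - (2730 / 16) * 24
 *                = base + 61440          -> exactly 2560 records
 */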

@ -218,7 +218,7 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e
u64 prev_count, new_count, delta;
int shift;
if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
shift = 64 - uncore_fixed_ctr_bits(box);
else
shift = 64 - uncore_perf_ctr_bits(box);

@ -246,7 +246,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
{
struct hw_perf_event *hwc = &event->hw;
if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
if (hwc->idx == UNCORE_PMC_IDX_FIXED)
wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);

@ -7,8 +7,6 @@
#ifndef _ASM_X86_MACH_DEFAULT_APM_H
#define _ASM_X86_MACH_DEFAULT_APM_H
#include <asm/nospec-branch.h>
#ifdef APM_ZERO_SEGS
# define APM_DO_ZERO_SEGS \
"pushl %%ds\n\t" \
@ -34,7 +32,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@ -47,7 +44,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
"=S" (*esi)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
firmware_restrict_branch_speculation_end();
}
static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@ -60,7 +56,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@ -73,7 +68,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
"=S" (si)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
firmware_restrict_branch_speculation_end();
return error;
}

@ -46,6 +46,65 @@
#define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di)
#ifndef __x86_64__
/* 32 bit */
#define _ASM_ARG1 _ASM_AX
#define _ASM_ARG2 _ASM_DX
#define _ASM_ARG3 _ASM_CX
#define _ASM_ARG1L eax
#define _ASM_ARG2L edx
#define _ASM_ARG3L ecx
#define _ASM_ARG1W ax
#define _ASM_ARG2W dx
#define _ASM_ARG3W cx
#define _ASM_ARG1B al
#define _ASM_ARG2B dl
#define _ASM_ARG3B cl
#else
/* 64 bit */
#define _ASM_ARG1 _ASM_DI
#define _ASM_ARG2 _ASM_SI
#define _ASM_ARG3 _ASM_DX
#define _ASM_ARG4 _ASM_CX
#define _ASM_ARG5 r8
#define _ASM_ARG6 r9
#define _ASM_ARG1Q rdi
#define _ASM_ARG2Q rsi
#define _ASM_ARG3Q rdx
#define _ASM_ARG4Q rcx
#define _ASM_ARG5Q r8
#define _ASM_ARG6Q r9
#define _ASM_ARG1L edi
#define _ASM_ARG2L esi
#define _ASM_ARG3L edx
#define _ASM_ARG4L ecx
#define _ASM_ARG5L r8d
#define _ASM_ARG6L r9d
#define _ASM_ARG1W di
#define _ASM_ARG2W si
#define _ASM_ARG3W dx
#define _ASM_ARG4W cx
#define _ASM_ARG5W r8w
#define _ASM_ARG6W r9w
#define _ASM_ARG1B dil
#define _ASM_ARG2B sil
#define _ASM_ARG3B dl
#define _ASM_ARG4B cl
#define _ASM_ARG5B r8b
#define _ASM_ARG6B r9b
#endif
/*
* Macros to generate condition code outputs from inline assembly,
* The output operand must be type "bool".

@ -13,7 +13,7 @@
* Interrupt control:
*/
static inline unsigned long native_save_fl(void)
extern inline unsigned long native_save_fl(void)
{
unsigned long flags;

@ -58,6 +58,7 @@ obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
obj-y += irqflags.o
obj-y += process.o
obj-y += fpu/

@ -580,6 +580,9 @@ static u32 skx_deadline_rev(void)
case 0x04: return 0x02000014;
}
if (boot_cpu_data.x86_stepping > 4)
return 0;
return ~0U;
}

@ -240,6 +240,7 @@
#include <asm/olpc.h>
#include <asm/paravirt.h>
#include <asm/reboot.h>
#include <asm/nospec-branch.h>
#if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
extern int (*console_blank_hook)(int);
@ -614,11 +615,13 @@ static long __apm_bios_call(void *_call)
gdt[0x40 / 8] = bad_bios_desc;
apm_irq_save(flags);
firmware_restrict_branch_speculation_start();
APM_DO_SAVE_SEGS;
apm_bios_call_asm(call->func, call->ebx, call->ecx,
&call->eax, &call->ebx, &call->ecx, &call->edx,
&call->esi);
APM_DO_RESTORE_SEGS;
firmware_restrict_branch_speculation_end();
apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
@ -690,10 +693,12 @@ static long __apm_bios_call_simple(void *_call)
gdt[0x40 / 8] = bad_bios_desc;
apm_irq_save(flags);
firmware_restrict_branch_speculation_start();
APM_DO_SAVE_SEGS;
error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
&call->eax);
APM_DO_RESTORE_SEGS;
firmware_restrict_branch_speculation_end();
apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();

@ -2150,9 +2150,6 @@ static ssize_t store_int_with_restart(struct device *s,
if (check_interval == old_check_interval)
return ret;
if (check_interval < 1)
check_interval = 1;
mutex_lock(&mce_sysfs_mutex);
mce_restart();
mutex_unlock(&mce_sysfs_mutex);

@ -70,7 +70,7 @@ static DEFINE_MUTEX(microcode_mutex);
/*
* Serialize late loading so that CPUs get updated one-by-one.
*/
static DEFINE_SPINLOCK(update_lock);
static DEFINE_RAW_SPINLOCK(update_lock);
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
@ -560,9 +560,9 @@ static int __reload_late(void *info)
if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
return -1;
spin_lock(&update_lock);
raw_spin_lock(&update_lock);
apply_microcode_local(&err);
spin_unlock(&update_lock);
raw_spin_unlock(&update_lock);
/* siblings return UCODE_OK because their engine got updated already */
if (err > UCODE_NFOUND) {

@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/asm.h>
#include <asm/export.h>
#include <linux/linkage.h>
/*
* unsigned long native_save_fl(void)
*/
ENTRY(native_save_fl)
pushf
pop %_ASM_AX
ret
ENDPROC(native_save_fl)
EXPORT_SYMBOL(native_save_fl)
/*
* void native_restore_fl(unsigned long flags)
* %eax/%rdi: flags
*/
ENTRY(native_restore_fl)
push %_ASM_ARG1
popf
ret
ENDPROC(native_restore_fl)
EXPORT_SYMBOL(native_restore_fl)
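This new irqflags.S pairs with the static inline → extern inline change in the irqflags.h hunk above: with GNU extern inline semantics, call sites may still inline the pushf/pop, but no out-of-line body is emitted, so the .S file supplies the one real symbol whose address paravirt code can take. A hedged sketch of the header side:

/* inlinable at call sites; the emitted definition lives in irqflags.S */
extern inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	asm volatile("pushf ; pop %0" : "=rm" (flags) : : "memory");
	return flags;
}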

@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
if (cache->nobjs >= min)
return 0;
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
page = (void *)__get_free_page(GFP_KERNEL);
page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
if (!page)
return -ENOMEM;
cache->objects[cache->nobjs++] = page;

@ -7354,6 +7354,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
HRTIMER_MODE_REL_PINNED);
vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
vmx->nested.vpid02 = allocate_vpid();
vmx->nested.vmxon = true;
return 0;
@ -9802,10 +9804,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto free_vmcs;
}
if (nested) {
if (nested)
nested_vmx_setup_ctls_msrs(vmx);
vmx->nested.vpid02 = allocate_vpid();
}
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
@ -9822,7 +9822,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
return &vmx->vcpu;
free_vmcs:
free_vpid(vmx->nested.vpid02);
free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
kfree(vmx->guest_msrs);

@ -430,6 +430,7 @@ static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
* data back is to call:
*/
tick_nohz_idle_enter();
tick_nohz_idle_stop_tick_protected();
cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE);
}

@ -54,6 +54,9 @@
* charge of setting up its own stack, GDT and IDT.
*/
#define PVH_GDT_ENTRY_CANARY 4
#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8)
ENTRY(pvh_start_xen)
cld
@ -98,6 +101,12 @@ ENTRY(pvh_start_xen)
/* 64-bit entry point. */
.code64
1:
/* Set base address in stack canary descriptor. */
mov $MSR_GS_BASE,%ecx
mov $_pa(canary), %eax
xor %edx, %edx
wrmsr
call xen_prepare_pvh
/* startup_64 expects boot_params in %rsi. */
@ -107,6 +116,17 @@ ENTRY(pvh_start_xen)
#else /* CONFIG_X86_64 */
/* Set base address in stack canary descriptor. */
movl $_pa(gdt_start),%eax
movl $_pa(canary),%ecx
movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax)
shrl $16, %ecx
movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax)
movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax)
mov $PVH_CANARY_SEL,%eax
mov %eax,%gs
call mk_early_pgtbl_32
mov $_pa(initial_page_table), %eax
@ -150,9 +170,13 @@ gdt_start:
.quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* __KERNEL_CS */
#endif
.quad GDT_ENTRY(0xc092, 0, 0xfffff) /* __KERNEL_DS */
.quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */
gdt_end:
.balign 4
.balign 16
canary:
.fill 48, 1, 0
early_stack:
.fill 256, 1, 0
early_stack_end:
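The 32-bit path above hand-packs the canary address into GDT entry 4, whose base starts out as 0 (GDT_ENTRY(0x4090, 0, 0x18)). The scattered byte layout of an x86 segment descriptor is what makes the movw/movb dance look odd; the same packing in C, as a sketch with a hypothetical helper:

static void set_gdt_base(u64 *entry, u32 base)
{
	*entry |= (u64)(base & 0xffff) << 16;		/* base[15:0]  -> bytes 2-3 */
	*entry |= (u64)((base >> 16) & 0xff) << 32;	/* base[23:16] -> byte 4 */
	*entry |= (u64)(base >> 24) << 56;		/* base[31:24] -> byte 7 */
}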

@ -1678,7 +1678,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
if (!RB_EMPTY_NODE(&rq->rb_node))
goto end;
spin_lock_irq(&bfqq->bfqd->lock);
/*
* If next and rq belong to the same bfq_queue and next is older
@ -1702,7 +1701,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
bfq_remove_request(q, next);
spin_unlock_irq(&bfqq->bfqd->lock);
end:
bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
}

@@ -881,16 +881,16 @@ EXPORT_SYMBOL(bio_add_page);
  */
 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 {
-	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
 	struct page **pages = (struct page **)bv;
-	size_t offset, diff;
+	size_t offset;
 	ssize_t size;
 
 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
 	if (unlikely(size <= 0))
 		return size ? size : -EFAULT;
-	nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+	idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
 
 	/*
 	 * Deep magic below:  We need to walk the pinned pages backwards
@@ -903,17 +903,15 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	bio->bi_iter.bi_size += size;
 	bio->bi_vcnt += nr_pages;
 
-	diff = (nr_pages * PAGE_SIZE - offset) - size;
-	while (nr_pages--) {
-		bv[nr_pages].bv_page = pages[nr_pages];
-		bv[nr_pages].bv_len = PAGE_SIZE;
-		bv[nr_pages].bv_offset = 0;
+	while (idx--) {
+		bv[idx].bv_page = pages[idx];
+		bv[idx].bv_len = PAGE_SIZE;
+		bv[idx].bv_offset = 0;
 	}
 
 	bv[0].bv_offset += offset;
 	bv[0].bv_len -= offset;
-	if (diff)
-		bv[bio->bi_vcnt - 1].bv_len -= diff;
+	bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
 
 	iov_iter_advance(iter, size);
 	return 0;
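
The fix above is twofold: the backwards page walk now consumes a separate index (idx) so nr_pages is still valid afterwards, and the last bvec is trimmed through bv[nr_pages - 1] rather than bv[bio->bi_vcnt - 1]; since bv already points past the bio's existing vectors, the old index overshot whenever bi_vcnt was non-zero on entry. A standalone sketch of the trim arithmetic, with made-up example values (not from the patch):

  #include <stdio.h>

  #define EX_PAGE_SIZE 4096UL     /* illustrative stand-in for PAGE_SIZE */

  int main(void)
  {
          /* assumed example: 5000 bytes starting 300 bytes into a page */
          unsigned long offset = 300, size = 5000;
          unsigned long nr_pages = (size + offset + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;
          unsigned long trim = nr_pages * EX_PAGE_SIZE - offset - size;

          /* with two pages: first bvec loses 'offset', last loses 'trim' */
          unsigned long first_len = EX_PAGE_SIZE - offset;        /* 3796 */
          unsigned long last_len = EX_PAGE_SIZE - trim;           /* 1204 */

          printf("nr_pages=%lu trim=%lu total=%lu\n",
                 nr_pages, trim, first_len + last_len);  /* 2, 2892, 5000 */
          return 0;
  }
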
@@ -1891,6 +1889,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 		bio_integrity_trim(split);
 
 	bio_advance(bio, split->bi_iter.bi_size);
+	bio->bi_iter.bi_done = 0;
 
 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
 		bio_set_flag(split, BIO_TRACE_COMPLETION);

@@ -781,7 +781,6 @@ EXPORT_SYMBOL(blk_alloc_queue);
 int blk_queue_enter(struct request_queue *q, bool nowait)
 {
 	while (true) {
-		int ret;
 
 		if (percpu_ref_tryget_live(&q->q_usage_counter))
 			return 0;
@@ -798,13 +797,11 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
 		 */
 		smp_rmb();
 
-		ret = wait_event_interruptible(q->mq_freeze_wq,
-				!atomic_read(&q->mq_freeze_depth) ||
-				blk_queue_dying(q));
+		wait_event(q->mq_freeze_wq,
+			   !atomic_read(&q->mq_freeze_depth) ||
+			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
-		if (ret)
-			return ret;
 	}
 }

@@ -263,5 +263,46 @@ error:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(verify_pkcs7_signature);
 
 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
+
+/**
+ * verify_signature_one - Verify a signature with keys from given keyring
+ * @sig: The signature to be verified
+ * @trusted_keys: Trusted keys to use (NULL for builtin trusted keys only,
+ *					(void *)1UL for all trusted keys).
+ * @keyid: key description (not partial)
+ */
+int verify_signature_one(const struct public_key_signature *sig,
+			 struct key *trusted_keys, const char *keyid)
+{
+	key_ref_t ref;
+	struct key *key;
+	int ret;
+
+	if (!sig)
+		return -EBADMSG;
+
+	if (!trusted_keys) {
+		trusted_keys = builtin_trusted_keys;
+	} else if (trusted_keys == (void *)1UL) {
+#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+		trusted_keys = secondary_trusted_keys;
+#else
+		trusted_keys = builtin_trusted_keys;
+#endif
+	}
+
+	ref = keyring_search(make_key_ref(trusted_keys, 1),
+			     &key_type_asymmetric, keyid);
+	if (IS_ERR(ref)) {
+		pr_err("Asymmetric key (%s) not found in keyring(%s)\n",
+		       keyid, trusted_keys->description);
+		return -ENOKEY;
+	}
+
+	key = key_ref_to_ptr(ref);
+	ret = verify_signature(key, sig);
+	key_put(key);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(verify_signature_one);
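
For context, a caller of the new helper might look roughly like the sketch below; the wrapper function, buffers, key description and algorithm strings are assumptions for illustration, with field names as in struct public_key_signature from include/crypto/public_key.h:

  /* Sketch only, kernel context assumed; not part of this patch.
   * The header declaring verify_signature_one() is also assumed. */
  #include <crypto/public_key.h>

  static int example_verify(const u8 *digest, u8 digest_len,
                            u8 *sig_data, u32 sig_len)
  {
          struct public_key_signature sig = {
                  .digest         = (u8 *)digest,
                  .digest_size    = digest_len,
                  .s              = sig_data,
                  .s_size         = sig_len,
                  .pkey_algo      = "rsa",        /* assumed */
                  .hash_algo      = "sha256",     /* assumed */
          };

          /* (void *)1UL selects all trusted keyrings, per the comment above */
          return verify_signature_one(&sig, (void *)1UL, "example-key");
  }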

@@ -1183,8 +1183,10 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 
 		/* make one iovec available as scatterlist */
 		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
-		if (err < 0)
+		if (err < 0) {
+			rsgl->sg_num_bytes = 0;
 			return err;
+		}
 
 		/* chain the new scatterlist with previous one */
 		if (areq->last_rsgl)

@@ -108,6 +108,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
 			       CRYPTO_TFM_RES_MASK);
 
 out:
+	memzero_explicit(&keys, sizeof(keys));
 	return err;
 
 badkey:

@@ -90,6 +90,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
 			       CRYPTO_TFM_RES_MASK);
 
 out:
+	memzero_explicit(&keys, sizeof(keys));
 	return err;
 
 badkey:
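
Both hunks wipe the parsed keys with memzero_explicit() on every exit path because a plain memset() of a local buffer that is never read again may be optimized away as a dead store, leaving key material on the stack. A userspace illustration of the same idiom (a hand-rolled stand-in, not the kernel implementation):

  #include <stdio.h>
  #include <string.h>

  /* Illustrative stand-in for the kernel's memzero_explicit(): the empty
   * asm with a "memory" clobber stops the compiler from eliding the
   * memset when the buffer is about to go out of scope. */
  static void zero_explicit(void *p, size_t n)
  {
          memset(p, 0, n);
          __asm__ __volatile__("" : : "r"(p) : "memory");
  }

  int main(void)
  {
          char keys[32] = "example key material";  /* made-up secret */

          /* ... use keys ... then wipe before the frame is reused */
          zero_explicit(keys, sizeof(keys));
          printf("first byte after wipe: %d\n", keys[0]);
          return 0;
  }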

@@ -69,6 +69,10 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_SAVE_CTX			BIT(4)
 #define LPSS_NO_D3_DELAY		BIT(5)
 
+/* Crystal Cove PMIC shares same ACPI ID between different platforms */
+#define BYT_CRC_HRV			2
+#define CHT_CRC_HRV			3
+
 struct lpss_private_data;
 
 struct lpss_device_desc {
@@ -162,7 +166,7 @@ static void byt_pwm_setup(struct lpss_private_data *pdata)
 	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
 		return;
 
-	if (!acpi_dev_present("INT33FD", NULL, -1))
+	if (!acpi_dev_present("INT33FD", NULL, BYT_CRC_HRV))
 		pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
 }

Some files were not shown because too many files have changed in this diff.
