Compare commits

...

304 Commits

Author SHA1 Message Date
Jenna-they-them 125e63334f ksu: Backport namespace unmount from 5.9 1 month ago
Hou Tao 14ff4f17fd bpf: Add map and need_defer parameters to .map_fd_put_ptr() 1 month ago
Andrii Nakryiko fd36b5cc03 bpf: Fix map leak in HASH_OF_MAPS map 1 month ago
Florian Lehner ec6693b706 bpf, lpm: Fix check prefixlen before walking trie 1 month ago
Christian Brauner 3be4e30627 UPSTREAM: binderfs: use __u32 for device numbers 1 month ago
Carlos Llamas a9065ae913 binder: fix unused alloc->free_async_space 1 month ago
Carlos Llamas 1c2ca3218e binder: fix race between mmput() and do_exit() 1 month ago
Carlos Llamas 0238db21cc binder: fix async space check for 0-sized buffers 1 month ago
Carlos Llamas d0269a4bf3 binder: fix comment on binder_alloc_new_buf() return value 1 month ago
Schspa Shi 8e63ed4a80 binder: fix atomic sleep when get extended error 1 month ago
Carlos Llamas f312a58d29 BACKPORT: binderfs: add extended_error feature entry 1 month ago
Carlos Llamas fd012ee7c2 BACKPORT: binder: add BINDER_GET_EXTENDED_ERROR ioctl 1 month ago
Carlos Llamas a41985daf3 BACKPORT: binderfs: add support for feature files 1 month ago
Sherry Yang 0bd23b8205 android: binder: Rate-limit debug and userspace triggered err msgs 1 month ago
Carlos Llamas 9d1d4471e9 ANDROID: binder: retry security_secid_to_secctx() 1 month ago
Luca Stefani b7a5fa4191 UPSTREAM: binder: Return EFAULT if we fail BINDER_ENABLE_ONEWAY_SPAM_DETECTION 1 month ago
Hang Lu 7727fe8b3e UPSTREAM: binder: tell userspace to dump current backtrace when detected oneway spamming 1 month ago
Hang Lu 2f2a07af3e FROMGIT: binder: fix the missing BR_FROZEN_REPLY in binder_return_strings 1 month ago
Li Li 271121f874 BACKPORT: FROMGIT: binder: fix freeze race 1 month ago
Todd Kjos 590573583c UPSTREAM: binder: add flag to clear buffer on txn complete 1 month ago
Marco Ballesio 74176ce0a8 binder: don't unlock procs while scanning contexts 1 month ago
Marco Ballesio 7e70e9a23d binder: don't log on EINTR 1 month ago
Marco Ballesio 2c1b0f160f binder: freeze multiple contexts 1 month ago
Marco Ballesio 8a8b104f7b binder: use EINTR for interrupted wait for work 1 month ago
Marco Ballesio 743690155b binder: introduce the BINDER_GET_FROZEN_INFO ioctl 1 month ago
Martijn Coenen 8fd3ef60ed FROMGIT: binder: print warnings when detecting oneway spamming. 1 month ago
Marco Ballesio 19f7afa4cb BACKPORT: FROMGIT: binder: BINDER_FREEZE ioctl 1 month ago
Todd Kjos 1d026e9c34 binder: fix test regression due to sender_euid change 1 month ago
Todd Kjos 1c552ecc6d UPSTREAM: binder: use euid from cred instead of using task 1 month ago
Todd Kjos 92c0c18889 UPSTREAM: binder: fix null deref of proc->context 1 month ago
Christian Brauner 7a7a75ea20 UPSTREAM: binder: prevent UAF for binderfs devices II 1 month ago
Christian Brauner 7cccb8249c UPSTREAM: binder: prevent UAF for binderfs devices 1 month ago
Qi Zheng a90291e023 binder: fix memory leak in binder_init() 1 month ago
Carlos Llamas 05a1f8b0d6 FROMLIST: binder: fix UAF of ref->proc caused by race condition 1 month ago
Todd Kjos 1d6f89b18f binder: fix async_free_space accounting for empty parcels 1 month ago
Eric Biggers 7b2a60a611 binder: use wake_up_pollfree() 1 month ago
Todd Kjos 6e89a08149 FROMGIT: binder: fix test regression due to sender_euid change 1 month ago
Todd Kjos 5fb01da268 BACKPORT: binder: use cred instead of task for selinux checks 1 month ago
Todd Kjos 83b287f965 UPSTREAM: binder: use euid from cred instead of using task 1 month ago
Todd Kjos c55a5f6e02 UPSTREAM: binder: fix UAF when releasing todo list 1 month ago
Todd Kjos 61477df063 Revert "binder: Prevent context manager from incrementing ref 0" 1 month ago
Jann Horn a62889a616 binder: Prevent context manager from incrementing ref 0 1 month ago
Tim Zimmermann abddf35c5f android: Checkout binder to 4.14.190 1 month ago
Alessio Balsini 0725bdb48a ANDROID: fs/fuse: Keep FUSE file times consistent with lower file 1 month ago
Alessio Balsini 8eb589e623 UPSTREAM: fuse: fix matching of FUSE_DEV_IOC_CLONE command 1 month ago
Alessio Balsini 109d5af220 ANDROID: fuse/passthrough: API V2 with __u32 open argument 1 month ago
Alessio Balsini 4e569fb8f7 FROMLIST: fuse: Fix crediantials leak in passthrough read_iter 1 month ago
Alessio Balsini ad5d9e93a5 FROMLIST: fuse: Introduce passthrough for mmap 1 month ago
Alessio Balsini 20db329b42 FROMLIST: fuse: Use daemon creds in passthrough mode 1 month ago
Alessio Balsini adad6873a0 BACKPORT: fuse: Handle asynchronous read and write in passthrough 1 month ago
Alessio Balsini 6aa55104ed FROMLIST: fuse: Introduce synchronous read and write for passthrough 1 month ago
Alessio Balsini 5f6c433521 FROMLIST: fuse: Passthrough initialization and release 1 month ago
Alessio Balsini ed07056db6 BACKPORT: fuse: Definitions and ioctl for passthrough 1 month ago
Alessio Balsini 369cd5672f FROMLIST: fuse: 32-bit user space ioctl compat for fuse device 1 month ago
Alessio Balsini c278cd9dd0 BACKPORT: fs: Generic function to convert iocb to rw flags 1 month ago
Alessio Balsini a374c0a7d7 BACKPORT: fs: align IOCB_* flags with RWF_* flags 1 month ago
Jürg Billeter e14789458c fs: add RWF_APPEND 1 month ago
Tim Zimmermann 4a811cba2d syscall: Increase bpf fake uname to 5.4 1 month ago
Toke Høiland-Jørgensen 31177fe9ce BACKPORT: devmap: Allow map lookups from eBPF 1 month ago
Toke Høiland-Jørgensen d9b6520772 BACKPORT: xdp: Add devmap_hash map type for looking up devices by hashed index 1 month ago
Tim Zimmermann b84abcbd37 kernel: bpf: devmap: Create __dev_map_alloc_node 1 month ago
Andrey Ignatov e5a6e1339c BACKPORT: bpf: Post-hooks for sys_bind 1 month ago
Andrey Ignatov b60373c392 BACKPORT: bpf: Hooks for sys_connect 1 month ago
Andrey Ignatov f154004084 BACKPORT: net: Introduce __inet_bind() and __inet6_bind 1 month ago
Andrey Ignatov fd11f2fbdf BACKPORT: bpf: Hooks for sys_bind 1 month ago
Alexei Starovoitov 3cf863a9e9 BACKPORT: bpf: introduce BPF_PROG_QUERY command 1 month ago
Andrey Ignatov a651687fed BACKPORT: bpf: Check attach type at prog load time 1 month ago
Jakub Kicinski ebb5adb52f bpf: offload: rename the ifindex field 1 month ago
Jakub Kicinski 2493eb319e BACKPORT: bpf: offload: add infrastructure for loading programs for a specific netdev 1 month ago
Jakub Kicinski 64ee593167 BACKPORT: net: bpf: rename ndo_xdp to ndo_bpf 1 month ago
Tim Zimmermann 94f042fd44 bpf: Update logging functions to work with BTF 1 month ago
Lorenz Bauer 898802ca29 bpf: btf: fix truncated last_member_type_id in btf_struct_resolve 1 month ago
Yoshiki Komachi 38f08b1ff0 bpf/btf: Fix BTF verification of enum members in struct/union 1 month ago
Alexei Starovoitov 5962ff6375 bpf: fix BTF limits 1 month ago
Martin Lau e32ed82f0c bpf, btf: fix a missing check bug in btf_parse 1 month ago
Wenwen Wang 25eca1f871 bpf: btf: Fix a missing check bug 1 month ago
Martin KaFai Lau f0f84903a1 bpf: btf: Fix end boundary calculation for type section 1 month ago
Daniel Borkmann 82a696e71e bpf: fix bpf_skb_load_bytes_relative pkt length check 1 month ago
Martin KaFai Lau 50907b0de0 bpf: btf: Ensure the member->offset is in the right order 1 month ago
Martin KaFai Lau 7886f9fccf bpf: btf: Clean up BTF_INT_BITS() in uapi btf.h 1 month ago
Okash Khawaja ad9d3d694b bpf: btf: Fix bitfield extraction for big endian 1 month ago
Martin KaFai Lau 9442bdac31 bpf: btf: Ensure t->type == 0 for BTF_KIND_FWD 1 month ago
Martin KaFai Lau 492f4ad7b6 bpf: btf: Check array t->size 1 month ago
Arnd Bergmann 0711e9ae08 bpf: btf: avoid -Wreturn-type warning 1 month ago
Martin KaFai Lau 82a67cb9cf bpf: btf: Avoid variable length array 1 month ago
Martin KaFai Lau eef6a24357 bpf: btf: Remove unused bits from uapi/linux/btf.h 1 month ago
Martin KaFai Lau 254e7662bb bpf: btf: Check array->index_type 1 month ago
Martin KaFai Lau c3f425bf64 bpf: btf: Change how section is supported in btf_header 1 month ago
Martin KaFai Lau bcaeaf9d72 bpf: Fix compiler warning on info.map_ids for 32bit platform 1 month ago
Tim Zimmermann 74718b134c fixup! bpf: Update logging functions to work with BTF 1 month ago
Tim Zimmermann 2d2749817b syscall: Fake uname to 4.19 also for netbpfload 1 month ago
Jenna-they-them 1874eab4a7 defconfig: Bump kernel version 1 month ago
Ruchit b099fcc1e3 Rip out samsung debugging 1 month ago
Satya Durga Srinivasu Prabhala ef9b6fc646 sched: Fix compilation issues for !CONFIG_SCHED_WALT 1 month ago
Alexander Winkowski be42fde927 cpufreq: schedutil: Give explicit hints to compiler 1 month ago
Wei Wang 6ef2d922d5 ANDROID: cpufreq: schedutil: maintain raw cache when next_f is not changed 1 month ago
John Dias 11d86a1a13 cpufreq: schedutil: clear cached_raw_freq when invalidated 1 month ago
Alexander Winkowski 1590077973 sched: fair: Modify capacity margins for atoll 1 month ago
Sultan Alsawaf c9d3b951f4 sched/core: Skip superfluous acquire barrier in ttwu 1 month ago
Sultan Alsawaf 7805254f21 sched/fair: Compile out NUMA code entirely when NUMA is disabled 1 month ago
Mel Gorman 339ebd8242 sched/fair: Do not migrate if the prev_cpu is idle 1 month ago
Mel Gorman b493777280 sched/fair: Only immediately migrate tasks due to interrupts if prev and target CPUs share cache 1 month ago
Mel Gorman 7579519fef sched/fair: Restructure wake_affine*() to return a CPU id 1 month ago
Mel Gorman fdb40fa91f sched/fair: Remove unnecessary parameters from wake_affine_idle() 1 month ago
Uladzislau Rezki c4bc67c4fb sched/fair: Search a task from the tail of the queue 1 month ago
Cheng Jian c4c127f25d sched/idle: Micro-optimize the idle loop 1 month ago
Peter Zijlstra a6e67a828a sched: Fix rq->nr_iowait ordering 1 month ago
Peter Zijlstra bbe84725ce sched: Fix data-race in wakeup 1 month ago
Peter Zijlstra acbcd3feec sched: Fix race against ptrace_freeze_trace() 1 month ago
Peter Zijlstra e2acc00e93 sched: Fix loadavg accounting race 1 month ago
Peter Zijlstra ebecb093ce sched/core: Fix ttwu() race 1 month ago
Peter Zijlstra 426617c1b8 smp: Optimize send_call_function_single_ipi() 1 month ago
Mel Gorman f18ef272b5 sched/core: Offload wakee task activation if it the wakee is descheduling 1 month ago
Peter Zijlstra 3c1749ebaa sched/core: Optimize ttwu() spinning on p->on_cpu 1 month ago
Peter Zijlstra b87926d9b6 sched/core: Fix preempt warning in ttwu 1 month ago
Peter Zijlstra 07b37bad05 sched/core: Optimize try_to_wake_up() for local wakeups 1 month ago
Wei Wang 30e0beaea4 sched: fair: placement optimization for heavy load 1 month ago
Rick Yiu 7fc3a2a624 sched/fair: schedule lower priority tasks from little cores 1 month ago
Rick Yiu 5d2df55801 sched/fair: refine some scheduler changes from AU drop 1 month ago
Wei Wang 8fdd1b05c9 sched: restrict iowait boost to tasks with prefer_idle 1 month ago
Rick Yiu 0660bdf609 sched/fair: Skip cpu if task does not fit in 1 month ago
Rick Yiu 9b3363bd21 sched/fair: let scheduler skip util checking if cpu is idle 1 month ago
Rick Yiu fe0c230be8 sched/fair: check if mid capacity cpu exists 1 month ago
Miguel de Dios d159ef8f5c sched: fair: Move cpu_is_in_target_set definition 1 month ago
Miguel de Dios 36ea103671 sched: fair: avoid little cpus due to sync, prev bias 1 month ago
Diep Quynh 6670e3007e Revert "sched/fair: Don't let tasks slip away from gold to silver cluster" 1 month ago
Ruchit 67eef4f9a2 sched: guard sched_boost behind the proper guards 1 month ago
Sultan Alsawaf 6107b4a61f sched/fair: Don't remove important task migration logic from PELT 1 month ago
Diep Quynh 7cb21ceec5 sched: fair: Account PELT estimated utilization on WALT disabled cpu_util_cum 1 month ago
Diep Quynh c5b66edc8d sched: fair: Cover more WALT balancing cases 1 month ago
Diep Quynh 4596f75d9b sched: fair: Fix load balancing for big tasks 1 month ago
Sultan Alsawaf e357565433 sched/core: Skip rq lock in try_to_wake_up() when WALT is disabled 1 month ago
Miguel de Dios 172fbc11e8 sched: core: Disable double lock/unlock balance in move_queued_task() 1 month ago
Miguel de Dios 4d79d7c89d sched: fair: Disable double lock/unlock balance in detach_task() 1 month ago
Rick Yiu 419ceff95d sched/fair: use actual cpu capacity to calculate boosted util 1 month ago
Kyle Lin 36e41553e7 kernel: sched: account for real time utilization 1 month ago
Quentin Perret c101b49ac3 sched/fair: fix misfit with PELT 1 month ago
Wei Wang 2c0a9423b1 kernel: sched: fix cpu cpu_capacity_orig being capped incorrectly 1 month ago
Connor O'Brien 043820bf65 sched: delete unused & buggy function definitions 1 month ago
Connor O'Brien d25332c515 sched/fair: fix implementation of is_min_capacity_cpu() 1 month ago
Patrick Bellasi 6db3d8a2ff FROMLIST: sched/fair: util_est: fast ramp-up EWMA on utilization increases 1 month ago
Kyle Lin aa29c46a8b kernel: sched: fix build breakage when PELT enabled 1 month ago
Danny Lin 943d5e47d9 sched/rt: Fix compile errors when WALT is disabled 1 month ago
Danny Lin 04c58e559a sched/fair: Fix compile errors when WALT is disabled 1 month ago
Daniel Bristot de Oliveira c80dd68f90 UPSTREAM: sched/rt: Disable RT_RUNTIME_SHARE by default 1 month ago
Wei Wang 394af2b8da Revert "sched/core: fix userspace affining threads incorrectly" 1 month ago
Wei Wang b83fe90b21 Revert "sched/core: Fix use after free issue in is_sched_lib_based_app()" 1 month ago
Wei Wang ab85f68044 Revert "sched: Improve the scheduler" 1 month ago
Alexander Winkowski 00d271c176 Revert "sched: Improve the scheduler" 1 month ago
Alexander Winkowski ffab0feaa3 Revert "sched: fair: Add strict skip buddy support" 1 month ago
Kyle Lin 8b3be19fcf defconfig: Enable PELT 1 month ago
Ruchit 7b10596567 zram: Protect handle_decomp_fail behind a check 1 month ago
Ruchit d3a7365695 defconfig: Build wireguard 1 month ago
Ruchit 53c63d8ed5 net: Import wireguard-linux-compat v1.0.20220627 1 month ago
Ruchit 09f910cf79 cpuidle: drop some samsung debug logging 1 month ago
Alex Winkowski bed118f730 defconfig: Enable SchedTune Assist 1 month ago
Alex Winkowski e1d9941c4e schedtune_assist: Don't allow to change the values 1 month ago
Alex Winkowski f61964be33 schedtune_assist: Disable prefer_idle 1 month ago
Yaroslav Furman c867c66bc9 kernel: stune_assist: clarify logger a bit 1 month ago
Danny Lin 9d52b71250 sched/tune: Refactor SchedTune Assist code 1 month ago
Yaroslav Furman ef08e1224e sched/tune: Introduce SchedTune Assist[v3] 1 month ago
Julian Liu ff66b68b3b qcacld-3.0: Free a bunch of pkts at once 1 month ago
Sultan Alsawaf 563455cdb2 msm: kgsl: Remove POPP 1 month ago
Sultan Alsawaf d1cf107a99 msm: kgsl: Increase worker thread priority 1 month ago
Sultan Alsawaf 793bfb703f mbcache: Speed up cache entry creation 1 month ago
Sultan Alsawaf a24817254a mm: Don't hog the CPU and zone lock in rmqueue_bulk() 1 month ago
Sultan Alsawaf 2155d57b49 defconfig: Enable devfreq boosting 1 month ago
Sultan Alsawaf 51512a6c17 devfreq: Introduce devfreq boost driver 1 month ago
Sultan Alsawaf d868cca582 kernel: Warn when an IRQ's affinity notifier gets overwritten 1 month ago
Sultan Alsawaf 7376b01807 kernel: Only set one CPU in the default IRQ affinity mask 1 month ago
Sultan Alsawaf 7bf71c40a7 kernel: Don't allow IRQ affinity masks to have more than one CPU 1 month ago
Sultan Alsawaf fce4fb5dfd qos: Don't allow userspace to impose restrictions on CPU idle levels 1 month ago
Sultan Alsawaf 835854def3 cpuidle: Mark CPUs idle as late as possible to avoid unneeded IPIs 1 month ago
Sultan Alsawaf 11cb5b4508 cpuidle: Optimize pm_qos notifier callback and IPI semantics 1 month ago
Sultan Alsawaf f343fe6849 arm64: Allow IPI_WAKEUP to be used outside of the ACPI parking protocol 1 month ago
Sultan Alsawaf 72686a0a10 cpuidle: lpm-levels: Allow exit latencies equal to target latencies 1 month ago
Mark Brown 2812c74953 arm64: lib: Consistently enable crc32 extension 1 month ago
Park Ju Hyung 702752a9d0 arm64: crc32: always assume ARM64_HAS_CRC32 1 month ago
Miguel Ojeda 7e77921d97 lib/crc32.c: mark crc32_le_base/__crc32c_le_base aliases as __pure 1 month ago
Ard Biesheuvel 127d8f6303 lib/crc32: make core crc32() routines weak so they can be overridden 1 month ago
Ard Biesheuvel 6d1945a3d2 arm64/lib: improve CRC32 performance for deep pipelines 1 month ago
Ard Biesheuvel 737d48cadc arm64/lib: add accelerated crc32 routines 1 month ago
Julien Thierry 5ba3661e96 arm64: use WFE for long delays 1 month ago
Julien Thierry f307b70fe0 arm_arch_timer: Expose event stream status 1 month ago
Yury Norov 8864fca93f ARM64: enable GENERIC_FIND_FIRST_BIT 1 month ago
Vladimir Murzin aca2c16064 arm64: Kconfig: select HAVE_FUTEX_CMPXCHG 1 month ago
Robin Murphy 19c196d999 arm64: Select ARCH_HAS_FAST_MULTIPLIER 1 month ago
Robin Murphy a37762403f arm64: csum: Optimise IPv6 header checksum 1 month ago
Robin Murphy ad90bfb501 arm64: csum: Fix pathological zero-length calls 1 month ago
Robin Murphy de022bb531 arm64: Implement optimised checksum routine 1 month ago
Sultan Alsawaf f9ba599e61 dma-buf/sync_file: Speed up ioctl by omitting debug names 1 month ago
Danny Lin 4169f06bbb clk: qcom: mdss: Omit support for unused PLLs 1 month ago
Danny Lin 756896769a msm: kgsl: Omit code for GPUs other than Adreno 618 1 month ago
Park Ju Hyung 47be090e01 adreno: disable snapshot and coresight 1 month ago
Sultan Alsawaf bb0aa26e84 drm/msm/sde: Remove debug print from sde_reg_write() 1 month ago
Sultan Alsawaf 300aa8d1bf drm/msm/sde: Stub out debug log macros and compile them out 1 month ago
Sultan Alsawaf f24583cdf1 msm: camera: Stub out the camera_debug_util API and compile it out 1 month ago
Park Ju Hyung 34e98ed0bb blk: disable IO_STAT completely 1 month ago
kdrag0n 245ad6a33a block: disable I/O stats accounting by default 1 month ago
Park Ju Hyung 8c6cd5534a mmc: disable SPI CRC 1 month ago
kdrag0n f844fc585f arm64: debug: disable self-hosted debug by default 1 month ago
Sultan Alsawaf bac354be74 binder: Stub out debug prints by default 1 month ago
Alexander Winkowski 4ee4e6d3a0 qcacld-3.0: Build with -O2 1 month ago
Danny Lin 8b5ff56278 clk: qcom: clk-cpu-osm: Allow overriding CPU frequency tables in DT 1 month ago
Danny Lin 0b6a18b001 sched/energy: Check out to Android 4.14 common kernel 1 month ago
Alexander Winkowski d8d68d5e1b arm64: dts: atoll: Optimised energy model 1 month ago
Steven Rostedt (VMware) 5414aced74 rcu: Speed up calling of RCU tasks callbacks 1 month ago
Sultan Alsawaf a55dc8d0c2 defconfig: Reduce PELT half-life from 32 ms to 16 ms 1 month ago
Alexander Winkowski 1dabec0269 cpufreq: schedutil: Give explicit hints to compiler 1 month ago
Wei Wang 590ddedfb4 ANDROID: cpufreq: schedutil: maintain raw cache when next_f is not changed 1 month ago
John Dias 7ddf7d7a73 cpufreq: schedutil: clear cached_raw_freq when invalidated 1 month ago
Connor O'Brien 8d5e79d38e cpufreq: schedutil: fix check for stale utilization values 1 month ago
Miguel de Dios 408110403a kernel: sched: cpufreq_schedutil: Make iowait boost optional. 1 month ago
Danny Lin 9c355098dc Revert "cpufreq: schedutil: Fix for CR 2040904" 1 month ago
Alexander Winkowski 5c4ad9bafb sched: fair: Modify capacity margins for atoll 1 month ago
Danny Lin 982919991b defconfig: Disable EDAC 1 month ago
Alexander Winkowski 26769bde1e defconfig: Disable tracing 1 month ago
Alexander Winkowski 61b806e349 defconfig: Disable leftover debug features 1 month ago
Alex Winkowski e19439ff23 arm64: Select dead code elimination 1 month ago
Sultan Alsawaf 0ee72980eb init: Kconfig: Don't force DEBUG_KERNEL when EXPERT is enabled 1 month ago
Aaron Lu 28197d207a mm/page_alloc: make sure __rmqueue() etc are always inline 1 month ago
Masahiro Yamada 5c74e7e436 compiler: allow all arches to enable CONFIG_OPTIMIZE_INLINING 1 month ago
Sultan Alsawaf b88134ae1a Makefile: Use -O3 optimization for CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE 1 month ago
Sultan Alsawaf 264d2d3afa kbuild: Disable stack conservation for GCC 1 month ago
Yaroslav Furman 51792db228 arm64: boot: atoll: Fix a few freq inconsistencies 1 month ago
Kees Cook f1b7538b8c pstore/ram: Introduce max_reason and convert dump_oops 1 month ago
Pavel Tatashin 1d983ee79f pstore/platform: Pass max_reason to kmesg dump 1 month ago
Kees Cook 6d7c264070 printk: Collapse shutdown types into a single dump reason 1 month ago
Kees Cook a4fe448147 pstore/ram: Refactor DT size parsing 1 month ago
Kees Cook f419cd5b62 pstore/ram: Adjust module param permissions to reflect reality 1 month ago
Kees Cook 1301388af1 pstore/ram: Avoid needless alloc during header write 1 month ago
Yue Hu d98e9aa143 pstore/ram: Add kmsg hlen zero check to ramoops_pstore_write() 1 month ago
Yue Hu aa96b37ee6 pstore/ram: Move initialization earlier 1 month ago
Yue Hu 107f46e7ff pstore: Avoid writing records with zero size 1 month ago
Vincent Palomares 7fd0398c05 soc:qcom:icnss Async suspend/resume callbacks. 1 month ago
Vincent Palomares 76051b270a msm:sde:rotator Async suspend/resume callbacks. 1 month ago
Vincent Palomares 3db379c6fa scsi:ufs Async suspend/resume callbacks. 1 month ago
Sultan Alsawaf e722722416 PM / freezer: Abort suspend when there's a wakeup while freezing 1 month ago
Sultan Alsawaf a4f380e357 PM / suspend: Clear wakeups before running PM callbacks 1 month ago
Sultan Alsawaf 00c5563416 PM / wakeup: Avoid excessive s2idle wake attempts in pm_system_wakeup() 1 month ago
Sultan Alsawaf 3e2309f680 PM / freezer: Reduce freeze timeout to 1 second for Android 1 month ago
Sultan Alsawaf 14aa621ca5 timekeeping: Keep the tick alive when CPUs cycle out of s2idle 1 month ago
Hugo Lefeuvre fdbaecd904 sched/wait: Use freezable_schedule() when possible 1 month ago
Vaisakh Murali 52153a8bb9 drivers: ipa_v3: Conditionally compile out ipa wakelock code 1 month ago
Juhyung Park a897983c56 zram: switch to 64-bit hash for dedup 1 month ago
Park Ju Hyung 1bf0d81f94 zram: use xxhash instead of jhash in dedup 1 month ago
Juhyung Park 39c5bce3a5 xxhash: inline round() functions 1 month ago
Juhyung Park a8be8d0964 xxhash: replace copy_state() wrappers with macros 1 month ago
Danny Lin aee1939bdc cpuidle: lpm-levels: Remove debug event logging 1 month ago
Sultan Alsawaf 4fb5c0a175 msm: kgsl: Wake GPU upon receiving an ioctl rather than upon touch input 1 month ago
Sultan Alsawaf 0621a9126c msm/sde/rotator: Remove unneeded PM QoS requests 1 month ago
Sultan Alsawaf 7b2deee127 drm/msm/sde: Remove unneeded PM QoS requests 1 month ago
Alexander Winkowski ac9e20d9be scsi: ufs: Set latency requirement to 67 us 1 month ago
Sultan Alsawaf 4d51be0f06 scsi: ufs: Add simple IRQ-affined PM QoS operations 1 month ago
Sultan Alsawaf d6c0f0fad0 scsi: ufs: Scrap Qualcomm's PM QoS implementation 1 month ago
Veerabhadrarao Badiganti a033d08422 scsi: ufs: Don't turn off link for few vendor devices 1 month ago
Ziqi Chen 61abda9be8 scsi: ufs: Fix imbalanced scsi_block_reqs_cnt caused by ufshcd_hold() 1 month ago
Stanley Chu 64fdb641b2 scsi: ufs: fix broken hba->outstanding_tasks 1 month ago
Ziqi Chen f1ebe61638 scsi: ufs: Fix ufshcd_hold dead loop issue if error recovery is handing 1 month ago
Nitin Rawat 9621bf4480 scsi: ufs: Fix back to back clk gate work 1 month ago
Randall Huang b787156b35 scsi/ufs: fix off-by-one error when printing err stat 1 month ago
Ram Prakash Gupta e2943dabf0 scsi: ufshcd: Fix double release of hba 1 month ago
Jaegeuk Kim e608f2ce16 scsi: ufs: fix wrong sequence of lrb_in_use and pm 1 month ago
Leo Liou 2a106b18f9 scsi: ufs: fix buffer overflow when access descriptor 1 month ago
Jaegeuk Kim 105e8ad65b scsi: ufs: disable clocks all the time when autohibern8 supports 1 month ago
Randall Huang 0b82f02c6f scsi: ufs: Avoid race condition between reinit and suspend 1 month ago
Jaegeuk Kim 1fb76c5ad6 scsi: ufs: set autohibern8 timer regardless of hibern8_on_idle 1 month ago
Jaegeuk Kim 8e6e563ebb scsi: ufs: disable hibern8_on_idle 1 month ago
Randall Huang f7eaefa61e scsi: ufs: re-probing hba when UFS initialization failed. 1 month ago
Randall Huang 15dcd84861 Revert "scsi: ufs: update VCCQ and VCCQ2 min value" 1 month ago
Jaegeuk Kim e2813307a9 Revert "Revert "Revert "scsi: ufs-qcom: Add flag to keep track of PHY's power state""" 1 month ago
Jaegeuk Kim d39f0dd0ab scsi: ufs: handle error to avoid kernel panic 1 month ago
Jaegeuk Kim c3f4e25bb4 scsi: ufs: use WQ_HIGHPRI for gating work 1 month ago
Martin Liu 9c099348f9 scsi: ufs: use async operation for hibern8 operation 1 month ago
Jaegeuk Kim c1b8a75cef scsi: ufs: disable HIBERN8_WITH_CLK_GATING 1 month ago
Jaegeuk Kim e3ed758caf scsi: ufs: disallow SECURITY_PROTOCOL_IN without _OUT 1 month ago
Jaegeuk Kim d8afd2289c scsi: ufs: fix missing up_write() 1 month ago
Jaegeuk Kim 85783c76fa scsi: ufs: check memory region correctly 1 month ago
Jaegeuk Kim 50ec79238c scsi: ufs: read length should give full buffer 1 month ago
Randall Huang 81349c0c25 scsi: ufs: Keep UniPro in FASTMODE 1 month ago
Jaegeuk Kim 43e53d11a7 scsi: ufs: fix pm_runtime count in reset flow 1 month ago
Leo Liou e00b772f84 scsi: ufs: Fix ufshcd_probe_hba() return value when fails to reinit 1 month ago
Jaegeuk Kim 55d1538dfb scsi: ufs: atomic update for clkgating_enable 1 month ago
Jaegeuk Kim ba35c89490 scsi: ufs: disable interrupt in clk-gating 1 month ago
Mimi Wu 4c9394ea06 scsi: ufs: disable clock scaling 1 month ago
Sultan Alsawaf 779c30861c sched/fair: Compile out NUMA code entirely when NUMA is disabled 1 month ago
Alexander Winkowski 471f65ad85 sched: fair: Modify capacity margins for atoll 1 month ago
Danny Lin f832d99581 miatoll_defconfig: Disable redundant Spectre variant 2 mitigations 1 month ago
Greg Kroah-Hartman d126f02d62 defconfig: Enable CONFIG_JUMP_LABEL 1 month ago
YH_Lin 3740fce828 defconfig: Enable LZ4 ZRAM 1 month ago
Petri Gynther 7dfde326f6 defconfig: Simplify log buffer allocation 1 month ago
Wei Wang 658161227a defconfig: Remove unused governors and CONFIG_CPU_BOOST 1 month ago
Chiawei Wang a6aca5fd8c defconfig: Disable CONFIG_AUTOCGROUP 1 month ago
Martin Liu f50bcc8d1e defconfig: Disable BALANCE_ANON_FILE_RECLAIM 1 month ago
Saravana Kannan 8c3f6f8b62 defconfig: Disable HW tracing features 1 month ago
Swetha Chikkaboraiah db655464ec defconfig: Disable DEBUG_FS 1 month ago
Rick Yiu d58abf4178 defconfig: Disable CONFIG_SM_DEBUGCC_ATOLL 1 month ago
Danny Lin 63454308f3 arm64: dts: atoll: Remove unused 36 MiB memdump region 1 month ago
Woody Lin 4a27f0f644 arm64: dts: atoll: disable watchdog during suspend 1 month ago
Danny Lin f017b175c4 arm64: dts: atoll: Disable unhandled or broken IRQ monitoring 1 month ago
Will McVicker 104de11048 arm64: dts: qcom: Disable coresight for atoll 1 month ago
Ruchit 6d56fb14a9 defconfig: re-enable SDP 1 month ago
Ruchit 18ce85ee51 defconfig: Disable RKP 1 month ago
jenslody cfaa61c7ca Add KernelSU, config-variables and update the hooks 1 month ago
1. Documentation/admin-guide/ramoops.rst (14)
2. Documentation/devicetree/bindings/arm/msm/qcom,osm.txt (57)
3. KernelSU (1)
4. Makefile (2)
5. arch/arm/include/asm/arch_timer.h (1)
6. arch/arm64/Kconfig (5)
7. arch/arm64/boot/dts/qcom/atoll.dtsi (195)
8. arch/arm64/configs/vendor/pixel_experience-a52q_defconfig (88)
9. arch/arm64/configs/vendor/pixel_experience-a72q_defconfig (87)
10. arch/arm64/include/asm/arch_timer.h (1)
11. arch/arm64/include/asm/checksum.h (10)
12. arch/arm64/kernel/debug-monitors.c (2)
13. arch/arm64/kernel/traps.c (2)
14. arch/arm64/lib/Makefile (8)
15. arch/arm64/lib/crc32.S (98)
16. arch/arm64/lib/csum.c (153)
17. arch/arm64/lib/delay.c (23)
18. arch/powerpc/kernel/nvram_64.c (4)
19. arch/x86/Kconfig (3)
20. arch/x86/Kconfig.debug (14)
21. block/blk.h (4)
22. drivers/Kconfig (1)
23. drivers/Makefile (2)
24. drivers/android/binder.c (669)
25. drivers/android/binder_alloc.c (209)
26. drivers/android/binder_alloc.h (16)
27. drivers/android/binder_alloc_selftest.c (2)
28. drivers/android/binderfs.c (50)
29. drivers/base/power/wakeup.c (4)
30. drivers/block/zram/Kconfig (1)
31. drivers/block/zram/zram_dedup.c (14)
32. drivers/block/zram/zram_dedup.h (8)
33. drivers/block/zram/zram_drv.c (13)
34. drivers/block/zram/zram_drv.h (2)
35. drivers/clk/qcom/clk-cpu-osm.c (47)
36. drivers/clk/qcom/mdss/Makefile (13)
37. drivers/clk/qcom/mdss/mdss-pll.c (2)
38. drivers/clocksource/arm_arch_timer.c (25)
39. drivers/cpufreq/cpufreq.c (32)
40. drivers/cpufreq/freq_table.c (3)
41. drivers/cpuidle/lpm-levels.c (11)
42. drivers/cpuidle/lpm-levels.h (5)
43. drivers/devfreq/Kconfig (33)
44. drivers/devfreq/Makefile (3)
45. drivers/devfreq/devfreq.c (17)
46. drivers/devfreq/devfreq_boost.c (341)
47. drivers/devfreq/devfreq_devbw.c (4)
48. drivers/gpu/drm/msm/msm_smmu.c (16)
49. drivers/gpu/drm/msm/samsung/ss_dsi_panel_debug.c (2)
50. drivers/gpu/drm/msm/sde/sde_hw_util.c (4)
51. drivers/gpu/drm/msm/sde/sde_kms.c (2)
52. drivers/gpu/drm/msm/sde/sde_kms.h (21)
53. drivers/gpu/drm/msm/sde/sde_plane.c (2)
54. drivers/gpu/msm/Makefile (12)
55. drivers/gpu/msm/adreno-gpulist.h (4)
56. drivers/gpu/msm/adreno.c (22)
57. drivers/gpu/msm/adreno.h (12)
58. drivers/gpu/msm/adreno_a6xx.c (8)
59. drivers/gpu/msm/adreno_a6xx.h (2)
60. drivers/gpu/msm/adreno_a6xx_gmu.c (130)
61. drivers/gpu/msm/adreno_a6xx_rgmu.c (19)
62. drivers/gpu/msm/adreno_cp_parser.c (3)
63. drivers/gpu/msm/adreno_cp_parser.h (23)
64. drivers/gpu/msm/kgsl_device.h (11)
65. drivers/gpu/msm/kgsl_drawobj.c (4)
66. drivers/input/input.c (4)
67. drivers/input/sec_cmd.c (4)
68. drivers/input/sec_tsp_dumpkey.c (2)
69. drivers/input/touchscreen/stm/fts5cu56a/fts_sec.c (2)
70. drivers/input/touchscreen/stm/fts5cu56a/fts_ts.c (2)
71. drivers/iommu/arm-smmu.c (4)
72. drivers/kernelsu (1)
73. drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c (2)
74. drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c (18)
75. drivers/mmc/core/core.c (2)
76. drivers/net/ethernet/broadcom/bnxt/bnxt.c (2)
77. drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c (2)
78. drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h (2)
79. drivers/net/ethernet/cavium/thunder/nicvf_main.c (4)
80. drivers/net/ethernet/intel/i40e/i40e_main.c (6)
81. drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (4)
82. drivers/net/ethernet/mellanox/mlx4/en_netdev.c (6)
83. drivers/net/ethernet/mellanox/mlx5/core/en_main.c (4)
84. drivers/net/ethernet/netronome/nfp/nfp_net_common.c (4)
85. drivers/net/ethernet/qlogic/qede/qede.h (2)
86. drivers/net/ethernet/qlogic/qede/qede_filter.c (2)
87. drivers/net/ethernet/qlogic/qede/qede_main.c (4)
88. drivers/net/tun.c (4)
89. drivers/net/virtio_net.c (4)
90. drivers/net/wireless/qualcomm/wcn39xx/qcacld-3.0/Kbuild (4)
91. drivers/net/wireless/qualcomm/wcn39xx/qcacld-3.0/core/dp/htt/htt.c (45)
92. drivers/net/wireless/qualcomm/wcn39xx/qcacld-3.0/core/dp/htt/htt_internal.h (2)
93. drivers/net/wireless/qualcomm/wcn39xx/qcacld-3.0/core/dp/htt/htt_types.h (3)
94. drivers/phy/phy-core.c (8)
95. drivers/pinctrl/qcom/pinctrl-msm.c (2)
96. drivers/platform/chrome/chromeos_pstore.c (2)
97. drivers/platform/msm/ipa/ipa_v3/ipa.c (10)
98. drivers/platform/msm/ipa/ipa_v3/ipa_dp.c (10)
99. drivers/platform/msm/ipa/ipa_v3/ipa_i.h (4)
100. drivers/power/reset/msm-poweroff.c (8)
Some files were not shown because too many files have changed in this diff.

@ -32,11 +32,17 @@ memory to be mapped strongly ordered, and atomic operations on strongly ordered
memory are implementation defined, and won't work on many ARMs such as omaps.
The memory area is divided into ``record_size`` chunks (also rounded down to
power of two) and each oops/panic writes a ``record_size`` chunk of
power of two) and each kmesg dump writes a ``record_size`` chunk of
information.
Dumping both oopses and panics can be done by setting 1 in the ``dump_oops``
variable while setting 0 in that variable dumps only the panics.
Limiting which kinds of kmsg dumps are stored can be controlled via
the ``max_reason`` value, as defined in include/linux/kmsg_dump.h's
``enum kmsg_dump_reason``. For example, to store both Oopses and Panics,
``max_reason`` should be set to 2 (KMSG_DUMP_OOPS), to store only Panics
``max_reason`` should be set to 1 (KMSG_DUMP_PANIC). Setting this to 0
(KMSG_DUMP_UNDEF), means the reason filtering will be controlled by the
``printk.always_kmsg_dump`` boot param: if unset, it'll be KMSG_DUMP_OOPS,
otherwise KMSG_DUMP_MAX.
The module uses a counter to record multiple dumps but the counter gets reset
on restart (i.e. new dumps after the restart will overwrite old ones).
@ -90,7 +96,7 @@ Setting the ramoops parameters can be done in several different manners:
.mem_address = <...>,
.mem_type = <...>,
.record_size = <...>,
.dump_oops = <...>,
.max_reason = <...>,
.ecc = <...>,
};
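For orientation on the dump_oops to max_reason conversion above, here is a minimal C sketch of the platform-device style of configuration with the new field filled in, assuming the post-series struct ramoops_platform_data layout; the address and sizes are illustrative placeholders, not values taken from this tree.

#include <linux/kmsg_dump.h>
#include <linux/platform_device.h>
#include <linux/pstore_ram.h>
#include <linux/sizes.h>

/* Illustrative carve-out; a real board reserves its own RAM region. */
static struct ramoops_platform_data ramoops_data = {
	.mem_size	= SZ_1M,
	.mem_address	= 0x8000000,
	.record_size	= SZ_16K,
	/* KMSG_DUMP_OOPS (2): keep oopses and panics, like dump_oops = 1 */
	.max_reason	= KMSG_DUMP_OOPS,
};

static struct platform_device ramoops_dev = {
	.name	= "ramoops",
	.dev	= {
		.platform_data = &ramoops_data,
	},
};

Registering this with platform_device_register(&ramoops_dev) from board init wires pstore to the region; per the max_reason patch in this series, the same filter should also be settable at boot through the ramoops.max_reason module parameter.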

@ -35,6 +35,13 @@ Properties:
Definition: List of phandles to devices that the OPP tables with the L3
frequency and voltage mappings are loaded for.
- qcom,cpufreq-table-XX
Usage: optional
Value type: <u32>
Definition: List of frequencies (in kHz) to expose in CPU XX's cpufreq table.
All frequencies present in hardware will be exposed if this list
is not present.
Example:
clock_cpucc: qcom,cpucc {
compatible = "qcom,clk-cpu-osm";
@ -48,4 +55,54 @@ Example:
l3-devs = <&phandle0 &phandle1 &phandle2>;
#clock-cells = <1>;
qcom,cpufreq-table-0 =
< 300000>,
< 403200>,
< 480000>,
< 576000>,
< 672000>,
< 768000>,
< 864000>,
< 979200>,
<1075200>,
<1171200>,
<1267200>;
qcom,cpufreq-table-4 =
< 576000>,
< 672000>,
< 768000>,
< 864000>,
< 960000>,
<1056000>,
<1152000>,
<1248000>,
<1344000>,
<1420800>,
<1497600>,
<1593600>,
<1689600>,
<1785600>,
<1862400>,
<1939200>,
<2016000>;
qcom,cpufreq-table-7 =
< 691200>,
< 768000>,
< 864000>,
< 940800>,
<1017600>,
<1113600>,
<1190400>,
<1286400>,
<1363200>,
<1459200>,
<1536000>,
<1632000>,
<1728000>,
<1824000>,
<1900800>,
<1977600>,
<2054400>;
};

@ -0,0 +1 @@
Subproject commit 8685fa1f603a279f70f19c4e28e7f3c4b86f76f2

@ -720,7 +720,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
else
KBUILD_CFLAGS += -O2
KBUILD_CFLAGS += -O3
endif
# Tell gcc to never replace conditional load with a non-conditional one

@ -107,6 +107,7 @@ static inline u32 arch_timer_get_cntkctl(void)
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
isb();
}
#endif

@ -12,6 +12,7 @@ config ARM64
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
@ -61,6 +62,7 @@ config ARM64
select CLONE_BACKWARDS
select COMMON_CLK if !ARCH_QCOM
select CPU_PM if (SUSPEND || CPU_IDLE)
select CRC32
select DCACHE_WORD_ACCESS
select EDAC_SUPPORT
select FRAME_POINTER
@ -71,6 +73,7 @@ config ARM64
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_FIND_FIRST_BIT
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
@ -124,6 +127,7 @@ config ARM64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_RCU_TABLE_FREE
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
@ -171,6 +175,7 @@ config ARM64
select ARCH_INLINE_WRITE_UNLOCK_BH
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select LD_DEAD_CODE_DATA_ELIMINATION
help
ARM 64-bit (AArch64) Linux support.

@ -64,7 +64,7 @@
compatible = "arm,armv8";
reg = <0x0 0x0>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
capacity-dmips-mhz = <491>;
sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
next-level-cache = <&L2_0>;
qcom,lmh-dcvs = <&lmh_dcvs0>;
@ -100,7 +100,7 @@
compatible = "arm,armv8";
reg = <0x0 0x100>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
capacity-dmips-mhz = <491>;
sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
next-level-cache = <&L2_100>;
qcom,lmh-dcvs = <&lmh_dcvs0>;
@ -132,7 +132,7 @@
compatible = "arm,armv8";
reg = <0x0 0x200>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
capacity-dmips-mhz = <491>;
sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
next-level-cache = <&L2_200>;
qcom,lmh-dcvs = <&lmh_dcvs0>;
@ -163,7 +163,7 @@
compatible = "arm,armv8";
reg = <0x0 0x300>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
capacity-dmips-mhz = <491>;
sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
next-level-cache = <&L2_300>;
qcom,lmh-dcvs = <&lmh_dcvs0>;
@ -194,7 +194,7 @@
compatible = "arm,armv8";
reg = <0x0 0x400>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
capacity-dmips-mhz = <491>;
sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
next-level-cache = <&L2_400>;
qcom,lmh-dcvs = <&lmh_dcvs0>;
@ -225,7 +225,7 @@
compatible = "arm,armv8";
reg = <0x0 0x500>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
capacity-dmips-mhz = <491>;
sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
next-level-cache = <&L2_500>;
qcom,lmh-dcvs = <&lmh_dcvs0>;
@ -256,7 +256,7 @@
compatible = "arm,armv8";
reg = <0x0 0x600>;
enable-method = "psci";
capacity-dmips-mhz = <1740>;
capacity-dmips-mhz = <1024>;
sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
next-level-cache = <&L2_600>;
qcom,lmh-dcvs = <&lmh_dcvs1>;
@ -296,7 +296,7 @@
compatible = "arm,armv8";
reg = <0x0 0x700>;
enable-method = "psci";
capacity-dmips-mhz = <1740>;
capacity-dmips-mhz = <1024>;
sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
next-level-cache = <&L2_700>;
qcom,lmh-dcvs = <&lmh_dcvs1>;
@ -376,16 +376,11 @@
CPU_COST_0: core-cost0 {
busy-cost-data = <
300000 10
576000 18
768000 23
1017600 36
1248000 52
1324800 67
1516800 76
1612800 92
1708800 113
1804800 119
280 175
321 202
341 224
361 240
381 278
>;
idle-cost-data = <
16 12 8 6
@ -394,20 +389,16 @@
CPU_COST_1: core-cost1 {
busy-cost-data = <
652800 242
825600 293
979200 424
1113600 470
1267200 676
1555200 973
1708800 1060
1843200 1298
1900800 1362
1996800 1562
2112000 1801
2208000 2000
2323200 2341
2400000 2568
434 170
493 196
562 226
690 309
759 357
818 403
844 420
887 466
981 567
1024 622
>;
idle-cost-data = <
100 80 60 40
@ -416,16 +407,11 @@
CLUSTER_COST_0: cluster-cost0 {
busy-cost-data = <
300000 5
576000 5
768000 5
1017600 7
1248000 8
1324800 10
1516800 10
1612800 12
1708800 14
1804800 14
280 10
321 10
341 12
361 14
381 14
>;
idle-cost-data = <
5 4 3 2 1
@ -434,20 +420,16 @@
CLUSTER_COST_1: cluster-cost1 {
busy-cost-data = <
652800 21
825600 21
979200 25
1113600 26
1267200 33
1555200 41
1708800 43
1843200 49
1900800 50
1996800 54
2112000 60
2208000 61
2323200 62
2400000 63
434 25
493 26
562 33
690 41
759 43
818 49
844 50
887 54
981 61
1024 62
>;
idle-cost-data = <
5 4 3 2 1
@ -616,13 +598,6 @@
size = <0x0 0x800000>;
};
dump_mem: mem_dump_region {
compatible = "shared-dma-pool";
alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
reusable;
size = <0 0x2400000>;
};
/* global autoconfigured region for contiguous allocations */
linux,cma {
compatible = "shared-dma-pool";
@ -635,7 +610,7 @@
};
chosen {
bootargs = "rcupdate.rcu_expedited=1 rcu_nocbs=0-7";
bootargs = "rcupdate.rcu_expedited=1 rcu_nocbs=0-7 noirqdebug";
};
soc: soc { };
@ -779,6 +754,7 @@
clock-names = "core_clk";
qcom,coresight-jtagmm-cpu = <&CPU0>;
status = "disabled";
};
jtag_mm1: jtagmm@7140000 {
@ -790,6 +766,7 @@
clock-names = "core_clk";
qcom,coresight-jtagmm-cpu = <&CPU1>;
status = "disabled";
};
jtag_mm2: jtagmm@7240000 {
@ -801,6 +778,7 @@
clock-names = "core_clk";
qcom,coresight-jtagmm-cpu = <&CPU2>;
status = "disabled";
};
jtag_mm3: jtagmm@7340000 {
@ -812,6 +790,7 @@
clock-names = "core_clk";
qcom,coresight-jtagmm-cpu = <&CPU3>;
status = "disabled";
};
jtag_mm4: jtagmm@7440000 {
@ -823,6 +802,7 @@
clock-names = "core_clk";
qcom,coresight-jtagmm-cpu = <&CPU4>;
status = "disabled";
};
jtag_mm5: jtagmm@7540000 {
@ -834,6 +814,7 @@
clock-names = "core_clk";
qcom,coresight-jtagmm-cpu = <&CPU5>;
status = "disabled";
};
jtag_mm6: jtagmm@7640000 {
@ -845,6 +826,7 @@
clock-names = "core_clk";
qcom,coresight-jtagmm-cpu = <&CPU6>;
status = "disabled";
};
jtag_mm7: jtagmm@7740000 {
@ -856,6 +838,7 @@
clock-names = "core_clk";
qcom,coresight-jtagmm-cpu = <&CPU7>;
status = "disabled";
};
msm_imem: qcom,msm-imem@146aa000 {
@ -1392,7 +1375,6 @@
qcom,bark-time = <11000>;
qcom,pet-time = <9360>;
qcom,ipi-ping;
qcom,wakeup-enable;
qcom,scandump-sizes = <0x10100 0x10100 0x10100 0x10100
0x10100 0x10100 0x25900 0x25900>;
};
@ -1921,61 +1903,6 @@
};
};
mem_dump {
compatible = "qcom,mem-dump";
memory-region = <&dump_mem>;
rpmh {
qcom,dump-size = <0x2000000>;
qcom,dump-id = <0xec>;
};
rpm_sw {
qcom,dump-size = <0x28000>;
qcom,dump-id = <0xea>;
};
pmic {
qcom,dump-size = <0x80000>;
qcom,dump-id = <0xe4>;
};
fcm {
qcom,dump-size = <0x8400>;
qcom,dump-id = <0xee>;
};
etf_swao {
qcom,dump-size = <0x10000>;
qcom,dump-id = <0xf1>;
};
etr_reg {
qcom,dump-size = <0x1000>;
qcom,dump-id = <0x100>;
};
etfswao_reg {
qcom,dump-size = <0x1000>;
qcom,dump-id = <0x102>;
};
misc_data {
qcom,dump-size = <0x1000>;
qcom,dump-id = <0xe8>;
};
etf_lpass {
qcom,dump-size = <0x4000>;
qcom,dump-id = <0xf4>;
};
etflpass_reg {
qcom,dump-size = <0x1000>;
qcom,dump-id = <0x104>;
};
};
clocks {
sleep_clk: sleep-clk {
compatible = "fixed-clock";
@ -2073,6 +2000,25 @@
l3-devs = <&cpu0_cpu_l3_lat &cpu6_cpu_l3_lat
&cdsp_cdsp_l3_lat>;
#clock-cells = <1>;
qcom,cpufreq-table-0 =
<1324800>,
<1516800>,
<1612800>,
<1708800>,
<1804800>;
qcom,cpufreq-table-6 =
<979200>,
<1113600>,
<1267200>,
<1555200>,
<1708800>,
<1843200>,
<1900800>,
<1996800>,
<2208000>,
<2323200>;
};
cpucc_debug: syscon@182a0018 {
@ -3185,7 +3131,7 @@
< 1555200 940800000 >,
< 1708800 1209600000 >,
< 1900800 1401000000 >,
< 2400000 1459000000 >;
< 2323200 1459000000 >;
};
cdsp_cdsp_l3_lat: qcom,cdsp-cdsp-l3-lat {
@ -3234,7 +3180,7 @@
< 1113600 MHZ_TO_MBPS(466, 16) >,
< 1267200 MHZ_TO_MBPS(600, 16) >,
< 1708800 MHZ_TO_MBPS(806, 16) >,
< 2400000 MHZ_TO_MBPS(933, 16) >;
< 2323200 MHZ_TO_MBPS(933, 16) >;
};
cpu0_llcc_ddr_lat: qcom,cpu0-llcc-ddr-lat {
@ -3278,7 +3224,7 @@
< 1267200 MHZ_TO_MBPS(1017, 4) >,
< 1708800 MHZ_TO_MBPS(1555, 4) >,
< 2208000 MHZ_TO_MBPS(1804, 4) >,
< 2400000 MHZ_TO_MBPS(2133, 4) >;
< 2323200 MHZ_TO_MBPS(2133, 4) >;
};
cpu0_cpu_ddr_latfloor: qcom,cpu0-cpu-ddr-latfloor {
@ -3320,7 +3266,7 @@
< 1708800 MHZ_TO_MBPS(1017, 4) >,
< 1900800 MHZ_TO_MBPS(1555, 4) >,
< 2208000 MHZ_TO_MBPS(1804, 4) >,
< 2400000 MHZ_TO_MBPS(2133, 4) >;
< 2323200 MHZ_TO_MBPS(2133, 4) >;
};
suspendable_ddr_bw_opp_table: suspendable-ddr-bw-opp-table {
@ -4012,7 +3958,6 @@
#include "pm6150l.dtsi"
#include "atoll-pinctrl.dtsi"
#include "atoll-pm.dtsi"
#include "atoll-coresight.dtsi"
#include "atoll-regulator.dtsi"
#include "atoll-usb.dtsi"
#include "atoll-vidc.dtsi"

@ -48,7 +48,7 @@ CONFIG_THREAD_INFO_IN_TASK=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_CROSS_COMPILE=""
# CONFIG_COMPILE_TEST is not set
CONFIG_LOCALVERSION="-AscendiaKernel-v1.0-lite"
CONFIG_LOCALVERSION="-AscendiaKernel-v3.1-lite-ksu"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="(none)"
CONFIG_SWAP=y
@ -100,7 +100,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_TICK_CPU_ACCOUNTING=y
# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
CONFIG_SCHED_SEC_TASK_BOOST=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_TASKSTATS=y
@ -132,21 +131,21 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_CONSOLE_FLUSH_ON_HOTPLUG is not set
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_LOG_BUF_SHIFT=20
CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
CONFIG_GENERIC_SCHED_CLOCK=y
#
# FAIR Scheuler tunables
#
CONFIG_PELT_UTIL_HALFLIFE_32=y
# CONFIG_PELT_UTIL_HALFLIFE_16 is not set
CONFIG_PELT_UTIL_HALFLIFE_16=y
# CONFIG_PELT_UTIL_HALFLIFE_8 is not set
CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
# CONFIG_MEMCG is not set
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
# CONFIG_DEBUG_BLK_CGROUP is not set
CONFIG_CGROUP_WRITEBACK=y
CONFIG_CGROUP_SCHED=y
@ -171,8 +170,8 @@ CONFIG_UTS_NS=y
# CONFIG_PID_NS is not set
CONFIG_NET_NS=y
CONFIG_SCHED_CASS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_STUNE_ASSIST=y
CONFIG_DEFAULT_USE_ENERGY_AWARE=y
# CONFIG_SYSFS_DEPRECATED is not set
# CONFIG_RELAY is not set
@ -245,7 +244,7 @@ CONFIG_SYSTEM_DATA_VERIFICATION=y
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
# CONFIG_KPROBES is not set
# CONFIG_JUMP_LABEL is not set
CONFIG_JUMP_LABEL=y
CONFIG_UPROBES=y
# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
@ -604,7 +603,6 @@ CONFIG_ZSMALLOC=y
CONFIG_PGTABLE_MAPPING=y
# CONFIG_ZSMALLOC_STAT is not set
CONFIG_GENERIC_EARLY_IOREMAP=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_KSWAPD_CPU=0x3F
# CONFIG_IDLE_PAGE_TRACKING is not set
CONFIG_FRAME_VECTOR=y
@ -640,7 +638,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11
# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
CONFIG_HARDEN_BRANCH_PREDICTOR=y
# CONFIG_PRINT_VMEMLAYOUT is not set
CONFIG_ARM64_SSBD=y
# CONFIG_HARDEN_BRANCH_PREDICTOR is not set
CONFIG_ARM64_TAGGED_ADDR_ABI=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
@ -763,9 +761,6 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set
@ -806,6 +801,7 @@ CONFIG_NET_KEY=y
# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_KNOX_NCM=y
CONFIG_INET=y
CONFIG_WIREGUARD=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
# CONFIG_IP_FIB_TRIE_STATS is not set
@ -2132,7 +2128,6 @@ CONFIG_INPUT_LEDS=y
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_EVBUG is not set
# CONFIG_INPUT_KEYRESET is not set
CONFIG_SEC_DEBUG_TSP_LOG=y
CONFIG_INPUT_TOUCHSCREEN_SEC_CMD=y
CONFIG_INPUT_TOUCHSCREEN_TCLMV2=y
CONFIG_INPUT_SEC_SECURE_TOUCH=y
@ -3164,7 +3159,6 @@ CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_DEV=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_V4L2=y
CONFIG_VIDEO_ADV_DEBUG=y
CONFIG_VIDEO_FIXED_MINOR_RANGES=y
CONFIG_V4L2_MEM2MEM_DEV=y
CONFIG_VIDEOBUF2_CORE=y
@ -3286,7 +3280,6 @@ CONFIG_SPECTRA_CAMERA=y
CONFIG_MSM_VIDC_V4L2=y
CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
# CONFIG_MSM_NPU is not set
CONFIG_MSM_NPU_V2=y
CONFIG_DVB_MPQ=y
@ -3522,7 +3515,6 @@ CONFIG_PANEL_S6E3FC3_AMS646YD04_FHD=y
# CONFIG_PANEL_EA8076GA_AMS638VL01_FHD is not set
# CONFIG_PANEL_HX83102_TV104WUM_WUXGA is not set
CONFIG_DRM_MSM=y
CONFIG_DRM_MSM_REGISTER_LOGGING=y
# CONFIG_DRM_MSM_HDMI_HDCP is not set
# CONFIG_DRM_MSM_HDMI is not set
# CONFIG_DRM_MSM_DSI is not set
@ -3533,7 +3525,6 @@ CONFIG_DRM_MSM_DSI_STAGING=y
CONFIG_DRM_SDE_WB=y
# CONFIG_DRM_SDE_SHD is not set
# CONFIG_DRM_SDE_SHP is not set
CONFIG_DRM_SDE_EVTLOG_DEBUG=y
CONFIG_DRM_SDE_RSC=y
# CONFIG_DRM_MSM_LEASE is not set
CONFIG_DRM_PANEL=y
@ -4451,19 +4442,6 @@ CONFIG_SWITCH=y
# CONFIG_SWITCH_GPIO is not set
# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
CONFIG_EDAC_SUPPORT=y
CONFIG_EDAC=y
CONFIG_EDAC_LEGACY_SYSFS=y
# CONFIG_EDAC_DEBUG is not set
# CONFIG_EDAC_THUNDERX is not set
CONFIG_EDAC_KRYO_ARM64=y
# CONFIG_EDAC_KRYO_ARM64_POLL is not set
# CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE is not set
CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y
# CONFIG_EDAC_GIC is not set
# CONFIG_EDAC_XGENE is not set
# CONFIG_EDAC_CORTEX_ARM64 is not set
# CONFIG_EDAC_QCOM_LLCC is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_HCTOSYS=y
@ -4960,7 +4938,6 @@ CONFIG_SM_CAMCC_ATOLL=y
CONFIG_SM_VIDEOCC_ATOLL=y
CONFIG_SM_DISPCC_ATOLL=y
CONFIG_SM_NPUCC_ATOLL=y
CONFIG_SM_DEBUGCC_ATOLL=y
# CONFIG_SDM_GCC_429W is not set
# CONFIG_CLOCK_CPU_SDM is not set
# CONFIG_COMMON_CLK_MSM is not set
@ -5018,9 +4995,6 @@ CONFIG_ARM_SMMU=y
# CONFIG_IOMMU_TLBSYNC_DEBUG is not set
# CONFIG_ARM_SMMU_TESTBUS_DUMP is not set
CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_IOMMU_TESTS=y
# CONFIG_QCOM_IOMMU is not set
#
@ -5136,7 +5110,6 @@ CONFIG_QSEE_IPC_IRQ=y
CONFIG_QCOM_GLINK=y
CONFIG_QCOM_GLINK_PKT=y
# CONFIG_MSM_JTAGV8 is not set
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_MSM_CDSP_LOADER=y
CONFIG_QCOM_SMCINVOKE=y
CONFIG_MSM_EVENT_TIMER=y
@ -5202,6 +5175,9 @@ CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_DEVFREQ_GOV_CDSPL3=y
# CONFIG_PM_DEVFREQ_EVENT is not set
CONFIG_EXTCON=y
CONFIG_DEVFREQ_BOOST=y
CONFIG_DEVFREQ_INPUT_BOOST_DURATION_MS=58
CONFIG_DEVFREQ_CPU_LLCC_DDR_BW_BOOST_FREQ=3879
#
# Extcon Device Drivers
@ -5604,7 +5580,6 @@ CONFIG_PHY_QCOM_UFS=y
CONFIG_ARM_PMU=y
CONFIG_ARM_DSU_PMU=y
CONFIG_QCOM_LLCC_PMU=y
CONFIG_RAS=y
#
# Android
@ -5660,7 +5635,6 @@ CONFIG_SAMSUNG_PRODUCT_SHIP=y
# CONFIG_SAMSUNG_USER_TRIAL is not set
CONFIG_SEC_BSP=y
CONFIG_DRV_SAMSUNG=y
CONFIG_SEC_DEBUG=y
CONFIG_SEC_PARAM=y
# CONFIG_SEC_MPARAM is not set
CONFIG_SEC_PARAM_SIZE=0xA00000
@ -5683,38 +5657,28 @@ CONFIG_SEC_LOG_LAST_KMSG=y
CONFIG_SEC_LOG_STORE_LAST_KMSG=y
CONFIG_SEC_LOG_STORE_LPM_KMSG=y
# CONFIG_SEC_STORE_POWER_ONOFF_HISTORY is not set
CONFIG_SEC_DEBUG_SCHED_LOG=y
# CONFIG_SEC_DEBUG_SCHED_LOG_PER_CPU is not set
# CONFIG_SEC_DEBUG_SCHED_LOG_IRQ_V2 is not set
# CONFIG_SEC_DEBUG_MSG_LOG is not set
# CONFIG_SEC_DEBUG_DCVS_LOG is not set
# CONFIG_SEC_DEBUG_POWER_LOG is not set
# CONFIG_SEC_DEBUG_FUELGAUGE_LOG is not set
CONFIG_SEC_DEBUG_SUMMARY=y
# CONFIG_SEC_DEBUG_SUMMARY_DRIVER is not set
# CONFIG_SEC_DEBUG_LOW_LOG is not set
# CONFIG_SEC_DEBUG_FORCE_ERROR is not set
CONFIG_SEC_USER_RESET_DEBUG=y
# CONFIG_SEC_USER_RESET_DEBUG_TEST is not set
CONFIG_SEC_PERIPHERAL_SECURE_CHK=y
CONFIG_SEC_SSR_DEBUG_LEVEL_CHK=y
# CONFIG_SEC_DEBUG_PWDT is not set
CONFIG_SEC_DEBUG_DUMP_TASK_STACK=y
# CONFIG_SEC_DEBUG_MDM_FILE_INFO is not set
# CONFIG_SEC_DEBUG_DOUBLE_FREE is not set
# CONFIG_SEC_FILE_LEAK_DEBUG is not set
CONFIG_KERNEL_MODE_NEON_DEBUG=y
# CONFIG_SEC_SLUB_DEBUG is not set
# CONFIG_SEC_CP_SEPARATE_DEBUG is not set
CONFIG_SEC_DEBUG_MODULE_INFO=y
CONFIG_SEC_BOOTSTAT=y
# CONFIG_SEC_NOEYEINFO is not set
CONFIG_SEC_QPNP_PON_SPARE_BITS=7
# CONFIG_ARGOS is not set
CONFIG_SEC_MISC=y
CONFIG_SEC_SMEM=y
CONFIG_SEC_SMEM_VENDOR1_VERSION=5
CONFIG_SEC_DEBUG_APPS_CLK_LOGGING=y
#
# Samsung Vbus Notifier drivers
@ -6153,6 +6117,7 @@ CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_DEBUG_MODULE_LOAD_INFO is not set
# CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS is not set
CONFIG_KSU=y
#
# Compile-time checks and compiler options
@ -6280,7 +6245,6 @@ CONFIG_TRACE_CLOCK=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_IPC_LOGGING=y
CONFIG_QCOM_RTB=y
CONFIG_QCOM_RTB_SEPARATE_CPUS=y
CONFIG_TRACING=y
@ -6361,30 +6325,6 @@ CONFIG_DEBUG_ALIGN_RODATA=y
# PowerManagement Feature
#
CONFIG_SEC_PM=y
CONFIG_SEC_PM_DEBUG=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINKS_AND_SINKS=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
# CONFIG_CORESIGHT_CATU is not set
# CONFIG_CORESIGHT_SINK_TPIU is not set
# CONFIG_CORESIGHT_SINK_ETBV10 is not set
# CONFIG_CORESIGHT_SOURCE_ETM4X is not set
CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
# CONFIG_CORESIGHT_DBGUI is not set
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_CTI=y
CONFIG_CORESIGHT_OST=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
# CONFIG_CORESIGHT_TPDM_DEFAULT_ENABLE is not set
# CONFIG_CORESIGHT_QPDI is not set
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_CSR=y
# CONFIG_CORESIGHT_TGU is not set
CONFIG_CORESIGHT_EVENT=y
#
# Security options
@ -6584,10 +6524,10 @@ CONFIG_CRYPTO_TWOFISH_COMMON=y
# Compression
#
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_LZO is not set
# CONFIG_CRYPTO_842 is not set
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_LZ4HC=y
# CONFIG_CRYPTO_LZ4HC is not set
# CONFIG_CRYPTO_ZSTD is not set
#

@ -48,7 +48,7 @@ CONFIG_THREAD_INFO_IN_TASK=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_CROSS_COMPILE=""
# CONFIG_COMPILE_TEST is not set
CONFIG_LOCALVERSION="-AscendiaKernel-v1.0-lite"
CONFIG_LOCALVERSION="-AscendiaKernel-v3.1-lite"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="(none)"
CONFIG_SWAP=y
@ -100,7 +100,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_TICK_CPU_ACCOUNTING=y
# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
CONFIG_SCHED_SEC_TASK_BOOST=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_TASKSTATS=y
@ -132,21 +131,21 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_CONSOLE_FLUSH_ON_HOTPLUG is not set
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_LOG_BUF_SHIFT=20
CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
CONFIG_GENERIC_SCHED_CLOCK=y
#
# FAIR Scheuler tunables
#
CONFIG_PELT_UTIL_HALFLIFE_32=y
# CONFIG_PELT_UTIL_HALFLIFE_16 is not set
CONFIG_PELT_UTIL_HALFLIFE_16=y
# CONFIG_PELT_UTIL_HALFLIFE_8 is not set
CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
# CONFIG_MEMCG is not set
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
# CONFIG_DEBUG_BLK_CGROUP is not set
CONFIG_CGROUP_WRITEBACK=y
CONFIG_CGROUP_SCHED=y
@ -171,8 +170,8 @@ CONFIG_UTS_NS=y
# CONFIG_PID_NS is not set
CONFIG_NET_NS=y
CONFIG_SCHED_CASS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_STUNE_ASSIST=y
CONFIG_DEFAULT_USE_ENERGY_AWARE=y
# CONFIG_SYSFS_DEPRECATED is not set
# CONFIG_RELAY is not set
@ -245,7 +244,7 @@ CONFIG_SYSTEM_DATA_VERIFICATION=y
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
# CONFIG_KPROBES is not set
# CONFIG_JUMP_LABEL is not set
CONFIG_JUMP_LABEL=y
CONFIG_UPROBES=y
# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
@ -601,7 +600,6 @@ CONFIG_ZSMALLOC=y
CONFIG_PGTABLE_MAPPING=y
# CONFIG_ZSMALLOC_STAT is not set
CONFIG_GENERIC_EARLY_IOREMAP=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_KSWAPD_CPU=0x3F
# CONFIG_IDLE_PAGE_TRACKING is not set
CONFIG_FRAME_VECTOR=y
@ -637,7 +635,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11
# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
CONFIG_HARDEN_BRANCH_PREDICTOR=y
# CONFIG_PRINT_VMEMLAYOUT is not set
CONFIG_ARM64_SSBD=y
# CONFIG_HARDEN_BRANCH_PREDICTOR is not set
CONFIG_ARM64_TAGGED_ADDR_ABI=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
@ -760,9 +758,6 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set
@ -803,6 +798,7 @@ CONFIG_NET_KEY=y
# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_KNOX_NCM=y
CONFIG_INET=y
CONFIG_WIREGUARD=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
# CONFIG_IP_FIB_TRIE_STATS is not set
@ -2129,7 +2125,6 @@ CONFIG_INPUT_LEDS=y
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_EVBUG is not set
# CONFIG_INPUT_KEYRESET is not set
CONFIG_SEC_DEBUG_TSP_LOG=y
CONFIG_INPUT_TOUCHSCREEN_SEC_CMD=y
CONFIG_INPUT_TOUCHSCREEN_TCLMV2=y
CONFIG_INPUT_SEC_SECURE_TOUCH=y
@ -3161,7 +3156,6 @@ CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_DEV=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_V4L2=y
CONFIG_VIDEO_ADV_DEBUG=y
CONFIG_VIDEO_FIXED_MINOR_RANGES=y
CONFIG_V4L2_MEM2MEM_DEV=y
CONFIG_VIDEOBUF2_CORE=y
@ -3283,7 +3277,6 @@ CONFIG_SPECTRA_CAMERA=y
CONFIG_MSM_VIDC_V4L2=y
CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
# CONFIG_MSM_NPU is not set
CONFIG_MSM_NPU_V2=y
CONFIG_DVB_MPQ=y
@ -3519,7 +3512,6 @@ CONFIG_PANEL_S6E3FC3_AMS667YM01_FHD=y
# CONFIG_PANEL_EA8076GA_AMS638VL01_FHD is not set
# CONFIG_PANEL_HX83102_TV104WUM_WUXGA is not set
CONFIG_DRM_MSM=y
CONFIG_DRM_MSM_REGISTER_LOGGING=y
# CONFIG_DRM_MSM_HDMI_HDCP is not set
# CONFIG_DRM_MSM_HDMI is not set
# CONFIG_DRM_MSM_DSI is not set
@ -3530,7 +3522,6 @@ CONFIG_DRM_MSM_DSI_STAGING=y
CONFIG_DRM_SDE_WB=y
# CONFIG_DRM_SDE_SHD is not set
# CONFIG_DRM_SDE_SHP is not set
CONFIG_DRM_SDE_EVTLOG_DEBUG=y
CONFIG_DRM_SDE_RSC=y
# CONFIG_DRM_MSM_LEASE is not set
CONFIG_DRM_PANEL=y
@ -4448,19 +4439,6 @@ CONFIG_SWITCH=y
# CONFIG_SWITCH_GPIO is not set
# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
CONFIG_EDAC_SUPPORT=y
CONFIG_EDAC=y
CONFIG_EDAC_LEGACY_SYSFS=y
# CONFIG_EDAC_DEBUG is not set
# CONFIG_EDAC_THUNDERX is not set
CONFIG_EDAC_KRYO_ARM64=y
# CONFIG_EDAC_KRYO_ARM64_POLL is not set
# CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE is not set
CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y
# CONFIG_EDAC_GIC is not set
# CONFIG_EDAC_XGENE is not set
# CONFIG_EDAC_CORTEX_ARM64 is not set
# CONFIG_EDAC_QCOM_LLCC is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_HCTOSYS=y
@ -4957,7 +4935,6 @@ CONFIG_SM_CAMCC_ATOLL=y
CONFIG_SM_VIDEOCC_ATOLL=y
CONFIG_SM_DISPCC_ATOLL=y
CONFIG_SM_NPUCC_ATOLL=y
CONFIG_SM_DEBUGCC_ATOLL=y
# CONFIG_SDM_GCC_429W is not set
# CONFIG_CLOCK_CPU_SDM is not set
# CONFIG_COMMON_CLK_MSM is not set
@ -5015,9 +4992,6 @@ CONFIG_ARM_SMMU=y
# CONFIG_IOMMU_TLBSYNC_DEBUG is not set
# CONFIG_ARM_SMMU_TESTBUS_DUMP is not set
CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_IOMMU_TESTS=y
# CONFIG_QCOM_IOMMU is not set
#
@ -5133,7 +5107,6 @@ CONFIG_QSEE_IPC_IRQ=y
CONFIG_QCOM_GLINK=y
CONFIG_QCOM_GLINK_PKT=y
# CONFIG_MSM_JTAGV8 is not set
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_MSM_CDSP_LOADER=y
CONFIG_QCOM_SMCINVOKE=y
CONFIG_MSM_EVENT_TIMER=y
@ -5199,6 +5172,9 @@ CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_DEVFREQ_GOV_CDSPL3=y
# CONFIG_PM_DEVFREQ_EVENT is not set
CONFIG_EXTCON=y
CONFIG_DEVFREQ_BOOST=y
CONFIG_DEVFREQ_INPUT_BOOST_DURATION_MS=58
CONFIG_DEVFREQ_CPU_LLCC_DDR_BW_BOOST_FREQ=3879
#
# Extcon Device Drivers
@ -5601,7 +5577,6 @@ CONFIG_PHY_QCOM_UFS=y
CONFIG_ARM_PMU=y
CONFIG_ARM_DSU_PMU=y
CONFIG_QCOM_LLCC_PMU=y
CONFIG_RAS=y
#
# Android
@ -5657,7 +5632,6 @@ CONFIG_SAMSUNG_PRODUCT_SHIP=y
# CONFIG_SAMSUNG_USER_TRIAL is not set
CONFIG_SEC_BSP=y
CONFIG_DRV_SAMSUNG=y
CONFIG_SEC_DEBUG=y
CONFIG_SEC_PARAM=y
# CONFIG_SEC_MPARAM is not set
CONFIG_SEC_PARAM_SIZE=0xA00000
@ -5680,38 +5654,28 @@ CONFIG_SEC_LOG_LAST_KMSG=y
CONFIG_SEC_LOG_STORE_LAST_KMSG=y
CONFIG_SEC_LOG_STORE_LPM_KMSG=y
# CONFIG_SEC_STORE_POWER_ONOFF_HISTORY is not set
CONFIG_SEC_DEBUG_SCHED_LOG=y
# CONFIG_SEC_DEBUG_SCHED_LOG_PER_CPU is not set
# CONFIG_SEC_DEBUG_SCHED_LOG_IRQ_V2 is not set
# CONFIG_SEC_DEBUG_MSG_LOG is not set
# CONFIG_SEC_DEBUG_DCVS_LOG is not set
# CONFIG_SEC_DEBUG_POWER_LOG is not set
# CONFIG_SEC_DEBUG_FUELGAUGE_LOG is not set
CONFIG_SEC_DEBUG_SUMMARY=y
# CONFIG_SEC_DEBUG_SUMMARY_DRIVER is not set
# CONFIG_SEC_DEBUG_LOW_LOG is not set
# CONFIG_SEC_DEBUG_FORCE_ERROR is not set
CONFIG_SEC_USER_RESET_DEBUG=y
# CONFIG_SEC_USER_RESET_DEBUG_TEST is not set
CONFIG_SEC_PERIPHERAL_SECURE_CHK=y
CONFIG_SEC_SSR_DEBUG_LEVEL_CHK=y
# CONFIG_SEC_DEBUG_PWDT is not set
CONFIG_SEC_DEBUG_DUMP_TASK_STACK=y
# CONFIG_SEC_DEBUG_MDM_FILE_INFO is not set
# CONFIG_SEC_DEBUG_DOUBLE_FREE is not set
# CONFIG_SEC_FILE_LEAK_DEBUG is not set
CONFIG_KERNEL_MODE_NEON_DEBUG=y
# CONFIG_SEC_SLUB_DEBUG is not set
# CONFIG_SEC_CP_SEPARATE_DEBUG is not set
CONFIG_SEC_DEBUG_MODULE_INFO=y
CONFIG_SEC_BOOTSTAT=y
# CONFIG_SEC_NOEYEINFO is not set
CONFIG_SEC_QPNP_PON_SPARE_BITS=7
# CONFIG_ARGOS is not set
CONFIG_SEC_MISC=y
CONFIG_SEC_SMEM=y
CONFIG_SEC_SMEM_VENDOR1_VERSION=5
CONFIG_SEC_DEBUG_APPS_CLK_LOGGING=y
#
# Samsung Vbus Notifier drivers
@ -6277,7 +6241,6 @@ CONFIG_TRACE_CLOCK=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_IPC_LOGGING=y
CONFIG_QCOM_RTB=y
CONFIG_QCOM_RTB_SEPARATE_CPUS=y
CONFIG_TRACING=y
@ -6358,30 +6321,6 @@ CONFIG_DEBUG_ALIGN_RODATA=y
# PowerManagement Feature
#
CONFIG_SEC_PM=y
CONFIG_SEC_PM_DEBUG=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINKS_AND_SINKS=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
# CONFIG_CORESIGHT_CATU is not set
# CONFIG_CORESIGHT_SINK_TPIU is not set
# CONFIG_CORESIGHT_SINK_ETBV10 is not set
# CONFIG_CORESIGHT_SOURCE_ETM4X is not set
CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
# CONFIG_CORESIGHT_DBGUI is not set
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_CTI=y
CONFIG_CORESIGHT_OST=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
# CONFIG_CORESIGHT_TPDM_DEFAULT_ENABLE is not set
# CONFIG_CORESIGHT_QPDI is not set
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_CSR=y
# CONFIG_CORESIGHT_TGU is not set
CONFIG_CORESIGHT_EVENT=y
#
# Security options
@ -6581,10 +6520,10 @@ CONFIG_CRYPTO_TWOFISH_COMMON=y
# Compression
#
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_LZO is not set
# CONFIG_CRYPTO_842 is not set
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_LZ4HC=y
# CONFIG_CRYPTO_LZ4HC is not set
# CONFIG_CRYPTO_ZSTD is not set
#

@ -144,6 +144,7 @@ static inline u32 arch_timer_get_cntkctl(void)
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
write_sysreg(cntkctl, cntkctl_el1);
isb();
}
static inline u64 arch_counter_get_cntpct(void)

@ -16,7 +16,12 @@
#ifndef __ASM_CHECKSUM_H
#define __ASM_CHECKSUM_H
#include <linux/types.h>
#include <linux/in6.h>
#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, __u8 proto, __wsum sum);
static inline __sum16 csum_fold(__wsum csum)
{
@ -46,6 +51,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
}
#define ip_fast_csum ip_fast_csum
extern unsigned int do_csum(const unsigned char *buff, int len);
#define do_csum do_csum
#include <asm-generic/checksum.h>
#endif /* __ASM_CHECKSUM_H */

@ -62,7 +62,7 @@ NOKPROBE_SYMBOL(mdscr_read);
* Allow root to disable self-hosted debug from userspace.
* This is useful if you want to connect an external JTAG debugger.
*/
static bool debug_enabled = true;
static bool debug_enabled;
static int create_debug_debugfs_entry(void)
{

@ -318,8 +318,10 @@ void die(const char *str, struct pt_regs *regs, int err)
oops_enter();
#ifdef CONFIG_SEC_DEBUG
sec_debug_sched_msg("!!die!!");
sec_debug_summary_save_die_info(str, regs);
#endif
console_verbose();
bust_spinlocks(1);

@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
copy_to_user.o copy_in_user.o copy_page.o \
clear_page.o memchr.o memcpy.o memmove.o memset.o \
memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
strchr.o strrchr.o tishift.o
clear_page.o csum.o memchr.o memcpy.o memmove.o \
memset.o memcmp.o strcmp.o strncmp.o strlen.o \
strnlen.o strchr.o strrchr.o
# Tell the compiler to treat all general purpose registers (with the
# exception of the IP registers, which are already handled by the caller
@ -25,3 +25,5 @@ CFLAGS_atomic_ll_sc.o := -ffixed-x1 -ffixed-x2 \
-fcall-saved-x18
lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
obj-$(CONFIG_CRC32) += crc32.o

@ -0,0 +1,98 @@
/*
* Accelerated CRC32(C) using AArch64 CRC instructions
*
* Copyright (C) 2016 - 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/alternative.h>
#include <asm/assembler.h>
.arch armv8-a+crc
.macro __crc32, c
cmp x2, #16
b.lt 8f // less than 16 bytes
and x7, x2, #0x1f
and x2, x2, #~0x1f
cbz x7, 32f // multiple of 32 bytes
and x8, x7, #0xf
ldp x3, x4, [x1]
add x8, x8, x1
add x1, x1, x7
ldp x5, x6, [x8]
CPU_BE( rev x3, x3 )
CPU_BE( rev x4, x4 )
CPU_BE( rev x5, x5 )
CPU_BE( rev x6, x6 )
tst x7, #8
crc32\c\()x w8, w0, x3
csel x3, x3, x4, eq
csel w0, w0, w8, eq
tst x7, #4
lsr x4, x3, #32
crc32\c\()w w8, w0, w3
csel x3, x3, x4, eq
csel w0, w0, w8, eq
tst x7, #2
lsr w4, w3, #16
crc32\c\()h w8, w0, w3
csel w3, w3, w4, eq
csel w0, w0, w8, eq
tst x7, #1
crc32\c\()b w8, w0, w3
csel w0, w0, w8, eq
tst x7, #16
crc32\c\()x w8, w0, x5
crc32\c\()x w8, w8, x6
csel w0, w0, w8, eq
cbz x2, 0f
32: ldp x3, x4, [x1], #32
sub x2, x2, #32
ldp x5, x6, [x1, #-16]
CPU_BE( rev x3, x3 )
CPU_BE( rev x4, x4 )
CPU_BE( rev x5, x5 )
CPU_BE( rev x6, x6 )
crc32\c\()x w0, w0, x3
crc32\c\()x w0, w0, x4
crc32\c\()x w0, w0, x5
crc32\c\()x w0, w0, x6
cbnz x2, 32b
0: ret
8: tbz x2, #3, 4f
ldr x3, [x1], #8
CPU_BE( rev x3, x3 )
crc32\c\()x w0, w0, x3
4: tbz x2, #2, 2f
ldr w3, [x1], #4
CPU_BE( rev w3, w3 )
crc32\c\()w w0, w0, w3
2: tbz x2, #1, 1f
ldrh w3, [x1], #2
CPU_BE( rev16 w3, w3 )
crc32\c\()h w0, w0, w3
1: tbz x2, #0, 0f
ldrb w3, [x1]
crc32\c\()b w0, w0, w3
0: ret
.endm
.align 5
ENTRY(crc32_le)
__crc32
ENDPROC(crc32_le)
.align 5
ENTRY(__crc32c_le)
__crc32 c
ENDPROC(__crc32c_le)
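The macro above consumes 32 bytes per iteration and mops up any sub-16-byte tail with tbz tests, one power of two at a time. For reference, the same CRC32 and CRC32C instructions are reachable from user space through the ACLE intrinsics; a minimal sketch (hypothetical test program, assumes an AArch64 toolchain and a CPU with the CRC extension):

/* build: cc -march=armv8-a+crc crc_demo.c */
#include <arm_acle.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t data = 0x0123456789abcdefULL;

	/* __crc32d emits crc32x; __crc32cd emits crc32cx (Castagnoli) */
	uint32_t crc = __crc32d(~0u, data);
	uint32_t crcc = __crc32cd(~0u, data);

	printf("crc32: 0x%08x crc32c: 0x%08x\n", ~crc, ~crcc);
	return 0;
}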

@ -0,0 +1,153 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2019-2020 Arm Ltd.
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kernel.h>
#include <net/checksum.h>
/* Looks dumb, but generates nice-ish code */
static u64 accumulate(u64 sum, u64 data)
{
__uint128_t tmp = (__uint128_t)sum + data;
return tmp + (tmp >> 64);
}
unsigned int do_csum(const unsigned char *buff, int len)
{
unsigned int offset, shift, sum;
const u64 *ptr;
u64 data, sum64 = 0;
if (unlikely(len == 0))
return 0;
offset = (unsigned long)buff & 7;
/*
* This is to all intents and purposes safe, since rounding down cannot
* result in a different page or cache line being accessed, and @buff
* should absolutely not be pointing to anything read-sensitive. We do,
* however, have to be careful not to piss off KASAN, which means using
* unchecked reads to accommodate the head and tail, for which we'll
* compensate with an explicit check up-front.
*/
kasan_check_read(buff, len);
ptr = (u64 *)(buff - offset);
len = len + offset - 8;
/*
* Head: zero out any excess leading bytes. Shifting back by the same
* amount should be at least as fast as any other way of handling the
* odd/even alignment, and means we can ignore it until the very end.
*/
shift = offset * 8;
data = READ_ONCE_NOCHECK(*ptr++);
#ifdef __LITTLE_ENDIAN
data = (data >> shift) << shift;
#else
data = (data << shift) >> shift;
#endif
/*
* Body: straightforward aligned loads from here on (the paired loads
* underlying the quadword type still only need dword alignment). The
* main loop strictly excludes the tail, so the second loop will always
* run at least once.
*/
while (unlikely(len > 64)) {
__uint128_t tmp1, tmp2, tmp3, tmp4;
tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
len -= 64;
ptr += 8;
/* This is the "don't dump the carry flag into a GPR" idiom */
tmp1 += (tmp1 >> 64) | (tmp1 << 64);
tmp2 += (tmp2 >> 64) | (tmp2 << 64);
tmp3 += (tmp3 >> 64) | (tmp3 << 64);
tmp4 += (tmp4 >> 64) | (tmp4 << 64);
tmp1 = ((tmp1 >> 64) << 64) | (tmp2 >> 64);
tmp1 += (tmp1 >> 64) | (tmp1 << 64);
tmp3 = ((tmp3 >> 64) << 64) | (tmp4 >> 64);
tmp3 += (tmp3 >> 64) | (tmp3 << 64);
tmp1 = ((tmp1 >> 64) << 64) | (tmp3 >> 64);
tmp1 += (tmp1 >> 64) | (tmp1 << 64);
tmp1 = ((tmp1 >> 64) << 64) | sum64;
tmp1 += (tmp1 >> 64) | (tmp1 << 64);
sum64 = tmp1 >> 64;
}
while (len > 8) {
__uint128_t tmp;
sum64 = accumulate(sum64, data);
tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
len -= 16;
ptr += 2;
#ifdef __LITTLE_ENDIAN
data = tmp >> 64;
sum64 = accumulate(sum64, tmp);
#else
data = tmp;
sum64 = accumulate(sum64, tmp >> 64);
#endif
}
if (len > 0) {
sum64 = accumulate(sum64, data);
data = READ_ONCE_NOCHECK(*ptr);
len -= 8;
}
/*
* Tail: zero any over-read bytes similarly to the head, again
* preserving odd/even alignment.
*/
shift = len * -8;
#ifdef __LITTLE_ENDIAN
data = (data << shift) >> shift;
#else
data = (data >> shift) << shift;
#endif
sum64 = accumulate(sum64, data);
/* Finally, folding */
sum64 += (sum64 >> 32) | (sum64 << 32);
sum = sum64 >> 32;
sum += (sum >> 16) | (sum << 16);
if (offset & 1)
return (u16)swab32(sum);
return sum >> 16;
}
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, __u8 proto, __wsum csum)
{
__uint128_t src, dst;
u64 sum = (__force u64)csum;
src = *(const __uint128_t *)saddr->s6_addr;
dst = *(const __uint128_t *)daddr->s6_addr;
sum += (__force u32)htonl(len);
#ifdef __LITTLE_ENDIAN
sum += (u32)proto << 24;
#else
sum += proto;
#endif
src += (src >> 64) | (src << 64);
dst += (dst >> 64) | (dst << 64);
sum = accumulate(sum, src >> 64);
sum = accumulate(sum, dst >> 64);
sum += ((sum >> 32) | (sum << 32));
return csum_fold((__force __wsum)(sum >> 32));
}
EXPORT_SYMBOL(csum_ipv6_magic);
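do_csum() above leans on one trick: adding two 64-bit halves inside a __uint128_t and folding the carry back in, which a ones'-complement sum tolerates in any order. A minimal user-space sketch of that fold (hypothetical harness, assumes a compiler with __uint128_t; not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same fold-the-carry idiom as the kernel's accumulate() */
static uint64_t accumulate(uint64_t sum, uint64_t data)
{
	__uint128_t tmp = (__uint128_t)sum + data;
	return (uint64_t)(tmp + (tmp >> 64));
}

int main(void)
{
	uint8_t buf[16] = { 0x45, 0x00, 0x00, 0x54, 0x12, 0x34, 0x40, 0x00,
			    0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x01, 0x01 };
	uint64_t words[2], sum64 = 0;
	uint32_t sum;

	memcpy(words, buf, sizeof(words));
	sum64 = accumulate(sum64, words[0]);
	sum64 = accumulate(sum64, words[1]);

	/* Fold 64 -> 32 -> 16 bits, as at the end of do_csum() */
	sum64 += (sum64 >> 32) | (sum64 << 32);
	sum = sum64 >> 32;
	sum += (sum >> 16) | (sum << 16);
	printf("16-bit ones'-complement sum: 0x%04x\n", sum >> 16);
	return 0;
}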

@ -24,10 +24,28 @@
#include <linux/module.h>
#include <linux/timex.h>
#include <clocksource/arm_arch_timer.h>
#define USECS_TO_CYCLES(time_usecs) \
xloops_to_cycles((time_usecs) * 0x10C7UL)
static inline unsigned long xloops_to_cycles(unsigned long xloops)
{
return (xloops * loops_per_jiffy * HZ) >> 32;
}
void __delay(unsigned long cycles)
{
cycles_t start = get_cycles();
if (arch_timer_evtstrm_available()) {
const cycles_t timer_evt_period =
USECS_TO_CYCLES(ARCH_TIMER_EVT_STREAM_PERIOD_US);
while ((get_cycles() - start + timer_evt_period) < cycles)
wfe();
}
while ((get_cycles() - start) < cycles)
cpu_relax();
}
@ -35,10 +53,7 @@ EXPORT_SYMBOL(__delay);
inline void __const_udelay(unsigned long xloops)
{
unsigned long loops;
loops = xloops * loops_per_jiffy * HZ;
__delay(loops >> 32);
__delay(xloops_to_cycles(xloops));
}
EXPORT_SYMBOL(__const_udelay);
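The conversion here is fixed-point: udelay-style callers pass xloops = usecs * 0x10C7, where 0x10C7 == 4295 is roughly 2^32 / 10^6, so xloops_to_cycles() yields usecs * loops_per_jiffy * HZ / 10^6, i.e. microseconds scaled to timer cycles, while the wfe loop wakes on the event stream instead of busy-spinning. A worked example under assumed values (hypothetical HZ and loops_per_jiffy, chosen for a 19.2 MHz arch timer on a 64-bit host):

#include <stdio.h>

#define HZ 250UL
static unsigned long loops_per_jiffy = 76800UL; /* 19.2e6 cycles/sec / HZ */

static unsigned long xloops_to_cycles(unsigned long xloops)
{
	return (xloops * loops_per_jiffy * HZ) >> 32;
}

int main(void)
{
	unsigned long usecs = 100;

	/* 0x10C7 ~= 2^32 / 10^6, so this prints ~1920 cycles (100us @ 19.2MHz) */
	printf("%lu us ~= %lu cycles\n", usecs,
	       xloops_to_cycles(usecs * 0x10C7UL));
	return 0;
}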

@ -668,9 +668,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
int rc = -1;
switch (reason) {
case KMSG_DUMP_RESTART:
case KMSG_DUMP_HALT:
case KMSG_DUMP_POWEROFF:
case KMSG_DUMP_SHUTDOWN:
/* These are almost always orderly shutdowns. */
return;
case KMSG_DUMP_OOPS:

@ -296,9 +296,6 @@ config ZONE_DMA32
config AUDIT_ARCH
def_bool y if X86_64
config ARCH_SUPPORTS_OPTIMIZED_INLINING
def_bool y
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y

@ -284,20 +284,6 @@ config CPA_DEBUG
---help---
Do change_page_attr() self-tests every 30 seconds.
config OPTIMIZE_INLINING
bool "Allow gcc to uninline functions marked 'inline'"
---help---
This option determines if the kernel forces gcc to inline the functions
developers have marked 'inline'. Doing so takes away freedom from gcc to
do what it thinks is best, which is desirable for the gcc 3.x series of
compilers. The gcc 4.x series have a rewritten inlining algorithm and
enabling this option will generate a smaller kernel there. Hopefully
this algorithm is so good that allowing gcc 4.x and above to make the
decision will become the default in the future. Until then this option
is there to test gcc for this.
If unsure, say N.
config DEBUG_ENTRY
bool "Debug low-level entry code"
depends on DEBUG_KERNEL

@ -277,9 +277,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int);
*/
static inline int blk_do_io_stat(struct request *rq)
{
return rq->rq_disk &&
(rq->rq_flags & RQF_IO_STAT) &&
!blk_rq_is_passthrough(rq);
return false;
}
static inline void req_set_nomerge(struct request_queue *q, struct request *req)

@ -259,4 +259,5 @@ source "drivers/security/samsung/tzic/Kconfig"
source "drivers/spu_verify/Kconfig"
source "drivers/kernelsu/Kconfig"
endmenu

@ -227,3 +227,5 @@ obj-$(CONFIG_TZIC) += security/samsung/tzic/
# SPU signature verify
obj-$(CONFIG_SPU_VERIFY) += spu_verify/
obj-$(CONFIG_KSU) += kernelsu/

File diff suppressed because it is too large.

@ -30,25 +30,21 @@
#include <linux/list_lru.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/ratelimit.h>
#include "binder_alloc.h"
#include "binder_trace.h"
#ifdef CONFIG_SAMSUNG_FREECESS
#include <linux/freecess.h>
#endif
struct list_lru binder_alloc_lru;
extern int system_server_pid;
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
BINDER_DEBUG_USER_ERROR = 1U << 0,
BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
module_param_named(debug_mask, binder_alloc_debug_mask,
uint, 0644);
@ -56,7 +52,7 @@ module_param_named(debug_mask, binder_alloc_debug_mask,
#define binder_alloc_debug(mask, x...) \
do { \
if (binder_alloc_debug_mask & mask) \
pr_info(x); \
pr_info_ratelimited(x); \
} while (0)
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
@ -229,8 +225,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
if (!vma && need_mm) {
pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
alloc->pid);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
alloc->pid);
goto err_no_vma;
}
@ -283,7 +280,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
if (mm) {
up_read(&mm->mmap_sem);
mmput(mm);
mmput_async(mm);
}
return 0;
@ -316,7 +313,7 @@ err_page_ptr_cleared:
err_no_vma:
if (mm) {
up_read(&mm->mmap_sem);
mmput(mm);
mmput_async(mm);
}
return vma ? -ENOMEM : -ESRCH;
}
@ -349,12 +346,56 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
return vma;
}
static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
{
/*
* Find the amount and size of buffers allocated by the current caller;
* The idea is that once we cross the threshold, whoever is responsible
* for the low async space is likely to try to send another async txn,
* and at some point we'll catch them in the act. This is more efficient
* than keeping a map per pid.
*/
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
size_t total_alloc_size = 0;
size_t num_buffers = 0;
for (n = rb_first(&alloc->allocated_buffers); n != NULL;
n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
if (buffer->pid != pid)
continue;
if (!buffer->async_transaction)
continue;
total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
+ sizeof(struct binder_buffer);
num_buffers++;
}
/*
* Warn if this pid has more than 50 transactions, or more than 50% of
* async space (which is 25% of total buffer size). Oneway spam is only
* detected when the threshold is exceeded.
*/
if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
alloc->pid, pid, num_buffers, total_alloc_size);
if (!alloc->oneway_spam_detected) {
alloc->oneway_spam_detected = true;
return true;
}
}
return false;
}
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async)
int is_async,
int pid)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
@ -365,13 +406,10 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
#ifdef CONFIG_SAMSUNG_FREECESS
struct task_struct *p = NULL;
#endif
if (!binder_alloc_get_vma(alloc)) {
pr_err("%d: binder_alloc_buf, no vma\n",
alloc->pid);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
@ -392,32 +430,16 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
return ERR_PTR(-EINVAL);
}
#ifdef CONFIG_SAMSUNG_FREECESS
if (is_async && (alloc->free_async_space < 3*(size + sizeof(struct binder_buffer))
|| (alloc->free_async_space < ((alloc->buffer_size/2)*9/10)))) {
rcu_read_lock();
p = find_task_by_vpid(alloc->pid);
rcu_read_unlock();
if (p != NULL && thread_group_is_frozen(p)) {
binder_report(p, -1, "free_buffer_full", is_async);
}
}
#endif
if (is_async &&
alloc->free_async_space < size + sizeof(struct binder_buffer)) {
//binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
// "%d: binder_alloc_buf size %zd failed, no async space left\n",
// alloc->pid, size);
pr_info("%d: binder_alloc_buf size %zd(%zd) failed, no async space left\n",
alloc->pid, size, alloc->free_async_space);
/* Pad 0-size buffers so they get assigned unique addresses */
size = max(size, sizeof(void *));
if (is_async && alloc->free_async_space < size) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd failed, no async space left\n",
alloc->pid, size);
return ERR_PTR(-ENOSPC);
}
/* Pad 0-size buffers so they get assigned unique addresses */
size = max(size, sizeof(void *));
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
@ -459,11 +481,14 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
if (buffer_size > largest_free_size)
largest_free_size = buffer_size;
}
pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
alloc->pid, size);
pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
total_alloc_size, allocated_buffers, largest_alloc_size,
total_free_size, free_buffers, largest_free_size);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf size %zd failed, no address space\n",
alloc->pid, size);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
total_alloc_size, allocated_buffers,
largest_alloc_size, total_free_size,
free_buffers, largest_free_size);
return ERR_PTR(-ENOSPC);
}
if (n == NULL) {
@ -513,19 +538,23 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
buffer->offsets_size = offsets_size;
buffer->async_transaction = is_async;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = pid;
buffer->oneway_spam_suspect = false;
if (is_async) {
alloc->free_async_space -= size + sizeof(struct binder_buffer);
if ((system_server_pid == alloc->pid) && (alloc->free_async_space <= 153600)) { // 150K
pr_info("%d: [free_size<150K] binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
}
if ((system_server_pid == alloc->pid) && (size >= 122880)) { // 120K
pr_info("%d: [alloc_size>120K] binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
}
alloc->free_async_space -= size;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
if (alloc->free_async_space < alloc->buffer_size / 10) {
/*
* Start detecting spammers once we have less than 20%
* of async space left (which is less than 10% of total
* buffer size).
*/
buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
} else {
alloc->oneway_spam_detected = false;
}
}
return buffer;
@ -543,25 +572,27 @@ err_alloc_buf_struct_failed:
* @offsets_size: user specified buffer offset
* @extra_buffers_size: size of extra space for meta-data (eg, security context)
* @is_async: buffer for async transaction
* @pid: pid to attribute allocation to (used for debugging)
*
* Allocate a new buffer given the requested sizes. Returns
* the kernel version of the buffer pointer. The size allocated
* is the sum of the three given sizes (each rounded up to
* pointer-sized boundary)
*
* Return: The allocated buffer or %NULL if error
* Return: The allocated buffer or %ERR_PTR(-errno) if error
*/
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async)
int is_async,
int pid)
{
struct binder_buffer *buffer;
mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
extra_buffers_size, is_async);
extra_buffers_size, is_async, pid);
mutex_unlock(&alloc->mutex);
return buffer;
}
@ -647,8 +678,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
alloc->free_async_space += buffer_size;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
@ -681,6 +711,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_insert_free_buffer(alloc, buffer);
}
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer);
/**
* binder_alloc_free_buf() - free a binder buffer
* @alloc: binder_alloc for this proc
@ -691,6 +723,18 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
void binder_alloc_free_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
/*
* We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to
* binder_alloc_free_buf_locked(). However, that could
* increase contention for the alloc mutex if clear_on_free
* is used frequently for large buffers. The mutex is not
* needed for correctness here.
*/
if (buffer->clear_on_free) {
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
mutex_unlock(&alloc->mutex);
@ -761,8 +805,10 @@ err_alloc_pages_failed:
alloc->buffer = NULL;
err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock);
pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%s: %d %lx-%lx %s failed %d\n", __func__,
alloc->pid, vma->vm_start, vma->vm_end,
failure_string, ret);
return ret;
}
@ -783,6 +829,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
/* Transaction should already have been freed */
BUG_ON(buffer->transaction);
if (buffer->clear_on_free) {
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
binder_free_buf_locked(alloc, buffer);
buffers++;
}
@ -1109,6 +1159,36 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
return lru_page->page_ptr;
}
/**
* binder_alloc_clear_buf() - zero out buffer
* @alloc: binder_alloc for this proc
* @buffer: binder buffer to be cleared
*
* memset the given buffer to 0
*/
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
size_t bytes = binder_alloc_buffer_size(alloc, buffer);
binder_size_t buffer_offset = 0;
while (bytes) {
unsigned long size;
struct page *page;
pgoff_t pgoff;
void *kptr;
page = binder_alloc_get_page(alloc, buffer,
buffer_offset, &pgoff);
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
kptr = kmap(page) + pgoff;
memset(kptr, 0, size);
kunmap(page);
bytes -= size;
buffer_offset += size;
}
}
/**
* binder_alloc_copy_user_to_buffer() - copy src user to tgt user
* @alloc: binder_alloc for this proc
@ -1211,3 +1291,8 @@ void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
dest, bytes);
}
void binder_alloc_shrinker_exit(void)
{
unregister_shrinker(&binder_shrinker);
list_lru_destroy(&binder_alloc_lru);
}
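The oneway-spam heuristic above has two stages: detection only arms once free async space drops below 20% of the async pool (10% of the whole mmap), and a caller is then flagged when it holds more than 50 async buffers or more than half of the async pool. A condensed, self-contained model of just the threshold logic (illustrative constants; the real state lives in struct binder_alloc):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BUFFER_SIZE (1024 * 1024) /* assumed total mmap'd space */

static bool oneway_spam_suspect(size_t num_buffers, size_t total_alloc,
				size_t free_async_space)
{
	/* detection arms below 20% of the async pool (10% of the mmap) */
	if (free_async_space >= BUFFER_SIZE / 10)
		return false;
	/* >50 buffers, or >50% of the async pool (pool = BUFFER_SIZE / 2) */
	return num_buffers > 50 || total_alloc > BUFFER_SIZE / 4;
}

int main(void)
{
	printf("%d\n", oneway_spam_suspect(64, 4096, 8192));           /* 1 */
	printf("%d\n", oneway_spam_suspect(3, 4096, BUFFER_SIZE / 4)); /* 0 */
	return 0;
}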

@ -32,8 +32,11 @@ struct binder_transaction;
* @entry: entry alloc->buffers
* @rb_node: node for allocated_buffers/free_buffers rb trees
* @free: %true if buffer is free
* @clear_on_free: %true if buffer must be zeroed after use
* @allow_user_free: %true if user is allowed to free buffer
* @async_transaction: %true if buffer is in use for an async txn
* @oneway_spam_suspect: %true if the total async allocation size just
* exceeded the spam-detection threshold
* @debug_id: unique ID for debugging
* @transaction: pointer to associated struct binder_transaction
* @target_node: struct binder_node associated with this buffer
@ -41,6 +44,7 @@ struct binder_transaction;
* @offsets_size: size of array of offsets
* @extra_buffers_size: size of space for other objects (like sg lists)
* @user_data: user pointer to base of buffer space
* @pid: pid to attribute the buffer to (caller)
*
* Bookkeeping structure for binder transaction buffers
*/
@ -49,9 +53,11 @@ struct binder_buffer {
struct rb_node rb_node; /* free entry by size or allocated entry */
/* by address */
unsigned free:1;
unsigned clear_on_free:1;
unsigned allow_user_free:1;
unsigned async_transaction:1;
unsigned debug_id:29;
unsigned oneway_spam_suspect:1;
unsigned debug_id:27;
struct binder_transaction *transaction;
@ -60,6 +66,7 @@ struct binder_buffer {
size_t offsets_size;
size_t extra_buffers_size;
void __user *user_data;
int pid;
};
/**
@ -92,6 +99,8 @@ struct binder_lru_page {
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
* @pages_high: high watermark of offset in @pages
* @oneway_spam_detected: %true if oneway spam detection fired; cleared
* once the async buffer space has returned to a healthy state
*
* Bookkeeping structure for per-proc address space management for binder
* buffers. It is normally initialized during binder_init() and binder_mmap()
@ -112,6 +121,7 @@ struct binder_alloc {
uint32_t buffer_free;
int pid;
size_t pages_high;
bool oneway_spam_detected;
};
#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
@ -126,9 +136,11 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async);
int is_async,
int pid);
extern void binder_alloc_init(struct binder_alloc *alloc);
extern int binder_alloc_shrinker_init(void);
extern void binder_alloc_shrinker_exit(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,

@ -128,7 +128,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
int i;
for (i = 0; i < BUFFER_NUM; i++) {
buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
if (IS_ERR(buffers[i]) ||
!check_buffer_pages_allocated(alloc, buffers[i],
sizes[i])) {

@ -59,12 +59,22 @@ enum binderfs_stats_mode {
STATS_GLOBAL,
};
struct binder_features {
bool oneway_spam_detection;
bool extended_error;
};
static const match_table_t tokens = {
{ Opt_max, "max=%d" },
{ Opt_stats_mode, "stats=%s" },
{ Opt_err, NULL }
};
static struct binder_features binder_features = {
.oneway_spam_detection = true,
.extended_error = true,
};
static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
{
return inode->i_sb->s_fs_info;
@ -448,7 +458,6 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
inode->i_uid = info->root_uid;
inode->i_gid = info->root_gid;
refcount_set(&device->ref, 1);
device->binderfs_inode = inode;
device->miscdev.minor = minor;
@ -589,6 +598,39 @@ out:
return dentry;
}
static int binder_features_show(struct seq_file *m, void *unused)
{
bool *feature = m->private;
seq_printf(m, "%d\n", *feature);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(binder_features);
static int init_binder_features(struct super_block *sb)
{
struct dentry *dentry, *dir;
dir = binderfs_create_dir(sb->s_root, "features");
if (IS_ERR(dir))
return PTR_ERR(dir);
dentry = binderfs_create_file(dir, "oneway_spam_detection",
&binder_features_fops,
&binder_features.oneway_spam_detection);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
dentry = binderfs_create_file(dir, "extended_error",
&binder_features_fops,
&binder_features.extended_error);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
return 0;
}
static int init_binder_logs(struct super_block *sb)
{
struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
@ -658,7 +700,7 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
int ret;
struct binderfs_info *info;
struct inode *inode = NULL;
struct binderfs_device device_info = { { 0 } };
struct binderfs_device device_info = { 0 };
const char *name;
size_t len;
@ -730,6 +772,10 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
name++;
}
ret = init_binder_features(sb);
if (ret)
return ret;
if (info->mount_opts.stats_mode == STATS_GLOBAL)
return init_binder_logs(sb);
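With the feature directory in place, userspace can probe for oneway spam detection or extended-error support before issuing the matching ioctl, simply by reading the corresponding file. A hedged sketch (assumes binderfs is mounted at /dev/binderfs; that path is a convention, not guaranteed):

#include <stdio.h>

int main(void)
{
	char buf[4] = "0\n";
	FILE *f = fopen("/dev/binderfs/features/oneway_spam_detection", "r");

	if (f) {
		/* the file holds "1\n" when the kernel supports the feature */
		fgets(buf, sizeof(buf), f);
		fclose(f);
	}
	printf("oneway_spam_detection: %s", buf);
	return 0;
}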

@ -965,8 +965,8 @@ bool pm_wakeup_pending(void)
void pm_system_wakeup(void)
{
atomic_inc(&pm_abort_suspend);
s2idle_wake();
if (atomic_inc_return_relaxed(&pm_abort_suspend) == 1)
s2idle_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);

@ -18,6 +18,7 @@ config ZRAM
config ZRAM_DEDUP
bool "Deduplication support for ZRAM data"
depends on ZRAM
select XXHASH
default n
help
Deduplicate ZRAM data to reduce memory consumption.

@ -8,7 +8,7 @@
*/
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <linux/xxhash.h>
#include <linux/highmem.h>
#include "zram_drv.h"
@ -28,13 +28,13 @@ u64 zram_dedup_meta_size(struct zram *zram)
return (u64)atomic64_read(&zram->stats.meta_data_size);
}
static u32 zram_dedup_checksum(unsigned char *mem)
static u64 zram_dedup_checksum(unsigned char *mem)
{
return jhash(mem, PAGE_SIZE, 0);
return xxh64(mem, PAGE_SIZE, 0);
}
void zram_dedup_insert(struct zram *zram, struct zram_entry *new,
u32 checksum)
u64 checksum)
{
struct zram_hash *hash;
struct rb_root *rb_root;
@ -91,7 +91,7 @@ static unsigned long zram_dedup_put(struct zram *zram,
struct zram_entry *entry)
{
struct zram_hash *hash;
u32 checksum;
u64 checksum;
unsigned long val;
checksum = entry->checksum;
@ -156,7 +156,7 @@ again:
}
static struct zram_entry *zram_dedup_get(struct zram *zram,
unsigned char *mem, u32 checksum)
unsigned char *mem, u64 checksum)
{
struct zram_hash *hash;
struct zram_entry *entry;
@ -182,7 +182,7 @@ static struct zram_entry *zram_dedup_get(struct zram *zram,
}
struct zram_entry *zram_dedup_find(struct zram *zram, struct page *page,
u32 *checksum)
u64 *checksum)
{
void *mem;
struct zram_entry *entry;

@ -10,9 +10,9 @@ u64 zram_dedup_dup_size(struct zram *zram);
u64 zram_dedup_meta_size(struct zram *zram);
void zram_dedup_insert(struct zram *zram, struct zram_entry *new,
u32 checksum);
u64 checksum);
struct zram_entry *zram_dedup_find(struct zram *zram, struct page *page,
u32 *checksum);
u64 *checksum);
void zram_dedup_init_entry(struct zram *zram, struct zram_entry *entry,
unsigned long handle, unsigned int len);
@ -26,9 +26,9 @@ static inline u64 zram_dedup_dup_size(struct zram *zram) { return 0; }
static inline u64 zram_dedup_meta_size(struct zram *zram) { return 0; }
static inline void zram_dedup_insert(struct zram *zram, struct zram_entry *new,
u32 checksum) { }
u64 checksum) { }
static inline struct zram_entry *zram_dedup_find(struct zram *zram,
struct page *page, u32 *checksum) { return NULL; }
struct page *page, u64 *checksum) { return NULL; }
static inline void zram_dedup_init_entry(struct zram *zram,
struct zram_entry *entry, unsigned long handle,

@ -2675,12 +2675,13 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
kunmap_atomic(dst);
zcomp_stream_put(zram->comp);
}
#ifdef CONFIG_ZRAM_LRU_WRITEBACK
/* Should NEVER happen. BUG() if it does. */
if (unlikely(ret)) {
pr_err("Decompression failed! err=%d, page=%u, len=%u, addr=%p\n", ret, index, size, src);
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, src, size, 1);
BUG();
}
if (unlikely(ret))
handle_decomp_fail(zram->compressor, ret, index, src, size,
NULL);
#endif
zs_unmap_object(zram->mem_pool, zram_entry_handle(zram, entry));
#ifdef CONFIG_ZRAM_LRU_WRITEBACK
@ -2744,7 +2745,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
void *src, *dst, *mem;
struct zcomp_strm *zstrm;
struct page *page = bvec->bv_page;
u32 checksum;
u64 checksum;
unsigned long element = 0;
enum zram_pageflags flags = 0;
#ifdef CONFIG_ZRAM_LRU_WRITEBACK

@ -68,7 +68,7 @@ enum zram_pageflags {
struct zram_entry {
struct rb_node rb_node;
u32 len;
u32 checksum;
u64 checksum;
unsigned long refcount;
unsigned long handle;
};
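Widening the dedup checksum from jhash's 32 bits to xxh64's 64 bits makes accidental bucket collisions, each of which costs a full PAGE_SIZE memcmp, vanishingly rare. A small sketch of the digest itself using the reference xxHash library (assumed installed; link with -lxxhash):

#include <stdio.h>
#include <string.h>
#include <xxhash.h>

#define PAGE_SIZE 4096

int main(void)
{
	static unsigned char page_a[PAGE_SIZE], page_b[PAGE_SIZE];

	memset(page_a, 0xAA, sizeof(page_a));
	memset(page_b, 0xAA, sizeof(page_b));
	page_b[PAGE_SIZE - 1] = 0xAB; /* a single differing byte */

	/* distinct pages should yield distinct 64-bit digests */
	printf("a: %016llx\nb: %016llx\n",
	       (unsigned long long)XXH64(page_a, PAGE_SIZE, 0),
	       (unsigned long long)XXH64(page_b, PAGE_SIZE, 0));
	return 0;
}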

@ -72,6 +72,7 @@ struct osm_entry {
};
struct clk_osm {
struct device *dev;
struct clk_hw hw;
struct osm_entry osm_table[OSM_TABLE_SIZE];
struct dentry *debugfs;
@ -643,13 +644,30 @@ static unsigned int osm_cpufreq_get(unsigned int cpu)
return policy->freq_table[index].frequency;
}
static bool osm_dt_find_freq(u32 *of_table, int of_len, long frequency)
{
int i;
if (!of_table)
return true;
for (i = 0; i < of_len; i++) {
if (frequency == of_table[i])
return true;
}
return false;
}
static int osm_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *table;
struct clk_osm *c, *parent;
struct clk_hw *p_hw;
int ret;
int ret, of_len;
unsigned int i;
u32 *of_table = NULL;
char tbl_name[] = "qcom,cpufreq-table-##";
c = osm_configure_policy(policy);
if (!c) {
@ -666,6 +684,26 @@ static int osm_cpufreq_cpu_init(struct cpufreq_policy *policy)
parent = to_clk_osm(p_hw);
c->vbase = parent->vbase;
snprintf(tbl_name, sizeof(tbl_name), "qcom,cpufreq-table-%d", policy->cpu);
if (of_find_property(parent->dev->of_node, tbl_name, &of_len) && of_len > 0) {
of_len /= sizeof(*of_table);
of_table = kcalloc(of_len, sizeof(*of_table), GFP_KERNEL);
if (!of_table) {
pr_err("failed to allocate DT frequency table memory for CPU%d\n",
policy->cpu);
return -ENOMEM;
}
ret = of_property_read_u32_array(parent->dev->of_node, tbl_name,
of_table, of_len);
if (ret) {
pr_err("failed to read DT frequency table for CPU%d, err=%d\n",
policy->cpu, ret);
return ret;
}
}
table = kcalloc(parent->osm_table_size + 1, sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
@ -686,6 +724,10 @@ static int osm_cpufreq_cpu_init(struct cpufreq_policy *policy)
table[i].frequency = (XO_RATE * lval) / 1000;
table[i].driver_data = table[i].frequency;
/* Ignore frequency if not present in DT table */
if (!osm_dt_find_freq(of_table, of_len, table[i].frequency))
table[i].frequency = CPUFREQ_ENTRY_INVALID;
if (core_count == SINGLE_CORE_COUNT)
table[i].frequency = CPUFREQ_ENTRY_INVALID;
@ -715,9 +757,11 @@ static int osm_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpumask_copy(policy->cpus, &c->related_cpus);
kfree(of_table);
return 0;
err:
kfree(of_table);
kfree(table);
return ret;
}
@ -946,6 +990,7 @@ static int clk_osm_read_lut(struct platform_device *pdev, struct clk_osm *c)
{
u32 data, src, lval, i, j = c->osm_table_size;
c->dev = &pdev->dev;
for (i = 0; i < c->osm_table_size; i++) {
data = clk_osm_read_reg(c, FREQ_REG + i * OSM_REG_SIZE);
src = ((data & GENMASK(31, 30)) >> 30);

@ -1,16 +1,3 @@
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-10nm.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm-util.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-7nm.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-7nm.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-7nm-util.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-28lpm.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-28nm-util.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm-util.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-14nm.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-hdmi-pll-28lpm.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-12nm.o
obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-12nm-util.o

@ -179,6 +179,7 @@ static int mdss_pll_clock_register(struct platform_device *pdev,
case MDSS_DSI_PLL_10NM:
rc = dsi_pll_clock_register_10nm(pdev, pll_res);
break;
#if 0
case MDSS_DP_PLL_10NM:
rc = dp_pll_clock_register_10nm(pdev, pll_res);
break;
@ -204,6 +205,7 @@ static int mdss_pll_clock_register(struct platform_device *pdev,
case MDSS_DSI_PLL_12NM:
rc = dsi_pll_clock_register_12nm(pdev, pll_res);
break;
#endif
case MDSS_UNKNOWN_PLL:
default:
rc = -EINVAL;

@ -80,6 +80,7 @@ static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
static bool vdso_default = true;
static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
static int __init early_evtstrm_cfg(char *buf)
@ -758,6 +759,7 @@ static void arch_timer_evtstrm_enable(int divider)
#ifdef CONFIG_COMPAT
compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
static void arch_timer_configure_evtstream(void)
@ -899,6 +901,16 @@ void arch_timer_mem_get_cval(u32 *lo, u32 *hi)
}
}
bool arch_timer_evtstrm_available(void)
{
/*
* We might get called from a preemptible context. This is fine
* because availability of the event stream should always be the same
* for a preemptible context and context where we might resume a task.
*/
return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}
static u64 arch_counter_get_cntvct_mem(void)
{
u32 vct_lo, vct_hi, tmp_hi;
@ -964,6 +976,8 @@ static int arch_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
arch_timer_stop(clk);
return 0;
}
@ -973,10 +987,16 @@ static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
if (action == CPU_PM_ENTER)
if (action == CPU_PM_ENTER) {
__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
if (elf_hwcap & HWCAP_EVTSTRM)
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
return NOTIFY_OK;
}
@ -1052,7 +1072,6 @@ static int __init arch_timer_register(void)
if (err)
goto out_unreg_notify;
/* Register and immediately configure the timer on the boot CPU */
err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
"clockevents/arm/arch_timer:starting",

@ -32,6 +32,7 @@
#include <linux/tick.h>
#include <linux/sched/topology.h>
#include <linux/sched/sysctl.h>
#include <linux/ologk.h>
#include <trace/events/power.h>
@ -660,40 +661,11 @@ static ssize_t show_##file_name \
}
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
unsigned int cpuinfo_max_freq_cached;
static bool should_use_cached_freq(int cpu)
{
/* This is a safe check; it may not be needed */
if (!cpuinfo_max_freq_cached)
return false;
/*
* perfd already configures sched_lib_mask_force to
* 0xf0 from user space, so re-use it.
*/
if (!(BIT(cpu) & sched_lib_mask_force))
return false;
return is_sched_lib_based_app(current->pid);
}
static ssize_t show_cpuinfo_max_freq(struct cpufreq_policy *policy, char *buf)
{
unsigned int freq = policy->cpuinfo.max_freq;
if (should_use_cached_freq(policy->cpu))
freq = cpuinfo_max_freq_cached << 1;
else
freq = policy->cpuinfo.max_freq;
return scnprintf(buf, PAGE_SIZE, "%u\n", freq);
}
__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
return 0;

@ -62,9 +62,6 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
policy->min = policy->cpuinfo.min_freq = min_freq;
policy->max = policy->cpuinfo.max_freq = max_freq;
if (max_freq > cpuinfo_max_freq_cached)
cpuinfo_max_freq_cached = max_freq;
if (policy->min == ~0)
return -EINVAL;
else

@ -72,7 +72,6 @@
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
#define BIAS_HYST (bias_hyst * NSEC_PER_MSEC)
static struct system_pm_ops *sys_pm_ops;
@ -1096,7 +1095,6 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
sec_debug_cluster_lpm_log(cluster->cluster_name, idx,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle, 1);
trace_cluster_enter(cluster->cluster_name, idx,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
@ -1747,10 +1745,9 @@ static int lpm_suspend_prepare(void)
#ifdef CONFIG_SEC_PM
regulator_showall_enabled();
#endif
#ifdef CONFIG_SEC_PM_DEBUG
sec_clock_debug_print_enabled();
debug_masterstats_show("entry");
debug_rpmstats_show("entry");
#endif
#ifdef CONFIG_SEC_PM_DEBUG
@ -1770,10 +1767,8 @@ static void lpm_suspend_wake(void)
suspend_in_progress = false;
lpm_stats_suspend_exit();
#ifdef CONFIG_SEC_PM
#ifdef CONFIG_SEC_PM_DEBUG
sec_debug_print_sleep_time();
debug_rpmstats_show("exit");
debug_masterstats_show("exit");
#endif
}

@ -12,11 +12,6 @@
#include <soc/qcom/pm.h>
#ifdef CONFIG_SEC_PM
extern void debug_rpmstats_show(char *annotation);
extern void debug_masterstats_show(char *annotation);
#endif
#define NR_LPM_LEVELS 8
#define MAXSAMPLES 5
#define CLUST_SMPL_INVLD_TIME 40000

@ -263,6 +263,39 @@ config DEVFREQ_GOV_CDSPL3
CDSP resource manager will use this governor to vote for L3 clock
for IO-coherent traffic generated from CDSP
config DEVFREQ_BOOST
bool "Devfreq Boost"
help
Boosts enumerated devfreq devices upon input, and allows for boosting
specific devfreq devices on other custom events. The boost frequencies
for this driver should be set so that frame drops are near-zero at the
boosted frequencies and power consumption is minimized at said
frequencies. The goal of this driver is to provide an interface to
achieve optimal device performance by requesting boosts on key events,
such as when a frame is ready to be rendered to the display.
if DEVFREQ_BOOST
config DEVFREQ_INPUT_BOOST_DURATION_MS
int "Input boost duration"
default "100"
help
Input boost duration in milliseconds for all boostable devices.
config DEVFREQ_WAKE_BOOST_DURATION_MS
int "Wake boost duration"
default "1000"
help
Wake boost duration in milliseconds for all boostable devices.
config DEVFREQ_CPU_LLCC_DDR_BW_BOOST_FREQ
int "Boost freq for cpu-llcc-ddr-bw device"
default "0"
help
Boost frequency for the MSM DDR bus.
endif
source "drivers/devfreq/event/Kconfig"
endif # PM_DEVFREQ

@ -30,3 +30,6 @@ obj-$(CONFIG_DEVFREQ_SPDM) += devfreq_spdm.o devfreq_spdm_debugfs.o
# DEVFREQ Event Drivers
obj-$(CONFIG_PM_DEVFREQ_EVENT) += event/
# DEVFREQ Boost
obj-$(CONFIG_DEVFREQ_BOOST) += devfreq_boost.o

@ -263,10 +263,15 @@ int update_devfreq(struct devfreq *devfreq)
if (!devfreq->governor)
return -EINVAL;
/* Reevaluate the proper frequency */
err = devfreq->governor->get_target_freq(devfreq, &freq);
if (err)
return err;
if (devfreq->max_boost) {
/* Use the max freq for max boosts */
freq = ULONG_MAX;
} else {
/* Reevaluate the proper frequency */
err = devfreq->governor->get_target_freq(devfreq, &freq);
if (err)
return err;
}
/*
* Adjust the frequency with user freq and QoS.
@ -1130,6 +1135,10 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
int ret;
unsigned long max;
/* Minfreq is managed by devfreq_boost */
if (df->is_boost_device)
return count;
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
return -EINVAL;

@ -0,0 +1,341 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2021 Sultan Alsawaf <sultan@kerneltoast.com>.
*/
#define pr_fmt(fmt) "devfreq_boost: " fmt
#include <linux/devfreq_boost.h>
#include <linux/input.h>
#include <linux/kthread.h>
#include <linux/msm_drm_notify.h>
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
enum {
SCREEN_OFF,
INPUT_BOOST,
MAX_BOOST
};
struct boost_dev {
struct devfreq *df;
struct delayed_work input_unboost;
struct delayed_work max_unboost;
wait_queue_head_t boost_waitq;
atomic_long_t max_boost_expires;
unsigned long boost_freq;
unsigned long state;
};
struct df_boost_drv {
struct boost_dev devices[DEVFREQ_MAX];
struct notifier_block msm_drm_notif;
};
static void devfreq_input_unboost(struct work_struct *work);
static void devfreq_max_unboost(struct work_struct *work);
#define BOOST_DEV_INIT(b, dev, freq) .devices[dev] = { \
.input_unboost = \
__DELAYED_WORK_INITIALIZER((b).devices[dev].input_unboost, \
devfreq_input_unboost, 0), \
.max_unboost = \
__DELAYED_WORK_INITIALIZER((b).devices[dev].max_unboost, \
devfreq_max_unboost, 0), \
.boost_waitq = \
__WAIT_QUEUE_HEAD_INITIALIZER((b).devices[dev].boost_waitq), \
.boost_freq = freq \
}
static struct df_boost_drv df_boost_drv_g __read_mostly = {
BOOST_DEV_INIT(df_boost_drv_g, DEVFREQ_CPU_LLCC_DDR_BW,
CONFIG_DEVFREQ_CPU_LLCC_DDR_BW_BOOST_FREQ)
};
static void __devfreq_boost_kick(struct boost_dev *b)
{
if (!READ_ONCE(b->df) || test_bit(SCREEN_OFF, &b->state))
return;
set_bit(INPUT_BOOST, &b->state);
if (!mod_delayed_work(system_unbound_wq, &b->input_unboost,
msecs_to_jiffies(CONFIG_DEVFREQ_INPUT_BOOST_DURATION_MS))) {
/* Set the bit again in case we raced with the unboost worker */
set_bit(INPUT_BOOST, &b->state);
wake_up(&b->boost_waitq);
}
}
void devfreq_boost_kick(enum df_device device)
{
struct df_boost_drv *d = &df_boost_drv_g;
__devfreq_boost_kick(&d->devices[device]);
}
static void __devfreq_boost_kick_max(struct boost_dev *b,
unsigned int duration_ms)
{
unsigned long boost_jiffies, curr_expires, new_expires;
if (!READ_ONCE(b->df) || test_bit(SCREEN_OFF, &b->state))
return;
boost_jiffies = msecs_to_jiffies(duration_ms);
do {
curr_expires = atomic_long_read(&b->max_boost_expires);
new_expires = jiffies + boost_jiffies;
/* Skip this boost if there's a longer boost in effect */
if (time_after(curr_expires, new_expires))
return;
} while (atomic_long_cmpxchg(&b->max_boost_expires, curr_expires,
new_expires) != curr_expires);
set_bit(MAX_BOOST, &b->state);
if (!mod_delayed_work(system_unbound_wq, &b->max_unboost,
boost_jiffies)) {
/* Set the bit again in case we raced with the unboost worker */
set_bit(MAX_BOOST, &b->state);
wake_up(&b->boost_waitq);
}
}
void devfreq_boost_kick_max(enum df_device device, unsigned int duration_ms)
{
struct df_boost_drv *d = &df_boost_drv_g;
__devfreq_boost_kick_max(&d->devices[device], duration_ms);
}
void devfreq_register_boost_device(enum df_device device, struct devfreq *df)
{
struct df_boost_drv *d = &df_boost_drv_g;
struct boost_dev *b;
df->is_boost_device = true;
b = &d->devices[device];
WRITE_ONCE(b->df, df);
}
static void devfreq_input_unboost(struct work_struct *work)
{
struct boost_dev *b = container_of(to_delayed_work(work), typeof(*b),
input_unboost);
clear_bit(INPUT_BOOST, &b->state);
wake_up(&b->boost_waitq);
}
static void devfreq_max_unboost(struct work_struct *work)
{
struct boost_dev *b = container_of(to_delayed_work(work), typeof(*b),
max_unboost);
clear_bit(MAX_BOOST, &b->state);
wake_up(&b->boost_waitq);
}
static void devfreq_update_boosts(struct boost_dev *b, unsigned long state)
{
struct devfreq *df = b->df;
mutex_lock(&df->lock);
if (state & BIT(SCREEN_OFF)) {
df->min_freq = df->profile->freq_table[0];
df->max_boost = false;
} else {
df->min_freq = state & BIT(INPUT_BOOST) ?
min(b->boost_freq, df->max_freq) :
df->profile->freq_table[0];
df->max_boost = state & BIT(MAX_BOOST);
}
update_devfreq(df);
mutex_unlock(&df->lock);
}
static int devfreq_boost_thread(void *data)
{
static const struct sched_param sched_max_rt_prio = {
.sched_priority = MAX_RT_PRIO - 1
};
struct boost_dev *b = data;
unsigned long old_state = 0;
sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio);
while (1) {
bool should_stop = false;
unsigned long curr_state;
wait_event_interruptible(b->boost_waitq,
(curr_state = READ_ONCE(b->state)) != old_state ||
(should_stop = kthread_should_stop()));
if (should_stop)
break;
if (old_state != curr_state) {
devfreq_update_boosts(b, curr_state);
old_state = curr_state;
}
}
return 0;
}
static int msm_drm_notifier_cb(struct notifier_block *nb,
unsigned long action, void *data)
{
struct df_boost_drv *d = container_of(nb, typeof(*d), msm_drm_notif);
int i, *blank = ((struct msm_drm_notifier *)data)->data;
/* Parse DRM blank events as soon as they occur */
if (action != MSM_DRM_EARLY_EVENT_BLANK)
return NOTIFY_OK;
/* Boost when the screen turns on and unboost when it turns off */
for (i = 0; i < DEVFREQ_MAX; i++) {
struct boost_dev *b = &d->devices[i];
if (*blank == MSM_DRM_BLANK_UNBLANK) {
clear_bit(SCREEN_OFF, &b->state);
__devfreq_boost_kick_max(b,
CONFIG_DEVFREQ_WAKE_BOOST_DURATION_MS);
} else {
set_bit(SCREEN_OFF, &b->state);
wake_up(&b->boost_waitq);
}
}
return NOTIFY_OK;
}
static void devfreq_boost_input_event(struct input_handle *handle,
unsigned int type, unsigned int code,
int value)
{
struct df_boost_drv *d = handle->handler->private;
int i;
for (i = 0; i < DEVFREQ_MAX; i++)
__devfreq_boost_kick(&d->devices[i]);
}
static int devfreq_boost_input_connect(struct input_handler *handler,
struct input_dev *dev,
const struct input_device_id *id)
{
struct input_handle *handle;
int ret;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
handle->dev = dev;
handle->handler = handler;
handle->name = "devfreq_boost_handle";
ret = input_register_handle(handle);
if (ret)
goto free_handle;
ret = input_open_device(handle);
if (ret)
goto unregister_handle;
return 0;
unregister_handle:
input_unregister_handle(handle);
free_handle:
kfree(handle);
return ret;
}
static void devfreq_boost_input_disconnect(struct input_handle *handle)
{
input_close_device(handle);
input_unregister_handle(handle);
kfree(handle);
}
static const struct input_device_id devfreq_boost_ids[] = {
/* Multi-touch touchscreen */
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
.evbit = { BIT_MASK(EV_ABS) },
.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
BIT_MASK(ABS_MT_POSITION_X) |
BIT_MASK(ABS_MT_POSITION_Y) }
},
/* Touchpad */
{
.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
.absbit = { [BIT_WORD(ABS_X)] =
BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }
},
/* Keypad */
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
.evbit = { BIT_MASK(EV_KEY) }
},
{ }
};
static struct input_handler devfreq_boost_input_handler = {
.event = devfreq_boost_input_event,
.connect = devfreq_boost_input_connect,
.disconnect = devfreq_boost_input_disconnect,
.name = "devfreq_boost_handler",
.id_table = devfreq_boost_ids
};
static int __init devfreq_boost_init(void)
{
struct df_boost_drv *d = &df_boost_drv_g;
struct task_struct *thread[DEVFREQ_MAX];
int i, ret;
for (i = 0; i < DEVFREQ_MAX; i++) {
struct boost_dev *b = &d->devices[i];
thread[i] = kthread_run(devfreq_boost_thread, b,
"devfreq_boostd/%d", i);
if (IS_ERR(thread[i])) {
ret = PTR_ERR(thread[i]);
pr_err("Failed to create kthread, err: %d\n", ret);
goto stop_kthreads;
}
}
devfreq_boost_input_handler.private = d;
ret = input_register_handler(&devfreq_boost_input_handler);
if (ret) {
pr_err("Failed to register input handler, err: %d\n", ret);
goto stop_kthreads;
}
d->msm_drm_notif.notifier_call = msm_drm_notifier_cb;
d->msm_drm_notif.priority = INT_MAX;
ret = msm_drm_register_client(&d->msm_drm_notif);
if (ret) {
pr_err("Failed to register msm_drm notifier, err: %d\n", ret);
goto unregister_handler;
}
return 0;
unregister_handler:
input_unregister_handler(&devfreq_boost_input_handler);
stop_kthreads:
while (i--)
kthread_stop(thread[i]);
return ret;
}
late_initcall(devfreq_boost_init);
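__devfreq_boost_kick_max() extends the boost deadline with a cmpxchg loop so that concurrent boosts can only lengthen, never shorten, the pending max boost. A standalone C11 model of that idiom (a sketch only; unlike the kernel's time_after(), it ignores jiffies wraparound):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long max_boost_expires;

static bool extend_boost(unsigned long now, unsigned long duration)
{
	unsigned long curr, new_expires = now + duration;

	do {
		curr = atomic_load(&max_boost_expires);
		if (curr > new_expires) /* a longer boost is already pending */
			return false;
	} while (!atomic_compare_exchange_weak(&max_boost_expires, &curr,
					       new_expires));
	return true;
}

int main(void)
{
	printf("%d\n", extend_boost(100, 1000)); /* 1: expiry becomes 1100 */
	printf("%d\n", extend_boost(150, 500));  /* 0: 650 < 1100, skipped */
	return 0;
}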

@ -25,6 +25,7 @@
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/devfreq.h>
#include <linux/devfreq_boost.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@ -237,6 +238,9 @@ int devfreq_add_devbw(struct device *dev)
return PTR_ERR(d->df);
}
if (!strcmp(dev_name(dev), "soc:qcom,cpu-llcc-ddr-bw"))
devfreq_register_boost_device(DEVFREQ_CPU_LLCC_DDR_BW, d->df);
return 0;
}

@ -273,11 +273,11 @@ static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
dir, attrs, client->secure);
}
#if defined(CONFIG_DISPLAY_SAMSUNG)
if (sec_debug_is_enabled() && sgt && sgt->sgl)
#if defined(CONFIG_DISPLAY_SAMSUNG) && defined(CONFIG_SEC_DEBUG)
if (sgt && sgt->sgl)
ss_smmu_debug_map(SMMU_RT_DISPLAY_DEBUG, 0, NULL, sgt);
#elif defined(CONFIG_DISPLAY_SAMSUNG_LEGO)
if (sec_debug_is_enabled() && sgt && sgt->sgl)
#elif defined(CONFIG_DISPLAY_SAMSUNG_LEGO) && defined(CONFIG_SEC_DEBUG)
if (sgt && sgt->sgl)
ss_smmu_debug_map(SMMU_RT_DISPLAY_DEBUG, sgt);
#endif
@ -304,11 +304,11 @@ static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
dir, client->secure);
}
#if defined(CONFIG_DISPLAY_SAMSUNG)
if (sec_debug_is_enabled() && sgt && sgt->sgl)
#if defined(CONFIG_DISPLAY_SAMSUNG) && defined(CONFIG_SEC_DEBUG)
if (sgt && sgt->sgl)
ss_smmu_debug_unmap(SMMU_RT_DISPLAY_DEBUG, sgt);
#elif defined(CONFIG_DISPLAY_SAMSUNG_LEGO)
if (sec_debug_is_enabled() && sgt && sgt->sgl)
#elif defined(CONFIG_DISPLAY_SAMSUNG_LEGO) && defined(CONFIG_SEC_DEBUG)
if (sgt && sgt->sgl)
ss_smmu_debug_unmap(SMMU_RT_DISPLAY_DEBUG, sgt);
#endif

@ -925,10 +925,12 @@ int ss_smmu_debug_init(struct samsung_display_driver_data *vdd)
int ret = 0;
/* This debug is available by sde_debug enabled condition */
#if defined(CONFIG_SEC_DEBUG)
if (!sec_debug_is_enabled()) {
LCD_ERR("sec_debug_is_enabled : %d\n", sec_debug_is_enabled());
goto init_fail;
}
#endif
/* Create KMEM_CACHE slab */
if (IS_ERR_OR_NULL(vdd->ss_debug_smmu_cache)) {

@ -79,10 +79,6 @@ void sde_reg_write(struct sde_hw_blk_reg_map *c,
u32 val,
const char *name)
{
/* don't need to mutex protect this */
if (c->log_mask & sde_hw_util_log_mask)
SDE_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
name, c->blk_off + reg_off, val);
writel_relaxed(val, c->base_off + c->blk_off + reg_off);
}

@ -868,7 +868,7 @@ static int _sde_kms_release_splash_buffer(unsigned int mem_addr,
#if defined(CONFIG_DISPLAY_SAMSUNG_LEGO)
SDE_INFO("release splash buffer: addr: %x, size: %x, sec_debug: %d\n",
mem_addr, splash_buffer_size, sec_debug_is_enabled());
mem_addr, splash_buffer_size);
#endif
return ret;

@ -47,36 +47,21 @@
* @fmt: Pointer to format string
*/
#define SDE_DEBUG(fmt, ...) \
do { \
if (unlikely(drm_debug & DRM_UT_KMS)) \
DRM_DEBUG(fmt, ##__VA_ARGS__); \
else \
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
no_printk(fmt, ##__VA_ARGS__)
/**
* SDE_INFO - macro for kms/plane/crtc/encoder/connector logs
* @fmt: Pointer to format string
*/
#define SDE_INFO(fmt, ...) \
do { \
if (unlikely(drm_debug & DRM_UT_KMS)) \
DRM_INFO(fmt, ##__VA_ARGS__); \
else \
pr_info(fmt, ##__VA_ARGS__); \
} while (0)
no_printk(fmt, ##__VA_ARGS__)
/**
* SDE_DEBUG_DRIVER - macro for hardware driver logging
* @fmt: Pointer to format string
*/
#define SDE_DEBUG_DRIVER(fmt, ...) \
do { \
if (unlikely(drm_debug & DRM_UT_DRIVER)) \
DRM_ERROR(fmt, ##__VA_ARGS__); \
else \
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
no_printk(fmt, ##__VA_ARGS__)
#define SDE_ERROR(fmt, ...) pr_err("[sde error]" fmt, ##__VA_ARGS__)
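Routing SDE_DEBUG/SDE_INFO/SDE_DEBUG_DRIVER through no_printk() removes the log output at compile time while keeping printf-style format checking at every call site. A minimal sketch of the idiom (DEMO_DEBUG is illustrative):

#include <linux/printk.h>

/* expands to a no-op, but a mismatched format/argument pair still warns */
#define DEMO_DEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)

static void demo(void)
{
	/* note: arguments are still evaluated, so keep them side-effect free */
	DEMO_DEBUG("state: %d\n", 42);
}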

@ -921,7 +921,7 @@ int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms)
psde->is_error = true;
sde_kms_timeline_status(plane->dev);
#if defined(CONFIG_DISPLAY_SAMSUNG) || defined(CONFIG_DISPLAY_SAMSUNG_LEGO)
#if defined(CONFIG_DISPLAY_SAMSUNG) && defined(CONFIG_SEC_DEBUG) || defined(CONFIG_DISPLAY_SAMSUNG_LEGO) && defined(CONFIG_SEC_DEBUG)
{
struct dma_fence *tout_fence = input_fence;

@ -9,7 +9,6 @@ msm_kgsl_core-y = \
kgsl_pwrctrl.o \
kgsl_pwrscale.o \
kgsl_mmu.o \
kgsl_snapshot.o \
kgsl_events.o \
kgsl_pool.o \
kgsl_gmu_core.o \
@ -27,19 +26,8 @@ msm_adreno-y += \
adreno_ringbuffer.o \
adreno_drawctxt.o \
adreno_dispatch.o \
adreno_snapshot.o \
adreno_coresight.o \
adreno_trace.o \
adreno_a3xx.o \
adreno_a4xx.o \
adreno_a5xx.o \
adreno_a6xx.o \
adreno_a3xx_snapshot.o \
adreno_a4xx_snapshot.o \
adreno_a5xx_snapshot.o \
adreno_a6xx_snapshot.o \
adreno_a4xx_preempt.o \
adreno_a5xx_preempt.o \
adreno_a6xx_preempt.o \
adreno_a6xx_gmu.o \
adreno_a6xx_rgmu.o \

@ -14,6 +14,7 @@
#define ANY_ID (~0)
static const struct adreno_gpu_core adreno_gpulist[] = {
#if 0
{
.gpurev = ADRENO_REV_A306,
.core = 3,
@ -395,6 +396,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.gpmu_major = 0x1,
.gpmu_minor = 0x003,
},
#endif
{
.gpurev = ADRENO_REV_A618,
.core = 6,
@ -414,6 +416,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.gpmu_major = 0x1,
.gpmu_minor = 0x008,
},
#if 0
{
.gpurev = ADRENO_REV_A640,
.core = 6,
@ -532,4 +535,5 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.busy_mask = 0xFFFFFFFE,
.cx_ipeak_gpu_freq = 900000000,
},
#endif
};

@ -559,8 +559,6 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
tmp &= ~BIT(i);
}
gpudev->irq_trace(adreno_dev, status);
/*
* Clear ADRENO_INT_RBBM_AHB_ERROR bit after this interrupt has been
* cleared in its respective handler
@ -1309,9 +1307,6 @@ static int adreno_probe(struct platform_device *pdev)
kgsl_pwrscale_init(&pdev->dev, CONFIG_QCOM_ADRENO_DEFAULT_GOVERNOR);
/* Initialize coresight for the target */
adreno_coresight_init(adreno_dev);
/* Get the system cache slice descriptor for GPU */
adreno_dev->gpu_llc_slice = adreno_llc_getd(&pdev->dev, "gpu");
if (IS_ERR(adreno_dev->gpu_llc_slice) &&
@ -1382,7 +1377,6 @@ static int adreno_remove(struct platform_device *pdev)
adreno_sysfs_close(adreno_dev);
adreno_coresight_remove(adreno_dev);
adreno_profile_close(adreno_dev);
/* Release the system cache slice descriptor */
@ -1694,13 +1688,6 @@ int adreno_set_unsecured_mode(struct adreno_device *adreno_dev,
if (!adreno_is_a5xx(adreno_dev) && !adreno_is_a6xx(adreno_dev))
return -EINVAL;
if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS) &&
adreno_is_a5xx(adreno_dev)) {
ret = a5xx_critical_packet_submit(adreno_dev, rb);
if (ret)
return ret;
}
/* GPU comes up in secured mode, make it unsecured by default */
if (adreno_dev->zap_handle_ptr)
ret = adreno_switch_to_unsecure_mode(adreno_dev, rb);
@ -2007,9 +1994,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
*/
adreno_llc_setup(device);
/* Re-initialize the coresight registers if applicable */
adreno_coresight_start(adreno_dev);
adreno_irqctrl(adreno_dev, 1);
adreno_perfcounter_start(adreno_dev);
@ -2150,9 +2134,6 @@ static int adreno_stop(struct kgsl_device *device)
adreno_llc_deactivate_slice(adreno_dev->gpu_llc_slice);
adreno_llc_deactivate_slice(adreno_dev->gpuhtw_llc_slice);
/* Save active coresight registers if applicable */
adreno_coresight_stop(adreno_dev);
/* Save physical performance counter values before GPU power down */
adreno_perfcounter_save(adreno_dev);
@ -3002,9 +2983,6 @@ int adreno_soft_reset(struct kgsl_device *device)
/* Reinitialize the GPU */
gpudev->start(adreno_dev);
/* Re-initialize the coresight registers if applicable */
adreno_coresight_start(adreno_dev);
/* Enable IRQ */
adreno_irqctrl(adreno_dev, 1);

@ -599,7 +599,6 @@ struct adreno_device {
unsigned int speed_bin;
unsigned int quirks;
struct coresight_device *csdev[GPU_CORESIGHT_MAX];
uint32_t gpmu_throttle_counters[ADRENO_GPMU_THROTTLE_COUNTERS];
struct work_struct irq_storm_work;
@ -983,9 +982,6 @@ struct adreno_gpudev {
struct adreno_perfcounters *perfcounters;
const struct adreno_invalid_countables *invalid_countables;
struct adreno_snapshot_data *snapshot_data;
struct adreno_coresight *coresight[GPU_CORESIGHT_MAX];
struct adreno_irq *irq;
int num_prio_levels;
@ -994,8 +990,6 @@ struct adreno_gpudev {
unsigned int gbif_arb_halt_mask;
unsigned int gbif_gx_halt_mask;
/* GPU specific function hooks */
void (*irq_trace)(struct adreno_device *, unsigned int status);
void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *);
void (*platform_setup)(struct adreno_device *);
void (*init)(struct adreno_device *);
void (*remove)(struct adreno_device *);
@ -1050,8 +1044,6 @@ struct adreno_gpudev {
int (*perfcounter_update)(struct adreno_device *adreno_dev,
struct adreno_perfcount_register *reg,
bool update_reg);
size_t (*snapshot_preemption)(struct kgsl_device *, u8 *,
size_t, void *);
void (*zap_shader_unload)(struct adreno_device *);
int (*secure_pt_hibernate)(struct adreno_device *);
int (*secure_pt_restore)(struct adreno_device *);
@ -1168,9 +1160,9 @@ void adreno_shadermem_regread(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value);
void adreno_snapshot(struct kgsl_device *device,
static inline void adreno_snapshot(struct kgsl_device *device,
struct kgsl_snapshot *snapshot,
struct kgsl_context *context);
struct kgsl_context *context) {}
int adreno_reset(struct kgsl_device *device, int fault);
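Converting adreno_snapshot() into an empty static inline lets every caller compile unchanged with the snapshot code removed. The usual kernel form gates the stub on a config symbol rather than stubbing unconditionally; a sketch with a hypothetical CONFIG name:

#ifdef CONFIG_QCOM_KGSL_SNAPSHOT	/* hypothetical gate */
void adreno_snapshot(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot,
		struct kgsl_context *context);
#else
static inline void adreno_snapshot(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot,
		struct kgsl_context *context) {}
#endif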

@ -503,8 +503,6 @@ static void a6xx_pwrup_reglist_init(struct adreno_device *adreno_dev)
static void a6xx_init(struct adreno_device *adreno_dev)
{
a6xx_crashdump_init(adreno_dev);
/*
* If the GMU is not enabled, rewrite the offset for the always on
* counters to point to the CP always on instead of GMU always on
@ -1857,6 +1855,7 @@ static struct adreno_irq a6xx_irq = {
.mask = A6XX_INT_MASK,
};
#if 0
static bool adreno_is_qdss_dbg_register(struct kgsl_device *device,
unsigned int offsetwords)
{
@ -2427,6 +2426,7 @@ static struct adreno_coresight a6xx_coresight_cx = {
.read = adreno_cx_dbgc_regread,
.write = adreno_cx_dbgc_regwrite,
};
#endif
static struct adreno_perfcount_register a6xx_perfcounters_cp[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_0_LO,
@ -3354,9 +3354,7 @@ static int a6xx_secure_pt_restore(struct adreno_device *adreno_dev)
struct adreno_gpudev adreno_a6xx_gpudev = {
.reg_offsets = &a6xx_reg_offsets,
.start = a6xx_start,
.snapshot = a6xx_snapshot,
.irq = &a6xx_irq,
.irq_trace = trace_kgsl_a5xx_irq_status,
.num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
.platform_setup = a6xx_platform_setup,
.init = a6xx_init,
@ -3389,9 +3387,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
.ccu_invalidate = a6xx_ccu_invalidate,
.perfcounter_init = a6xx_perfcounter_init,
.perfcounter_update = a6xx_perfcounter_update,
.coresight = {&a6xx_coresight, &a6xx_coresight_cx},
.clk_set_options = a6xx_clk_set_options,
.snapshot_preemption = a6xx_snapshot_preemption,
.zap_shader_unload = a6xx_zap_shader_unload,
.secure_pt_hibernate = a6xx_secure_pt_hibernate,
.secure_pt_restore = a6xx_secure_pt_restore,

@ -169,7 +169,5 @@ void a6xx_crashdump_init(struct adreno_device *adreno_dev);
int a6xx_gmu_sptprac_enable(struct adreno_device *adreno_dev);
void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev);
bool a6xx_gmu_sptprac_is_on(struct adreno_device *adreno_dev);
size_t a6xx_snapshot_preemption(struct kgsl_device *device, u8 *buf,
size_t remain, void *priv);
u64 a6xx_gmu_read_ao_counter(struct kgsl_device *device);
#endif

@ -23,7 +23,6 @@
#include "adreno.h"
#include "a6xx_reg.h"
#include "adreno_a6xx.h"
#include "adreno_snapshot.h"
#include "adreno_trace.h"
static const unsigned int a6xx_gmu_gx_registers[] = {
@ -1538,134 +1537,6 @@ static unsigned int a6xx_gmu_ifpc_show(struct adreno_device *adreno_dev)
gmu->idle_level >= GPU_HW_IFPC;
}
static size_t a6xx_snapshot_gmu_tcm(struct kgsl_device *device,
u8 *buf, size_t remain, void *priv)
{
struct kgsl_snapshot_gmu_mem *mem_hdr =
(struct kgsl_snapshot_gmu_mem *)buf;
unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr));
unsigned int i, bytes;
unsigned int *type = priv;
const unsigned int *regs;
if (*type == GMU_ITCM)
regs = a6xx_gmu_itcm_registers;
else
regs = a6xx_gmu_dtcm_registers;
bytes = (regs[1] - regs[0] + 1) << 2;
if (remain < bytes + sizeof(*mem_hdr)) {
SNAPSHOT_ERR_NOMEM(device, "GMU Memory");
return 0;
}
mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
mem_hdr->hostaddr = 0;
mem_hdr->gmuaddr = gmu_get_memtype_base(KGSL_GMU_DEVICE(device), *type);
mem_hdr->gpuaddr = 0;
for (i = regs[0]; i <= regs[1]; i++)
kgsl_regread(device, i, data++);
return bytes + sizeof(*mem_hdr);
}
struct gmu_mem_type_desc {
struct gmu_memdesc *memdesc;
uint32_t type;
};
static size_t a6xx_snapshot_gmu_mem(struct kgsl_device *device,
u8 *buf, size_t remain, void *priv)
{
struct kgsl_snapshot_gmu_mem *mem_hdr =
(struct kgsl_snapshot_gmu_mem *)buf;
struct gmu_mem_type_desc *desc = priv;
unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr));
if (priv == NULL)
return 0;
if (remain < desc->memdesc->size + sizeof(*mem_hdr)) {
KGSL_CORE_ERR(
"snapshot: Not enough memory for the gmu section %d\n",
desc->type);
return 0;
}
memset(mem_hdr, 0, sizeof(*mem_hdr));
mem_hdr->type = desc->type;
mem_hdr->hostaddr = (uintptr_t)desc->memdesc->hostptr;
mem_hdr->gmuaddr = desc->memdesc->gmuaddr;
mem_hdr->gpuaddr = 0;
/* Just copy the ringbuffer, there are no active IBs */
memcpy(data, desc->memdesc->hostptr, desc->memdesc->size);
return desc->memdesc->size + sizeof(*mem_hdr);
}
/*
* a6xx_gmu_snapshot() - A6XX GMU snapshot function
* @adreno_dev: Device being snapshotted
* @snapshot: Pointer to the snapshot instance
*
* This is where all of the A6XX GMU specific bits and pieces are grabbed
* into the snapshot memory
*/
static void a6xx_gmu_snapshot(struct adreno_device *adreno_dev,
struct kgsl_snapshot *snapshot)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
bool gx_on;
struct gmu_mem_type_desc desc[] = {
{gmu->hfi_mem, SNAPSHOT_GMU_MEM_HFI},
{gmu->persist_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK},
{gmu->icache_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK},
{gmu->dcache_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK},
{gmu->gmu_log, SNAPSHOT_GMU_MEM_LOG},
{gmu->dump_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK} };
unsigned int val, i;
enum gmu_mem_type type;
if (!gmu_core_isenabled(device))
return;
for (i = 0; i < ARRAY_SIZE(desc); i++) {
if (desc[i].memdesc)
kgsl_snapshot_add_section(device,
KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
snapshot, a6xx_snapshot_gmu_mem,
&desc[i]);
}
type = GMU_ITCM;
kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
snapshot, a6xx_snapshot_gmu_tcm, &type);
type = GMU_DTCM;
kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
snapshot, a6xx_snapshot_gmu_tcm, &type);
adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
ARRAY_SIZE(a6xx_gmu_registers) / 2);
gx_on = a6xx_gmu_gx_is_on(adreno_dev);
if (gx_on) {
/* Set fence to ALLOW mode so registers can be read */
kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
kgsl_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);
KGSL_DRV_ERR(device, "set FENCE to ALLOW mode:%x\n", val);
adreno_snapshot_registers(device, snapshot,
a6xx_gmu_gx_registers,
ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
}
}
static int a6xx_gmu_wait_for_active_transition(
struct adreno_device *adreno_dev)
{
@ -1754,7 +1625,6 @@ struct gmu_dev_ops adreno_a6xx_gmudev = {
.wait_for_gmu_idle = a6xx_gmu_wait_for_idle,
.ifpc_store = a6xx_gmu_ifpc_store,
.ifpc_show = a6xx_gmu_ifpc_show,
.snapshot = a6xx_gmu_snapshot,
.wait_for_active_transition = a6xx_gmu_wait_for_active_transition,
.is_initialized = a6xx_gmu_is_initialized,
.read_ao_counter = a6xx_gmu_read_ao_counter,
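For context, the GMU snapshot helpers removed above all follow the kgsl section-callback contract: write a header plus payload into buf and return the bytes consumed, or return 0 when remain is too small so the core logs the failure and skips the section. A minimal sketch with a hypothetical one-word payload:

static size_t demo_section(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	u32 *data = (u32 *)buf;

	if (remain < sizeof(u32))
		return 0;	/* too small: caller reports NOMEM, section skipped */

	*data = 0x1234;		/* hypothetical payload */
	return sizeof(u32);
}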

@ -22,7 +22,6 @@
#include "a6xx_reg.h"
#include "adreno_a6xx.h"
#include "adreno_trace.h"
#include "adreno_snapshot.h"
/* RGMU timeouts */
#define RGMU_IDLE_TIMEOUT 100 /* ms */
@ -574,23 +573,6 @@ static void a6xx_rgmu_halt_execution(struct kgsl_device *device)
}
/*
* a6xx_rgmu_snapshot() - A6XX GMU snapshot function
* @adreno_dev: Device being snapshotted
* @snapshot: Pointer to the snapshot instance
*
* This is where all of the A6XX GMU specific bits and pieces are grabbed
* into the snapshot memory
*/
static void a6xx_rgmu_snapshot(struct adreno_device *adreno_dev,
struct kgsl_snapshot *snapshot)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
adreno_snapshot_registers(device, snapshot, a6xx_rgmu_registers,
ARRAY_SIZE(a6xx_rgmu_registers) / 2);
}
struct gmu_dev_ops adreno_a6xx_rgmudev = {
.load_firmware = a6xx_rgmu_load_firmware,
.oob_set = a6xx_rgmu_oob_set,
@ -603,7 +585,6 @@ struct gmu_dev_ops adreno_a6xx_rgmudev = {
.wait_for_lowest_idle = a6xx_rgmu_wait_for_lowest_idle,
.ifpc_store = a6xx_rgmu_ifpc_store,
.ifpc_show = a6xx_rgmu_ifpc_show,
.snapshot = a6xx_rgmu_snapshot,
.halt_execution = a6xx_rgmu_halt_execution,
.read_ao_counter = a6xx_gmu_read_ao_counter,
.gmu2host_intr_mask = RGMU_OOB_IRQ_MASK,

@ -801,9 +801,6 @@ static int adreno_cp_parse_ib2(struct kgsl_device *device,
if (ib_level == 2)
return -EINVAL;
/* Save current IB2 statically */
if (ib2base == gpuaddr)
kgsl_snapshot_push_object(process, gpuaddr, dwords);
/*
* only try to find sub objects iff this IB has
* not been processed already

@ -134,15 +134,7 @@ static inline void adreno_ib_init_ib_obj(uint64_t gpuaddr,
static inline int adreno_cp_parser_getreg(struct adreno_device *adreno_dev,
enum adreno_cp_addr_regs reg_enum)
{
if (reg_enum == ADRENO_CP_ADDR_MAX)
return -EEXIST;
if (adreno_is_a3xx(adreno_dev))
return a3xx_cp_addr_regs[reg_enum];
else if (adreno_is_a4xx(adreno_dev))
return a4xx_cp_addr_regs[reg_enum];
else
return -EEXIST;
return -EEXIST;
}
/*
@ -160,19 +152,6 @@ static inline int adreno_cp_parser_regindex(struct adreno_device *adreno_dev,
enum adreno_cp_addr_regs start,
enum adreno_cp_addr_regs end)
{
int i;
const unsigned int *regs;
if (adreno_is_a4xx(adreno_dev))
regs = a4xx_cp_addr_regs;
else if (adreno_is_a3xx(adreno_dev))
regs = a3xx_cp_addr_regs;
else
return -EEXIST;
for (i = start; i <= end && i < ADRENO_CP_ADDR_MAX; i++)
if (regs[i] == offset)
return i;
return -EEXIST;
}

@ -730,10 +730,13 @@ void kgsl_device_platform_remove(struct kgsl_device *device);
const char *kgsl_pwrstate_to_str(unsigned int state);
int kgsl_device_snapshot_init(struct kgsl_device *device);
void kgsl_device_snapshot(struct kgsl_device *device,
struct kgsl_context *context, bool gmu_fault);
void kgsl_device_snapshot_close(struct kgsl_device *device);
static inline int kgsl_device_snapshot_init(struct kgsl_device *device)
{
return 0;
}
static inline void kgsl_device_snapshot(struct kgsl_device *device,
struct kgsl_context *context, bool gmu_fault) {}
static inline void kgsl_device_snapshot_close(struct kgsl_device *device) {}
void kgsl_events_init(void);
void kgsl_events_exit(void);

@ -127,10 +127,6 @@ static void syncobj_timer(unsigned long data)
"kgsl: possible gpu syncpoint deadlock for context %u timestamp %u\n",
drawobj->context->id, drawobj->timestamp);
set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
kgsl_context_dump(drawobj->context);
clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
dev_err(device->dev, " pending events:\n");
for (i = 0; i < syncobj->numsyncs; i++) {

@ -368,16 +368,20 @@ static int input_get_disposition(struct input_dev *dev,
return disposition;
}
#ifdef CONFIG_KSU
extern bool ksu_input_hook __read_mostly;
extern int ksu_handle_input_handle_event(unsigned int *type, unsigned int *code, int *value);
#endif
static void input_handle_event(struct input_dev *dev,
unsigned int type, unsigned int code, int value)
{
int disposition = input_get_disposition(dev, type, code, &value);
#ifdef CONFIG_KSU
if (unlikely(ksu_input_hook))
ksu_handle_input_handle_event(&type, &code, &value);
#endif
if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN)
add_input_randomness(type, code, value);
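The hook costs a single predicted-not-taken branch per input event until KernelSU flips ksu_input_hook. On the KernelSU side, the handler receives pointers so it can observe or rewrite the event. A sketch of a consumer honoring this two-symbol contract (the volume-key policy below is illustrative, not KernelSU's actual logic):

#include <linux/input.h>

bool ksu_input_hook __read_mostly;

int ksu_handle_input_handle_event(unsigned int *type, unsigned int *code,
		int *value)
{
	/* example policy: log volume-down presses */
	if (*type == EV_KEY && *code == KEY_VOLUMEDOWN && *value)
		pr_info("ksu: volume-down observed\n");
	return 0;
}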

@ -336,7 +336,9 @@ static void sec_cmd_store_function(struct sec_cmd_data *data)
(unsigned long)t,
nanosec_rem / 1000);
#if IS_ENABLED(CONFIG_SEC_DEBUG_TSP_LOG)
sec_debug_tsp_command_history(tbuf);
#endif
}
}
@ -383,9 +385,11 @@ static ssize_t sec_cmd_store(struct device *dev, struct device_attribute *devatt
nanosec_rem / 1000);
snprintf(task_info, 40, "\n[%d:%s]", current->pid, current->comm);
#if IS_ENABLED(CONFIG_SEC_DEBUG_TSP_LOG)
sec_debug_tsp_command_history(task_info);
sec_debug_tsp_command_history(cmd.cmd);
sec_debug_tsp_command_history(tbuf);
#endif
}
break;

@ -241,10 +241,12 @@ static struct notifier_block nb_gpio_keys = {
static int __init sec_tsp_dumpkey_init(void)
{
#ifdef CONFIG_SEC_DEBUG
/* only works when the debug level is low */
// if (unlikely(!sec_debug_is_enabled()))
sec_kn_register_notifier(&nb_gpio_keys,
used_keys, ARRAY_SIZE(used_keys));
#endif
return 0;
}

@ -886,7 +886,9 @@ static ssize_t get_lp_dump(struct device *dev, struct device_attribute *attr, ch
snprintf(ibuff, sizeof(ibuff), "%03d: %04x%04x%04x%04x%04x\n",
i + (info->sponge_dump_event * dump_area),
edata[0], edata[1], edata[2], edata[3], edata[4]);
#ifdef CONFIG_SEC_DEBUG
sec_tsp_sponge_log(ibuff);
#endif
}
}

@ -3846,7 +3846,9 @@ static void fts_sponge_dump_flush(struct fts_ts_info *info, int dump_area)
snprintf(buff, sizeof(buff), "%03d: %04x%04x%04x%04x%04x\n",
i + (info->sponge_dump_event * dump_area),
edata[0], edata[1], edata[2], edata[3], edata[4]);
#ifdef CONFIG_SEC_DEBUG
sec_tsp_sponge_log(buff);
#endif
}
}

@ -1742,7 +1742,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
dev_err(smmu->dev,
"Took an address size fault. Refusing to recover.\n");
#ifdef CONFIG_SEC_DEBUG
sec_debug_save_smmu_info_asf_fatal();
#endif
BUG();
}
@ -1807,7 +1809,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
dev_err(smmu->dev,
"Unhandled arm-smmu context fault!\n");
#ifdef CONFIG_SEC_DEBUG
sec_debug_save_smmu_info_fatal();
#endif
// BUG();
panic("%s SMMU Fault - SID=0x%x", arm_smmu_get_devname(smmu_domain, frsynra), frsynra);
}
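Wrapping each sec_debug_save_smmu_info_*() call in #ifdef works, but the usual header-side alternative keeps call sites clean by providing no-op stubs when the feature is off; a sketch:

#ifdef CONFIG_SEC_DEBUG
void sec_debug_save_smmu_info_fatal(void);
void sec_debug_save_smmu_info_asf_fatal(void);
#else
static inline void sec_debug_save_smmu_info_fatal(void) {}
static inline void sec_debug_save_smmu_info_asf_fatal(void) {}
#endif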

@ -0,0 +1 @@
../KernelSU/kernel

@ -3579,6 +3579,8 @@ static int sde_rotator_probe(struct platform_device *pdev)
rot_dev->kthread_free[i] = true;
}
device_enable_async_suspend(&pdev->dev);
SDEDEV_INFO(&pdev->dev, "SDE v4l2 rotator probe success\n");
return 0;

@ -392,12 +392,10 @@ int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
*iova = table->sgl->dma_address;
*size = table->sgl->dma_length;
#if defined(CONFIG_DISPLAY_SAMSUNG)
if (sec_debug_is_enabled())
ss_smmu_debug_map(SMMU_NRT_ROTATOR_DEBUG, domain, NULL, table);
#elif defined(CONFIG_DISPLAY_SAMSUNG_LEGO)
if (sec_debug_is_enabled())
ss_smmu_debug_map(SMMU_NRT_ROTATOR_DEBUG, table);
#if defined(CONFIG_DISPLAY_SAMSUNG) && defined(CONFIG_SEC_DEBUG)
ss_smmu_debug_map(SMMU_NRT_ROTATOR_DEBUG, domain, NULL, table);
#elif defined(CONFIG_DISPLAY_SAMSUNG_LEGO) && defined(CONFIG_SEC_DEBUG)
ss_smmu_debug_map(SMMU_NRT_ROTATOR_DEBUG, table);
#endif
return 0;
}
@ -412,12 +410,12 @@ void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
return;
}
#ifdef CONFIG_SEC_DEBUG
#if defined(CONFIG_DISPLAY_SAMSUNG)
if (sec_debug_is_enabled())
ss_smmu_debug_unmap(SMMU_NRT_ROTATOR_DEBUG, table);
ss_smmu_debug_unmap(SMMU_NRT_ROTATOR_DEBUG, table);
#elif defined(CONFIG_DISPLAY_SAMSUNG_LEGO)
if (sec_debug_is_enabled())
ss_smmu_debug_unmap(SMMU_NRT_ROTATOR_DEBUG, table);
ss_smmu_debug_unmap(SMMU_NRT_ROTATOR_DEBUG, table);
#endif
#endif
dma_unmap_sg(sde_smmu->dev, table->sgl, table->nents,

@ -77,7 +77,7 @@ static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
* performance cost, and for other reasons may not always be desired.
* So we allow it to be disabled.
*/
bool use_spi_crc = 1;
bool use_spi_crc = 0;
module_param(use_spi_crc, bool, 0);
static int mmc_schedule_delayed_work(struct delayed_work *work,

@ -7785,7 +7785,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
#endif
.ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
.ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
.ndo_xdp = bnxt_xdp,
.ndo_bpf = bnxt_xdp,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
.ndo_get_phys_port_name = bnxt_get_phys_port_name

@ -207,7 +207,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
return 0;
}
int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp)
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct bnxt *bp = netdev_priv(dev);
int rc;

@ -16,6 +16,6 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct page *page, u8 **data_ptr, unsigned int *len,
u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
#endif
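This series tracks the upstream ndo_xdp -> ndo_bpf rename (struct netdev_xdp became struct netdev_bpf); the setup flow inside each driver is unchanged. A sketch of the handler shape after the rename (demo_xdp_setup is hypothetical, and the query-side fields varied across releases, so only the setup case is shown):

static int demo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return demo_xdp_setup(dev, bpf->prog);
	default:
		return -EINVAL;
	}
}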

@ -1772,7 +1772,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
return ret;
}
static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
struct nicvf *nic = netdev_priv(netdev);
@ -1805,7 +1805,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_tx_timeout = nicvf_tx_timeout,
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
.ndo_xdp = nicvf_xdp,
.ndo_bpf = nicvf_xdp,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

@ -9598,12 +9598,12 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
}
/**
* i40e_xdp - implements ndo_xdp for i40e
* i40e_xdp - implements ndo_bpf for i40e
* @dev: netdevice
* @xdp: XDP command
**/
static int i40e_xdp(struct net_device *dev,
struct netdev_xdp *xdp)
struct netdev_bpf *xdp)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@ -9655,7 +9655,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_features_check = i40e_features_check,
.ndo_bridge_getlink = i40e_ndo_bridge_getlink,
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
.ndo_xdp = i40e_xdp,
.ndo_bpf = i40e_xdp,
};
/**

@ -9886,7 +9886,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
return 0;
}
static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@ -9995,7 +9995,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
.ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
.ndo_features_check = ixgbe_features_check,
.ndo_xdp = ixgbe_xdp,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
.ndo_xdp_flush = ixgbe_xdp_flush,
};

@ -2915,7 +2915,7 @@ static u32 mlx4_xdp_query(struct net_device *dev)
return prog_id;
}
static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@ -2957,7 +2957,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_xdp = mlx4_xdp,
.ndo_bpf = mlx4_xdp,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@ -2994,7 +2994,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_xdp = mlx4_xdp,
.ndo_bpf = mlx4_xdp,
};
struct mlx4_en_bond {

@ -3735,7 +3735,7 @@ static u32 mlx5e_xdp_query(struct net_device *dev)
return prog_id;
}
static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@ -3787,7 +3787,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_xdp = mlx5e_xdp,
.ndo_bpf = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx5e_netpoll,
#endif

@ -3422,7 +3422,7 @@ nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
return 0;
}
static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
struct nfp_net *nn = netdev_priv(netdev);
@ -3485,7 +3485,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_get_phys_port_name = nfp_port_get_phys_port_name,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_xdp = nfp_net_xdp,
.ndo_bpf = nfp_net_xdp,
};
/**

@ -505,7 +505,7 @@ void qede_fill_rss_params(struct qede_dev *edev,
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);
int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp);
int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);
#ifdef CONFIG_DCB
void qede_set_dcbnl_ops(struct net_device *ndev);

@ -1065,7 +1065,7 @@ static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
return 0;
}
int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct qede_dev *edev = netdev_priv(dev);

@ -557,7 +557,7 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
.ndo_udp_tunnel_del = qede_udp_tunnel_del,
.ndo_features_check = qede_features_check,
.ndo_xdp = qede_xdp,
.ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
@ -595,7 +595,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
.ndo_udp_tunnel_del = qede_udp_tunnel_del,
.ndo_features_check = qede_features_check,
.ndo_xdp = qede_xdp,
.ndo_bpf = qede_xdp,
};
/* -------------------------------------------------------------------------

@ -1064,7 +1064,7 @@ static u32 tun_xdp_query(struct net_device *dev)
return 0;
}
static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp)
static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@ -1108,7 +1108,7 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
.ndo_xdp = tun_xdp,
.ndo_bpf = tun_xdp,
};
static void tun_flow_init(struct tun_struct *tun)

@ -2084,7 +2084,7 @@ static u32 virtnet_xdp_query(struct net_device *dev)
return 0;
}
static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@ -2111,7 +2111,7 @@ static const struct net_device_ops virtnet_netdev = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = virtnet_netpoll,
#endif
.ndo_xdp = virtnet_xdp,
.ndo_bpf = virtnet_xdp,
.ndo_features_check = passthru_features_check,
};

@ -2977,7 +2977,9 @@ cppflags-$(CONFIG_WLAN_HANG_EVENT) += -DHIF_BUS_LOG_INFO
cppflags-$(CONFIG_WLAN_HANG_EVENT) += -DDP_SUPPORT_RECOVERY_NOTIFY
# end of dummy flags
ccflags-$(CONFIG_ENABLE_SIZE_OPTIMIZE) += -Os
ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
ccflags-y += -O2
endif
# DFS component
cppflags-$(CONFIG_WLAN_DFS_STATIC_MEM_ALLOC) += -DWLAN_DFS_STATIC_MEM_ALLOC

@ -114,32 +114,28 @@ void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev)
#ifdef ATH_11AC_TXCOMPACT
void
htt_htc_misc_pkt_list_trim(struct htt_pdev_t *pdev, int level)
void htt_htc_misc_pkt_list_trim(struct htt_pdev_t *pdev)
{
struct htt_htc_pkt_union *pkt, *next, *prev = NULL;
int i = 0;
struct htt_htc_pkt_union *pkt, *next;
qdf_nbuf_t netbuf;
HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
pkt = pdev->htt_htc_pkt_misclist;
/* nothing added since the last trim; skip */
if (!pdev->last_misc_pkt->u.next)
goto out;
pkt = pdev->last_misc_pkt->u.next;
pdev->last_misc_pkt->u.next = NULL;
while (pkt) {
next = pkt->u.next;
/* trim the outgrown list */
if (++i > level) {
netbuf =
(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
qdf_nbuf_free(netbuf);
qdf_mem_free(pkt);
pkt = NULL;
if (prev)
prev->u.next = NULL;
}
prev = pkt;
netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
qdf_nbuf_unmap(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
qdf_nbuf_free(netbuf);
qdf_mem_free(pkt);
pkt = next;
}
out:
pdev->last_misc_pkt = pdev->htt_htc_pkt_misclist;
pdev->last_misc_num = 1;
HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
}
void htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
@ -153,15 +149,16 @@ void htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
if (pdev->htt_htc_pkt_misclist) {
u_pkt->u.next = pdev->htt_htc_pkt_misclist;
pdev->htt_htc_pkt_misclist = u_pkt;
pdev->last_misc_num++;
} else {
pdev->htt_htc_pkt_misclist = u_pkt;
pdev->last_misc_pkt = u_pkt;
pdev->last_misc_num = 1;
}
HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
/* only ce pipe size + tx_queue_depth could possibly be in use
* free older packets in the misclist
*/
htt_htc_misc_pkt_list_trim(pdev, misclist_trim_level);
if (pdev->last_misc_num > misclist_trim_level)
htt_htc_misc_pkt_list_trim(pdev);
HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
}
void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev)
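The rework drops the counted O(n) walk on every add: last_misc_pkt remembers the list head as of the previous trim and last_misc_num counts entries, so trim only detaches and frees the nodes older than the marker. The same idea on a generic singly linked list (types and names hypothetical; kfree stands in for the unmap-and-free done above):

struct node { struct node *next; };

static struct node *head, *marker;	/* marker: head as of last trim */
static int count_since_trim;

static void demo_trim(void)
{
	struct node *pkt = marker->next, *next;

	if (!pkt)
		goto out;		/* nothing added since last trim */
	marker->next = NULL;		/* detach the older suffix */
	while (pkt) {
		next = pkt->next;
		kfree(pkt);
		pkt = next;
	}
out:
	marker = head;
	count_since_trim = 1;
}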

@ -608,7 +608,7 @@ void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt);
void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev);
#ifdef ATH_11AC_TXCOMPACT
void htt_htc_misc_pkt_list_trim(struct htt_pdev_t *pdev, int level);
void htt_htc_misc_pkt_list_trim(struct htt_pdev_t *pdev);
void
htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt);

@ -452,6 +452,9 @@ struct htt_pdev_t {
/* Flag to indicate whether new htt format is supported */
bool new_htt_format_enabled;
struct htt_htc_pkt_union *last_misc_pkt;
int last_misc_num;
};
#define HTT_EPID_GET(_htt_pdev_hdl) \

@ -299,8 +299,10 @@ int phy_power_on(struct phy *phy)
dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
goto err_pwr_on;
}
++phy->power_count;
} else if (!phy->is_binary_power_count) {
++phy->power_count;
}
++phy->power_count;
mutex_unlock(&phy->mutex);
return 0;
@ -330,8 +332,10 @@ int phy_power_off(struct phy *phy)
mutex_unlock(&phy->mutex);
return ret;
}
--phy->power_count;
} else if (!phy->is_binary_power_count) {
--phy->power_count;
}
--phy->power_count;
mutex_unlock(&phy->mutex);
phy_pm_runtime_put(phy);
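The is_binary_power_count flag clamps power_count to 0/1 for PHYs whose consumers cannot guarantee balanced on/off calls. A minimal sketch of the clamped-refcount idea (struct and field names hypothetical):

struct demo_phy {
	int power_count;
	bool is_binary_power_count;
};

static void demo_power_get(struct demo_phy *p)
{
	if (p->is_binary_power_count && p->power_count)
		return;			/* already on: swallow the extra get */
	p->power_count++;
}

static void demo_power_put(struct demo_phy *p)
{
	if (p->is_binary_power_count && !p->power_count)
		return;			/* already off: swallow the extra put */
	p->power_count--;
}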

@ -45,7 +45,7 @@
#include <linux/notifier.h>
#endif
#ifdef CONFIG_SEC_PM_DEBUG
#ifdef CONFIG_SEC_PM
#include <linux/sec-pinmux.h>
#endif

@ -62,7 +62,7 @@ static struct ramoops_platform_data chromeos_ramoops_data = {
.record_size = 0x40000,
.console_size = 0x20000,
.ftrace_size = 0x20000,
.dump_oops = 1,
.max_reason = KMSG_DUMP_OOPS,
};
static struct platform_device chromeos_ramoops = {

@ -5597,6 +5597,7 @@ void ipa3_dec_client_disable_clks_no_block(
&ipa_dec_clients_disable_clks_on_wq_work, 0);
}
#ifdef IPA_WAKELOCKS
/**
* ipa3_inc_acquire_wakelock() - Increase active clients counter, and
* acquire wakelock if necessary
@ -5637,6 +5638,7 @@ void ipa3_dec_release_wakelock(void)
__pm_relax(&ipa3_ctx->w_lock);
spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
}
#endif
int ipa3_set_clock_plan_from_pm(int idx)
{
@ -5834,11 +5836,13 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
atomic_set(
&ipa3_ctx->transport_pm.dec_clients,
1);
#ifdef IPA_WAKELOCKS
/*
* acquire wake lock as long as suspend
* vote is held
*/
ipa3_inc_acquire_wakelock();
#endif
ipa3_process_irq_schedule_rel();
}
mutex_unlock(pm_mutex_ptr);
@ -5915,7 +5919,9 @@ static void ipa3_transport_release_resource(struct work_struct *work)
ipa3_process_irq_schedule_rel();
} else {
atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
#ifdef IPA_WAKELOCKS
ipa3_dec_release_wakelock();
#endif
IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE");
}
}
@ -7388,11 +7394,11 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
}
ipa3_debugfs_pre_init();
#ifdef IPA_WAKELOCKS
/* Create a wakeup source. */
wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
#endif
/* Initialize Power Management framework */
if (ipa3_ctx->use_ipa_pm) {
result = ipa_pm_init(&ipa3_res.pm_init);
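When IPA_WAKELOCKS is defined, the driver keeps the 4.14-era wakeup_source lifecycle shown above; a minimal sketch of that API (names are illustrative):

#include <linux/pm_wakeup.h>

static struct wakeup_source demo_ws;

static void demo_ws_init(void)
{
	/* 4.14-era API; later kernels use wakeup_source_register() */
	wakeup_source_init(&demo_ws, "demo_ws");
}

static void demo_ws_hold(void)    { __pm_stay_awake(&demo_ws); }
static void demo_ws_release(void) { __pm_relax(&demo_ws); }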

@ -804,14 +804,17 @@ static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
atomic_set(&sys->curr_polling_state, 0);
__ipa3_update_curr_poll_state(sys->ep->client, 0);
#ifdef IPA_WAKELOCKS
ipa3_dec_release_wakelock();
#endif
ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
GSI_CHAN_MODE_CALLBACK);
if ((ret != GSI_STATUS_SUCCESS) &&
!atomic_read(&sys->curr_polling_state)) {
if (ret == -GSI_STATUS_PENDING_IRQ) {
#ifdef IPA_WAKELOCKS
ipa3_inc_acquire_wakelock();
#endif
atomic_set(&sys->curr_polling_state, 1);
__ipa3_update_curr_poll_state(sys->ep->client, 1);
} else {
@ -4353,8 +4356,9 @@ void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
atomic_set(&sys->curr_polling_state, 1);
__ipa3_update_curr_poll_state(sys->ep->client, 1);
#ifdef IPA_WAKELOCKS
ipa3_inc_acquire_wakelock();
#endif
/*
* pm deactivate is done in wq context
@ -4443,7 +4447,9 @@ static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
/* put the gsi channel into polling mode */
gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
GSI_CHAN_MODE_POLL);
#ifdef IPA_WAKELOCKS
ipa3_inc_acquire_wakelock();
#endif
atomic_set(&sys->curr_polling_state, 1);
queue_work(sys->wq, &sys->work);
}

@ -1422,10 +1422,12 @@ struct ipa3_active_clients {
int bus_vote_idx;
};
#ifdef IPA_WAKELOCKS
struct ipa3_wakelock_ref_cnt {
spinlock_t spinlock;
int cnt;
};
#endif
struct ipa3_tag_completion {
struct completion comp;
@ -1972,8 +1974,10 @@ struct ipa3_context {
bool gsi_ch20_wa;
bool s1_bypass_arr[IPA_SMMU_CB_MAX];
u32 wdi_map_cnt;
#ifdef IPA_WAKELOCKS
struct wakeup_source w_lock;
struct ipa3_wakelock_ref_cnt wakelock_ref_cnt;
#endif
/* RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA */
bool ipa_client_apps_wan_cons_agg_gro;
/* M-release support to know client pipes */

@ -382,10 +382,12 @@ static void msm_restart_prepare(const char *cmd)
qpnp_pon_set_restart_reason(
PON_RESTART_REASON_KEYS_CLEAR);
__raw_writel(0x7766550a, restart_reason);
#ifdef CONFIG_SEC_DEBUG
} else if (!strncmp(cmd, "cross_fail", 10)) {
qpnp_pon_set_restart_reason(
PON_RESTART_REASON_CROSS_FAIL);
__raw_writel(0x7766550c, restart_reason);
#endif
#ifdef CONFIG_SEC_PERIPHERAL_SECURE_CHK
} else if (!strcmp(cmd, "peripheral_hw_reset")) {
qpnp_pon_set_restart_reason(
@ -400,11 +402,7 @@ static void msm_restart_prepare(const char *cmd)
if (!ret)
__raw_writel(0x6f656d00 | (code & 0xff),
restart_reason);
#ifndef CONFIG_SEC_DEBUG
} else if (!strncmp(cmd, "edl", 3)) {
enable_emergency_dload_mode();
#endif
#if defined(CONFIG_SEC_ABC)
#if defined(CONFIG_SEC_ABC) && defined(CONFIG_SEC_DEBUG)
} else if (!strncmp(cmd, "user_dram_test", 14) && sec_abc_get_enabled()) {
qpnp_pon_set_restart_reason(PON_RESTART_REASON_USER_DRAM_TEST);
#endif

Some files were not shown because too many files have changed in this diff.
