From 55f3d4ac2a4d297158ae3ac9d83bdfa8613a147c Mon Sep 17 00:00:00 2001
From: Maulik Shah
Date: Thu, 12 Dec 2019 17:09:25 +0530
Subject: [PATCH] cpuidle: lpm-levels: Enable LPM support for non-PSCI target

Add legacy (non-PSCI) LPM support. Modify and align the clock event,
MPM and CPU hotplug changes for LPM callback notification. Remove the
deprecated scheduler c-state (idle cpu) and d-state (idle cluster)
settings from the lpm driver.

Snapshot is taken from msm-4.9 kernel version @commit b9ad452666da39
("soc: qcom: bgrsb: Increase time out for RSB channel opening").

Change-Id: I8958ab4f098cc6d875071e3f100b8b74845e0cfa
Signed-off-by: Haribabu Gattem
Signed-off-by: Suresh Kumar Allam
---
 .../bindings/arm/msm/lpm-workarounds.txt    |   55 +
 drivers/cpuidle/Makefile                    |    4 +
 drivers/cpuidle/lpm-levels-legacy.c         | 1537 +++++++++++++++++
 drivers/cpuidle/lpm-levels-legacy.h         |  152 ++
 drivers/cpuidle/lpm-levels-of-legacy.c      | 1014 +++++++++++
 drivers/cpuidle/lpm-workarounds.c           |  147 ++
 drivers/cpuidle/lpm-workarounds.h           |   20 +
 drivers/soc/qcom/Kconfig                    |   15 +-
 include/soc/qcom/pm-legacy.h                |  219 +++
 include/trace/events/trace_msm_low_power.h  |   21 +-
 10 files changed, 3181 insertions(+), 3 deletions(-)
 create mode 100644 Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt
 create mode 100644 drivers/cpuidle/lpm-levels-legacy.c
 create mode 100644 drivers/cpuidle/lpm-levels-legacy.h
 create mode 100644 drivers/cpuidle/lpm-levels-of-legacy.c
 create mode 100644 drivers/cpuidle/lpm-workarounds.c
 create mode 100644 drivers/cpuidle/lpm-workarounds.h
 create mode 100644 include/soc/qcom/pm-legacy.h

diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt b/Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt
new file mode 100644
index 000000000000..0304035492a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt
@@ -0,0 +1,55 @@
+* LPM Workarounds
+
+The required properties are:
+
+- compatible: "qcom,lpm-workarounds"
+
+The optional properties are:
+- reg: The physical address and the size of the l1_l2_gcc and l2_pwr_sts
+ registers of the performance cluster.
+
+- reg-names: "l2_pwr_sts" - string to identify l2_pwr_sts physical address.
+ "l1_l2_gcc" - string to identify l1_l2_gcc physical address.
+
+- qcom,lpm-wa-cx-turbo-unvote: Indicates the workaround to unvote the CX turbo
+ vote when the system is coming out of RPM-assisted power collapse.
+ lpm-cx-supply is required if this is present.
+
+- lpm-cx-supply: Holds the handle for the CX regulator supply which is used
+ to unvote.
+
+- qcom,lpm-wa-skip-l2-spm: Due to a hardware bug on 8939 and 8909, the secure
+ world needs to disable and enable the L2 SPM to get the proper context
+ in secure watchdog bite cases. With this workaround there is a race
+ in programming the L2 SPM between HLOS and the secure world. This leads
+ to stability issues. To avoid this, program the L2 SPM only in the secure
+ world, based on the L2 mode flag passed. Set the lpm-wa-skip-l2-spm node
+ if this is required.
+
+- qcom,lpm-wa-dynamic-clock-gating: Due to a hardware bug on 8952, L1/L2 dynamic
+ clock gating needs to be enabled by software for the performance cluster
+ cores and L2. Set the lpm-wa-dynamic-clock-gating node if this workaround
+ is required.
+
+- qcom,cpu-offline-mask: Dynamic clock gating should be enabled when the cluster
+ is in L2 PC. Each set bit of cpu-offline-mask identifies a CPU to be
+ hotplugged by the KTM driver.
+
+- qcom,non-boot-cpu-index: Holds the index of the non-boot cluster CPU.
+
+- qcom,l1-l2-gcc-secure: Indicates the L1/L2 clock enabling register is secure.
+ +Example: + +qcom,lpm-workarounds { + compatible = "qcom,lpm-workarounds"; + reg = <0x0B011018 0x4>, + <0x0B011088 0x4>; + reg-names = "l2-pwr-sts", "l1-l2-gcc"; + lpm-cx-supply = <&pm8916_s2_corner>; + qcom,lpm-wa-cx-turbo-unvote; + qcom,lpm-wa-skip-l2-spm; + qcom,lpm-wa-dynamic-clock-gating; + qcom,cpu-offline-mask = "0xF"; + qcom,non-boot-cpu-index = <4>; +} diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 85202045df55..09dd9ed58a03 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -29,4 +29,8 @@ obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o # POWERPC drivers obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o +ifeq ($(CONFIG_MSM_PM_LEGACY), y) +obj-y += lpm-levels-legacy.o lpm-levels-of-legacy.o lpm-workarounds.o +else obj-$(CONFIG_MSM_PM) += lpm-levels.o lpm-levels-of.o +endif diff --git a/drivers/cpuidle/lpm-levels-legacy.c b/drivers/cpuidle/lpm-levels-legacy.c new file mode 100644 index 000000000000..79b495d850a4 --- /dev/null +++ b/drivers/cpuidle/lpm-levels-legacy.c @@ -0,0 +1,1537 @@ +/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "lpm-levels-legacy.h" +#include "lpm-workarounds.h" +#include +#define CREATE_TRACE_POINTS +#include +#if defined(CONFIG_COMMON_CLK) +#include "../clk/clk.h" +#elif defined(CONFIG_COMMON_CLK_MSM) +#include "../../drivers/clk/msm/clock.h" +#endif /* CONFIG_COMMON_CLK */ +#include + +#define SCLK_HZ (32768) +#define SCM_HANDOFF_LOCK_ID "S:7" +#define PSCI_POWER_STATE(reset) (reset << 30) +#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24) +static remote_spinlock_t scm_handoff_lock; + +enum { + MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0), + MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1), +}; + +enum debug_event { + CPU_ENTER, + CPU_EXIT, + CLUSTER_ENTER, + CLUSTER_EXIT, + PRE_PC_CB, + CPU_HP_STARTING, + CPU_HP_DYING, +}; + +struct lpm_debug { + cycle_t time; + enum debug_event evt; + int cpu; + uint32_t arg1; + uint32_t arg2; + uint32_t arg3; + uint32_t arg4; +}; + +static struct system_pm_ops *sys_pm_ops; +struct lpm_cluster *lpm_root_node; + +static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster); +static bool suspend_in_progress; +static struct hrtimer lpm_hrtimer; +static struct lpm_debug *lpm_debug; +static phys_addr_t lpm_debug_phys; + +static const int num_dbg_elements = 0x100; + +static void cluster_unprepare(struct lpm_cluster *cluster, + const struct cpumask *cpu, int child_idx, bool from_idle, + int64_t time, bool success); +static void cluster_prepare(struct lpm_cluster *cluster, + const struct cpumask *cpu, int child_idx, bool from_idle, + int64_t time); + +static bool menu_select; +module_param_named( + menu_select, menu_select, 
bool, 0664 +); + +static bool print_parsed_dt; +module_param_named( + print_parsed_dt, print_parsed_dt, bool, 0664 +); + +static bool sleep_disabled; +module_param_named(sleep_disabled, + sleep_disabled, bool, 0664); + +s32 msm_cpuidle_get_deep_idle_latency(void) +{ + return 10; +} +EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency); + +uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops) +{ + if (sys_pm_ops) + return -EUSERS; + + sys_pm_ops = pm_ops; + + return 0; +} + +static uint32_t least_cluster_latency(struct lpm_cluster *cluster, + struct latency_level *lat_level) +{ + struct list_head *list; + struct lpm_cluster_level *level; + struct lpm_cluster *n; + struct power_params *pwr_params; + uint32_t latency = 0; + int i; + + if (!cluster->list.next) { + for (i = 0; i < cluster->nlevels; i++) { + level = &cluster->levels[i]; + pwr_params = &level->pwr; + if (lat_level->reset_level == level->reset_level) { + if ((latency > pwr_params->latency_us) + || (!latency)) + latency = pwr_params->latency_us; + break; + } + } + } else { + list_for_each(list, &cluster->parent->child) { + n = list_entry(list, typeof(*n), list); + if (lat_level->level_name) { + if (strcmp(lat_level->level_name, + n->cluster_name)) + continue; + } + for (i = 0; i < n->nlevels; i++) { + level = &n->levels[i]; + pwr_params = &level->pwr; + if (lat_level->reset_level == + level->reset_level) { + if ((latency > pwr_params->latency_us) + || (!latency)) + latency = + pwr_params->latency_us; + break; + } + } + } + } + return latency; +} + +static uint32_t least_cpu_latency(struct list_head *child, + struct latency_level *lat_level) +{ + struct list_head *list; + struct lpm_cpu_level *level; + struct power_params *pwr_params; + struct lpm_cpu *cpu; + struct lpm_cluster *n; + uint32_t latency = 0; + int i; + + list_for_each(list, child) { + n = list_entry(list, typeof(*n), list); + if (lat_level->level_name) { + if (strcmp(lat_level->level_name, n->cluster_name)) + continue; + } + cpu = n->cpu; + for (i = 0; i < cpu->nlevels; i++) { + level = &cpu->levels[i]; + pwr_params = &level->pwr; + if (lat_level->reset_level == level->reset_level) { + if ((latency > pwr_params->latency_us) + || (!latency)) + latency = pwr_params->latency_us; + break; + } + } + } + return latency; +} + +static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster, + int affinity_level) +{ + struct lpm_cluster *n; + + if ((cluster->aff_level == affinity_level) + || ((cluster->cpu) && (affinity_level == 0))) + return cluster; + else if (!cluster->cpu) { + n = list_entry(cluster->child.next, typeof(*n), list); + return cluster_aff_match(n, affinity_level); + } else + return NULL; +} + +int lpm_get_latency(struct latency_level *level, uint32_t *latency) +{ + struct lpm_cluster *cluster; + uint32_t val; + + if (!lpm_root_node) { + pr_err("%s: lpm_probe not completed\n", __func__); + return -EAGAIN; + } + + if ((level->affinity_level < 0) + || (level->affinity_level > lpm_root_node->aff_level) + || (level->reset_level < LPM_RESET_LVL_RET) + || (level->reset_level > LPM_RESET_LVL_PC) + || !latency) + return -EINVAL; + + cluster = cluster_aff_match(lpm_root_node, level->affinity_level); + if (!cluster) { + pr_err("%s:No matching cluster found for affinity_level:%d\n", + __func__, level->affinity_level); + return -EINVAL; + } + + if (level->affinity_level == 0) + val = least_cpu_latency(&cluster->parent->child, level); + else + val = least_cluster_latency(cluster, level); + + if (!val) { + pr_err("%s:No mode with affinity_level:%d 
reset_level:%d\n", + __func__, level->affinity_level, level->reset_level); + return -EINVAL; + } + + *latency = val; + + return 0; +} +EXPORT_SYMBOL(lpm_get_latency); + +static void update_debug_pc_event(enum debug_event event, uint32_t arg1, + uint32_t arg2, uint32_t arg3, uint32_t arg4) +{ + struct lpm_debug *dbg; + int idx; + static DEFINE_SPINLOCK(debug_lock); + static int pc_event_index; + + if (!lpm_debug) + return; + + spin_lock(&debug_lock); + idx = pc_event_index++; + dbg = &lpm_debug[idx & (num_dbg_elements - 1)]; + + dbg->evt = event; + dbg->time = arch_counter_get_cntpct(); + dbg->cpu = raw_smp_processor_id(); + dbg->arg1 = arg1; + dbg->arg2 = arg2; + dbg->arg3 = arg3; + dbg->arg4 = arg4; + spin_unlock(&debug_lock); +} + +static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h) +{ + return HRTIMER_NORESTART; +} + +static void msm_pm_set_timer(uint32_t modified_time_us) +{ + u64 modified_time_ns = modified_time_us * NSEC_PER_USEC; + ktime_t modified_ktime = ns_to_ktime(modified_time_ns); + + lpm_hrtimer.function = lpm_hrtimer_cb; + hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED); +} + +int set_l2_mode(struct low_power_ops *ops, int mode, + struct lpm_cluster_level *level) +{ + int lpm = mode; + int rc = 0; + bool notify_rpm = level->notify_rpm; + struct low_power_ops *cpu_ops = per_cpu(cpu_cluster, + smp_processor_id())->lpm_dev; + + if (cpu_ops->tz_flag & MSM_SCM_L2_OFF || + cpu_ops->tz_flag & MSM_SCM_L2_GDHS) + coresight_cti_ctx_restore(); + + switch (mode) { + case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE: + case MSM_SPM_MODE_POWER_COLLAPSE: + case MSM_SPM_MODE_FASTPC: + if (level->no_cache_flush) + cpu_ops->tz_flag = MSM_SCM_L2_GDHS; + else + cpu_ops->tz_flag = MSM_SCM_L2_OFF; + coresight_cti_ctx_save(); + break; + case MSM_SPM_MODE_GDHS: + cpu_ops->tz_flag = MSM_SCM_L2_GDHS; + coresight_cti_ctx_save(); + break; + case MSM_SPM_MODE_CLOCK_GATING: + case MSM_SPM_MODE_RETENTION: + case MSM_SPM_MODE_DISABLED: + cpu_ops->tz_flag = MSM_SCM_L2_ON; + break; + default: + cpu_ops->tz_flag = MSM_SCM_L2_ON; + lpm = MSM_SPM_MODE_DISABLED; + break; + } + + if (lpm_wa_get_skip_l2_spm()) + rc = msm_spm_config_low_power_mode_addr(ops->spm, lpm, + notify_rpm); + else + rc = msm_spm_config_low_power_mode(ops->spm, lpm, notify_rpm); + + if (rc) + pr_err("%s: Failed to set L2 low power mode %d, ERR %d", + __func__, lpm, rc); + + return rc; +} + +int set_l3_mode(struct low_power_ops *ops, int mode, + struct lpm_cluster_level *level) +{ + bool notify_rpm = level->notify_rpm; + struct low_power_ops *cpu_ops = per_cpu(cpu_cluster, + smp_processor_id())->lpm_dev; + + switch (mode) { + case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE: + case MSM_SPM_MODE_POWER_COLLAPSE: + case MSM_SPM_MODE_FASTPC: + cpu_ops->tz_flag |= MSM_SCM_L3_PC_OFF; + break; + default: + break; + } + return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm); +} + + +int set_system_mode(struct low_power_ops *ops, int mode, + struct lpm_cluster_level *level) +{ + bool notify_rpm = level->notify_rpm; + + return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm); +} + +static int set_device_mode(struct lpm_cluster *cluster, int ndevice, + struct lpm_cluster_level *level) +{ + struct low_power_ops *ops; + + if (use_psci) + return 0; + + ops = &cluster->lpm_dev[ndevice]; + if (ops && ops->set_mode) + return ops->set_mode(ops, level->mode[ndevice], + level); + else + return -EINVAL; +} + +static int cpu_power_select(struct cpuidle_device *dev, + struct lpm_cpu *cpu) +{ + int best_level = 0; + uint32_t 
latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY, + dev->cpu); + s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length()); + uint32_t modified_time_us = 0; + uint32_t next_event_us = 0; + int i; + uint32_t lvl_latency_us = 0; + uint32_t *residency = get_per_cpu_max_residency(dev->cpu); + + if (!cpu) + return best_level; + + if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0) + return 0; + + next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu))); + + for (i = 0; i < cpu->nlevels; i++) { + struct lpm_cpu_level *level = &cpu->levels[i]; + struct power_params *pwr_params = &level->pwr; + uint32_t next_wakeup_us = (uint32_t)sleep_us; + enum msm_pm_sleep_mode mode = level->mode; + bool allow; + + allow = lpm_cpu_mode_allow(dev->cpu, i, true); + + if (!allow) + continue; + + lvl_latency_us = pwr_params->latency_us; + + if (latency_us < lvl_latency_us) + break; + + if (next_event_us) { + if (next_event_us < lvl_latency_us) + break; + + if (((next_event_us - lvl_latency_us) < sleep_us) || + (next_event_us < sleep_us)) + next_wakeup_us = next_event_us - lvl_latency_us; + } + + best_level = i; + + if (next_event_us && next_event_us < sleep_us && + (mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)) + modified_time_us + = next_event_us - lvl_latency_us; + else + modified_time_us = 0; + + if (next_wakeup_us <= residency[i]) + break; + } + + if (modified_time_us) + msm_pm_set_timer(modified_time_us); + + trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us); + + return best_level; +} + +static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster, + struct cpumask *mask, bool from_idle) +{ + int cpu; + int next_cpu = raw_smp_processor_id(); + ktime_t next_event; + struct cpumask online_cpus_in_cluster; + + next_event.tv64 = KTIME_MAX; + if (!from_idle) { + if (mask) + cpumask_copy(mask, cpumask_of(raw_smp_processor_id())); + return ~0ULL; + } + + cpumask_and(&online_cpus_in_cluster, + &cluster->num_children_in_sync, cpu_online_mask); + + for_each_cpu(cpu, &online_cpus_in_cluster) { + ktime_t *next_event_c; + + next_event_c = get_next_event_cpu(cpu); + if (next_event_c->tv64 < next_event.tv64) { + next_event.tv64 = next_event_c->tv64; + next_cpu = cpu; + } + } + + if (mask) + cpumask_copy(mask, cpumask_of(next_cpu)); + + + if (ktime_to_us(next_event) > ktime_to_us(ktime_get())) + return ktime_to_us(ktime_sub(next_event, ktime_get())); + else + return 0; +} + +static int cluster_select(struct lpm_cluster *cluster, bool from_idle) +{ + int best_level = -1; + int i; + struct cpumask mask; + uint32_t latency_us = ~0U; + uint32_t sleep_us; + + if (!cluster) + return -EINVAL; + + sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle); + + if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus)) + latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY, + &mask); + + /* + * If atleast one of the core in the cluster is online, the cluster + * low power modes should be determined by the idle characteristics + * even if the last core enters the low power mode as a part of + * hotplug. 
+ */ + + if (!from_idle && num_online_cpus() > 1 && + cpumask_intersects(&cluster->child_cpus, cpu_online_mask)) + from_idle = true; + + for (i = 0; i < cluster->nlevels; i++) { + struct lpm_cluster_level *level = &cluster->levels[i]; + struct power_params *pwr_params = &level->pwr; + + if (!lpm_cluster_mode_allow(cluster, i, from_idle)) + continue; + + if (level->last_core_only && + cpumask_weight(cpu_online_mask) > 1) + continue; + + if (!cpumask_equal(&cluster->num_children_in_sync, + &level->num_cpu_votes)) + continue; + + if (from_idle && latency_us < pwr_params->latency_us) + break; + + if (sleep_us < pwr_params->time_overhead_us) + break; + + if (suspend_in_progress && from_idle && level->notify_rpm) + continue; + + if (level->notify_rpm) { + if (!(sys_pm_ops && sys_pm_ops->sleep_allowed)) + continue; + if (!sys_pm_ops->sleep_allowed()) + continue; + } + + best_level = i; + + if (from_idle && sleep_us <= pwr_params->max_residency) + break; + } + + return best_level; +} + +static void cluster_notify(struct lpm_cluster *cluster, + struct lpm_cluster_level *level, bool enter) +{ + if (level->is_reset && enter) + cpu_cluster_pm_enter(cluster->aff_level); + else if (level->is_reset && !enter) + cpu_cluster_pm_exit(cluster->aff_level); +} + +static unsigned int get_next_online_cpu(bool from_idle) +{ + unsigned int cpu; + ktime_t next_event; + unsigned int next_cpu = raw_smp_processor_id(); + + if (!from_idle) + return next_cpu; + next_event.tv64 = KTIME_MAX; + for_each_online_cpu(cpu) { + ktime_t *next_event_c; + + next_event_c = get_next_event_cpu(cpu); + if (next_event_c->tv64 < next_event.tv64) { + next_event.tv64 = next_event_c->tv64; + next_cpu = cpu; + } + } + return next_cpu; +} + +static int cluster_configure(struct lpm_cluster *cluster, int idx, + bool from_idle) +{ + struct lpm_cluster_level *level = &cluster->levels[idx]; + struct cpumask cpumask; + unsigned int cpu; + int ret, i; + + if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus) + || is_IPI_pending(&cluster->num_children_in_sync)) { + return -EPERM; + } + + if (idx != cluster->default_level) { + update_debug_pc_event(CLUSTER_ENTER, idx, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], from_idle); + trace_cluster_enter(cluster->cluster_name, idx, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], from_idle); + lpm_stats_cluster_enter(cluster->stats, idx); + } + + for (i = 0; i < cluster->ndevices; i++) { + ret = set_device_mode(cluster, i, level); + if (ret) + goto failed_set_mode; + } + + if (level->notify_rpm) { + struct cpumask *nextcpu; + + cpu = get_next_online_cpu(from_idle); + cpumask_copy(&cpumask, cpumask_of(cpu)); + nextcpu = level->disable_dynamic_routing ? 
NULL : &cpumask; + + if (sys_pm_ops && sys_pm_ops->enter) { + ret = sys_pm_ops->enter(nextcpu); + if (ret) + goto failed_set_mode; + } + + if (cluster->no_saw_devices && !use_psci) + msm_spm_set_rpm_hs(true); + } + + /* Notify cluster enter event after successfully config completion */ + cluster_notify(cluster, level, true); + + cluster->last_level = idx; + return 0; + +failed_set_mode: + + for (i = 0; i < cluster->ndevices; i++) { + int rc = 0; + + level = &cluster->levels[cluster->default_level]; + rc = set_device_mode(cluster, i, level); + WARN_ON(rc); + } + return ret; +} + +static void cluster_prepare(struct lpm_cluster *cluster, + const struct cpumask *cpu, int child_idx, bool from_idle, + int64_t start_time) +{ + int i; + + if (!cluster) + return; + + if (cluster->min_child_level > child_idx) + return; + + spin_lock(&cluster->sync_lock); + cpumask_or(&cluster->num_children_in_sync, cpu, + &cluster->num_children_in_sync); + + for (i = 0; i < cluster->nlevels; i++) { + struct lpm_cluster_level *lvl = &cluster->levels[i]; + + if (child_idx >= lvl->min_child_level) + cpumask_or(&lvl->num_cpu_votes, cpu, + &lvl->num_cpu_votes); + } + + /* + * cluster_select() does not make any configuration changes. So its ok + * to release the lock here. If a core wakes up for a rude request, + * it need not wait for another to finish its cluster selection and + * configuration process + */ + + if (!cpumask_equal(&cluster->num_children_in_sync, + &cluster->child_cpus)) + goto failed; + + i = cluster_select(cluster, from_idle); + + if (i < 0) + goto failed; + + if (cluster_configure(cluster, i, from_idle)) + goto failed; + + cluster->stats->sleep_time = start_time; + cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i, + from_idle, start_time); + + spin_unlock(&cluster->sync_lock); + + if (!use_psci) { + struct lpm_cluster_level *level = &cluster->levels[i]; + + if (level->notify_rpm) + if (sys_pm_ops && sys_pm_ops->update_wakeup) + sys_pm_ops->update_wakeup(from_idle); + } + + return; +failed: + spin_unlock(&cluster->sync_lock); + cluster->stats->sleep_time = 0; +} + +static void cluster_unprepare(struct lpm_cluster *cluster, + const struct cpumask *cpu, int child_idx, bool from_idle, + int64_t end_time, bool success) +{ + struct lpm_cluster_level *level; + bool first_cpu; + int last_level, i, ret; + + if (!cluster) + return; + + if (cluster->min_child_level > child_idx) + return; + + spin_lock(&cluster->sync_lock); + last_level = cluster->default_level; + first_cpu = cpumask_equal(&cluster->num_children_in_sync, + &cluster->child_cpus); + cpumask_andnot(&cluster->num_children_in_sync, + &cluster->num_children_in_sync, cpu); + + for (i = 0; i < cluster->nlevels; i++) { + struct lpm_cluster_level *lvl = &cluster->levels[i]; + + if (child_idx >= lvl->min_child_level) + cpumask_andnot(&lvl->num_cpu_votes, + &lvl->num_cpu_votes, cpu); + } + + if (!first_cpu || cluster->last_level == cluster->default_level) + goto unlock_return; + + if (cluster->stats->sleep_time) + cluster->stats->sleep_time = end_time - + cluster->stats->sleep_time; + lpm_stats_cluster_exit(cluster->stats, cluster->last_level, success); + + level = &cluster->levels[cluster->last_level]; + if (level->notify_rpm) { + if (sys_pm_ops && sys_pm_ops->exit) + sys_pm_ops->exit(success); + + /* If RPM bumps up CX to turbo, unvote CX turbo vote + * during exit of rpm assisted power collapse to + * reduce the power impact + */ + lpm_wa_cx_unvote_send(); + + if (cluster->no_saw_devices && !use_psci) + msm_spm_set_rpm_hs(false); + + } + 
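+ /*
+ * Record the cluster exit in the LPM debug ring buffer and ftrace,
+ * then drop back to the cluster's default level on every SPM device.
+ */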
+ update_debug_pc_event(CLUSTER_EXIT, cluster->last_level, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], from_idle); + trace_cluster_exit(cluster->cluster_name, cluster->last_level, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], from_idle); + + last_level = cluster->last_level; + cluster->last_level = cluster->default_level; + + for (i = 0; i < cluster->ndevices; i++) { + level = &cluster->levels[cluster->default_level]; + ret = set_device_mode(cluster, i, level); + + WARN_ON(ret); + + } + + cluster_notify(cluster, &cluster->levels[last_level], false); + cluster_unprepare(cluster->parent, &cluster->child_cpus, + last_level, from_idle, end_time, success); +unlock_return: + spin_unlock(&cluster->sync_lock); +} + +static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index, + bool from_idle) +{ + struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index]; + bool jtag_save_restore = + cluster->cpu->levels[cpu_index].jtag_save_restore; + + /* Use broadcast timer for aggregating sleep mode within a cluster. + * A broadcast timer could be used in the following scenarios + * 1) The architected timer HW gets reset during certain low power + * modes and the core relies on a external(broadcast) timer to wake up + * from sleep. This information is passed through device tree. + * 2) The CPU low power mode could trigger a system low power mode. + * The low power module relies on Broadcast timer to aggregate the + * next wakeup within a cluster, in which case, CPU switches over to + * use broadcast timer. + */ + if (from_idle && (cpu_level->use_bc_timer || + (cpu_index >= cluster->min_child_level))) + tick_broadcast_enter(); + + if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) + || (cpu_level->mode == + MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE) + || (cpu_level->is_reset))) + cpu_pm_enter(); + + /* + * Save JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM + */ + if (jtag_save_restore) + msm_jtag_save_state(); +} + +static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index, + bool from_idle) +{ + struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index]; + bool jtag_save_restore = + cluster->cpu->levels[cpu_index].jtag_save_restore; + + if (from_idle && (cpu_level->use_bc_timer || + (cpu_index >= cluster->min_child_level))) + tick_broadcast_exit(); + + if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) + || (cpu_level->mode == + MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE) + || cpu_level->is_reset)) + cpu_pm_exit(); + + /* + * Restore JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM + */ + if (jtag_save_restore) + msm_jtag_restore_state(); +} + +#if defined(CONFIG_ARM_PSCI) || !defined(CONFIG_CPU_V7) +static int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl) +{ + int state_id = 0; + + if (!cluster) + return 0; + + spin_lock(&cluster->sync_lock); + + if (!cpumask_equal(&cluster->num_children_in_sync, + &cluster->child_cpus)) + goto unlock_and_return; + + state_id |= get_cluster_id(cluster->parent, aff_lvl); + + if (cluster->last_level != cluster->default_level) { + struct lpm_cluster_level *level + = &cluster->levels[cluster->last_level]; + + state_id |= (level->psci_id & cluster->psci_mode_mask) + << cluster->psci_mode_shift; + (*aff_lvl)++; + } +unlock_and_return: + spin_unlock(&cluster->sync_lock); + return state_id; +} +#endif + +#if !defined(CONFIG_CPU_V7) +asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64); +static bool 
psci_enter_sleep(struct lpm_cluster *cluster, + int idx, bool from_idle) + +{ + bool ret; + /* + * idx = 0 is the default LPM state + */ + if (!idx) { + stop_critical_timings(); + wfi(); + start_critical_timings(); + ret = true; + } else { + int affinity_level = 0; + int state_id = get_cluster_id(cluster, &affinity_level); + int power_state = + PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset); + bool success = false; + + if (cluster->cpu->levels[idx].hyp_psci) { + stop_critical_timings(); + __invoke_psci_fn_smc(0xC4000021, 0, 0, 0); + start_critical_timings(); + return 1; + } + + affinity_level = PSCI_AFFINITY_LEVEL(affinity_level); + state_id |= (power_state | affinity_level + | cluster->cpu->levels[idx].psci_id); + + update_debug_pc_event(CPU_ENTER, state_id, + 0xdeaffeed, 0xdeaffeed, true); + stop_critical_timings(); + success = !arm_cpuidle_suspend(state_id); + start_critical_timings(); + update_debug_pc_event(CPU_EXIT, state_id, + success, 0xdeaffeed, true); + ret = success; + } + return ret; +} +#elif defined(CONFIG_ARM_PSCI) +static bool psci_enter_sleep(struct lpm_cluster *cluster, + int idx, bool from_idle) +{ + bool ret; + + if (!idx) { + stop_critical_timings(); + wfi(); + start_critical_timings(); + ret = true; + } else { + int affinity_level = 0; + int state_id = get_cluster_id(cluster, &affinity_level); + int power_state = + PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset); + bool success = false; + + affinity_level = PSCI_AFFINITY_LEVEL(affinity_level); + state_id |= (power_state | affinity_level + | cluster->cpu->levels[idx].psci_id); + + update_debug_pc_event(CPU_ENTER, state_id, + 0xdeaffeed, 0xdeaffeed, true); + stop_critical_timings(); + success = !arm_cpuidle_suspend(state_id); + start_critical_timings(); + update_debug_pc_event(CPU_EXIT, state_id, + success, 0xdeaffeed, true); + ret = success; + } + return ret; +} +#else +static bool psci_enter_sleep(struct lpm_cluster *cluster, + int idx, bool from_idle) +{ + WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n"); + return false; +} +#endif + +static int lpm_cpuidle_select(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{ + struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu); + int idx; + + if (!cluster) + return 0; + + idx = cpu_power_select(dev, cluster->cpu); + + return idx; +} + +static int lpm_cpuidle_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx) +{ + struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu); + bool success = true; + const struct cpumask *cpumask = get_cpu_mask(dev->cpu); + ktime_t start = ktime_get(); + int64_t start_time = ktime_to_ns(ktime_get()), end_time; + + if (idx < 0) + return -EINVAL; + + cpu_prepare(cluster, idx, true); + cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get())); + + trace_cpu_idle_enter(idx); + lpm_stats_cpu_enter(idx, start_time); + + if (need_resched()) + goto exit; + + if (!use_psci) { + if (idx > 0) + update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed, + 0xdeaffeed, true); + success = msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, + true); + + if (idx > 0) + update_debug_pc_event(CPU_EXIT, idx, success, + 0xdeaffeed, true); + } else { + success = psci_enter_sleep(cluster, idx, true); + } + +exit: + end_time = ktime_to_ns(ktime_get()); + lpm_stats_cpu_exit(idx, end_time, success); + + cluster_unprepare(cluster, cpumask, idx, true, end_time, success); + cpu_unprepare(cluster, idx, true); + + trace_cpu_idle_exit(idx, success); + dev->last_residency = ktime_us_delta(ktime_get(), start); + 
local_irq_enable(); + + return idx; +} + +#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS +static int cpuidle_register_cpu(struct cpuidle_driver *drv, + struct cpumask *mask) +{ + struct cpuidle_device *device; + int cpu, ret; + + + if (!mask || !drv) + return -EINVAL; + + drv->cpumask = mask; + ret = cpuidle_register_driver(drv); + if (ret) { + pr_err("Failed to register cpuidle driver %d\n", ret); + goto failed_driver_register; + } + + for_each_cpu(cpu, mask) { + device = &per_cpu(cpuidle_dev, cpu); + device->cpu = cpu; + + ret = cpuidle_register_device(device); + if (ret) { + pr_err("Failed to register cpuidle driver for cpu:%u\n", + cpu); + goto failed_driver_register; + } + } + return ret; +failed_driver_register: + for_each_cpu(cpu, mask) + cpuidle_unregister_driver(drv); + return ret; +} +#else +static int cpuidle_register_cpu(struct cpuidle_driver *drv, + struct cpumask *mask) +{ + return cpuidle_register(drv, NULL); +} +#endif + +static struct cpuidle_governor lpm_governor = { + .name = "qcom", + .rating = 30, + .select = lpm_cpuidle_select, + .owner = THIS_MODULE, +}; + +static int cluster_cpuidle_register(struct lpm_cluster *cl) +{ + int i = 0, ret = 0; + unsigned int cpu; + struct lpm_cluster *p = NULL; + + if (!cl->cpu) { + struct lpm_cluster *n; + + list_for_each_entry(n, &cl->child, list) { + ret = cluster_cpuidle_register(n); + if (ret) + break; + } + return ret; + } + + cl->drv = kzalloc(sizeof(*cl->drv), GFP_KERNEL); + if (!cl->drv) + return -ENOMEM; + + cl->drv->name = "msm_idle"; + + for (i = 0; i < cl->cpu->nlevels; i++) { + struct cpuidle_state *st = &cl->drv->states[i]; + struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i]; + + snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i); + snprintf(st->desc, CPUIDLE_DESC_LEN, "%s", cpu_level->name); + st->flags = 0; + st->exit_latency = cpu_level->pwr.latency_us; + st->power_usage = cpu_level->pwr.ss_power; + st->target_residency = 0; + st->enter = lpm_cpuidle_enter; + } + + cl->drv->state_count = cl->cpu->nlevels; + cl->drv->safe_state_index = 0; + for_each_cpu(cpu, &cl->child_cpus) + per_cpu(cpu_cluster, cpu) = cl; + + for_each_possible_cpu(cpu) { + if (cpu_online(cpu)) + continue; + p = per_cpu(cpu_cluster, cpu); + while (p) { + int j; + + spin_lock(&p->sync_lock); + cpumask_set_cpu(cpu, &p->num_children_in_sync); + for (j = 0; j < p->nlevels; j++) + cpumask_copy(&p->levels[j].num_cpu_votes, + &p->num_children_in_sync); + spin_unlock(&p->sync_lock); + p = p->parent; + } + } + ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus); + + if (ret) { + kfree(cl->drv); + return -ENOMEM; + } + return 0; +} + +/** + * init_lpm - initializes the governor + */ +static int __init init_lpm(void) +{ + return cpuidle_register_governor(&lpm_governor); +} + +postcore_initcall(init_lpm); + +static void register_cpu_lpm_stats(struct lpm_cpu *cpu, + struct lpm_cluster *parent) +{ + const char **level_name; + int i; + + level_name = kcalloc(cpu->nlevels, sizeof(*level_name), GFP_KERNEL); + + if (!level_name) + return; + + for (i = 0; i < cpu->nlevels; i++) + level_name[i] = cpu->levels[i].name; + + lpm_stats_config_level("cpu", level_name, cpu->nlevels, + parent->stats, &parent->child_cpus); + + kfree(level_name); +} + +static void register_cluster_lpm_stats(struct lpm_cluster *cl, + struct lpm_cluster *parent) +{ + const char **level_name; + int i; + struct lpm_cluster *child; + + if (!cl) + return; + + level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL); + + if (!level_name) + return; + + for (i = 0; i < cl->nlevels; i++) + 
level_name[i] = cl->levels[i].level_name; + + cl->stats = lpm_stats_config_level(cl->cluster_name, level_name, + cl->nlevels, parent ? parent->stats : NULL, NULL); + + kfree(level_name); + + if (cl->cpu) { + register_cpu_lpm_stats(cl->cpu, cl); + return; + } + + list_for_each_entry(child, &cl->child, list) + register_cluster_lpm_stats(child, cl); +} + +static int lpm_suspend_prepare(void) +{ + suspend_in_progress = true; + lpm_stats_suspend_enter(); + + return 0; +} + +static void lpm_suspend_wake(void) +{ + suspend_in_progress = false; + lpm_stats_suspend_exit(); +} + +static int lpm_suspend_enter(suspend_state_t state) +{ + int cpu = raw_smp_processor_id(); + struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); + struct lpm_cpu *lpm_cpu = cluster->cpu; + const struct cpumask *cpumask = get_cpu_mask(cpu); + int idx; + bool success = true; + + for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) { + + if (lpm_cpu_mode_allow(cpu, idx, false)) + break; + } + if (idx < 0) { + pr_err("Failed suspend\n"); + return 0; + } + cpu_prepare(cluster, idx, false); + cluster_prepare(cluster, cpumask, idx, false, 0); + if (idx > 0) + update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed, + 0xdeaffeed, false); + + /* + * Print the clocks which are enabled during system suspend + * This debug information is useful to know which are the + * clocks that are enabled and preventing the system level + * LPMs(XO and Vmin). + */ + clock_debug_print_enabled(true); + + if (!use_psci) + msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, false); + else + success = psci_enter_sleep(cluster, idx, true); + + if (idx > 0) + update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed, + false); + + cluster_unprepare(cluster, cpumask, idx, false, 0, success); + cpu_unprepare(cluster, idx, false); + return 0; +} + +static int lpm_dying_cpu(unsigned int cpu) +{ + struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); + + update_debug_pc_event(CPU_HP_DYING, cpu, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], false); + cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0); + return 0; +} + +static int lpm_starting_cpu(unsigned int cpu) +{ + struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); + + update_debug_pc_event(CPU_HP_STARTING, cpu, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], false); + cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, + false, 0, true); + return 0; +} + +static const struct platform_suspend_ops lpm_suspend_ops = { + .enter = lpm_suspend_enter, + .valid = suspend_valid_only_mem, + .prepare_late = lpm_suspend_prepare, + .wake = lpm_suspend_wake, +}; + +static int lpm_probe(struct platform_device *pdev) +{ + int ret; + int size; + struct kobject *module_kobj = NULL; + struct md_region md_entry; + + get_online_cpus(); + lpm_root_node = lpm_of_parse_cluster(pdev); + + if (IS_ERR_OR_NULL(lpm_root_node)) { + pr_err("%s(): Failed to probe low power modes\n", __func__); + put_online_cpus(); + return PTR_ERR(lpm_root_node); + } + + if (print_parsed_dt) + cluster_dt_walkthrough(lpm_root_node); + + /* + * Register hotplug notifier before broadcast time to ensure there + * to prevent race where a broadcast timer might not be setup on for a + * core. BUG in existing code but no known issues possibly because of + * how late lpm_levels gets initialized. 
+ */ + suspend_set_ops(&lpm_suspend_ops); + hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + + ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID); + if (ret) { + pr_err("%s: Failed initializing scm_handoff_lock (%d)\n", + __func__, ret); + put_online_cpus(); + return ret; + } + size = num_dbg_elements * sizeof(struct lpm_debug); + lpm_debug = dma_alloc_coherent(&pdev->dev, size, + &lpm_debug_phys, GFP_KERNEL); + register_cluster_lpm_stats(lpm_root_node, NULL); + + ret = cluster_cpuidle_register(lpm_root_node); + put_online_cpus(); + if (ret) { + pr_err("%s()Failed to register with cpuidle framework\n", + __func__); + goto failed; + } + ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING, + "AP_QCOM_SLEEP_STARTING", + lpm_starting_cpu, lpm_dying_cpu); + if (ret) + goto failed; + + module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); + if (!module_kobj) { + pr_err("%s: cannot find kobject for module %s\n", + __func__, KBUILD_MODNAME); + ret = -ENOENT; + goto failed; + } + + ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj); + if (ret) { + pr_err("%s(): Failed to create cluster level nodes\n", + __func__); + goto failed; + } + + /* Add lpm_debug to Minidump*/ + strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name)); + md_entry.virt_addr = (uintptr_t)lpm_debug; + md_entry.phys_addr = lpm_debug_phys; + md_entry.size = size; + if (msm_minidump_add_region(&md_entry)) + pr_info("Failed to add lpm_debug in Minidump\n"); + + return 0; +failed: + free_cluster_node(lpm_root_node); + lpm_root_node = NULL; + return ret; +} + +static const struct of_device_id lpm_mtch_tbl[] = { + {.compatible = "qcom,lpm-levels"}, + {}, +}; + +static struct platform_driver lpm_driver = { + .probe = lpm_probe, + .driver = { + .name = "lpm-levels", + .owner = THIS_MODULE, + .of_match_table = lpm_mtch_tbl, + }, +}; + +static int __init lpm_levels_module_init(void) +{ + int rc; + + rc = platform_driver_register(&lpm_driver); + if (rc) { + pr_info("Error registering %s\n", lpm_driver.driver.name); + goto fail; + } + +fail: + return rc; +} +late_initcall(lpm_levels_module_init); + +enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu) +{ + struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); + enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON; + + /* + * No need to acquire the lock if probe isn't completed yet + * In the event of the hotplug happening before lpm probe, we want to + * flush the cache to make sure that L2 is flushed. In particular, this + * could cause incoherencies for a cluster architecture. This wouldn't + * affect the idle case as the idle driver wouldn't be registered + * before the probe function + */ + if (!cluster) + return MSM_SCM_L2_OFF; + + /* + * Assumes L2 only. What/How parameters gets passed into TZ will + * determine how this function reports this info back in msm-pm.c + */ + spin_lock(&cluster->sync_lock); + + if (!cluster->lpm_dev) { + retflag = MSM_SCM_L2_OFF; + goto unlock_and_return; + } + + if (!cpumask_equal(&cluster->num_children_in_sync, + &cluster->child_cpus)) + goto unlock_and_return; + + if (cluster->lpm_dev) + retflag = cluster->lpm_dev->tz_flag; + /* + * The scm_handoff_lock will be release by the secure monitor. + * It is used to serialize power-collapses from this point on, + * so that both Linux and the secure context have a consistent + * view regarding the number of running cpus (cpu_count). + * + * It must be acquired before releasing the cluster lock. 
+ */ +unlock_and_return: + update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef, + 0xdeadbeef); + trace_pre_pc_cb(retflag); + remote_spin_lock_rlock_id(&scm_handoff_lock, + REMOTE_SPINLOCK_TID_START + cpu); + spin_unlock(&cluster->sync_lock); + return retflag; +} + +/** + * lpm_cpu_hotplug_enter(): Called by dying CPU to terminate in low power mode + * + * @cpu: cpuid of the dying CPU + * + * Called from platform_cpu_kill() to terminate hotplug in a low power mode + */ +void lpm_cpu_hotplug_enter(unsigned int cpu) +{ + enum msm_pm_sleep_mode mode = MSM_PM_SLEEP_MODE_NR; + struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); + int i; + int idx = -1; + + /* + * If lpm isn't probed yet, try to put cpu into the one of the modes + * available + */ + if (!cluster) { + if (msm_spm_is_mode_avail( + MSM_SPM_MODE_POWER_COLLAPSE)){ + mode = MSM_PM_SLEEP_MODE_POWER_COLLAPSE; + } else if (msm_spm_is_mode_avail( + MSM_SPM_MODE_FASTPC)) { + mode = MSM_PM_SLEEP_MODE_FASTPC; + } else if (msm_spm_is_mode_avail( + MSM_SPM_MODE_RETENTION)) { + mode = MSM_PM_SLEEP_MODE_RETENTION; + } else { + pr_err("No mode avail for cpu%d hotplug\n", cpu); + WARN_ON(1); + return; + } + } else { + struct lpm_cpu *lpm_cpu; + uint32_t ss_pwr = ~0U; + + lpm_cpu = cluster->cpu; + for (i = 0; i < lpm_cpu->nlevels; i++) { + if (ss_pwr < lpm_cpu->levels[i].pwr.ss_power) + continue; + ss_pwr = lpm_cpu->levels[i].pwr.ss_power; + idx = i; + mode = lpm_cpu->levels[i].mode; + } + + if (mode == MSM_PM_SLEEP_MODE_NR) + return; + + WARN_ON(idx < 0); + cluster_prepare(cluster, get_cpu_mask(cpu), idx, false, 0); + } + + msm_cpu_pm_enter_sleep(mode, false); +} diff --git a/drivers/cpuidle/lpm-levels-legacy.h b/drivers/cpuidle/lpm-levels-legacy.h new file mode 100644 index 000000000000..07bcbb05bab3 --- /dev/null +++ b/drivers/cpuidle/lpm-levels-legacy.h @@ -0,0 +1,152 @@ +/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +#define NR_LPM_LEVELS 8 + +extern bool use_psci; + +struct lpm_lookup_table { + uint32_t modes; + const char *mode_name; +}; + +struct power_params { + uint32_t latency_us; /* Enter + Exit latency */ + uint32_t ss_power; /* Steady state power */ + uint32_t energy_overhead; /* Enter + exit over head */ + uint32_t time_overhead_us; /* Enter + exit overhead */ + uint32_t residencies[NR_LPM_LEVELS]; + uint32_t max_residency; +}; + +struct lpm_cpu_level { + const char *name; + enum msm_pm_sleep_mode mode; + bool use_bc_timer; + struct power_params pwr; + unsigned int psci_id; + bool is_reset; + bool jtag_save_restore; + bool hyp_psci; + int reset_level; +}; + +struct lpm_cpu { + struct lpm_cpu_level levels[NR_LPM_LEVELS]; + int nlevels; + unsigned int psci_mode_shift; + unsigned int psci_mode_mask; + struct lpm_cluster *parent; +}; + +struct lpm_level_avail { + bool idle_enabled; + bool suspend_enabled; + struct kobject *kobj; + struct kobj_attribute idle_enabled_attr; + struct kobj_attribute suspend_enabled_attr; + void *data; + int idx; + bool cpu_node; +}; + +struct lpm_cluster_level { + const char *level_name; + int *mode; /* SPM mode to enter */ + int min_child_level; + struct cpumask num_cpu_votes; + struct power_params pwr; + bool notify_rpm; + bool disable_dynamic_routing; + bool sync_level; + bool last_core_only; + struct lpm_level_avail available; + unsigned int psci_id; + bool is_reset; + int reset_level; + bool no_cache_flush; +}; + +struct low_power_ops { + struct msm_spm_device *spm; + int (*set_mode)(struct low_power_ops *ops, int mode, + struct lpm_cluster_level *level); + enum msm_pm_l2_scm_flag tz_flag; +}; + +struct lpm_cluster { + struct list_head list; + struct list_head child; + const char *cluster_name; + const char **name; + unsigned long aff_level; /* Affinity level of the node */ + struct low_power_ops *lpm_dev; + int ndevices; + struct lpm_cluster_level levels[NR_LPM_LEVELS]; + int nlevels; + enum msm_pm_l2_scm_flag l2_flag; + int min_child_level; + int default_level; + int last_level; + struct lpm_cpu *cpu; + struct cpuidle_driver *drv; + spinlock_t sync_lock; + struct cpumask child_cpus; + struct cpumask num_children_in_sync; + struct lpm_cluster *parent; + struct lpm_stats *stats; + unsigned int psci_mode_shift; + unsigned int psci_mode_mask; + bool no_saw_devices; +}; + +int set_l2_mode(struct low_power_ops *ops, int mode, + struct lpm_cluster_level *level); +int set_system_mode(struct low_power_ops *ops, int mode, + struct lpm_cluster_level *level); +int set_l3_mode(struct low_power_ops *ops, int mode, + struct lpm_cluster_level *level); +void lpm_suspend_wake_time(uint64_t wakeup_time); + +struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev); +void free_cluster_node(struct lpm_cluster *cluster); +void cluster_dt_walkthrough(struct lpm_cluster *cluster); + +int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj); +bool lpm_cpu_mode_allow(unsigned int cpu, + unsigned int mode, bool from_idle); +bool lpm_cluster_mode_allow(struct lpm_cluster *cluster, + unsigned int mode, bool from_idle); +uint32_t *get_per_cpu_max_residency(int cpu); +extern struct lpm_cluster *lpm_root_node; + +#ifdef CONFIG_SMP +extern DEFINE_PER_CPU(bool, pending_ipi); +static inline bool is_IPI_pending(const struct cpumask *mask) +{ + unsigned int cpu; + + for_each_cpu(cpu, mask) { + if per_cpu(pending_ipi, cpu) + return true; + } + return false; +} +#else +static inline bool is_IPI_pending(const struct cpumask *mask) +{ + 
return false; +} +#endif diff --git a/drivers/cpuidle/lpm-levels-of-legacy.c b/drivers/cpuidle/lpm-levels-of-legacy.c new file mode 100644 index 000000000000..9d5aaf3cb96b --- /dev/null +++ b/drivers/cpuidle/lpm-levels-of-legacy.c @@ -0,0 +1,1014 @@ +/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "lpm-levels-legacy.h" + +bool use_psci; +enum lpm_type { + IDLE = 0, + SUSPEND, + LPM_TYPE_NR +}; + +struct lpm_type_str { + enum lpm_type type; + char *str; +}; + +static const struct lpm_type_str lpm_types[] = { + {IDLE, "idle_enabled"}, + {SUSPEND, "suspend_enabled"}, +}; + +static DEFINE_PER_CPU(uint32_t *, max_residency); +static struct lpm_level_avail *cpu_level_available[NR_CPUS]; +static struct platform_device *lpm_pdev; + +static void *get_enabled_ptr(struct kobj_attribute *attr, + struct lpm_level_avail *avail) +{ + void *arg = NULL; + + if (!strcmp(attr->attr.name, lpm_types[IDLE].str)) + arg = (void *) &avail->idle_enabled; + else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str)) + arg = (void *) &avail->suspend_enabled; + + return arg; +} + +static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj, + struct kobj_attribute *attr) +{ + struct lpm_level_avail *avail = NULL; + + if (!strcmp(attr->attr.name, lpm_types[IDLE].str)) + avail = container_of(attr, struct lpm_level_avail, + idle_enabled_attr); + else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str)) + avail = container_of(attr, struct lpm_level_avail, + suspend_enabled_attr); + + return avail; +} + +static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id, + bool probe_time) +{ + int i, j; + bool mode_avail; + uint32_t *residency = per_cpu(max_residency, cpu_id); + + for (i = 0; i < cpu->nlevels; i++) { + struct power_params *pwr = &cpu->levels[i].pwr; + + mode_avail = probe_time || + lpm_cpu_mode_allow(cpu_id, i, true); + + if (!mode_avail) { + residency[i] = 0; + continue; + } + + residency[i] = ~0; + for (j = i + 1; j < cpu->nlevels; j++) { + mode_avail = probe_time || + lpm_cpu_mode_allow(cpu_id, j, true); + + if (mode_avail && + (residency[i] > pwr->residencies[j]) && + (pwr->residencies[j] != 0)) + residency[i] = pwr->residencies[j]; + } + } +} + +static void set_optimum_cluster_residency(struct lpm_cluster *cluster, + bool probe_time) +{ + int i, j; + bool mode_avail; + + for (i = 0; i < cluster->nlevels; i++) { + struct power_params *pwr = &cluster->levels[i].pwr; + + mode_avail = probe_time || + lpm_cluster_mode_allow(cluster, i, + true); + + if (!mode_avail) { + pwr->max_residency = 0; + continue; + } + + pwr->max_residency = ~0; + for (j = i+1; j < cluster->nlevels; j++) { + mode_avail = probe_time || + lpm_cluster_mode_allow(cluster, j, + true); + if (mode_avail && + (pwr->max_residency > pwr->residencies[j]) && + (pwr->residencies[j] != 0)) + pwr->max_residency = pwr->residencies[j]; + } + } +} + +uint32_t *get_per_cpu_max_residency(int cpu) +{ + return per_cpu(max_residency, cpu); +} + +static 
ssize_t lpm_enable_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int ret = 0; + struct kernel_param kp; + + kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr)); + ret = param_get_bool(buf, &kp); + if (ret > 0) { + strlcat(buf, "\n", PAGE_SIZE); + ret++; + } + + return ret; +} + +static ssize_t lpm_enable_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t len) +{ + int ret = 0; + struct kernel_param kp; + struct lpm_level_avail *avail; + + avail = get_avail_ptr(kobj, attr); + if (WARN_ON(!avail)) + return -EINVAL; + + kp.arg = get_enabled_ptr(attr, avail); + ret = param_set_bool(buf, &kp); + + if (avail->cpu_node) + set_optimum_cpu_residency(avail->data, avail->idx, false); + else + set_optimum_cluster_residency(avail->data, false); + + return ret ? ret : len; +} + +static int create_lvl_avail_nodes(const char *name, + struct kobject *parent, struct lpm_level_avail *avail, + void *data, int index, bool cpu_node) +{ + struct attribute_group *attr_group = NULL; + struct attribute **attr = NULL; + struct kobject *kobj = NULL; + int ret = 0; + + kobj = kobject_create_and_add(name, parent); + if (!kobj) + return -ENOMEM; + + attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group), + GFP_KERNEL); + if (!attr_group) { + ret = -ENOMEM; + goto failed; + } + + attr = devm_kzalloc(&lpm_pdev->dev, + sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL); + if (!attr) { + ret = -ENOMEM; + goto failed; + } + + sysfs_attr_init(&avail->idle_enabled_attr.attr); + avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str; + avail->idle_enabled_attr.attr.mode = 0644; + avail->idle_enabled_attr.show = lpm_enable_show; + avail->idle_enabled_attr.store = lpm_enable_store; + + sysfs_attr_init(&avail->suspend_enabled_attr.attr); + avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str; + avail->suspend_enabled_attr.attr.mode = 0644; + avail->suspend_enabled_attr.show = lpm_enable_show; + avail->suspend_enabled_attr.store = lpm_enable_store; + + attr[0] = &avail->idle_enabled_attr.attr; + attr[1] = &avail->suspend_enabled_attr.attr; + attr[2] = NULL; + attr_group->attrs = attr; + + ret = sysfs_create_group(kobj, attr_group); + if (ret) { + ret = -ENOMEM; + goto failed; + } + + avail->idle_enabled = true; + avail->suspend_enabled = true; + avail->kobj = kobj; + avail->data = data; + avail->idx = index; + avail->cpu_node = cpu_node; + + return ret; + +failed: + kobject_put(kobj); + return ret; +} + +static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent) +{ + int cpu; + int i, cpu_idx; + struct kobject **cpu_kobj = NULL; + struct lpm_level_avail *level_list = NULL; + char cpu_name[20] = {0}; + int ret = 0; + + cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) * + cpumask_weight(&p->child_cpus), GFP_KERNEL); + if (!cpu_kobj) + return -ENOMEM; + + cpu_idx = 0; + for_each_cpu(cpu, &p->child_cpus) { + snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu); + cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent); + if (!cpu_kobj[cpu_idx]) { + ret = -ENOMEM; + goto release_kobj; + } + + level_list = devm_kzalloc(&lpm_pdev->dev, + p->cpu->nlevels * sizeof(*level_list), + GFP_KERNEL); + if (!level_list) { + ret = -ENOMEM; + goto release_kobj; + } + + for (i = 0; i < p->cpu->nlevels; i++) { + + ret = create_lvl_avail_nodes(p->cpu->levels[i].name, + cpu_kobj[cpu_idx], &level_list[i], + (void *)p->cpu, cpu, true); + if (ret) + goto release_kobj; + } + + cpu_level_available[cpu] = level_list; + cpu_idx++; + } + + return ret; 
+ +release_kobj: + for (i = 0; i < cpumask_weight(&p->child_cpus); i++) + kobject_put(cpu_kobj[i]); + + return ret; +} + +int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj) +{ + int ret = 0; + struct lpm_cluster *child = NULL; + int i; + struct kobject *cluster_kobj = NULL; + + if (!p) + return -ENODEV; + + cluster_kobj = kobject_create_and_add(p->cluster_name, kobj); + if (!cluster_kobj) + return -ENOMEM; + + for (i = 0; i < p->nlevels; i++) { + ret = create_lvl_avail_nodes(p->levels[i].level_name, + cluster_kobj, &p->levels[i].available, + (void *)p, 0, false); + if (ret) + return ret; + } + + list_for_each_entry(child, &p->child, list) { + ret = create_cluster_lvl_nodes(child, cluster_kobj); + if (ret) + return ret; + } + + if (p->cpu) { + ret = create_cpu_lvl_nodes(p, cluster_kobj); + if (ret) + return ret; + } + + return 0; +} + +bool lpm_cpu_mode_allow(unsigned int cpu, + unsigned int index, bool from_idle) +{ + struct lpm_level_avail *avail = cpu_level_available[cpu]; + + if (!lpm_pdev || !avail) + return !from_idle; + + return !!(from_idle ? avail[index].idle_enabled : + avail[index].suspend_enabled); +} + +bool lpm_cluster_mode_allow(struct lpm_cluster *cluster, + unsigned int mode, bool from_idle) +{ + struct lpm_level_avail *avail = &cluster->levels[mode].available; + + if (!lpm_pdev || !avail) + return false; + + return !!(from_idle ? avail->idle_enabled : + avail->suspend_enabled); +} + +static int parse_legacy_cluster_params(struct device_node *node, + struct lpm_cluster *c) +{ + int i; + char *key; + int ret; + struct lpm_match { + char *devname; + int (*set_mode)(struct low_power_ops *, int, + struct lpm_cluster_level *); + }; + struct lpm_match match_tbl[] = { + {"l2", set_l2_mode}, + {"cci", set_system_mode}, + {"l3", set_l3_mode}, + {"cbf", set_system_mode}, + }; + + + key = "qcom,spm-device-names"; + c->ndevices = of_property_count_strings(node, key); + + if (c->ndevices < 0) { + pr_info("%s(): Ignoring cluster params\n", __func__); + c->no_saw_devices = true; + c->ndevices = 0; + return 0; + } + + c->name = devm_kzalloc(&lpm_pdev->dev, c->ndevices * sizeof(*c->name), + GFP_KERNEL); + c->lpm_dev = devm_kzalloc(&lpm_pdev->dev, + c->ndevices * sizeof(*c->lpm_dev), + GFP_KERNEL); + if (!c->name || !c->lpm_dev) { + ret = -ENOMEM; + goto failed; + } + + for (i = 0; i < c->ndevices; i++) { + char device_name[20]; + int j; + + ret = of_property_read_string_index(node, key, i, &c->name[i]); + if (ret) + goto failed; + snprintf(device_name, sizeof(device_name), "%s-%s", + c->cluster_name, c->name[i]); + + c->lpm_dev[i].spm = msm_spm_get_device_by_name(device_name); + + if (IS_ERR_OR_NULL(c->lpm_dev[i].spm)) { + pr_err("Failed to get spm device by name:%s\n", + device_name); + ret = PTR_ERR(c->lpm_dev[i].spm); + goto failed; + } + for (j = 0; j < ARRAY_SIZE(match_tbl); j++) { + if (!strcmp(c->name[i], match_tbl[j].devname)) + c->lpm_dev[i].set_mode = match_tbl[j].set_mode; + } + + if (!c->lpm_dev[i].set_mode) { + ret = -ENODEV; + goto failed; + } + } + + key = "qcom,default-level"; + if (of_property_read_u32(node, key, &c->default_level)) + c->default_level = 0; + return 0; +failed: + pr_err("%s(): Failed reading %s\n", __func__, key); + return ret; +} + +static int parse_cluster_params(struct device_node *node, + struct lpm_cluster *c) +{ + char *key; + int ret; + + key = "label"; + ret = of_property_read_string(node, key, &c->cluster_name); + if (ret) { + pr_err("%s(): Cannot read required param %s\n", __func__, key); + return ret; + } + + if (use_psci) { 
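+ /*
+ * PSCI targets: the mode shift/mask describe where this cluster's
+ * low power mode is placed in the composite PSCI state id built
+ * by get_cluster_id().
+ */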
+ key = "qcom,psci-mode-shift"; + ret = of_property_read_u32(node, key, + &c->psci_mode_shift); + if (ret) { + pr_err("%s(): Failed to read param: %s\n", + __func__, key); + return ret; + } + + key = "qcom,psci-mode-mask"; + ret = of_property_read_u32(node, key, + &c->psci_mode_mask); + if (ret) { + pr_err("%s(): Failed to read param: %s\n", + __func__, key); + return ret; + } + + /* Set ndevice to 1 as default */ + c->ndevices = 1; + + return 0; + } else + return parse_legacy_cluster_params(node, c); +} + +static int parse_lpm_mode(const char *str) +{ + int i; + struct lpm_lookup_table mode_lookup[] = { + {MSM_SPM_MODE_POWER_COLLAPSE, "pc"}, + {MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, "spc"}, + {MSM_SPM_MODE_FASTPC, "fpc"}, + {MSM_SPM_MODE_GDHS, "gdhs"}, + {MSM_SPM_MODE_RETENTION, "retention"}, + {MSM_SPM_MODE_CLOCK_GATING, "wfi"}, + {MSM_SPM_MODE_DISABLED, "active"} + }; + + for (i = 0; i < ARRAY_SIZE(mode_lookup); i++) + if (!strcmp(str, mode_lookup[i].mode_name)) + return mode_lookup[i].modes; + return -EINVAL; +} + +static int parse_power_params(struct device_node *node, + struct power_params *pwr) +{ + char *key; + int ret; + + key = "qcom,latency-us"; + ret = of_property_read_u32(node, key, &pwr->latency_us); + if (ret) + goto fail; + + key = "qcom,ss-power"; + ret = of_property_read_u32(node, key, &pwr->ss_power); + if (ret) + goto fail; + + key = "qcom,energy-overhead"; + ret = of_property_read_u32(node, key, &pwr->energy_overhead); + if (ret) + goto fail; + + key = "qcom,time-overhead"; + ret = of_property_read_u32(node, key, &pwr->time_overhead_us); + if (ret) + goto fail; + +fail: + if (ret) + pr_err("%s(): %s Error reading %s\n", __func__, node->name, + key); + return ret; +} + +static int parse_cluster_level(struct device_node *node, + struct lpm_cluster *cluster) +{ + int i = 0; + struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels]; + int ret = -ENOMEM; + char *key; + + key = "label"; + ret = of_property_read_string(node, key, &level->level_name); + if (ret) + goto failed; + + if (use_psci) { + char *k = "qcom,psci-mode"; + + ret = of_property_read_u32(node, k, &level->psci_id); + if (ret) + goto failed; + + level->is_reset = of_property_read_bool(node, "qcom,is-reset"); + } else if (!cluster->no_saw_devices) { + key = "no saw-devices"; + + level->mode = devm_kzalloc(&lpm_pdev->dev, + cluster->ndevices * sizeof(*level->mode), + GFP_KERNEL); + if (!level->mode) { + pr_err("Memory allocation failed\n"); + goto failed; + } + + for (i = 0; i < cluster->ndevices; i++) { + const char *spm_mode; + char key[25] = {0}; + + snprintf(key, 25, "qcom,spm-%s-mode", cluster->name[i]); + ret = of_property_read_string(node, key, &spm_mode); + if (ret) + goto failed; + + level->mode[i] = parse_lpm_mode(spm_mode); + + if (level->mode[i] < 0) + goto failed; + + if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE + || level->mode[i] == + MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE) + level->is_reset |= true; + } + } + + key = "label"; + ret = of_property_read_string(node, key, &level->level_name); + if (ret) + goto failed; + + if (cluster->nlevels != cluster->default_level) { + key = "min child idx"; + ret = of_property_read_u32(node, "qcom,min-child-idx", + &level->min_child_level); + if (ret) + goto failed; + + if (cluster->min_child_level > level->min_child_level) + cluster->min_child_level = level->min_child_level; + } + + level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm"); + level->disable_dynamic_routing = of_property_read_bool(node, + 
"qcom,disable-dynamic-int-routing"); + level->last_core_only = of_property_read_bool(node, + "qcom,last-core-only"); + level->no_cache_flush = of_property_read_bool(node, + "qcom,no-cache-flush"); + + key = "parse_power_params"; + ret = parse_power_params(node, &level->pwr); + if (ret) + goto failed; + + key = "qcom,reset-level"; + ret = of_property_read_u32(node, key, &level->reset_level); + if (ret == -EINVAL) + level->reset_level = LPM_RESET_LVL_NONE; + else if (ret) + goto failed; + + cluster->nlevels++; + return 0; +failed: + pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret); + return ret; +} + +static int parse_cpu_spm_mode(const char *mode_name) +{ + struct lpm_lookup_table pm_sm_lookup[] = { + {MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT, + "wfi"}, + {MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE, + "standalone_pc"}, + {MSM_PM_SLEEP_MODE_POWER_COLLAPSE, + "pc"}, + {MSM_PM_SLEEP_MODE_RETENTION, + "retention"}, + {MSM_PM_SLEEP_MODE_FASTPC, + "fpc"}, + }; + int i; + int ret = -EINVAL; + + for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) { + if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) { + ret = pm_sm_lookup[i].modes; + break; + } + } + return ret; +} + +static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l) +{ + char *key; + int ret; + + key = "qcom,spm-cpu-mode"; + ret = of_property_read_string(n, key, &l->name); + if (ret) { + pr_err("Failed %s %d\n", n->name, __LINE__); + return ret; + } + + if (use_psci) { + key = "qcom,psci-cpu-mode"; + + ret = of_property_read_u32(n, key, &l->psci_id); + if (ret) { + pr_err("Failed reading %s on device %s\n", key, + n->name); + return ret; + } + key = "qcom,hyp-psci"; + + l->hyp_psci = of_property_read_bool(n, key); + } else { + l->mode = parse_cpu_spm_mode(l->name); + + if (l->mode < 0) + return l->mode; + } + return 0; + +} + +static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask) +{ + struct device_node *cpu_node; + int cpu; + int idx = 0; + + cpu_node = of_parse_phandle(node, "qcom,cpu", idx++); + if (!cpu_node) { + pr_info("%s: No CPU phandle, assuming single cluster\n", + node->full_name); + /* + * Not all targets have the cpu node populated in the device + * tree. If cpu node is not populated assume all possible + * nodes belong to this cluster + */ + cpumask_copy(mask, cpu_possible_mask); + return 0; + } + + while (cpu_node) { + for_each_possible_cpu(cpu) { + if (of_get_cpu_node(cpu, NULL) == cpu_node) { + cpumask_set_cpu(cpu, mask); + break; + } + } + of_node_put(cpu_node); + cpu_node = of_parse_phandle(node, "qcom,cpu", idx++); + } + + return 0; +} + +static int calculate_residency(struct power_params *base_pwr, + struct power_params *next_pwr) +{ + int32_t residency = (int32_t)(next_pwr->energy_overhead - + base_pwr->energy_overhead) - + ((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us) + - (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us)); + + residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power); + + if (residency < 0) { + pr_err("%s: residency < 0 for LPM\n", + __func__); + return next_pwr->time_overhead_us; + } + + return residency < next_pwr->time_overhead_us ? 
+ next_pwr->time_overhead_us : residency; +} + +static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c) +{ + struct device_node *n; + int ret = -ENOMEM; + int i, j; + char *key; + + c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL); + if (!c->cpu) + return ret; + + c->cpu->parent = c; + if (use_psci) { + + key = "qcom,psci-mode-shift"; + + ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift); + if (ret) { + pr_err("Failed reading %s on device %s\n", key, + node->name); + return ret; + } + key = "qcom,psci-mode-mask"; + + ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask); + if (ret) { + pr_err("Failed reading %s on device %s\n", key, + node->name); + return ret; + } + } + for_each_child_of_node(node, n) { + struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels]; + + c->cpu->nlevels++; + + ret = parse_cpu_mode(n, l); + if (ret < 0) { + pr_info("Failed %s\n", l->name); + goto failed; + } + + ret = parse_power_params(n, &l->pwr); + if (ret) + goto failed; + + key = "qcom,use-broadcast-timer"; + l->use_bc_timer = of_property_read_bool(n, key); + + l->is_reset = of_property_read_bool(n, "qcom,is-reset"); + + key = "qcom,jtag-save-restore"; + l->jtag_save_restore = of_property_read_bool(n, key); + + key = "qcom,reset-level"; + ret = of_property_read_u32(n, key, &l->reset_level); + if (ret == -EINVAL) + l->reset_level = LPM_RESET_LVL_NONE; + else if (ret) + goto failed; + of_node_put(n); + } + for (i = 0; i < c->cpu->nlevels; i++) { + for (j = 0; j < c->cpu->nlevels; j++) { + if (i >= j) { + c->cpu->levels[i].pwr.residencies[j] = 0; + continue; + } + + c->cpu->levels[i].pwr.residencies[j] = + calculate_residency(&c->cpu->levels[i].pwr, + &c->cpu->levels[j].pwr); + + pr_err("%s: idx %d %u\n", __func__, j, + c->cpu->levels[i].pwr.residencies[j]); + } + } + + return 0; +failed: + of_node_put(n); + pr_err("%s(): Failed with error code:%d\n", __func__, ret); + return ret; +} + +void free_cluster_node(struct lpm_cluster *cluster) +{ + struct lpm_cluster *cl, *m; + + list_for_each_entry_safe(cl, m, &cluster->child, list) { + list_del(&cl->list); + free_cluster_node(cl); + }; + + cluster->ndevices = 0; +} + +/* + * TODO: + * Expects a CPU or a cluster only. This ensures that affinity + * level of a cluster is consistent with reference to its + * child nodes. 
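+ * A qcom,pm-cluster node may contain qcom,pm-cluster-level entries,
+ * nested qcom,pm-cluster nodes and a qcom,pm-cpu node; these are the
+ * child nodes handled below.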
+ */ +static struct lpm_cluster *parse_cluster(struct device_node *node, + struct lpm_cluster *parent) +{ + struct lpm_cluster *c; + struct device_node *n; + char *key; + int ret = 0; + int i, j; + + c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + ret = parse_cluster_params(node, c); + + if (ret) + goto failed_parse_params; + + INIT_LIST_HEAD(&c->child); + c->parent = parent; + spin_lock_init(&c->sync_lock); + c->min_child_level = NR_LPM_LEVELS; + + for_each_child_of_node(node, n) { + + if (!n->name) + continue; + key = "qcom,pm-cluster-level"; + if (!of_node_cmp(n->name, key)) { + if (parse_cluster_level(n, c)) { + of_node_put(n); + goto failed_parse_cluster; + } + of_node_put(n); + continue; + } + + key = "qcom,pm-cluster"; + if (!of_node_cmp(n->name, key)) { + struct lpm_cluster *child; + + if (c->no_saw_devices) + pr_info("%s: SAW device not provided.\n", + __func__); + + child = parse_cluster(n, c); + if (!child) { + of_node_put(n); + goto failed_parse_cluster; + } + + list_add(&child->list, &c->child); + cpumask_or(&c->child_cpus, &c->child_cpus, + &child->child_cpus); + c->aff_level = child->aff_level + 1; + of_node_put(n); + continue; + } + + key = "qcom,pm-cpu"; + if (!of_node_cmp(n->name, key)) { + /* + * Parse the the cpu node only if a pm-cpu node + * is available, though the mask is defined @ the + * cluster level + */ + if (get_cpumask_for_node(node, &c->child_cpus)) + goto failed_parse_cluster; + + if (parse_cpu_levels(n, c)) { + of_node_put(n); + goto failed_parse_cluster; + } + + c->aff_level = 1; + of_node_put(n); + + for_each_cpu(i, &c->child_cpus) { + per_cpu(max_residency, i) = devm_kzalloc( + &lpm_pdev->dev, + sizeof(uint32_t) * c->cpu->nlevels, + GFP_KERNEL); + if (!per_cpu(max_residency, i)) + return ERR_PTR(-ENOMEM); + set_optimum_cpu_residency(c->cpu, i, true); + } + } + } + + if (cpumask_intersects(&c->child_cpus, cpu_online_mask)) + c->last_level = c->default_level; + else + c->last_level = c->nlevels-1; + + for (i = 0; i < c->nlevels; i++) { + for (j = 0; j < c->nlevels; j++) { + if (i >= j) { + c->levels[i].pwr.residencies[j] = 0; + continue; + } + c->levels[i].pwr.residencies[j] = calculate_residency( + &c->levels[i].pwr, &c->levels[j].pwr); + } + } + set_optimum_cluster_residency(c, true); + return c; + +failed_parse_cluster: + pr_err("Failed parse cluster:%s\n", key); + if (parent) + list_del(&c->list); + free_cluster_node(c); +failed_parse_params: + pr_err("Failed parse params\n"); + return NULL; +} +struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev) +{ + struct device_node *top = NULL; + struct lpm_cluster *c; + + use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci"); + + top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster"); + if (!top) { + pr_err("Failed to find root node\n"); + return ERR_PTR(-ENODEV); + } + + lpm_pdev = pdev; + c = parse_cluster(top, NULL); + of_node_put(top); + return c; +} + +void cluster_dt_walkthrough(struct lpm_cluster *cluster) +{ + struct list_head *list; + int i, j; + static int id; + char str[10] = {0}; + + if (!cluster) + return; + + for (i = 0; i < id; i++) + snprintf(str+i, 10 - i, "\t"); + pr_info("%d\n", __LINE__); + + for (i = 0; i < cluster->nlevels; i++) { + struct lpm_cluster_level *l = &cluster->levels[i]; + + pr_info("%d ndevices:%d\n", __LINE__, cluster->ndevices); + for (j = 0; j < cluster->ndevices; j++) + pr_info("%sDevice: %pK id:%pK\n", str, + &cluster->name[j], &l->mode[i]); + } + + if (cluster->cpu) { + 
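/* Dump the per-CPU low power levels attached to this cluster. */
+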
pr_info("%d\n", __LINE__);
+		for (j = 0; j < cluster->cpu->nlevels; j++)
+			pr_info("%s\tCPU mode: %s id:%d\n", str,
+					cluster->cpu->levels[j].name,
+					cluster->cpu->levels[j].mode);
+	}
+
+	id++;
+
+	list_for_each(list, &cluster->child) {
+		struct lpm_cluster *n;
+
+		pr_info("%d\n", __LINE__);
+		n = list_entry(list, typeof(*n), list);
+		cluster_dt_walkthrough(n);
+	}
+	id--;
+}
diff --git a/drivers/cpuidle/lpm-workarounds.c b/drivers/cpuidle/lpm-workarounds.c
new file mode 100644
index 000000000000..f990094f1281
--- /dev/null
+++ b/drivers/cpuidle/lpm-workarounds.c
@@ -0,0 +1,147 @@
+/* Copyright (c) 2014-2016, 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+static struct regulator *lpm_cx_reg;
+static struct work_struct dummy_vote_work;
+static struct workqueue_struct *lpm_wa_wq;
+static bool lpm_wa_cx_turbo_unvote;
+static bool skip_l2_spm;
+
+/*
+ * While exiting from RPM assisted power collapse on some targets like MSM8939,
+ * the CX is bumped to turbo mode by RPM. To reduce the power impact, the APSS
+ * low power driver needs to remove the CX turbo vote.
+ */
+static void send_dummy_cx_vote(struct work_struct *w)
+{
+	if (lpm_cx_reg) {
+		regulator_set_voltage(lpm_cx_reg,
+			RPM_REGULATOR_CORNER_SUPER_TURBO,
+			RPM_REGULATOR_CORNER_SUPER_TURBO);
+
+		regulator_set_voltage(lpm_cx_reg,
+			RPM_REGULATOR_CORNER_NONE,
+			RPM_REGULATOR_CORNER_SUPER_TURBO);
+	}
+}
+
+/*
+ * lpm_wa_cx_unvote_send(): Unvote for CX turbo mode
+ */
+void lpm_wa_cx_unvote_send(void)
+{
+	if (lpm_wa_cx_turbo_unvote)
+		queue_work(lpm_wa_wq, &dummy_vote_work);
+}
+EXPORT_SYMBOL(lpm_wa_cx_unvote_send);
+
+static int lpm_wa_cx_unvote_init(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	lpm_cx_reg = devm_regulator_get(&pdev->dev, "lpm-cx");
+	if (IS_ERR(lpm_cx_reg)) {
+		ret = PTR_ERR(lpm_cx_reg);
+		if (ret != -EPROBE_DEFER)
+			pr_err("Unable to get the CX regulator\n");
+		return ret;
+	}
+
+	INIT_WORK(&dummy_vote_work, send_dummy_cx_vote);
+
+	lpm_wa_wq = alloc_workqueue("lpm-wa",
+			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+
+	return ret;
+}
+
+static int lpm_wa_cx_unvote_exit(void)
+{
+	if (lpm_wa_wq)
+		destroy_workqueue(lpm_wa_wq);
+
+	return 0;
+}
+
+bool lpm_wa_get_skip_l2_spm(void)
+{
+	return skip_l2_spm;
+}
+EXPORT_SYMBOL(lpm_wa_get_skip_l2_spm);
+
+static int lpm_wa_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	lpm_wa_cx_turbo_unvote = of_property_read_bool(pdev->dev.of_node,
+				"qcom,lpm-wa-cx-turbo-unvote");
+	if (lpm_wa_cx_turbo_unvote) {
+		ret = lpm_wa_cx_unvote_init(pdev);
+		if (ret) {
+			pr_err("%s: Failed to initialize lpm_wa_cx_unvote (%d)\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
+	skip_l2_spm = of_property_read_bool(pdev->dev.of_node,
+				"qcom,lpm-wa-skip-l2-spm");
+
+	return ret;
+}
+
+static int lpm_wa_remove(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (lpm_wa_cx_turbo_unvote)
+		ret = lpm_wa_cx_unvote_exit();
+
+	return ret;
+}
+
+static const struct of_device_id lpm_wa_mtch_tbl[] = {
+	{.compatible = "qcom,lpm-workarounds"},
+	{},
+};
+
+static struct platform_driver lpm_wa_driver = {
+	.probe = lpm_wa_probe,
+	.remove = lpm_wa_remove,
+	.driver = {
+		.name = "lpm-workarounds",
+		.owner = THIS_MODULE,
+		.of_match_table = lpm_wa_mtch_tbl,
+	},
+};
+
+static int __init lpm_wa_module_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&lpm_wa_driver);
+	if (ret)
+		pr_info("Error registering %s\n", lpm_wa_driver.driver.name);
+
+	return ret;
+}
+late_initcall(lpm_wa_module_init);
diff --git a/drivers/cpuidle/lpm-workarounds.h b/drivers/cpuidle/lpm-workarounds.h
new file mode 100644
index 000000000000..b5c33effd484
--- /dev/null
+++ b/drivers/cpuidle/lpm-workarounds.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2014-2016, 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LPM_WA_H
+#define __LPM_WA_H
+
+void lpm_wa_cx_unvote_send(void);
+bool lpm_wa_get_skip_l2_spm(void);
+
+#endif /* __LPM_WA_H */
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 80b6c8efa4ec..c9f4f71b08b6 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -788,6 +788,17 @@ config MSM_PM
 	  determines last CPU to call into PSCI for cluster Low power
 	  modes.
 
+config MSM_PM_LEGACY
+	depends on PM
+	select MSM_IDLE_STATS if DEBUG_FS
+	select CPU_IDLE_MULTIPLE_DRIVERS
+	bool "Qualcomm Technologies, Inc. (QTI) platform specific Legacy PM driver"
+	help
+	  Platform specific legacy power driver to manage
+	  cores and L2 low power modes. It interfaces with
+	  various system drivers and puts the cores into
+	  low power modes.
+
 config MSM_NOPM
 	default y if !PM
 	bool
@@ -821,7 +832,7 @@ config QCOM_FSA4480_I2C
 	  for accessing the device, switching between USB and Audio
 	  modes, changing orientation.
 
-if MSM_PM
+if (MSM_PM || MSM_PM_LEGACY)
 menuconfig MSM_IDLE_STATS
 	bool "Collect idle statistics"
 	help
@@ -854,7 +865,7 @@ config MSM_SUSPEND_STATS_FIRST_BUCKET
 	  histogram. This is for collecting statistics on suspend.
 endif # MSM_IDLE_STATS
-endif # MSM_PM
+endif # MSM_PM || MSM_PM_LEGACY
 
 source "drivers/soc/qcom/memshare/Kconfig"
 source "drivers/soc/qcom/hab/Kconfig"
diff --git a/include/soc/qcom/pm-legacy.h b/include/soc/qcom/pm-legacy.h
new file mode 100644
index 000000000000..469e0eb08a25
--- /dev/null
+++ b/include/soc/qcom/pm-legacy.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2016, 2018-2020, The Linux Foundation. All rights reserved.
+ * Author: San Mehat 
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_PM_H
+#define __ARCH_ARM_MACH_MSM_PM_H
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#if !defined(CONFIG_SMP)
+#define msm_secondary_startup NULL
+#elif defined(CONFIG_CPU_V7)
+#define msm_secondary_startup secondary_startup
+#else
+#define msm_secondary_startup secondary_holding_pen
+#endif
+
+enum msm_pm_sleep_mode {
+	MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+	MSM_PM_SLEEP_MODE_RETENTION,
+	MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+	MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+	MSM_PM_SLEEP_MODE_FASTPC,
+	MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND,
+	MSM_PM_SLEEP_MODE_NR,
+	MSM_PM_SLEEP_MODE_NOT_SELECTED,
+};
+
+enum msm_pm_l2_scm_flag {
+	MSM_SCM_L2_ON = 0,
+	MSM_SCM_L2_OFF = 1,
+	MSM_SCM_L2_GDHS = 3,
+	MSM_SCM_L3_PC_OFF = 4,
+};
+
+#define MSM_PM_MODE(cpu, mode_nr) ((cpu) * MSM_PM_SLEEP_MODE_NR + (mode_nr))
+
+struct msm_pm_time_params {
+	uint32_t latency_us;
+	uint32_t sleep_us;
+	uint32_t next_event_us;
+	uint32_t modified_time_us;
+};
+
+struct msm_pm_sleep_status_data {
+	void __iomem *base_addr;
+	uint32_t mask;
+};
+
+struct latency_level {
+	int affinity_level;
+	int reset_level;
+	const char *level_name;
+};
+
+/**
+ * lpm_cpu_pre_pc_cb(): API to get the L2 flag to pass to TZ
+ *
+ * @cpu: cpuid of the CPU going down.
+ *
+ * Returns the l2 flush flag enum that is passed down to TZ during power
+ * collapse.
+ */
+enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu);
+
+/**
+ * msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed.
+ * @cpu: CPU on which to check for the sleep mode.
+ * @mode: Sleep Mode to check for.
+ * @idle: Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if an Idle or Suspend
+ * Sleep mode is allowed for a specific CPU.
+ *
+ * Return: 1 for allowed; 0 if not allowed.
+ */
+int msm_pm_sleep_mode_allow(unsigned int cpu, unsigned int mode, bool idle);
+
+/**
+ * msm_pm_sleep_mode_supported() - API to determine if sleep mode is
+ * supported.
+ * @cpu: CPU on which to check for the sleep mode.
+ * @mode: Sleep Mode to check for.
+ * @idle: Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if an Idle or Suspend
+ * Sleep mode is allowed and enabled for a specific CPU.
+ *
+ * Return: 1 for supported; 0 if not supported.
+ */
+int msm_pm_sleep_mode_supported(unsigned int cpu, unsigned int mode, bool idle);
+
+struct msm_pm_cpr_ops {
+	void (*cpr_suspend)(void);
+	void (*cpr_resume)(void);
+};
+
+void __init msm_pm_set_tz_retention_flag(unsigned int flag);
+void msm_pm_enable_retention(bool enable);
+bool msm_pm_retention_enabled(void);
+bool msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle);
+static inline void msm_arch_idle(void)
+{
+	/* memory barrier */
+	mb();
+	wfi();
+}
+
+#ifdef CONFIG_MSM_PM_LEGACY
+
+void msm_pm_set_rpm_wakeup_irq(unsigned int irq);
+int msm_pm_wait_cpu_shutdown(unsigned int cpu);
+int __init msm_pm_sleep_status_init(void);
+void lpm_cpu_hotplug_enter(unsigned int cpu);
+s32 msm_cpuidle_get_deep_idle_latency(void);
+int msm_pm_collapse(unsigned long unused);
+
+/**
+ * lpm_get_latency() - API to get latency for a low power mode
+ * @latency_level: pointer to structure with below elements
+ * affinity_level: The level (CPU/L2/CCI etc.) for which the
+ * latency is required.
+ * LPM_AFF_LVL_CPU : CPU level + * LPM_AFF_LVL_L2 : L2 level + * LPM_AFF_LVL_CCI : CCI level + * reset_level: Can be passed "LPM_RESET_LVL_GDHS" for + * low power mode with control logic power collapse or + * "LPM_RESET_LVL_PC" for low power mode with control and + * memory logic power collapse or "LPM_RESET_LVL_RET" for + * retention mode. + * level_name: Pointer to the cluster name for which the latency + * is required or NULL if the minimum value out of all the + * clusters is to be returned. For CPU level, the name of the + * L2 cluster to be passed. For CCI it has no effect. + * @latency: address to get the latency value. + * + * latency value will be for the particular cluster or the minimum + * value out of all the clusters at the particular affinity_level + * and reset_level. + * + * Return: 0 for success; Error number for failure. + */ +int lpm_get_latency(struct latency_level *level, uint32_t *latency); + +#else +static inline void msm_pm_set_rpm_wakeup_irq(unsigned int irq) {} +static inline int msm_pm_wait_cpu_shutdown(unsigned int cpu) { return 0; } +static inline int msm_pm_sleep_status_init(void) { return 0; }; + +static inline void lpm_cpu_hotplug_enter(unsigned int cpu) +{ + msm_arch_idle(); +}; + +static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; } +#define msm_pm_collapse NULL + +static inline int lpm_get_latency(struct latency_level *level, + uint32_t *latency) +{ + return 0; +} +#endif + +#ifdef CONFIG_HOTPLUG_CPU +void qcom_cpu_die_legacy(unsigned int cpu); +int qcom_cpu_kill_legacy(unsigned int cpu); +int msm_platform_secondary_init(unsigned int cpu); +#else +static inline int msm_platform_secondary_init(unsigned int cpu) { return 0; } +static inline void qcom_cpu_die_legacy(unsigned int cpu) {} +static inline int qcom_cpu_kill_legacy(unsigned int cpu) { return 0; } +#endif + +enum msm_pm_time_stats_id { + MSM_PM_STAT_REQUESTED_IDLE = 0, + MSM_PM_STAT_IDLE_SPIN, + MSM_PM_STAT_IDLE_WFI, + MSM_PM_STAT_RETENTION, + MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE, + MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE, + MSM_PM_STAT_IDLE_POWER_COLLAPSE, + MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE, + MSM_PM_STAT_SUSPEND, + MSM_PM_STAT_FAILED_SUSPEND, + MSM_PM_STAT_NOT_IDLE, + MSM_PM_STAT_COUNT +}; + +#ifdef CONFIG_MSM_IDLE_STATS +void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size); +void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t); +void msm_pm_l2_add_stat(uint32_t id, int64_t t); +#else +static inline void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, + int size) {} +static inline void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t) {} +static inline void msm_pm_l2_add_stat(uint32_t id, int64_t t) {} +#endif + +void msm_pm_set_cpr_ops(struct msm_pm_cpr_ops *ops); +extern dma_addr_t msm_pc_debug_counters_phys; +#endif /* __ARCH_ARM_MACH_MSM_PM_H */ diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h index c25da0e5e71c..3ec2010fb951 100644 --- a/include/trace/events/trace_msm_low_power.h +++ b/include/trace/events/trace_msm_low_power.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012, 2014-2017, 2019-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -250,6 +250,25 @@ TRACE_EVENT(cluster_pred_hist, __entry->sample, __entry->tmr) ); +TRACE_EVENT(pre_pc_cb, + + TP_PROTO(int tzflag), + + TP_ARGS(tzflag), + + TP_STRUCT__entry( + __field(int, tzflag) + ), + + TP_fast_assign( + __entry->tzflag = tzflag; + ), + + TP_printk("tzflag:%d", + __entry->tzflag + ) +); + #endif #define TRACE_INCLUDE_FILE trace_msm_low_power #include