cpuidle: lpm-levels: Use static residency values

Current residency values are calculated based on LPM
parameters such as time overhead, energy overhead, steady
state power and latency. Most of these are unused after
calculating residency.

Remove residency calculation and unused LPM parameters.
Add minimum residency parameter to be used by LPM driver.

Change-Id: I1fc8321bf2ef0454d59c148849a810fb1ecae99c
Signed-off-by: Maulik Shah <mkshah@codeaurora.org>
tirimbino
Maulik Shah 7 years ago
parent 77880ea2bd
commit 0bf967fe7c
  1. 116
      Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
  2. 202
      drivers/cpuidle/lpm-levels-of.c
  3. 63
      drivers/cpuidle/lpm-levels.c
  4. 9
      drivers/cpuidle/lpm-levels.h

@@ -50,14 +50,10 @@ Optional properties:
- qcom,min-child-idx: The minimum level that a child CPU should be in
before this level can be chosen. This property is required for all
non-default level.
- qcom,latency-us: The latency in handling the interrupt if this level
was chosen, in uSec
- qcom,ss-power: The steady state power expelled when the processor is
in this level in mWatts
- qcom,energy-overhead: The energy used up in entering and exiting
this level in mWatts.uSec
- qcom,time-overhead: The time spent in entering and exiting this
level in uS
- qcom,entry-latency-us: The latency to enter LPM level, in uSec
- qcom,exit-latency-us: The latency to exit LPM level, in uSec
- qcom,min-residency-us: The minimum residency value from which entering
to low power mode is beneficial, in uSec
Optional properties:
- qcom,notify-rpm: When set, the driver configures the sleep and wake
@@ -107,14 +103,10 @@ Optional properties:
- reg: The numeric cpu level id
- label: Name to identify the low power mode in stats
- qcom,psci-cpu-mode: ID to be passed into PSCI firmware.
- qcom,latency-us: The latency in handling the interrupt if this level
was chosen, in uSec
- qcom,ss-power: The steady state power expelled when the processor is
in this level in mWatts
- qcom,energy-overhead: The energy used up in entering and exiting
this level in mWatts.uSec
- qcom,time-overhead: The time spent in entering and exiting this
level in uS
- qcom,entry-latency-us: The latency to enter LPM level, in uSec
- qcom,exit-latency-us: The latency to exit LPM level, in uSec
- qcom,min-residency-us: The minimum residency value from which entering
to low power mode is beneficial, in uSec
Optional properties:
- qcom,is-reset: This boolean property maps to "power state" bit in PSCI
@@ -144,31 +136,27 @@ Optional properties:
reg = <0>;
label = "l3-wfi";
qcom,psci-mode = <0x1>;
qcom,latency-us = <51>;
qcom,ss-power = <452>;
qcom,energy-overhead = <69355>;
qcom,time-overhead = <99>;
qcom,entry-latency-us = <48>;
qcom,exit-latency-us = <51>;
qcom,min-residency-us = <99>;
};
qcom,pm-cluster-level@1 { /* D2 */
reg = <1>;
label = "l3-dyn-ret";
qcom,psci-mode = <0x2>;
qcom,latency-us = <659>;
qcom,ss-power = <434>;
qcom,energy-overhead = <465725>;
qcom,time-overhead = <976>;
qcom,min-child-idx = <1>;
qcom,entry-latency-us = <317>;
qcom,exit-latency-us = <659>;
qcom,min-residency-us = <4065>;
};
qcom,pm-cluster-level@2 { /* D4, D3 is not supported */
reg = <2>;
label = "l3-pc";
qcom,psci-mode = <0x4>;
qcom,latency-us = <4562>;
qcom,ss-power = <408>;
qcom,energy-overhead = <2421840>;
qcom,time-overhead = <5376>;
qcom,entry-latency-us = <814>;
qcom,exit-latency-us = <4562>;
qcom,min-residency-us = <7085>;
qcom,min-child-idx = <2>;
qcom,is-reset;
};
@@ -177,10 +165,9 @@ Optional properties:
reg = <3>;
label = "cx-off";
qcom,psci-mode = <0x224>;
qcom,latency-us = <5562>;
qcom,ss-power = <308>;
qcom,energy-overhead = <2521840>;
qcom,time-overhead = <6376>;
qcom,entry-latency-us = <814>;
qcom,exit-latency-us = <5562>;
qcom,min-residency-us = <9987>;
qcom,min-child-idx = <3>;
qcom,is-reset;
qcom,notify-rpm;
@@ -190,10 +177,9 @@ Optional properties:
reg = <4>;
label = "llcc-off";
qcom,psci-mode = <0xC24>;
qcom,latency-us = <6562>;
qcom,ss-power = <108>;
qcom,energy-overhead = <2621840>;
qcom,time-overhead = <7376>;
qcom,entry-latency-us = <814>;
qcom,exit-latency-us = <6562>;
qcom,min-residency-us = <10100>;
qcom,min-child-idx = <3>;
qcom,is-reset;
qcom,notify-rpm;
@@ -210,30 +196,27 @@ Optional properties:
reg = <0>;
label = "wfi";
qcom,psci-cpu-mode = <0x1>;
qcom,latency-us = <43>;
qcom,ss-power = <454>;
qcom,energy-overhead = <38639>;
qcom,time-overhead = <83>;
qcom,entry-latency-us = <40>;
qcom,exit-latency-us = <43>;
qcom,min-residency-us = <100>;
};
qcom,pm-cpu-level@1 { /* C2D */
reg = <1>;
label = "ret";
qcom,psci-cpu-mode = <0x2>;
qcom,latency-us = <86>;
qcom,ss-power = <449>;
qcom,energy-overhead = <78456>;
qcom,time-overhead = <167>;
qcom,entry-latency-us = <81>;
qcom,exit-latency-us = <86>;
qcom,min-residency-us = <965>;
};
qcom,pm-cpu-level@2 { /* C3 */
reg = <2>;
label = "pc";
qcom,psci-cpu-mode = <0x3>;
qcom,latency-us = <612>;
qcom,ss-power = <436>;
qcom,energy-overhead = <418225>;
qcom,time-overhead = <885>;
qcom,entry-latency-us = <273>;
qcom,exit-latency-us = <612>;
qcom,min-residency-us = <1890>;
qcom,is-reset;
};
@@ -241,10 +224,9 @@ Optional properties:
reg = <3>;
label = "rail-pc";
qcom,psci-cpu-mode = <0x4>;
qcom,latency-us = <700>;
qcom,ss-power = <400>;
qcom,energy-overhead = <428225>;
qcom,time-overhead = <1000>;
qcom,entry-latency-us = <300>;
qcom,exit-latency-us = <700>;
qcom,min-residency-us = <3934>;
qcom,is-reset;
};
};
@@ -260,30 +242,27 @@ Optional properties:
reg = <0>;
label = "wfi";
qcom,psci-cpu-mode = <0x1>;
qcom,latency-us = <43>;
qcom,ss-power = <454>;
qcom,energy-overhead = <38639>;
qcom,time-overhead = <83>;
qcom,entry-latency-us = <40>;
qcom,exit-latency-us = <43>;
qcom,min-residency-us = <83>;
};
qcom,pm-cpu-level@1 { /* C2D */
reg = <1>;
label = "ret";
qcom,psci-cpu-mode = <0x2>;
qcom,latency-us = <86>;
qcom,ss-power = <449>;
qcom,energy-overhead = <78456>;
qcom,time-overhead = <167>;
qcom,entry-latency-us = <81>;
qcom,exit-latency-us = <86>;
qcom,min-residency-us = <637>;
};
qcom,pm-cpu-level@2 { /* C3 */
reg = <2>;
label = "pc";
qcom,psci-cpu-mode = <0x3>;
qcom,latency-us = <612>;
qcom,ss-power = <436>;
qcom,energy-overhead = <418225>;
qcom,time-overhead = <885>;
qcom,entry-latency-us = <273>;
qcom,exit-latency-us = <612>;
qcom,min-residency-us = <952>;
qcom,is-reset;
};
@@ -291,10 +270,9 @@ Optional properties:
reg = <3>;
label = "rail-pc";
qcom,psci-cpu-mode = <0x4>;
qcom,latency-us = <700>;
qcom,ss-power = <400>;
qcom,energy-overhead = <428225>;
qcom,time-overhead = <1000>;
qcom,entry-latency-us = <300>;
qcom,exit-latency-us = <700>;
qcom,min-residency-us = <4488>;
qcom,is-reset;
};
};

@@ -39,11 +39,9 @@ struct lpm_type_str {
static const struct lpm_type_str lpm_types[] = {
{IDLE, "idle_enabled"},
{SUSPEND, "suspend_enabled"},
{LATENCY, "latency_us"},
{LATENCY, "exit_latency_us"},
};
static DEFINE_PER_CPU(uint32_t *, max_residency);
static DEFINE_PER_CPU(uint32_t *, min_residency);
static struct lpm_level_avail *cpu_level_available[NR_CPUS];
static struct platform_device *lpm_pdev;
@@ -78,99 +76,6 @@ static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
return avail;
}
static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
bool probe_time)
{
int i, j;
bool mode_avail;
uint32_t *maximum_residency = per_cpu(max_residency, cpu_id);
uint32_t *minimum_residency = per_cpu(min_residency, cpu_id);
for (i = 0; i < cpu->nlevels; i++) {
struct power_params *pwr = &cpu->levels[i].pwr;
mode_avail = probe_time ||
lpm_cpu_mode_allow(cpu_id, i, true);
if (!mode_avail) {
maximum_residency[i] = 0;
minimum_residency[i] = 0;
continue;
}
maximum_residency[i] = ~0;
for (j = i + 1; j < cpu->nlevels; j++) {
mode_avail = probe_time ||
lpm_cpu_mode_allow(cpu_id, j, true);
if (mode_avail &&
(maximum_residency[i] > pwr->residencies[j]) &&
(pwr->residencies[j] != 0))
maximum_residency[i] = pwr->residencies[j];
}
minimum_residency[i] = pwr->time_overhead_us;
for (j = i-1; j >= 0; j--) {
if (probe_time || lpm_cpu_mode_allow(cpu_id, j, true)) {
minimum_residency[i] = maximum_residency[j] + 1;
break;
}
}
}
}
static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
bool probe_time)
{
int i, j;
bool mode_avail;
for (i = 0; i < cluster->nlevels; i++) {
struct power_params *pwr = &cluster->levels[i].pwr;
mode_avail = probe_time ||
lpm_cluster_mode_allow(cluster, i,
true);
if (!mode_avail) {
pwr->max_residency = 0;
pwr->min_residency = 0;
continue;
}
pwr->max_residency = ~0;
for (j = i+1; j < cluster->nlevels; j++) {
mode_avail = probe_time ||
lpm_cluster_mode_allow(cluster, j,
true);
if (mode_avail &&
(pwr->max_residency > pwr->residencies[j]) &&
(pwr->residencies[j] != 0))
pwr->max_residency = pwr->residencies[j];
}
pwr->min_residency = pwr->time_overhead_us;
for (j = i-1; j >= 0; j--) {
if (probe_time ||
lpm_cluster_mode_allow(cluster, j, true)) {
pwr->min_residency =
cluster->levels[j].pwr.max_residency + 1;
break;
}
}
}
}
uint32_t *get_per_cpu_max_residency(int cpu)
{
return per_cpu(max_residency, cpu);
}
uint32_t *get_per_cpu_min_residency(int cpu)
{
return per_cpu(min_residency, cpu);
}
static ssize_t lpm_latency_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -181,7 +86,7 @@ static ssize_t lpm_latency_show(struct kobject *kobj,
if (WARN_ON(!avail))
return -EINVAL;
kp.arg = &avail->latency_us;
kp.arg = &avail->exit_latency;
ret = param_get_uint(buf, &kp);
if (ret > 0) {
@@ -229,11 +134,6 @@ ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
kp.arg = get_enabled_ptr(attr, avail);
ret = param_set_bool(buf, &kp);
if (avail->cpu_node)
set_optimum_cpu_residency(avail->data, avail->idx, false);
else
set_optimum_cluster_residency(avail->data, false);
return ret ? ret : len;
}
@@ -349,8 +249,8 @@ static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
* be available at all times.
*/
for (i = 1; i < lpm_cpu->nlevels; i++) {
level_list[i].latency_us =
p->levels[i].pwr.latency_us;
level_list[i].exit_latency =
p->levels[i].pwr.exit_latency;
ret = create_lvl_avail_nodes(
lpm_cpu->levels[i].name,
cpu_kobj[cpu_idx],
@@ -389,7 +289,8 @@ int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
return -ENOMEM;
for (i = 0; i < p->nlevels; i++) {
p->levels[i].available.latency_us = p->levels[i].pwr.latency_us;
p->levels[i].available.exit_latency =
p->levels[i].pwr.exit_latency;
ret = create_lvl_avail_nodes(p->levels[i].level_name,
cluster_kobj, &p->levels[i].available,
(void *)p, 0, false);
@@ -487,23 +388,18 @@ static int parse_power_params(struct device_node *node,
char *key;
int ret;
key = "qcom,latency-us";
ret = of_property_read_u32(node, key, &pwr->latency_us);
key = "qcom,entry-latency-us";
ret = of_property_read_u32(node, key, &pwr->entry_latency);
if (ret)
goto fail;
key = "qcom,ss-power";
ret = of_property_read_u32(node, key, &pwr->ss_power);
key = "qcom,exit-latency-us";
ret = of_property_read_u32(node, key, &pwr->exit_latency);
if (ret)
goto fail;
key = "qcom,energy-overhead";
ret = of_property_read_u32(node, key, &pwr->energy_overhead);
if (ret)
goto fail;
key = "qcom,time-overhead";
ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
key = "qcom,min-residency-us";
ret = of_property_read_u32(node, key, &pwr->min_residency);
if (ret)
goto fail;
@@ -621,30 +517,11 @@ static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
return 0;
}
static int calculate_residency(struct power_params *base_pwr,
struct power_params *next_pwr)
{
int32_t residency = (int32_t)(next_pwr->energy_overhead -
base_pwr->energy_overhead) -
((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
- (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));
residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);
if (residency < 0) {
pr_err("Residency < 0 for LPM\n");
return next_pwr->time_overhead_us;
}
return residency < next_pwr->time_overhead_us ?
next_pwr->time_overhead_us : residency;
}
static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu)
{
struct device_node *n;
int ret, i, j;
int ret, i;
const char *key;
for_each_child_of_node(node, n) {
@@ -678,36 +555,11 @@ static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu)
of_node_put(n);
}
for (i = 0; i < cpu->nlevels; i++) {
for (j = 0; j < cpu->nlevels; j++) {
if (i >= j) {
cpu->levels[i].pwr.residencies[j] = 0;
continue;
}
cpu->levels[i].pwr.residencies[j] =
calculate_residency(&cpu->levels[i].pwr,
&cpu->levels[j].pwr);
for (i = 1; i < cpu->nlevels; i++)
cpu->levels[i-1].pwr.max_residency =
cpu->levels[i].pwr.min_residency - 1;
pr_info("idx %d %u\n", j,
cpu->levels[i].pwr.residencies[j]);
}
}
for_each_cpu(i, &cpu->related_cpus) {
per_cpu(max_residency, i) = devm_kzalloc(&lpm_pdev->dev,
sizeof(uint32_t) * cpu->nlevels, GFP_KERNEL);
if (!per_cpu(max_residency, i))
return -ENOMEM;
per_cpu(min_residency, i) = devm_kzalloc(&lpm_pdev->dev,
sizeof(uint32_t) * cpu->nlevels, GFP_KERNEL);
if (!per_cpu(min_residency, i))
return -ENOMEM;
set_optimum_cpu_residency(cpu, i, true);
}
cpu->levels[i-1].pwr.max_residency = UINT_MAX;
return 0;
}
@@ -801,8 +653,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
struct lpm_cluster *c;
struct device_node *n;
char *key;
int ret = 0;
int i, j;
int ret = 0, i;
c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
if (!c)
@@ -868,17 +719,12 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
else
c->last_level = c->nlevels-1;
for (i = 0; i < c->nlevels; i++) {
for (j = 0; j < c->nlevels; j++) {
if (i >= j) {
c->levels[i].pwr.residencies[j] = 0;
continue;
}
c->levels[i].pwr.residencies[j] = calculate_residency(
&c->levels[i].pwr, &c->levels[j].pwr);
}
}
set_optimum_cluster_residency(c, true);
for (i = 1; i < c->nlevels; i++)
c->levels[i-1].pwr.max_residency =
c->levels[i].pwr.min_residency - 1;
c->levels[i-1].pwr.max_residency = UINT_MAX;
return c;
failed_parse_cluster:

@@ -162,9 +162,9 @@ static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
level = &cluster->levels[i];
pwr_params = &level->pwr;
if (lat_level->reset_level == level->reset_level) {
if ((latency > pwr_params->latency_us)
if ((latency > pwr_params->exit_latency)
|| (!latency))
latency = pwr_params->latency_us;
latency = pwr_params->exit_latency;
break;
}
}
@@ -181,10 +181,10 @@ static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
pwr_params = &level->pwr;
if (lat_level->reset_level ==
level->reset_level) {
if ((latency > pwr_params->latency_us)
if ((latency > pwr_params->exit_latency)
|| (!latency))
latency =
pwr_params->latency_us;
pwr_params->exit_latency;
break;
}
}
@@ -216,9 +216,9 @@ static uint32_t least_cpu_latency(struct list_head *child,
pwr_params = &level->pwr;
if (lat_level->reset_level
== level->reset_level) {
if ((lat > pwr_params->latency_us)
if ((lat > pwr_params->exit_latency)
|| (!lat))
lat = pwr_params->latency_us;
lat = pwr_params->exit_latency;
break;
}
}
@@ -427,8 +427,6 @@ static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
uint64_t max, avg, stddev;
int64_t thresh = LLONG_MAX;
struct lpm_history *history = &per_cpu(hist, dev->cpu);
uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);
if (!lpm_prediction || !cpu->lpm_prediction)
return 0;
@@ -505,12 +503,15 @@ again:
*/
if (history->htmr_wkup != 1) {
for (j = 1; j < cpu->nlevels; j++) {
struct lpm_cpu_level *level = &cpu->levels[j];
uint32_t min_residency = level->pwr.min_residency;
uint32_t max_residency = level->pwr.max_residency;
uint32_t failed = 0;
uint64_t total = 0;
for (i = 0; i < MAXSAMPLES; i++) {
if ((history->mode[i] == j) &&
(history->resi[i] < min_residency[j])) {
(history->resi[i] < min_residency)) {
failed++;
total += history->resi[i];
}
@@ -519,9 +520,9 @@ again:
*idx_restrict = j;
do_div(total, failed);
for (i = 0; i < j; i++) {
if (total < max_residency[i]) {
if (total < max_residency) {
*idx_restrict = i+1;
total = max_residency[i];
total = max_residency;
break;
}
}
@@ -601,8 +602,8 @@ static int cpu_power_select(struct cpuidle_device *dev,
uint64_t predicted = 0;
uint32_t htime = 0, idx_restrict_time = 0;
uint32_t next_wakeup_us = (uint32_t)sleep_us;
uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);
uint32_t min_residency, max_residency;
struct power_params *pwr_params;
if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
return 0;
@@ -615,8 +616,6 @@ static int cpu_power_select(struct cpuidle_device *dev,
goto done_select;
for (i = 0; i < cpu->nlevels; i++) {
struct lpm_cpu_level *level = &cpu->levels[i];
struct power_params *pwr_params = &level->pwr;
bool allow;
allow = i ? lpm_cpu_mode_allow(dev->cpu, i, true) : true;
@@ -624,7 +623,10 @@ static int cpu_power_select(struct cpuidle_device *dev,
if (!allow)
continue;
lvl_latency_us = pwr_params->latency_us;
pwr_params = &cpu->levels[i].pwr;
lvl_latency_us = pwr_params->exit_latency;
min_residency = pwr_params->min_residency;
max_residency = pwr_params->max_residency;
if (latency_us < lvl_latency_us)
break;
@@ -644,11 +646,11 @@ static int cpu_power_select(struct cpuidle_device *dev,
* deeper low power modes than clock gating do not
* call prediction.
*/
if (next_wakeup_us > max_residency[i]) {
if (next_wakeup_us > max_residency) {
predicted = lpm_cpuidle_predict(dev, cpu,
&idx_restrict, &idx_restrict_time);
if (predicted && (predicted < min_residency[i]))
predicted = min_residency[i];
if (predicted && (predicted < min_residency))
predicted = min_residency;
} else
invalidate_predict_history(dev);
}
@@ -663,8 +665,8 @@ static int cpu_power_select(struct cpuidle_device *dev,
else
modified_time_us = 0;
if (predicted ? (predicted <= max_residency[i])
: (next_wakeup_us <= max_residency[i]))
if (predicted ? (predicted <= max_residency)
: (next_wakeup_us <= max_residency))
break;
}
@@ -675,17 +677,22 @@ static int cpu_power_select(struct cpuidle_device *dev,
* Start timer to avoid staying in shallower mode forever
* incase of misprediciton
*/
pwr_params = &cpu->levels[best_level].pwr;
min_residency = pwr_params->min_residency;
max_residency = pwr_params->max_residency;
if ((predicted || (idx_restrict != (cpu->nlevels + 1)))
&& ((best_level >= 0)
&& (best_level < (cpu->nlevels-1)))) {
htime = predicted + cpu->tmr_add;
if (htime == cpu->tmr_add)
htime = idx_restrict_time;
else if (htime > max_residency[best_level])
htime = max_residency[best_level];
else if (htime > max_residency)
htime = max_residency;
if ((next_wakeup_us > htime) &&
((next_wakeup_us - htime) > max_residency[best_level]))
((next_wakeup_us - htime) > max_residency))
histtimer_start(htime);
}
@@ -962,10 +969,11 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
&level->num_cpu_votes))
continue;
if (from_idle && latency_us < pwr_params->latency_us)
if (from_idle && latency_us < pwr_params->exit_latency)
break;
if (sleep_us < pwr_params->time_overhead_us)
if (sleep_us < (pwr_params->exit_latency +
pwr_params->entry_latency))
break;
if (suspend_in_progress && from_idle && level->notify_rpm)
@@ -1504,8 +1512,7 @@ static int cluster_cpuidle_register(struct lpm_cluster *cl)
snprintf(st->desc, CPUIDLE_DESC_LEN, "%s",
cpu_level->name);
st->flags = 0;
st->exit_latency = cpu_level->pwr.latency_us;
st->power_usage = cpu_level->pwr.ss_power;
st->exit_latency = cpu_level->pwr.exit_latency;
st->target_residency = 0;
st->enter = lpm_cpuidle_enter;
if (i == lpm_cpu->nlevels - 1)

@@ -26,11 +26,8 @@
#define PREMATURE_CNT_HIGH 5
struct power_params {
uint32_t latency_us; /* Enter + Exit latency */
uint32_t ss_power; /* Steady state power */
uint32_t energy_overhead; /* Enter + exit over head */
uint32_t time_overhead_us; /* Enter + exit overhead */
uint32_t residencies[NR_LPM_LEVELS];
uint32_t entry_latency; /* Entry latency */
uint32_t exit_latency; /* Exit latency */
uint32_t min_residency;
uint32_t max_residency;
};
@@ -62,7 +59,7 @@ struct lpm_cpu {
struct lpm_level_avail {
bool idle_enabled;
bool suspend_enabled;
uint32_t latency_us;
uint32_t exit_latency;
struct kobject *kobj;
struct kobj_attribute idle_enabled_attr;
struct kobj_attribute suspend_enabled_attr;

Loading…
Cancel
Save