cpufreq: schedutil: Add hispeed_load, hispeed_freq and pl tunables (CR 2040904)

This change fixes CR 2040904 by adding the hispeed_load, hispeed_freq and
pl tunables, along with WALT-based utilization adjustments, to the
schedutil governor.

Change-Id: Id7e3848df2de77f6fd9388eee9830888004172b2
Signed-off-by: Saravana Kannan <skannan@codeaurora.org>
Signed-off-by: Rohit Gupta <rohgup@codeaurora.org>
[Fixed merge conflicts; added a comment explaining the use of >=]
Signed-off-by: Jonathan Avila <avilaj@codeaurora.org>
tirimbino
Rohit Gupta 8 years ago
parent 05d2ca2420
commit b8b6f565c0
  1. 155
      kernel/sched/cpufreq_schedutil.c

@ -24,6 +24,9 @@
/* Per-attr-set tunables exposed through the schedutil sysfs directory. */
struct sugov_tunables {
struct gov_attr_set attr_set;	/* sysfs attribute-set bookkeeping */
unsigned int rate_limit_us;	/* rate-limit interval, in microseconds */
unsigned int hispeed_load;	/* load %% that triggers the hispeed boost; clamped to <= 100 in the store handler */
unsigned int hispeed_freq;	/* frequency whose util is used as the hispeed boost floor */
bool pl;	/* when true, also honour WALT's walt_load.pl value */
};
/* NOTE(review): diff excerpt — lines are elided at the hunk marker below. */
struct sugov_policy {
@ -37,6 +40,8 @@ struct sugov_policy {
s64 freq_update_delay_ns;
unsigned int next_freq;
unsigned int cached_raw_freq;
unsigned long hispeed_util;	/* cached freq_to_util(hispeed_freq) scaled by TARGET_LOAD/100 */
unsigned long max;	/* last max capacity seen; change triggers hispeed_util recomputation */
/* The next fields are only needed if fast switch cannot be used. */
struct irq_work irq_work;
@ -139,6 +144,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
}
}
#define TARGET_LOAD 80
/**
* get_next_freq - Compute a new frequency for a given cpufreq policy.
* @sg_policy: schedutil policy object to compute the new frequency for.
@ -257,13 +263,46 @@ static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
/* Stub for !CONFIG_NO_HZ_COMMON builds: a CPU is never considered busy. */
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	return false;
}
#endif /* CONFIG_NO_HZ_COMMON */
#define NL_RATIO 75
#define DEFAULT_HISPEED_LOAD 90

/*
 * sugov_walt_adjust - raise *util / *max using WALT load signals.
 *
 * Bumps *util to the cached hispeed_util once the CPU's utilization reaches
 * hispeed_load%% of its current capacity (skipped when the update came from
 * an inter-cluster migration), jumps *util straight to *max when the WALT
 * "nl" signal reaches NL_RATIO%% of the utilization, and, when the 'pl'
 * tunable is set, never lets *util drop below walt_load.pl.
 */
static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
			      unsigned long *max)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long cur_cap = capacity_curr_of(sg_cpu->cpu);
	unsigned long cpu_util = sg_cpu->util;
	unsigned long nl = sg_cpu->walt_load.nl;
	bool migrating = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
	bool hiload;

	/* High load: util at or above hispeed_load%% of current capacity. */
	hiload = cpu_util >= mult_frac(cur_cap,
				       sg_policy->tunables->hispeed_load,
				       100);

	if (hiload && !migrating)
		*util = max(*util, sg_policy->hispeed_util);

	if (hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
		*util = *max;

	if (sg_policy->tunables->pl)
		*util = max(*util, sg_cpu->walt_load.pl);
}
/*
 * freq_to_util - convert a frequency to a utilization value, scaling the
 * policy's max capacity by freq / cpuinfo.max_freq.
 */
static unsigned long freq_to_util(struct sugov_policy *sg_policy,
				  unsigned int freq)
{
	unsigned int fmax = sg_policy->policy->cpuinfo.max_freq;

	return mult_frac(sg_policy->max, freq, fmax);
}
/*
 * sugov_update_single - scheduler hook for a single-CPU policy.
 * NOTE(review): diff excerpt — lines are elided at each "@ -..." hunk
 * marker, and removed/added line pairs appear without -/+ prefixes.
 */
static void sugov_update_single(struct update_util_data *hook, u64 time,
unsigned int flags)
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
unsigned long util, max;
/* ^ removed line; replaced by the declaration below, which adds hs_util */
unsigned long util, max, hs_util;
unsigned int next_f;
bool busy;
@ -276,11 +315,26 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
busy = sugov_cpu_is_busy(sg_cpu);
raw_spin_lock(&sg_policy->update_lock);
if (flags & SCHED_CPUFREQ_RT_DL) {
/* RT/DL requests pin the policy at its maximum frequency. */
next_f = policy->cpuinfo.max_freq;
} else {
sugov_get_util(&util, &max, sg_cpu->cpu);
/* Max capacity changed: recompute the cached hispeed_util. */
if (sg_policy->max != max) {
sg_policy->max = max;
hs_util = freq_to_util(sg_policy,
sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
}
sg_cpu->util = util;
sg_cpu->max = max;
sg_cpu->flags = flags;
sugov_iowait_boost(sg_cpu, &util, &max);
sugov_walt_adjust(sg_cpu, &util, &max);
next_f = get_next_freq(sg_policy, util, max);
/*
* Do not reduce the frequency if the CPU has not been idle
@ -294,6 +348,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
}
}
sugov_update_commit(sg_policy, time, next_f);
raw_spin_unlock(&sg_policy->update_lock);
}
/*
 * sugov_next_freq_shared - aggregate util across CPUs of a shared policy.
 * NOTE(review): diff excerpt — lines are elided at the "@ -..." hunk marker.
 */
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
@ -325,14 +380,22 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
return policy->cpuinfo.max_freq;
/*
* If the util value for all CPUs in a policy is 0, just using >
* will result in a max value of 1. WALT stats can later update
* the aggregated util value, causing get_next_freq() to compute
* freq = max_freq * 1.25 * (util / max) for nonzero util,
* leading to spurious jumps to fmax.
*/
j_util = j_sg_cpu->util;
j_max = j_sg_cpu->max;
/* NOTE(review): the next two lines are the pre-/post-change pair from
the diff ('>' was replaced by '>='); only the '>=' form survives. */
if (j_util * max > j_max * util) {
if (j_util * max >= j_max * util) {
util = j_util;
max = j_max;
}
sugov_iowait_boost(j_sg_cpu, &util, &max);
sugov_walt_adjust(j_sg_cpu, &util, &max);
}
return get_next_freq(sg_policy, util, max);
@ -343,7 +406,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned long util, max;
/* ^ removed line; replaced by the declaration below, which adds hs_util */
unsigned long util, max, hs_util;
unsigned int next_f;
sugov_get_util(&util, &max, sg_cpu->cpu);
@ -352,6 +415,14 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
raw_spin_lock(&sg_policy->update_lock);
/* Max capacity changed: recompute the cached hispeed_util; this is the
same recomputation sugov_update_single() performs. */
if (sg_policy->max != max) {
sg_policy->max = max;
hs_util = freq_to_util(sg_policy,
sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
}
sg_cpu->util = util;
sg_cpu->max = max;
sg_cpu->flags = flags;
@ -440,10 +511,86 @@ static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *bu
return count;
}
static ssize_t hispeed_load_show(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_load);
}
/*
 * hispeed_load_store - sysfs store handler for the hispeed_load tunable.
 *
 * Accepts a decimal percentage; values above 100 are clamped to 100.
 * The input is parsed and clamped in a local variable before a single
 * write to the live tunable, so lockless readers such as
 * sugov_walt_adjust() never observe an out-of-range transient (the
 * previous code wrote the raw value first and clamped afterwards).
 * Returns -EINVAL on malformed input, count on success.
 */
static ssize_t hispeed_load_store(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	unsigned int val;

	if (kstrtouint(buf, 10, &val))
		return -EINVAL;

	/* Publish the final, clamped value with one store. */
	tunables->hispeed_load = min(100U, val);

	return count;
}
static ssize_t hispeed_freq_show(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_freq);
}
/*
 * hispeed_freq_store - sysfs store handler for the hispeed_freq tunable.
 *
 * Parses the new frequency, publishes it, then refreshes the cached
 * hispeed_util (freq_to_util scaled by TARGET_LOAD/100) for every policy
 * attached to this attr set, under each policy's update_lock.
 * Returns -EINVAL on malformed input, count on success.
 */
static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned long irq_flags;
	unsigned long util;
	unsigned int val;

	if (kstrtouint(buf, 10, &val))
		return -EINVAL;

	tunables->hispeed_freq = val;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		raw_spin_lock_irqsave(&sg_policy->update_lock, irq_flags);
		util = freq_to_util(sg_policy,
				    sg_policy->tunables->hispeed_freq);
		util = mult_frac(util, TARGET_LOAD, 100);
		sg_policy->hispeed_util = util;
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, irq_flags);
	}

	return count;
}
static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->pl);
}
/*
 * pl_store - sysfs store handler for the pl boolean tunable.
 * Returns -EINVAL when the input is not a recognized boolean string.
 */
static ssize_t pl_store(struct gov_attr_set *attr_set, const char *buf,
			size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return kstrtobool(buf, &tunables->pl) ? -EINVAL : count;
}
/* sysfs attribute objects backing the schedutil tunables directory. */
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load);
static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
static struct governor_attr pl = __ATTR_RW(pl);
/* NULL-terminated table handed to the attribute group. */
static struct attribute *sugov_attributes[] = {
&rate_limit_us.attr,
&hispeed_load.attr,
&hispeed_freq.attr,
&pl.attr,
NULL
};
@ -591,6 +738,8 @@ static int sugov_init(struct cpufreq_policy *policy)
}
tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);
/* Defaults: boost at 90% load (DEFAULT_HISPEED_LOAD); hispeed_freq of 0
leaves the cached hispeed_util at 0, i.e. no boost until userspace
writes a frequency. */
tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
tunables->hispeed_freq = 0;
policy->governor_data = sg_policy;
sg_policy->tunables = tunables;

Loading…
Cancel
Save