cpufreq: schedutil: clear cached_raw_freq when invalidated

The cpufreq_schedutil governor keeps a cache of the last
raw frequency that was mapped to a supported device frequency.
If the next computed raw frequency matches the cached
value, the previously resolved next_freq value is reused. But there
are paths that can update the raw cached value without
updating the next_freq value, and there are paths that
can set the next_freq value without setting the raw
cached value. On those paths, the cached value
must be reset.
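
For context, the cache fast path lives in get_next_freq(); the sketch
below is paraphrased from the upstream schedutil code of roughly this
era and the exact form differs between kernel versions:

static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	/* raw (unresolved) frequency estimate for the current utilization */
	freq = (freq + (freq >> 2)) * util / max;

	/*
	 * Fast path: if the raw value matches the cache, reuse the
	 * previously resolved next_freq instead of resolving again.
	 */
	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}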

The case that has been observed is when a frequency request
reaches sugov_update_commit but is then rejected by
the sugov_up_down_rate_limit check.
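
Concretely, before this patch the commit path looked roughly as below
(reconstructed from the context lines in the diff that follows): a
rate-limited request returns early with next_freq unchanged while
cached_raw_freq still holds the raw value that was never committed.

static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	...
	if (sg_policy->next_freq == next_freq)
		return;

	/*
	 * Request rejected here: next_freq keeps its old value, but the
	 * raw value computed for it still sits in cached_raw_freq.
	 */
	if (sugov_up_down_rate_limit(sg_policy, time, next_freq))
		return;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;
	...
}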

Bug: 116279565
Change-Id: I7c585339a04ff1732054d6e5b36a57e2d41266aa
Signed-off-by: John Dias <joaodias@google.com>
Signed-off-by: Miguel de Dios <migueldedios@google.com>
Signed-off-by: Alexander Winkowski <dereference23@outlook.com>
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -162,8 +162,11 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 	if (sg_policy->next_freq == next_freq)
 		return;
 
-	if (sugov_up_down_rate_limit(sg_policy, time, next_freq))
+	if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
+		/* Don't cache a raw freq that didn't become next_freq */
+		sg_policy->cached_raw_freq = 0;
 		return;
+	}
 
 	sg_policy->next_freq = next_freq;
 	sg_policy->last_freq_update_time = time;
@@ -331,6 +334,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	busy = use_pelt() && sugov_cpu_is_busy(sg_cpu);
 
 	if (flags & SCHED_CPUFREQ_RT_DL) {
+		/* clear cache when it's bypassed */
+		sg_policy->cached_raw_freq = 0;
 		next_f = policy->cpuinfo.max_freq;
 	} else {
 		sugov_get_util(&util, &max, sg_cpu->cpu);
@@ -377,8 +382,11 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 			j_sg_cpu->iowait_boost_pending = false;
 			continue;
 		}
-		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
+		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL) {
+			/* clear cache when it's bypassed */
+			sg_policy->cached_raw_freq = 0;
 			return policy->cpuinfo.max_freq;
+		}
 
 		j_util = j_sg_cpu->util;
 		j_max = j_sg_cpu->max;
@@ -419,10 +427,13 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 
 	if (sugov_should_update_freq(sg_policy, time) &&
 	    !(flags & SCHED_CPUFREQ_CONTINUE)) {
-		if (flags & SCHED_CPUFREQ_RT_DL)
+		if (flags & SCHED_CPUFREQ_RT_DL) {
 			next_f = sg_policy->policy->cpuinfo.max_freq;
-		else
+			/* clear cache when it's bypassed */
+			sg_policy->cached_raw_freq = 0;
+		} else {
 			next_f = sugov_next_freq_shared(sg_cpu, time);
+		}
 
 		sugov_update_commit(sg_policy, time, next_f);
 	}
