From e3c84745621148ee71e278ce80fe8453c9698d8a Mon Sep 17 00:00:00 2001
From: John Dias
Date: Wed, 13 Jun 2018 22:16:28 -0700
Subject: [PATCH] cpufreq: schedutil: clear cached_raw_freq when invalidated

The cpufreq_schedutil governor keeps a cache of the last raw frequency
that was mapped to a supported device frequency. If the next request
for a frequency matches the cached value, the policy's next_freq value
is reused. But there are paths that can update the raw cached value
without updating the next_freq value, and there are paths that can set
the next_freq value without setting the raw cached value. On those
paths, the cached value must be reset. The case that has been observed
is when a frequency request reaches sugov_update_commit but is then
rejected by the sugov_up_down_rate_limit check.

Bug: 116279565
Change-Id: I7c585339a04ff1732054d6e5b36a57e2d41266aa
Signed-off-by: John Dias
Signed-off-by: Miguel de Dios
Signed-off-by: Alexander Winkowski
---
 kernel/sched/cpufreq_schedutil.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 366ac2d783ed..99761dd2f21c 100755
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -164,8 +164,8 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 		return;
 
 	if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
-		/* Restore cached freq as next_freq is not changed */
-		sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
+		/* Don't cache a raw freq that didn't become next_freq */
+		sg_policy->cached_raw_freq = 0;
 		return;
 	}
 
@@ -334,8 +334,9 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
 	busy = use_pelt() && sugov_cpu_is_busy(sg_cpu);
 
-	if (0) {
-		sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
+	if (flags & SCHED_CPUFREQ_RT_DL) {
+		/* clear cache when it's bypassed */
+		sg_policy->cached_raw_freq = 0;
 		next_f = policy->cpuinfo.max_freq;
 	} else {
 		sugov_get_util(&util, &max, sg_cpu->cpu);
@@ -382,8 +383,9 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 			j_sg_cpu->iowait_boost_pending = false;
 			continue;
 		}
-		if (0) {
-			sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
+		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL) {
+			/* clear cache when it's bypassed */
+			sg_policy->cached_raw_freq = 0;
 			return policy->cpuinfo.max_freq;
 		}
 
@@ -424,9 +426,10 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 
 	if (sugov_should_update_freq(sg_policy, time) &&
 	    !(flags & SCHED_CPUFREQ_CONTINUE)) {
-		if (0) {
+		if (flags & SCHED_CPUFREQ_RT_DL) {
 			next_f = sg_policy->policy->cpuinfo.max_freq;
-			sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
+			/* clear cache when it's bypassed */
+			sg_policy->cached_raw_freq = 0;
 		} else {
 			next_f = sugov_next_freq_shared(sg_cpu, time);
 		}
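
For readers unfamiliar with the pattern the commit message describes, the
following standalone sketch illustrates the stale-cache hazard and the fix.
It is a minimal illustration with hypothetical, simplified names
(resolve_freq, reject_request, map_to_supported), not the actual kernel code:

/*
 * Sketch: a raw frequency is cached after being mapped to a supported
 * frequency, and a later identical raw request short-circuits to the
 * cached next_freq. Any path that bails out without updating next_freq
 * must clear the cache, or a later request with the same raw value
 * would wrongly reuse a next_freq that was never committed.
 */
#include <stdio.h>

struct policy {
	unsigned int cached_raw_freq;	/* last raw request that was mapped */
	unsigned int next_freq;		/* supported freq that mapping produced */
};

/* Hypothetical stand-in for the driver's raw-to-supported mapping. */
static unsigned int map_to_supported(unsigned int raw)
{
	return (raw / 100000) * 100000;	/* round down to a 100 MHz step */
}

static unsigned int resolve_freq(struct policy *p, unsigned int raw)
{
	/* Fast path: same raw request as last time, reuse next_freq. */
	if (raw == p->cached_raw_freq)
		return p->next_freq;

	p->cached_raw_freq = raw;
	p->next_freq = map_to_supported(raw);
	return p->next_freq;
}

/* A path (e.g. a rate-limit rejection) that drops the request. */
static void reject_request(struct policy *p)
{
	/* Don't cache a raw freq that didn't become next_freq. */
	p->cached_raw_freq = 0;
}

int main(void)
{
	struct policy p = { 0, 0 };

	printf("%u\n", resolve_freq(&p, 1512345));	/* mapped: 1500000 */
	reject_request(&p);				/* cache invalidated */
	printf("%u\n", resolve_freq(&p, 1512345));	/* re-resolved, not stale */
	return 0;
}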