@@ -42,6 +42,7 @@ struct sugov_policy {
 	s64			freq_update_delay_ns;
 	unsigned int		next_freq;
 	unsigned int		cached_raw_freq;
+	unsigned int		prev_cached_raw_freq;

 	/* The next fields are only needed if fast switch cannot be used. */
 	struct irq_work		irq_work;
@@ -163,8 +164,8 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 		return;

 	if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
-		/* Don't cache a raw freq that didn't become next_freq */
-		sg_policy->cached_raw_freq = 0;
+		/* Restore cached freq as next_freq is not changed */
+		sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
 		return;
 	}
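The restore here is the heart of the fix: prev_cached_raw_freq holds the raw value from which the still-current next_freq was resolved, so rolling back keeps cached_raw_freq consistent with next_freq instead of leaving a zero that can never match a real raw frequency. A standalone sketch of the pattern follows the get_next_freq() hunk below.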
@@ -221,6 +222,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
 		return sg_policy->next_freq;

+	sg_policy->prev_cached_raw_freq = sg_policy->cached_raw_freq;
 	sg_policy->cached_raw_freq = freq;
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
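Below is a minimal standalone sketch (not the kernel code) of the save/restore pairing these two hunks set up: get_next_freq() snapshots the old cached value before overwriting it, and any path that rejects or bypasses the new frequency rolls the cache back. Only the two field names come from the patch; struct sugov_cache and the helpers are hypothetical names for illustration.

#include <stdio.h>

struct sugov_cache {
	unsigned int cached_raw_freq;
	unsigned int prev_cached_raw_freq;
};

/* Mirrors the new get_next_freq() behavior: snapshot, then overwrite. */
static void cache_update(struct sugov_cache *c, unsigned int raw_freq)
{
	c->prev_cached_raw_freq = c->cached_raw_freq;
	c->cached_raw_freq = raw_freq;
}

/* Mirrors the rate-limit/bypass paths: restore instead of zeroing. */
static void cache_rollback(struct sugov_cache *c)
{
	c->cached_raw_freq = c->prev_cached_raw_freq;
}

int main(void)
{
	struct sugov_cache c = { 0, 0 };

	cache_update(&c, 1200000);	/* update accepted */
	cache_update(&c, 1800000);	/* second update computed... */
	cache_rollback(&c);		/* ...but rejected by the rate limit */

	/* cached_raw_freq again matches the raw freq behind next_freq */
	printf("cached_raw_freq = %u\n", c.cached_raw_freq);
	return 0;
}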
@@ -334,8 +336,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	busy = use_pelt() && sugov_cpu_is_busy(sg_cpu);

 	if (flags & SCHED_CPUFREQ_RT_DL) {
-		/* clear cache when it's bypassed */
-		sg_policy->cached_raw_freq = 0;
+		sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
 		next_f = policy->cpuinfo.max_freq;
 	} else {
 		sugov_get_util(&util, &max, sg_cpu->cpu);
@@ -350,8 +351,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 		    sg_policy->next_freq != UINT_MAX) {
 			next_f = sg_policy->next_freq;

-			/* Reset cached freq as next_freq has changed */
-			sg_policy->cached_raw_freq = 0;
+			/* Restore cached freq as next_freq has changed */
+			sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
 		}
 	}
 	sugov_update_commit(sg_policy, time, next_f);
@@ -383,8 +384,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 			continue;
 		}

 		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL) {
-			/* clear cache when it's bypassed */
-			sg_policy->cached_raw_freq = 0;
+			sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
 			return policy->cpuinfo.max_freq;
 		}
@@ -429,8 +429,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	    !(flags & SCHED_CPUFREQ_CONTINUE)) {
 		if (flags & SCHED_CPUFREQ_RT_DL) {
 			next_f = sg_policy->policy->cpuinfo.max_freq;
-			/* clear cache when it's bypassed */
-			sg_policy->cached_raw_freq = 0;
+			sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
 		} else {
 			next_f = sugov_next_freq_shared(sg_cpu, time);
 		}
@@ -838,6 +837,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 	sg_policy->work_in_progress = false;
 	sg_policy->need_freq_update = false;
 	sg_policy->cached_raw_freq = 0;
+	sg_policy->prev_cached_raw_freq = 0;

 	for_each_cpu(cpu, policy->cpus) {
 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
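Net effect, as far as this patch goes: cached_raw_freq is never forced to zero at runtime, so the freq == cached_raw_freq shortcut in get_next_freq() can keep returning the already-resolved next_freq after a rate-limited or RT/DL-bypassed update, saving a redundant cpufreq_driver_resolve_freq() lookup. Both fields start at zero in sugov_start(), so the first update after the governor starts always misses the cache and resolves the frequency normally.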