diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bf6252c572e9..375c778eedd2 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3836,11 +3836,22 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 	if (ue.enqueued & UTIL_AVG_UNCHANGED)
 		return;
 
+	/*
+	 * Reset EWMA on utilization increases, the moving average is used only
+	 * to smooth utilization decreases.
+	 */
+	ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
+	if (sched_feat(UTIL_EST_FASTUP)) {
+		if (ue.ewma < ue.enqueued) {
+			ue.ewma = ue.enqueued;
+			goto done;
+		}
+	}
+
 	/*
 	 * Skip update of task's estimated utilization when its EWMA is
 	 * already ~1% close to its last activation value.
 	 */
-	ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
 	last_ewma_diff = ue.enqueued - ue.ewma;
 	if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
 		return;
@@ -3865,6 +3876,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 	ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
 	ue.ewma += last_ewma_diff;
 	ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
+done:
 	WRITE_ONCE(p->se.avg.util_est, ue);
 
 	trace_sched_util_est_task(p, &p->se.avg);
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 9589232c65e1..50079fa6cde9 100755
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -90,6 +90,7 @@ SCHED_FEAT(WA_BIAS, true)
  * UtilEstimation. Use estimated CPU utilization.
  */
 SCHED_FEAT(UTIL_EST, true)
+SCHED_FEAT(UTIL_EST_FASTUP, true)
 
 /*
  * Energy aware scheduling. Use platform energy model to guide scheduling
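
Not part of the patch: a minimal standalone sketch of the util_est behaviour the change produces, assuming the upstream UTIL_EST_WEIGHT_SHIFT of 2 and the ~1% skip margin; util_est_update() and the sample values below are illustrative names, not kernel code. With UTIL_EST_FASTUP the estimate snaps up to a larger sample immediately, and the moving average is only used to smooth decreases, as the added comment in the patch states.

#include <stdio.h>

/* Assumed constants mirroring the kernel defaults; illustrative only. */
#define SCHED_CAPACITY_SCALE	1024
#define UTIL_EST_WEIGHT_SHIFT	2	/* new sample weighted at 1/4 */

/* Sketch of the patched update: snap up on increases, EWMA-decay on decreases. */
static unsigned int util_est_update(unsigned int ewma, unsigned int sample)
{
	int diff = (int)sample - (int)ewma;

	if (diff >= 0)				/* UTIL_EST_FASTUP: ramp up immediately */
		return sample;
	if (-diff < SCHED_CAPACITY_SCALE / 100)	/* within ~1%: keep the old EWMA */
		return ewma;

	/* ewma = (3 * ewma + sample) / 4, computed as in the kernel's fixed point */
	ewma <<= UTIL_EST_WEIGHT_SHIFT;
	ewma  += diff;
	ewma >>= UTIL_EST_WEIGHT_SHIFT;
	return ewma;
}

int main(void)
{
	unsigned int samples[] = { 600, 580, 300, 250, 700 };
	unsigned int ewma = 100;

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		ewma = util_est_update(ewma, samples[i]);
		printf("sample=%4u -> ewma=%4u\n", samples[i], ewma);
	}
	return 0;
}

Running the sketch shows the estimate jumping straight to 600 on the first large sample, then decaying gradually (595, 521, 453) while the samples shrink, then jumping again to 700; without the feature the ramp-up would take several activations through the same EWMA.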