sched/walt: Fix the memory leak of idle task load pointers

The memory for the task load pointers is allocated twice for each
idle thread except for the boot CPU. This happens during boot
from idle_threads_init()->idle_init() in the following two paths.

1. idle_init()->fork_idle()->copy_process()->
		sched_fork()->init_new_task_load()

2. idle_init()->fork_idle()->init_idle()->init_new_task_load()

The memory allocation for all tasks happens through the 1st path,
so use the same path for idle tasks and kill the 2nd path. Since
the idle thread of the boot CPU does not go through fork_idle(),
allocate its memory separately from sched_init().
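
To make the leak concrete, here is a simplified sketch of the allocation
done by init_new_task_load(). The kcalloc() call, sizes and flags are
assumptions for illustration rather than the exact walt.c code, but the
pattern matches the per-CPU window arrays checked by the BUG_ON() in the
diff below.

    /*
     * Illustrative sketch only (not the exact walt.c code): the point is
     * that init_new_task_load() stores freshly allocated per-CPU window
     * arrays in p->ravg each time it runs.
     */
    void init_new_task_load(struct task_struct *p)
    {
            /* assumed allocation shape; sizes/flags are illustrative */
            p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32),
                                              GFP_KERNEL);
            p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32),
                                              GFP_KERNEL);
            /* ... */
    }

Before this patch, every idle thread created via fork_idle() reached this
function twice, once from sched_fork() and once from init_idle(), so the
second call overwrote curr_window_cpu/prev_window_cpu without freeing the
first allocation. Keeping the call only in the sched_fork() path gives each
task exactly one allocation, and sched_init() covers the boot CPU's idle
task, which never passes through fork_idle().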

Change-Id: I4696a414ffe07d4114b56d326463026019e278f1
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Branch: tirimbino
Author: Pavankumar Kondeti
Parent: f6f026ce6f
Commit: 385f9679c4
1. include/linux/sched/task.h (2 lines changed)
2. kernel/fork.c (2 lines changed)
3. kernel/sched/core.c (9 lines changed)
4. kernel/sched/walt.c (5 lines changed)
5. kernel/sched/walt.h (4 lines changed)
6. kernel/smpboot.c (2 lines changed)

--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -30,7 +30,7 @@ extern int lockdep_tasklist_lock_is_held(void);
 #endif /* #ifdef CONFIG_PROVE_RCU */
 
 extern asmlinkage void schedule_tail(struct task_struct *prev);
-extern void init_idle(struct task_struct *idle, int cpu, bool cpu_up);
+extern void init_idle(struct task_struct *idle, int cpu);
 
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
 extern void sched_dead(struct task_struct *p);

--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2017,7 +2017,7 @@ struct task_struct *fork_idle(int cpu)
                            cpu_to_node(cpu));
        if (!IS_ERR(task)) {
                init_idle_pids(task->pids);
-               init_idle(task, cpu, false);
+               init_idle(task, cpu);
        }
 
        return task;

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2466,7 +2466,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
        unsigned long flags;
        int cpu;
 
-       init_new_task_load(p, false);
+       init_new_task_load(p);
        cpu = get_cpu();
        __sched_fork(clone_flags, p);
        /*
@@ -5511,14 +5511,12 @@ void show_state_filter(unsigned long state_filter)
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
  */
-void init_idle(struct task_struct *idle, int cpu, bool cpu_up)
+void init_idle(struct task_struct *idle, int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
        __sched_fork(0, idle);
-       if (!cpu_up)
-               init_new_task_load(idle, true);
 
        raw_spin_lock_irqsave(&idle->pi_lock, flags);
        raw_spin_lock(&rq->lock);
@@ -6524,7 +6522,8 @@ void __init sched_init(void)
         * but because we are the idle thread, we just pick up running again
         * when this runqueue becomes "idle".
         */
-       init_idle(current, smp_processor_id(), false);
+       init_idle(current, smp_processor_id());
+       init_new_task_load(current);
 
        calc_load_update = jiffies + LOAD_FREQ;

--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -2000,7 +2000,7 @@ int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
        return 0;
 }
 
-void init_new_task_load(struct task_struct *p, bool idle_task)
+void init_new_task_load(struct task_struct *p)
 {
        int i;
        u32 init_load_windows = sched_init_task_load_windows;
@@ -2019,9 +2019,6 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
        /* Don't have much choice. CPU frequency would be bogus */
        BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
 
-       if (idle_task)
-               return;
-
        if (init_load_pct) {
                init_load_windows = div64_u64((u64)init_load_pct *
                                (u64)sched_ravg_window, 100);

--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -153,7 +153,7 @@ extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
 extern void inc_rq_walt_stats(struct rq *rq, struct task_struct *p);
 extern void dec_rq_walt_stats(struct rq *rq, struct task_struct *p);
 extern void fixup_busy_time(struct task_struct *p, int new_cpu);
-extern void init_new_task_load(struct task_struct *p, bool idle_task);
+extern void init_new_task_load(struct task_struct *p);
 extern void mark_task_starting(struct task_struct *p);
 extern void set_window_start(struct rq *rq);
 void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
@@ -350,7 +350,7 @@ static inline void walt_dec_cumulative_runnable_avg(struct rq *rq,
 }
 
 static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
 
-static inline void init_new_task_load(struct task_struct *p, bool idle_task)
+static inline void init_new_task_load(struct task_struct *p)
 {
 }

--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -32,7 +32,7 @@ struct task_struct *idle_thread_get(unsigned int cpu)
        if (!tsk)
                return ERR_PTR(-ENOMEM);
 
-       init_idle(tsk, cpu, true);
+       init_idle(tsk, cpu);
        return tsk;
 }
