diff --git a/include/linux/sched.h b/include/linux/sched.h
index d4f9d82c69e0..2af0a8859d64 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -896,6 +896,7 @@ struct task_struct {
 	unsigned			sched_reset_on_fork:1;
 	unsigned			sched_contributes_to_load:1;
 	unsigned			sched_migrated:1;
+	unsigned			sched_task_hot:1;
 
 	/* Force alignment to the next boundary: */
 	unsigned			:0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3b2cfdb8d788..cd9b411706b5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8921,6 +8921,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	int tsk_cache_hot;
 
 	lockdep_assert_rq_held(env->src_rq);
+	if (p->sched_task_hot)
+		p->sched_task_hot = 0;
 
 	/*
 	 * We do not migrate tasks that are:
@@ -8993,10 +8995,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 
 	if (tsk_cache_hot <= 0 ||
 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
-		if (tsk_cache_hot == 1) {
-			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
-			schedstat_inc(p->stats.nr_forced_migrations);
-		}
+		if (tsk_cache_hot == 1)
+			p->sched_task_hot = 1;
 		return 1;
 	}
 
@@ -9011,6 +9011,12 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
 {
 	lockdep_assert_rq_held(env->src_rq);
 
+	if (p->sched_task_hot) {
+		p->sched_task_hot = 0;
+		schedstat_inc(env->sd->lb_hot_gained[env->idle]);
+		schedstat_inc(p->stats.nr_forced_migrations);
+	}
+
 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
 	set_task_cpu(p, env->dst_cpu);
 }
@@ -9171,6 +9177,9 @@ static int detach_tasks(struct lb_env *env)
 
 		continue;
 next:
+		if (p->sched_task_hot)
+			schedstat_inc(p->stats.nr_failed_migrations_hot);
+
 		list_move(&p->se.group_node, tasks);
 	}
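
The sketch below (not kernel code; all names are simplified stand-ins for the task_struct / lb_env / schedstat fields touched by the patch) illustrates the deferred-accounting pattern the diff introduces: can_migrate_task() only marks an approved cache-hot task via the new sched_task_hot bit, the lb_hot_gained / nr_forced_migrations counters are bumped only when the task is actually detached, and a task that was approved but then skipped feeds nr_failed_migrations_hot instead.

/* Standalone, compilable illustration of the pattern; assumed helper and
 * field names (can_migrate, detach, give_up, cache_hot) are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

struct stats {
	unsigned long lb_hot_gained;
	unsigned long nr_forced_migrations;
	unsigned long nr_failed_migrations_hot;
};

struct task {
	bool cache_hot;       /* stand-in for the task_hot() verdict */
	bool sched_task_hot;  /* the flag the patch adds             */
};

static struct stats stats;

/* Approve or reject the migration; only mark the task, do not account yet. */
static bool can_migrate(struct task *p, int nr_balance_failed, int cache_nice_tries)
{
	p->sched_task_hot = false;

	if (!p->cache_hot || nr_balance_failed > cache_nice_tries) {
		if (p->cache_hot)
			p->sched_task_hot = true;  /* hot, but forced through anyway */
		return true;
	}
	return false;
}

/* The task is really being moved: account the forced hot migration now. */
static void detach(struct task *p)
{
	if (p->sched_task_hot) {
		p->sched_task_hot = false;
		stats.lb_hot_gained++;
		stats.nr_forced_migrations++;
	}
	/* ...dequeue from the source runqueue, set the destination CPU... */
}

/* The balancing pass gave up on the task after approving it. */
static void give_up(struct task *p)
{
	if (p->sched_task_hot)
		stats.nr_failed_migrations_hot++;
}

int main(void)
{
	struct task hot = { .cache_hot = true };

	/* Approved but never detached: recorded as a failed hot migration. */
	if (can_migrate(&hot, 3, 2))
		give_up(&hot);

	/* Approved and detached: recorded as a forced hot migration. */
	if (can_migrate(&hot, 3, 2))
		detach(&hot);

	printf("hot_gained=%lu forced=%lu failed_hot=%lu\n",
	       stats.lb_hot_gained, stats.nr_forced_migrations,
	       stats.nr_failed_migrations_hot);
	return 0;
}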