sched: Add support for lazy preemption

It has become an obsession to mitigate the determinism vs. throughput
loss of RT. Looking at the mainline semantics of preemption points
gives a hint why RT sucks throughput-wise for ordinary SCHED_OTHER
tasks. One major issue is the wakeup of tasks which immediately
preempt the waking task while the waking task holds a lock on which
the woken task will block right after having preempted the waker. In
mainline this is prevented by the implicit preemption disable of
spin/rw_lock held regions. On RT this is not possible due to the fully
preemptible nature of sleeping spinlocks.
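
To make the pattern concrete, a minimal sketch (hypothetical lock,
task and helper names, not taken from this patch):

 /* Waker side, on RT, where spinlocks are sleeping locks: */
 spin_lock(&s->lock);        /* enter the critical section             */
 wake_up_process(worker);    /* worker may preempt us right here ...   */
                             /* ... and immediately block on s->lock,
                              * costing two pointless context switches */
 update_state(s);            /* hypothetical work under the lock       */
 spin_unlock(&s->lock);      /* only now can the worker make progress  */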

Though for a SCHED_OTHER task preempting another SCHED_OTHER task this
is really not a correctness issue. RT folks are concerned about
SCHED_FIFO/RR task preemption and not about the purely fairness-driven
SCHED_OTHER preemption latencies.

So I introduced a lazy preemption mechanism which only applies to
SCHED_OTHER tasks preempting another SCHED_OTHER task. Aside from the
existing preempt_count, each task now sports a preempt_lazy_count
which is manipulated on lock acquisition and release. This is slightly
imprecise, as for laziness reasons I coupled this to
migrate_disable/enable, so some other mechanisms get the same
treatment (e.g. get_cpu_light).
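
For illustration, the path by which a lock acquisition bumps the lazy
counter looks roughly like this (simplified call chain; the
rt_spin_lock() internals are not part of this patch):

 spin_lock(&lock)               /* on RT: a sleeping spinlock            */
   -> rt_spin_lock(&lock)
        -> migrate_disable()    /* now also does preempt_lazy_disable()  */
 ...
 spin_unlock(&lock)
   -> rt_spin_unlock(&lock)
        -> migrate_enable()     /* preempt_lazy_enable(); may reschedule */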

Now on the scheduler side, instead of setting NEED_RESCHED this sets
NEED_RESCHED_LAZY in case of a SCHED_OTHER/SCHED_OTHER preemption and
therefore allows the waking task to exit the lock-held region before
the woken task preempts. That also works better for cross-CPU wakeups,
as the other side can stay in the adaptive spinning loop.
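
In effect the wakeup preemption decision becomes class-dependent; a
rough sketch (illustrative helper only, the real changes are in the
fair class hunks below):

 static void check_preempt_class(struct rq *rq, struct task_struct *p)
 {
        if (fair_policy(p->policy) && fair_policy(rq->curr->policy))
                resched_curr_lazy(rq);  /* NEED_RESCHED_LAZY: defer   */
        else
                resched_curr(rq);       /* NEED_RESCHED: preempt now  */
 }

The lazy flag is then acted upon once the current task has left all
lazy-disabled regions (preempt_lazy_count == 0), or at the latest on
return to user space.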

For RT class preemption there is no change. This simply sets
NEED_RESCHED and forgoes the lazy preemption counter.

Initial tests do not expose any observable latency increase, but
history shows that I've been proven wrong before :)

Lazy preemption mode is on by default, but with CONFIG_SCHED_DEBUG
enabled it can be disabled via:

 # echo NO_PREEMPT_LAZY >/sys/kernel/debug/sched_features

and re-enabled via:

 # echo PREEMPT_LAZY >/sys/kernel/debug/sched_features

The test results so far are very machine- and workload-dependent, but
there is a clear trend that it improves non-RT workload performance.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Author:    Thomas Gleixner
Date:      2012-10-26 18:50:54 +01:00
Committer: Sebastian Andrzej Siewior
Parent:    2396212724
Commit:    b1773eac3f

12 changed files with 310 additions and 33 deletions
+51 -3
@@ -197,6 +197,20 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
#ifdef CONFIG_PREEMPT_LAZY
#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
#else
#define add_preempt_lazy_count(val) do { } while (0)
#define sub_preempt_lazy_count(val) do { } while (0)
#define inc_preempt_lazy_count() do { } while (0)
#define dec_preempt_lazy_count() do { } while (0)
#define preempt_lazy_count() (0)
#endif
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
@@ -205,6 +219,12 @@ do { \
barrier(); \
} while (0)
#define preempt_lazy_disable() \
do { \
inc_preempt_lazy_count(); \
barrier(); \
} while (0)
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
@@ -242,6 +262,18 @@ do { \
__preempt_schedule(); \
} while (0)
/*
* Open-code preempt_check_resched() here because it is not exported to
* modules, but is needed by users such as local_unlock() and
* bpf_enable_instrumentation().
*/
#define preempt_lazy_enable() \
do { \
dec_preempt_lazy_count(); \
barrier(); \
if (should_resched(0)) \
__preempt_schedule(); \
} while (0)
#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
@@ -249,6 +281,12 @@ do { \
preempt_count_dec(); \
} while (0)
#define preempt_lazy_enable() \
do { \
dec_preempt_lazy_count(); \
barrier(); \
} while (0)
#define preempt_enable_notrace() \
do { \
barrier(); \
@@ -289,6 +327,9 @@ do { \
#define preempt_enable_notrace() barrier()
#define preemptible() 0
#define preempt_lazy_disable() barrier()
#define preempt_lazy_enable() barrier()
#endif /* CONFIG_PREEMPT_COUNT */
#ifdef MODULE
@@ -307,7 +348,7 @@ do { \
} while (0)
#define preempt_fold_need_resched() \
do { \
if (tif_need_resched()) \
if (tif_need_resched_now()) \
set_preempt_need_resched(); \
} while (0)
@@ -423,8 +464,15 @@ extern void migrate_enable(void);
#else
static inline void migrate_disable(void) { }
static inline void migrate_enable(void) { }
static inline void migrate_disable(void)
{
preempt_lazy_disable();
}
static inline void migrate_enable(void)
{
preempt_lazy_enable();
}
#endif /* CONFIG_SMP */
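
As a usage sketch for the pair introduced above (hypothetical
function, merely showing the pairing):

 static void frob_percpu_state(void)
 {
        preempt_lazy_disable();  /* hold off SCHED_OTHER preemption ...  */
        do_the_work();           /* ... while RT tasks can still preempt */
        preempt_lazy_enable();   /* reschedules if a lazy resched is due */
 }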
+37
@@ -2080,6 +2080,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
#ifdef CONFIG_PREEMPT_LAZY
static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
{
set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
}
static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
{
clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
}
static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
{
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
}
static inline int need_resched_lazy(void)
{
return test_thread_flag(TIF_NEED_RESCHED_LAZY);
}
static inline int need_resched_now(void)
{
return test_thread_flag(TIF_NEED_RESCHED);
}
#else
static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
static inline int need_resched_lazy(void) { return 0; }
static inline int need_resched_now(void)
{
return test_thread_flag(TIF_NEED_RESCHED);
}
#endif
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
+63 -1
@@ -178,6 +178,26 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti
#endif /* !CONFIG_GENERIC_ENTRY */
#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
# ifdef CONFIG_PREEMPT_LAZY
static __always_inline bool tif_need_resched(void)
{
return read_thread_flags() & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY);
}
static __always_inline bool tif_need_resched_now(void)
{
return arch_test_bit(TIF_NEED_RESCHED,
(unsigned long *)(&current_thread_info()->flags));
}
static __always_inline bool tif_need_resched_lazy(void)
{
return arch_test_bit(TIF_NEED_RESCHED_LAZY,
(unsigned long *)(&current_thread_info()->flags));
}
# else /* !CONFIG_PREEMPT_LAZY */
static __always_inline bool tif_need_resched(void)
{
@@ -185,7 +205,38 @@ static __always_inline bool tif_need_resched(void)
(unsigned long *)(&current_thread_info()->flags));
}
#else
static __always_inline bool tif_need_resched_now(void)
{
return tif_need_resched();
}
static __always_inline bool tif_need_resched_lazy(void)
{
return false;
}
# endif /* CONFIG_PREEMPT_LAZY */
#else /* !_ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
# ifdef CONFIG_PREEMPT_LAZY
static __always_inline bool tif_need_resched(void)
{
return read_thread_flags() & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY);
}
static __always_inline bool tif_need_resched_now(void)
{
return test_bit(TIF_NEED_RESCHED,
(unsigned long *)(&current_thread_info()->flags));
}
static __always_inline bool tif_need_resched_lazy(void)
{
return test_bit(TIF_NEED_RESCHED_LAZY,
(unsigned long *)(&current_thread_info()->flags));
}
# else /* !CONFIG_PREEMPT_LAZY */
static __always_inline bool tif_need_resched(void)
{
@@ -193,6 +244,17 @@ static __always_inline bool tif_need_resched(void)
(unsigned long *)(&current_thread_info()->flags));
}
static __always_inline bool tif_need_resched_now(void)
{
return tif_need_resched();
}
static __always_inline bool tif_need_resched_lazy(void)
{
return false;
}
# endif /* !CONFIG_PREEMPT_LAZY */
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+9 -1
@@ -81,6 +81,7 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
unsigned char preempt_lazy_count;
};
#define TRACE_EVENT_TYPE_MAX \
@@ -169,9 +170,10 @@ static inline void tracing_generic_entry_update(struct trace_entry *entry,
unsigned int trace_ctx)
{
entry->preempt_count = trace_ctx & 0xff;
entry->preempt_lazy_count = (trace_ctx >> 16) & 0xff;
entry->pid = current->pid;
entry->type = type;
entry->flags = trace_ctx >> 16;
entry->flags = trace_ctx >> 24;
}
unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
@@ -182,7 +184,13 @@ enum trace_flag_type {
TRACE_FLAG_NEED_RESCHED = 0x04,
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
#ifdef CONFIG_PREEMPT_LAZY
TRACE_FLAG_PREEMPT_RESCHED = 0x00,
TRACE_FLAG_NEED_RESCHED_LAZY = 0x20,
#else
TRACE_FLAG_NEED_RESCHED_LAZY = 0x00,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
#endif
TRACE_FLAG_NMI = 0x40,
TRACE_FLAG_BH_OFF = 0x80,
};
+6
@@ -1,5 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config HAVE_PREEMPT_LAZY
bool
config PREEMPT_LAZY
def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT
config PREEMPT_NONE_BUILD
bool
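
An architecture opts in by selecting HAVE_PREEMPT_LAZY and wiring up
TIF_NEED_RESCHED_LAZY in its thread_info flags and entry code; the
Kconfig side would look roughly like this (x86 used as an assumed
example, not part of this commit):

 config X86
        select HAVE_PREEMPT_LAZY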
+78 -1
@@ -1062,6 +1062,46 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
#ifdef CONFIG_PREEMPT_LAZY
static int tsk_is_polling(struct task_struct *p)
{
#ifdef TIF_POLLING_NRFLAG
return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
#else
return 0;
#endif
}
void resched_curr_lazy(struct rq *rq)
{
struct task_struct *curr = rq->curr;
int cpu;
if (!sched_feat(PREEMPT_LAZY)) {
resched_curr(rq);
return;
}
if (test_tsk_need_resched(curr))
return;
if (test_tsk_need_resched_lazy(curr))
return;
set_tsk_need_resched_lazy(curr);
cpu = cpu_of(rq);
if (cpu == smp_processor_id())
return;
/* NEED_RESCHED_LAZY must be visible before we test polling */
smp_mb();
if (!tsk_is_polling(curr))
smp_send_reschedule(cpu);
}
#endif
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -2420,6 +2460,7 @@ void migrate_disable(void)
preempt_disable();
this_rq()->nr_pinned++;
p->migration_disabled = 1;
preempt_lazy_disable();
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);
@@ -2455,6 +2496,7 @@ void migrate_enable(void)
barrier();
p->migration_disabled = 0;
this_rq()->nr_pinned--;
preempt_lazy_enable();
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);
@@ -4781,6 +4823,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
#ifdef CONFIG_HAVE_PREEMPT_LAZY
task_thread_info(p)->preempt_lazy_count = 0;
#endif
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
@@ -6658,6 +6703,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
clear_tsk_need_resched_lazy(prev);
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
rq->last_seen_need_resched_ns = 0;
@@ -6884,6 +6930,30 @@ static void __sched notrace preempt_schedule_common(void)
} while (need_resched());
}
#ifdef CONFIG_PREEMPT_LAZY
/*
* If TIF_NEED_RESCHED is set, then we allow being scheduled away since
* it was set by an RT task. Otherwise we try to avoid being scheduled
* out as long as preempt_lazy_count is > 0.
*/
static __always_inline int preemptible_lazy(void)
{
if (test_thread_flag(TIF_NEED_RESCHED))
return 1;
if (current_thread_info()->preempt_lazy_count)
return 0;
return 1;
}
#else
static inline int preemptible_lazy(void)
{
return 1;
}
#endif
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
@@ -6897,6 +6967,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
*/
if (likely(!preemptible()))
return;
if (!preemptible_lazy())
return;
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
@@ -6944,6 +7016,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
if (likely(!preemptible()))
return;
if (!preemptible_lazy())
return;
do {
/*
* Because the function tracer can trace preempt_count_sub()
@@ -9359,7 +9434,9 @@ void __init init_idle(struct task_struct *idle, int cpu)
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
#ifdef CONFIG_HAVE_PREEMPT_LAZY
task_thread_info(idle)->preempt_lazy_count = 0;
#endif
/*
* The idle tasks have their own, simple scheduling class:
*/
+6 -6
@@ -985,7 +985,7 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
* The task has consumed its request, reschedule.
*/
if (cfs_rq->nr_running > 1) {
resched_curr(rq_of(cfs_rq));
resched_curr_lazy(rq_of(cfs_rq));
clear_buddies(cfs_rq, se);
}
}
@@ -5267,7 +5267,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
* validating it and just reschedule.
*/
if (queued) {
resched_curr(rq_of(cfs_rq));
resched_curr_lazy(rq_of(cfs_rq));
return;
}
/*
@@ -5413,7 +5413,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
resched_curr(rq_of(cfs_rq));
resched_curr_lazy(rq_of(cfs_rq));
}
static __always_inline
@@ -6378,7 +6378,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
if (delta < 0) {
if (task_current(rq, p))
resched_curr(rq);
resched_curr_lazy(rq);
return;
}
hrtick_start(rq, delta);
@@ -8072,7 +8072,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;
preempt:
resched_curr(rq);
resched_curr_lazy(rq);
}
#ifdef CONFIG_SMP
@@ -12368,7 +12368,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
*/
if (task_current(rq, p)) {
if (p->prio > oldprio)
resched_curr(rq);
resched_curr_lazy(rq);
} else
check_preempt_curr(rq, p, 0);
}
+3
@@ -37,6 +37,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
#ifdef CONFIG_PREEMPT_RT
SCHED_FEAT(TTWU_QUEUE, false)
# ifdef CONFIG_PREEMPT_LAZY
SCHED_FEAT(PREEMPT_LAZY, true)
# endif
#else
/*
+9
@@ -2437,6 +2437,15 @@ extern void reweight_task(struct task_struct *p, int prio);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
#ifdef CONFIG_PREEMPT_LAZY
extern void resched_curr_lazy(struct rq *rq);
#else
static inline void resched_curr_lazy(struct rq *rq)
{
resched_curr(rq);
}
#endif
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+31 -19
@@ -2720,11 +2720,19 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
trace_flags |= TRACE_FLAG_BH_OFF;
if (tif_need_resched())
if (tif_need_resched_now())
trace_flags |= TRACE_FLAG_NEED_RESCHED;
#ifdef CONFIG_PREEMPT_LAZY
/* Ran out of flag bits, so LAZY and PREEMPT_RESCHED share one */
if (need_resched_lazy())
trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
#else
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
#endif
return (trace_flags << 24) | (min_t(unsigned int, pc & 0xff, 0xf)) |
(preempt_lazy_count() & 0xff) << 16 |
(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
}
@@ -4317,15 +4325,17 @@ unsigned long trace_total_entries(struct trace_array *tr)
static void print_lat_help_header(struct seq_file *m)
{
seq_puts(m, "# _------=> CPU# \n"
"# / _-----=> irqs-off/BH-disabled\n"
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
"# |||| / _-=> migrate-disable \n"
"# ||||| / delay \n"
"# cmd pid |||||| time | caller \n"
"# \\ / |||||| \\ | / \n");
seq_puts(m, "# _--------=> CPU# \n"
"# / _-------=> irqs-off/BH-disabled\n"
"# | / _------=> need-resched \n"
"# || / _-----=> need-resched-lazy\n"
"# ||| / _----=> hardirq/softirq \n"
"# |||| / _---=> preempt-depth \n"
"# ||||| / _--=> preempt-lazy-depth\n"
"# |||||| / _-=> migrate-disable \n"
"# ||||||| / delay \n"
"# cmd pid |||||||| time | caller \n"
"# \\ / |||||||| \\ | / \n");
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
@@ -4359,14 +4369,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
print_event_info(buf, m);
seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
seq_printf(m, "# %.*s|||| / delay\n", prec, space);
seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
seq_printf(m, "# %.*s _-------=> irqs-off/BH-disabled\n", prec, space);
seq_printf(m, "# %.*s / _------=> need-resched\n", prec, space);
seq_printf(m, "# %.*s| / _-----=> need-resched-lazy\n", prec, space);
seq_printf(m, "# %.*s|| / _----=> hardirq/softirq\n", prec, space);
seq_printf(m, "# %.*s||| / _---=> preempt-depth\n", prec, space);
seq_printf(m, "# %.*s|||| / _--=> preempt-lazy-depth\n", prec, space);
seq_printf(m, "# %.*s||||| / _-=> migrate-disable\n", prec, space);
seq_printf(m, "# %.*s|||||| / delay\n", prec, space);
seq_printf(m, "# TASK-PID %.*s CPU# ||||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
seq_printf(m, "# | | %.*s | ||||||| | |\n", prec, " | ");
}
void
+1
@@ -210,6 +210,7 @@ static int trace_define_common_fields(void)
/* Holds both preempt_count and migrate_disable */
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
__common_field(unsigned char, preempt_lazy_count);
return ret;
}
+16 -2
@@ -445,6 +445,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
char hardsoft_irq;
char need_resched;
char need_resched_lazy;
char irqs_off;
int hardirq;
int softirq;
@@ -465,20 +466,27 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
TRACE_FLAG_PREEMPT_RESCHED)) {
#ifndef CONFIG_PREEMPT_LAZY
case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
need_resched = 'N';
break;
#endif
case TRACE_FLAG_NEED_RESCHED:
need_resched = 'n';
break;
#ifndef CONFIG_PREEMPT_LAZY
case TRACE_FLAG_PREEMPT_RESCHED:
need_resched = 'p';
break;
#endif
default:
need_resched = '.';
break;
}
need_resched_lazy =
(entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
hardsoft_irq =
(nmi && hardirq) ? 'Z' :
nmi ? 'z' :
@@ -487,14 +495,20 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
softirq ? 's' :
'.' ;
trace_seq_printf(s, "%c%c%c",
irqs_off, need_resched, hardsoft_irq);
trace_seq_printf(s, "%c%c%c%c",
irqs_off, need_resched, need_resched_lazy,
hardsoft_irq);
if (entry->preempt_count & 0xf)
trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
else
trace_seq_putc(s, '.');
if (entry->preempt_lazy_count)
trace_seq_printf(s, "%x", entry->preempt_lazy_count);
else
trace_seq_putc(s, '.');
if (entry->preempt_count & 0xf0)
trace_seq_printf(s, "%x", entry->preempt_count >> 4);
else