x86/mm: Add global ASID allocation helper functions

Add functions to manage global ASID space. Multithreaded processes that are
simultaneously active on 4 or more CPUs can get a global ASID, resulting in the
same PCID being used for that process on every CPU.

This in turn will allow the kernel to use hardware-assisted TLB flushing
through AMD INVLPGB or Intel RAR for these processes.

  [ bp:
   - Extend use_global_asid() comment
   - s/X86_BROADCAST_TLB_FLUSH/BROADCAST_TLB_FLUSH/g
   - other touchups ]

Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20250226030129.530345-8-riel@surriel.com
commit d504d1247e (parent 72a920eacd)
Author: Rik van Riel
Date:   2025-02-25 22:00:42 -05:00
Commit: Ingo Molnar

4 files changed, 202 insertions(+), 3 deletions(-)
arch/x86/include/asm/mmu.h (+12):
@@ -69,6 +69,18 @@ typedef struct {
        u16 pkey_allocation_map;
        s16 execute_only_pkey;
#endif

#ifdef CONFIG_BROADCAST_TLB_FLUSH
        /*
         * The global ASID will be a non-zero value when the process has
         * the same ASID across all CPUs, allowing it to make use of
         * hardware-assisted remote TLB invalidation like AMD INVLPGB.
         */
        u16 global_asid;

        /* The process is transitioning to a new global ASID number. */
        bool asid_transition;
#endif
} mm_context_t;

#define INIT_MM_CONTEXT(mm)	\
arch/x86/include/asm/mmu_context.h (+2):
@@ -139,6 +139,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

extern void mm_free_global_asid(struct mm_struct *mm);

/*
 * Init a new mm. Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
arch/x86/include/asm/tlbflush.h (+37):
@@ -6,6 +6,7 @@
#include <linux/mmu_notifier.h>
#include <linux/sched.h>

#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
@@ -234,6 +235,42 @@ void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
                     const struct flush_tlb_info *info);

static inline bool is_dyn_asid(u16 asid)
{
        return asid < TLB_NR_DYN_ASIDS;
}

#ifdef CONFIG_BROADCAST_TLB_FLUSH
static inline u16 mm_global_asid(struct mm_struct *mm)
{
        u16 asid;

        if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
                return 0;

        asid = smp_load_acquire(&mm->context.global_asid);

        /* mm->context.global_asid is either 0, or a global ASID */
        VM_WARN_ON_ONCE(asid && is_dyn_asid(asid));

        return asid;
}

static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid)
{
        /*
         * Notably flush_tlb_mm_range() -> broadcast_tlb_flush() ->
         * finish_asid_transition() needs to observe asid_transition = true
         * once it observes global_asid.
         */
        mm->context.asid_transition = true;
        smp_store_release(&mm->context.global_asid, asid);
}
#else
static inline u16 mm_global_asid(struct mm_struct *mm) { return 0; }
static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid) { }
#endif /* CONFIG_BROADCAST_TLB_FLUSH */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
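The store-release/load-acquire pairing above is the crux of these helpers: a reader that observes a non-zero global_asid is guaranteed to also observe asid_transition == true. For illustration, a minimal sketch of the consumer side follows; the real finish_asid_transition() only arrives later in this series, so the body below is an assumption, not the committed code:

/* Sketch only: pairs with mm_assign_global_asid() above. */
static void finish_asid_transition_sketch(struct mm_struct *mm)
{
        /*
         * smp_load_acquire() pairs with the smp_store_release() in
         * mm_assign_global_asid(): seeing a non-zero global_asid
         * implies the asid_transition = true store is visible too.
         */
        u16 global_asid = smp_load_acquire(&mm->context.global_asid);

        if (!global_asid)
                return;         /* no global ASID assigned yet */

        if (!READ_ONCE(mm->context.asid_transition))
                return;         /* transition already completed */

        /* ... move any CPUs still on a dynamic ASID over to global_asid ... */
}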
arch/x86/mm/tlb.c (+151, -3):
@@ -74,13 +74,15 @@
  * use different names for each of them:
  *
  * ASID  - [0, TLB_NR_DYN_ASIDS-1]
- *         the canonical identifier for an mm
+ *         the canonical identifier for an mm, dynamically allocated on each CPU
+ *         [TLB_NR_DYN_ASIDS, MAX_ASID_AVAILABLE-1]
+ *         the canonical, global identifier for an mm, identical across all CPUs
  *
- * kPCID - [1, TLB_NR_DYN_ASIDS]
+ * kPCID - [1, MAX_ASID_AVAILABLE]
  *         the value we write into the PCID part of CR3; corresponds to the
  *         ASID+1, because PCID 0 is special.
  *
- * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
+ * uPCID - [2048 + 1, 2048 + MAX_ASID_AVAILABLE]
  *         for KPTI each mm has two address spaces and thus needs two
  *         PCID values, but we can still do with a single ASID denomination
  *         for each mm. Corresponds to kPCID + 2048.
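To make the naming concrete, the ASID-to-PCID arithmetic described above can be sketched as below. The example_* helpers are made-up names for illustration; the real implementations in this file are kern_pcid() and user_pcid():

/* Illustrative restatement of the comment above, not the kernel helpers. */
static inline u16 example_kern_pcid(u16 asid)
{
        return asid + 1;                /* PCID 0 is special, so kPCID = ASID + 1 */
}

static inline u16 example_user_pcid(u16 asid)
{
        return (asid + 1) + 2048;       /* KPTI user half: uPCID = kPCID + 2048 */
}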
@@ -251,6 +253,152 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
        *need_flush = true;
}

/*
 * Global ASIDs are allocated for multi-threaded processes that are
 * active on multiple CPUs simultaneously, giving each of those
 * processes the same PCID on every CPU, for use with hardware-assisted
 * TLB shootdown on remote CPUs, like AMD INVLPGB or Intel RAR.
 *
 * These global ASIDs are held for the lifetime of the process.
 */
static DEFINE_RAW_SPINLOCK(global_asid_lock);
static u16 last_global_asid = MAX_ASID_AVAILABLE;
static DECLARE_BITMAP(global_asid_used, MAX_ASID_AVAILABLE);
static DECLARE_BITMAP(global_asid_freed, MAX_ASID_AVAILABLE);
static int global_asid_available = MAX_ASID_AVAILABLE - TLB_NR_DYN_ASIDS - 1;
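For a sense of scale: assuming the tree's existing constants (TLB_NR_DYN_ASIDS = 6, and MAX_ASID_AVAILABLE = 2046 once PTI has claimed a PCID bit; both quoted from the surrounding headers rather than from this patch), the global range [TLB_NR_DYN_ASIDS, MAX_ASID_AVAILABLE - 1] spans [6, 2045], and the initializer above works out to 2046 - 6 - 1 = 2039 allocatable global ASIDs before the rollover below is needed.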
/*
 * When the search for a free ASID in the global ASID space reaches
 * MAX_ASID_AVAILABLE, a global TLB flush guarantees that previously
 * freed global ASIDs are safe to re-use.
 *
 * This way the global flush only needs to happen at ASID rollover
 * time, and not at ASID allocation time.
 */
static void reset_global_asid_space(void)
{
        lockdep_assert_held(&global_asid_lock);

        invlpgb_flush_all_nonglobals();

        /*
         * The TLB flush above makes it safe to re-use the previously
         * freed global ASIDs.
         */
        bitmap_andnot(global_asid_used, global_asid_used,
                        global_asid_freed, MAX_ASID_AVAILABLE);
        bitmap_clear(global_asid_freed, 0, MAX_ASID_AVAILABLE);

        /* Restart the search from the start of global ASID space. */
        last_global_asid = TLB_NR_DYN_ASIDS;
}
static u16 allocate_global_asid(void)
{
        u16 asid;

        lockdep_assert_held(&global_asid_lock);

        /* The previous allocation hit the edge of available address space */
        if (last_global_asid >= MAX_ASID_AVAILABLE - 1)
                reset_global_asid_space();

        asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, last_global_asid);

        if (asid >= MAX_ASID_AVAILABLE && !global_asid_available) {
                /* This should never happen. */
                VM_WARN_ONCE(1, "Unable to allocate global ASID despite %d available\n",
                             global_asid_available);
                return 0;
        }

        /* Claim this global ASID. */
        __set_bit(asid, global_asid_used);
        last_global_asid = asid;
        global_asid_available--;
        return asid;
}
/*
 * Check whether a process is currently active on more than @threshold CPUs.
 * This is a cheap estimation on whether or not it may make sense to assign
 * a global ASID to this process, and use broadcast TLB invalidation.
 */
static bool mm_active_cpus_exceeds(struct mm_struct *mm, int threshold)
{
        int count = 0;
        int cpu;

        /* This quick check should eliminate most single threaded programs. */
        if (cpumask_weight(mm_cpumask(mm)) <= threshold)
                return false;

        /* Slower check to make sure. */
        for_each_cpu(cpu, mm_cpumask(mm)) {
                /* Skip the CPUs that aren't really running this process. */
                if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
                        continue;

                if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
                        continue;

                if (++count > threshold)
                        return true;
        }
        return false;
}
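Nothing in this patch calls mm_active_cpus_exceeds() yet; the context-switch hook lands later in the series. As a hedged sketch of the intended use (the helper name and placement are assumptions based on the commit message's "4 or more CPUs" rule; in a real file this would sit after use_global_asid(), which is defined just below):

/* Sketch: decide at context-switch time whether an mm deserves a global ASID. */
static void consider_global_asid_sketch(struct mm_struct *mm)
{
        if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
                return;

        /* Assign a global ASID if the process is active on 4 or more CPUs. */
        if (mm_active_cpus_exceeds(mm, 3))
                use_global_asid(mm);
}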
/*
 * Assign a global ASID to the current process, protecting against
 * races between multiple threads in the process.
 */
static void use_global_asid(struct mm_struct *mm)
{
        u16 asid;

        guard(raw_spinlock_irqsave)(&global_asid_lock);

        /* This process is already using broadcast TLB invalidation. */
        if (mm_global_asid(mm))
                return;

        /*
         * The last global ASID was consumed while waiting for the lock.
         *
         * If this fires, a more aggressive ASID reuse scheme might be
         * needed.
         */
        if (!global_asid_available) {
                VM_WARN_ONCE(1, "Ran out of global ASIDs\n");
                return;
        }

        asid = allocate_global_asid();
        if (!asid)
                return;

        mm_assign_global_asid(mm, asid);
}
void mm_free_global_asid(struct mm_struct *mm)
{
        if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
                return;

        if (!mm_global_asid(mm))
                return;

        guard(raw_spinlock_irqsave)(&global_asid_lock);

        /* The global ASID can be re-used only after flush at wrap-around. */
#ifdef CONFIG_BROADCAST_TLB_FLUSH
        __set_bit(mm->context.global_asid, global_asid_freed);

        mm->context.global_asid = 0;
        global_asid_available++;
#endif
}
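The free path pairs with the "held for the lifetime of the process" rule above: the ASID is returned to the freed bitmap only when the mm goes away, and becomes allocatable again only after the flush in reset_global_asid_space(). A minimal sketch of the expected caller; hooking this into destroy_context() happens in a follow-up patch, so treat the placement as an assumption:

/* Sketch: release the global ASID when the mm is torn down. */
static inline void destroy_context_sketch(struct mm_struct *mm)
{
        mm_free_global_asid(mm);        /* no-op when INVLPGB is not supported */
}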

/*
 * Given an ASID, flush the corresponding user ASID. We can delay this
 * until the next time we switch to it.