LKP reported a build failure as below on the following patch "mm/slub,
percpu: correct the calculation of early percpu allocation size"
~~~~~~
In file included from <command-line>:
In function 'alloc_kmem_cache_cpus',
inlined from 'kmem_cache_open' at mm/slub.c:4340:6:
>> >> include/linux/compiler_types.h:357:45: error: call to '__compiletime_assert_474' declared with attribute error:
BUILD_BUG_ON failed: PERCPU_DYNAMIC_EARLY_SIZE < NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu)
357 | _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
~~~~~~
From the kernel config file provided by LKP, the building was made on
arm64 with below Kconfig item enabled:
CONFIG_ZONE_DMA=y
CONFIG_SLUB_CPU_PARTIAL=y
CONFIG_DEBUG_LOCK_ALLOC=y
CONFIG_SLUB_STATS=y
CONFIG_ARM64_PAGE_SHIFT=16
CONFIG_ARM64_64K_PAGES=y
Then we will have:
NR_KMALLOC_TYPES:4
KMALLOC_SHIFT_HIGH:17
sizeof(struct kmem_cache_cpu):184
The product of them is 12512, which is bigger than PERCPU_DYNAMIC_EARLY_SIZE,
12K. Hence, the BUILD_BUG_ON in alloc_kmem_cache_cpus() is triggered.
Earlier, in commit 099a19d91ca4 ("percpu: allow limited allocation
before slab is online"), PERCPU_DYNAMIC_EARLY_SIZE was introduced and
set to 12K which is equal to the then PERCPU_DYNAMIC_RESERVE.
Later, in commit 1a4d76076cda ("percpu: implement asynchronous chunk
population"), PERCPU_DYNAMIC_RESERVE was increased by 8K, while
PERCPU_DYNAMIC_EARLY_SIZE was kept unchanged.
So, here increase PERCPU_DYNAMIC_EARLY_SIZE by 8K too, to accommodate
slub's requirement.
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Baoquan He <bhe@redhat.com>
Acked-by: Dennis Zhou <dennis@kernel.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
143 lines
4.5 KiB
C
143 lines
4.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/mmdebug.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

/* minimum allocation size and shift in bytes */
#define PCPU_MIN_ALLOC_SHIFT		2
#define PCPU_MIN_ALLOC_SIZE		(1 << PCPU_MIN_ALLOC_SHIFT)

/*
 * The PCPU_BITMAP_BLOCK_SIZE must be the same size as PAGE_SIZE as the
 * updating of hints is used to manage the nr_empty_pop_pages in both
 * the chunk and globally.
 */
#define PCPU_BITMAP_BLOCK_SIZE		PAGE_SIZE
#define PCPU_BITMAP_BLOCK_BITS		(PCPU_BITMAP_BLOCK_SIZE >>	\
					 PCPU_MIN_ALLOC_SHIFT)
|
|
|
|
/*
 * Percpu allocator can serve percpu allocations before slab is
 * initialized which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much resource to
 * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS	128
/*
 * 20K: must cover early per-cpu users such as slub's kmem_cache_cpu
 * arrays (NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH entries) before slab
 * comes online.
 */
#define PERCPU_DYNAMIC_EARLY_SIZE	(20 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  More
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(28 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#endif
|
|
|
|
extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

/* describes one group of units in the first chunk layout */
struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};
|
|
|
|
struct pcpu_alloc_info {
|
|
size_t static_size;
|
|
size_t reserved_size;
|
|
size_t dyn_size;
|
|
size_t unit_size;
|
|
size_t atom_size;
|
|
size_t alloc_size;
|
|
size_t __ai_size; /* internal, don't use */
|
|
int nr_groups; /* 0 if grouping unnecessary */
|
|
struct pcpu_group_info groups[];
|
|
};
|
|
|
|
/* first-chunk allocator types selectable via the "percpu_alloc=" param */
enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char * const pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;
|
|
|
|
typedef int (pcpu_fc_cpu_to_node_fn_t)(int cpu);
|
|
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
|
|
|
|
extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
|
|
int nr_units);
|
|
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
|
|
|
|
extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
|
|
void *base_addr);
|
|
|
|
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
|
|
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
|
|
size_t atom_size,
|
|
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
|
|
pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
|
|
#endif
|
|
|
|
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
|
|
void __init pcpu_populate_pte(unsigned long addr);
|
|
extern int __init pcpu_page_first_chunk(size_t reserved_size,
|
|
pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
|
|
#endif
|
|
|
|
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1);
|
|
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
|
|
extern bool is_kernel_percpu_address(unsigned long addr);
|
|
|
|
#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
|
|
extern void __init setup_per_cpu_areas(void);
|
|
#endif
|
|
|
|
extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
|
|
extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
|
|
extern void free_percpu(void __percpu *__pdata);
|
|
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
|
|
|
|
#define alloc_percpu_gfp(type, gfp) \
|
|
(typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \
|
|
__alignof__(type), gfp)
|
|
#define alloc_percpu(type) \
|
|
(typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
|
|
__alignof__(type))
|
|
|
|
extern unsigned long pcpu_nr_pages(void);
|
|
|
|
#endif /* __LINUX_PERCPU_H */
|