commit f0e99852cb
Author: Ard Biesheuvel <ardb@google.com>
Date:   2021-05-07 07:12:43 -07:00

ANDROID: arm64: module: add RELA metadata for FIPS140 use

The fips140.ko module relies on ELF metadata that is normally discarded
by the module loader. In order to reduce the impact of the associated
changes on non-FIPS140 kernels, the module loader will be modified to
simply copy this metadata if it encounters a module whose name is
'fips140'. For this to work, we need a couple of fields in struct
mod_arch_specific so that this module can find this data at module init
time.
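
As a hypothetical illustration (kernel context assumed; this is not the
actual fips140.ko logic, and fips140_init is an invented name), a module
could consume these fields at init time roughly like this:

        /*
         * Hypothetical sketch, not the real fips140.ko code: walk the
         * preserved RELA entries so the integrity check can undo
         * load-time relocations in .text before hashing it.
         */
        static int __init fips140_init(void)
        {
                const struct mod_arch_specific *arch = &THIS_MODULE->arch;
                int i;

                for (i = 0; i < arch->num_text_relocations; i++) {
                        const Elf64_Rela *rela = &arch->text_relocations[i];

                        /* revert the word at rela->r_offset before hashing */
                        pr_debug("fips140: text reloc at %#llx\n",
                                 rela->r_offset);
                }
                return 0;
        }
        module_init(fips140_init);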

Adding these fields will change the type signature of struct module, and
hence affect the stable ABI. The FIPS140 module build depends on LTO,
and so its Kconfig symbol can only be enabled if LTO is enabled as well.
This implies that adding these mod_arch_specific fields conditionally,
based on CONFIG_CRYPTO_FIPS140_INTEGRITY_CHECK, would result in an ABI
fork between LTO and !LTO builds, which is obviously undesirable.

So let's just add these fields unconditionally. This takes 24 bytes of
kernel memory per module, but given that the size of struct module
(which is the only place where struct mod_arch_specific is allocated)
is rounded up to the cacheline size (128 bytes on arm64), and that
these 24 bytes do not push it over the next cacheline boundary, the net
result is that no additional memory is used (struct module is currently
1 KiB).
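
For reference, the 24 bytes are just two 8-byte pointers plus two
4-byte ints; a standalone sketch of that math (userspace C, LP64
assumed, struct name invented):

        /* Size check for the four new fields (sketch; LP64 userspace) */
        #include <assert.h>
        #include <elf.h>

        struct fips140_fields {
                const Elf64_Rela *text_relocations;     /* 8 bytes */
                const Elf64_Rela *rodata_relocations;   /* 8 bytes */
                int num_text_relocations;               /* 4 bytes */
                int num_rodata_relocations;             /* 4 bytes */
        };

        static_assert(sizeof(struct fips140_fields) == 24,
                      "two pointers + two ints should be 24 bytes on LP64");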

Bug: 153614920
Change-Id: Ia617762f37c0bf7324e899b72fd317e382e39c03
Signed-off-by: Ard Biesheuvel <ardb@google.com>

arch/arm64/include/asm/module.h:

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MODULE_H
#define __ASM_MODULE_H

#include <asm-generic/module.h>

#ifdef CONFIG_ARM64_MODULE_PLTS
struct mod_plt_sec {
        int                     plt_shndx;
        int                     plt_num_entries;
        int                     plt_max_entries;
};

struct mod_arch_specific {
        struct mod_plt_sec      core;
        struct mod_plt_sec      init;

        /* for CONFIG_DYNAMIC_FTRACE */
        struct plt_entry        *ftrace_trampolines;

        /* for FIPS 140 certified kernel module */
        const Elf64_Rela        *text_relocations;
        const Elf64_Rela        *rodata_relocations;
        int                     num_text_relocations;
        int                     num_rodata_relocations;
};
#endif

u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
                          void *loc, const Elf64_Rela *rela,
                          Elf64_Sym *sym);
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
                                void *loc, u64 val);

#ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base;
#else
#define module_alloc_base       ((u64)_etext - MODULES_VSIZE)
#endif

struct plt_entry {
        /*
         * A program that conforms to the AArch64 Procedure Call Standard
         * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
         * IP1 (x17) may be inserted at any branch instruction that is
         * exposed to a relocation that supports long branches. Since that
         * is exactly what we are dealing with here, we are free to use x16
         * as a scratch register in the PLT veneers.
         */
        __le32  adrp;   /* adrp x16, ....              */
        __le32  add;    /* add  x16, x16, #0x....      */
        __le32  br;     /* br   x16                    */
};
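
/*
 * Cortex-A53 erratum 843419 can corrupt the result of an ADRP that sits
 * in one of the last two instruction slots of a 4 KiB page (page offset
 * 0xff8 or 0xffc), so such locations are treated as forbidden and the
 * ADRP is routed through a veneer instead.
 */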
static inline bool is_forbidden_offset_for_adrp(void *place)
{
        return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
               cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
               ((u64)place & 0xfff) >= 0xff8;
}

struct plt_entry get_plt_entry(u64 dst, void *pc);
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);

static inline bool plt_entry_is_initialized(const struct plt_entry *e)
{
        return e->adrp || e->add || e->br;
}

#endif /* __ASM_MODULE_H */
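
For context, a condensed sketch of how these declarations come into
play when a branch relocation lands out of range, modeled on the
pattern in arch/arm64/kernel/module.c (kernel context assumed;
resolve_branch_target is an invented name and error handling is
elided):

        /*
         * Sketch only: if a B/BL target is beyond the +/-128 MiB branch
         * range, emit a PLT veneer and branch to that instead.
         */
        static u64 resolve_branch_target(struct module *mod,
                                         Elf64_Shdr *sechdrs, void *loc,
                                         const Elf64_Rela *rela,
                                         Elf64_Sym *sym, u64 val)
        {
                s64 off = (s64)(val - (u64)loc);

                if (off < -(s64)SZ_128M || off >= (s64)SZ_128M)
                        val = module_emit_plt_entry(mod, sechdrs, loc,
                                                    rela, sym);

                return val;     /* caller encodes the branch to this target */
        }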