KVM: x86: Merge 'selftests' into 'cet' to pick up ex_str()

Merge the queue of KVM selftests changes for 6.18 to pick up the ex_str()
helper so that it can be used to pretty print expected versus actual
exceptions in a new MSR selftest.  CET virtualization will add support for
several MSRs with non-trivial semantics, along with new uAPI for accessing
the guest's Shadow Stack Pointer (SSP) from userspace.
This commit is contained in:
Sean Christopherson
2025-09-23 09:00:18 -07:00
16 changed files with 303 additions and 109 deletions
+11 -6
View File
@@ -260,13 +260,18 @@ int __open_path_or_exit(const char *path, int flags, const char *enoent_help);
int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);
bool get_kvm_param_bool(const char *param);
bool get_kvm_intel_param_bool(const char *param);
bool get_kvm_amd_param_bool(const char *param);
int kvm_get_module_param_integer(const char *module_name, const char *param);
bool kvm_get_module_param_bool(const char *module_name, const char *param);
int get_kvm_param_integer(const char *param);
int get_kvm_intel_param_integer(const char *param);
int get_kvm_amd_param_integer(const char *param);
/* Read a boolean param of the base "kvm" module, e.g. "enable_pmu". */
static inline bool get_kvm_param_bool(const char *param)
{
return kvm_get_module_param_bool("kvm", param);
}
static inline int get_kvm_param_integer(const char *param)
{
return kvm_get_module_param_integer("kvm", param);
}
unsigned int kvm_check_cap(long cap);
@@ -5,8 +5,11 @@
#ifndef SELFTEST_KVM_PMU_H
#define SELFTEST_KVM_PMU_H
#include <stdbool.h>
#include <stdint.h>
#include <linux/bits.h>
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
/*
@@ -61,6 +64,11 @@
#define INTEL_ARCH_BRANCHES_RETIRED RAW_EVENT(0xc4, 0x00)
#define INTEL_ARCH_BRANCHES_MISPREDICTED RAW_EVENT(0xc5, 0x00)
#define INTEL_ARCH_TOPDOWN_SLOTS RAW_EVENT(0xa4, 0x01)
#define INTEL_ARCH_TOPDOWN_BE_BOUND RAW_EVENT(0xa4, 0x02)
#define INTEL_ARCH_TOPDOWN_BAD_SPEC RAW_EVENT(0x73, 0x00)
#define INTEL_ARCH_TOPDOWN_FE_BOUND RAW_EVENT(0x9c, 0x01)
#define INTEL_ARCH_TOPDOWN_RETIRING RAW_EVENT(0xc2, 0x02)
#define INTEL_ARCH_LBR_INSERTS RAW_EVENT(0xe4, 0x01)
#define AMD_ZEN_CORE_CYCLES RAW_EVENT(0x76, 0x00)
#define AMD_ZEN_INSTRUCTIONS_RETIRED RAW_EVENT(0xc0, 0x00)
@@ -80,6 +88,11 @@ enum intel_pmu_architectural_events {
INTEL_ARCH_BRANCHES_RETIRED_INDEX,
INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX,
INTEL_ARCH_TOPDOWN_SLOTS_INDEX,
INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX,
INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX,
INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX,
INTEL_ARCH_TOPDOWN_RETIRING_INDEX,
INTEL_ARCH_LBR_INSERTS_INDEX,
NR_INTEL_ARCH_EVENTS,
};
@@ -94,4 +107,17 @@ enum amd_pmu_zen_events {
extern const uint64_t intel_pmu_arch_events[];
extern const uint64_t amd_pmu_zen_events[];
enum pmu_errata {
INSTRUCTIONS_RETIRED_OVERCOUNT,
BRANCHES_RETIRED_OVERCOUNT,
};
extern uint64_t pmu_errata_mask;
void kvm_init_pmu_errata(void);
/*
 * Query whether the current CPU's PMU is affected by the given erratum.
 * pmu_errata_mask is populated once by kvm_init_pmu_errata().
 */
static inline bool this_pmu_has_errata(enum pmu_errata errata)
{
return pmu_errata_mask & BIT_ULL(errata);
}
#endif /* SELFTEST_KVM_PMU_H */
@@ -34,6 +34,8 @@ extern uint64_t guest_tsc_khz;
#define NMI_VECTOR 0x02
const char *ex_str(int vector);
#define X86_EFLAGS_FIXED (1u << 1)
#define X86_CR4_VME (1ul << 0)
@@ -265,7 +267,7 @@ struct kvm_x86_cpu_property {
#define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 12)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)
@@ -332,6 +334,11 @@ struct kvm_x86_pmu_feature {
#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(EBX, 5)
#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED KVM_X86_PMU_FEATURE(EBX, 6)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS KVM_X86_PMU_FEATURE(EBX, 7)
#define X86_PMU_FEATURE_TOPDOWN_BE_BOUND KVM_X86_PMU_FEATURE(EBX, 8)
#define X86_PMU_FEATURE_TOPDOWN_BAD_SPEC KVM_X86_PMU_FEATURE(EBX, 9)
#define X86_PMU_FEATURE_TOPDOWN_FE_BOUND KVM_X86_PMU_FEATURE(EBX, 10)
#define X86_PMU_FEATURE_TOPDOWN_RETIRING KVM_X86_PMU_FEATURE(EBX, 11)
#define X86_PMU_FEATURE_LBR_INSERTS KVM_X86_PMU_FEATURE(EBX, 12)
#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED KVM_X86_PMU_FEATURE(ECX, 0)
#define X86_PMU_FEATURE_CPU_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 1)
@@ -1179,6 +1186,12 @@ struct idt_entry {
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
void (*handler)(struct ex_regs *));
/*
* Exception fixup morphs #DE to an arbitrary magic vector so that '0' can be
* used to signal "no exception".
*/
#define KVM_MAGIC_DE_VECTOR 0xff
/* If a toddler were to say "abracadabra". */
#define KVM_EXCEPTION_MAGIC 0xabacadabaULL
@@ -1314,6 +1327,26 @@ static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
bool kvm_is_tdp_enabled(void);
/* Read a boolean param of the "kvm_intel" module. */
static inline bool get_kvm_intel_param_bool(const char *param)
{
return kvm_get_module_param_bool("kvm_intel", param);
}
/* Read a boolean param of the "kvm_amd" module. */
static inline bool get_kvm_amd_param_bool(const char *param)
{
return kvm_get_module_param_bool("kvm_amd", param);
}
static inline int get_kvm_intel_param_integer(const char *param)
{
return kvm_get_module_param_integer("kvm_intel", param);
}
static inline int get_kvm_amd_param_integer(const char *param)
{
return kvm_get_module_param_integer("kvm_amd", param);
}
static inline bool kvm_is_pmu_enabled(void)
{
return get_kvm_param_bool("enable_pmu");
+6 -36
View File
@@ -24,7 +24,7 @@ uint32_t guest_random_seed;
struct guest_random_state guest_rng;
static uint32_t last_guest_seed;
static int vcpu_mmap_sz(void);
static size_t vcpu_mmap_sz(void);
int __open_path_or_exit(const char *path, int flags, const char *enoent_help)
{
@@ -95,7 +95,7 @@ static ssize_t get_module_param(const char *module_name, const char *param,
return bytes_read;
}
static int get_module_param_integer(const char *module_name, const char *param)
int kvm_get_module_param_integer(const char *module_name, const char *param)
{
/*
* 16 bytes to hold a 64-bit value (1 byte per char), 1 byte for the
@@ -119,7 +119,7 @@ static int get_module_param_integer(const char *module_name, const char *param)
return atoi_paranoid(value);
}
static bool get_module_param_bool(const char *module_name, const char *param)
bool kvm_get_module_param_bool(const char *module_name, const char *param)
{
char value;
ssize_t r;
@@ -135,36 +135,6 @@ static bool get_module_param_bool(const char *module_name, const char *param)
TEST_FAIL("Unrecognized value '%c' for boolean module param", value);
}
bool get_kvm_param_bool(const char *param)
{
return get_module_param_bool("kvm", param);
}
bool get_kvm_intel_param_bool(const char *param)
{
return get_module_param_bool("kvm_intel", param);
}
bool get_kvm_amd_param_bool(const char *param)
{
return get_module_param_bool("kvm_amd", param);
}
int get_kvm_param_integer(const char *param)
{
return get_module_param_integer("kvm", param);
}
int get_kvm_intel_param_integer(const char *param)
{
return get_module_param_integer("kvm_intel", param);
}
int get_kvm_amd_param_integer(const char *param)
{
return get_module_param_integer("kvm_amd", param);
}
/*
* Capability
*
@@ -1321,14 +1291,14 @@ void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
}
/* Returns the size of a vCPU's kvm_run structure. */
static int vcpu_mmap_sz(void)
static size_t vcpu_mmap_sz(void)
{
int dev_fd, ret;
dev_fd = open_kvm_dev_path_or_exit();
ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
TEST_ASSERT(ret >= sizeof(struct kvm_run),
TEST_ASSERT(ret >= 0 && ret >= sizeof(struct kvm_run),
KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));
close(dev_fd);
@@ -1369,7 +1339,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm);
TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
"smaller than expected, vcpu_mmap_sz: %zi expected_min: %zi",
vcpu_mmap_sz(), sizeof(*vcpu->run));
vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
+49
View File
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include "kvm_util.h"
#include "processor.h"
#include "pmu.h"
const uint64_t intel_pmu_arch_events[] = {
@@ -19,6 +20,11 @@ const uint64_t intel_pmu_arch_events[] = {
INTEL_ARCH_BRANCHES_RETIRED,
INTEL_ARCH_BRANCHES_MISPREDICTED,
INTEL_ARCH_TOPDOWN_SLOTS,
INTEL_ARCH_TOPDOWN_BE_BOUND,
INTEL_ARCH_TOPDOWN_BAD_SPEC,
INTEL_ARCH_TOPDOWN_FE_BOUND,
INTEL_ARCH_TOPDOWN_RETIRING,
INTEL_ARCH_LBR_INSERTS,
};
kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS);
@@ -29,3 +35,46 @@ const uint64_t amd_pmu_zen_events[] = {
AMD_ZEN_BRANCHES_MISPREDICTED,
};
kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS);
/*
* For Intel Atom CPUs, the PMU events "Instruction Retired" or
* "Branch Instruction Retired" may be overcounted for certain
* instructions, like FAR CALL/JMP, RETF, IRET, VMENTRY/VMEXIT/VMPTRLD
* and complex SGX/SMX/CSTATE instructions/flows.
*
* The detailed information can be found in the errata (section SRF7):
* https://edc.intel.com/content/www/us/en/design/products-and-solutions/processors-and-chipsets/sierra-forest/xeon-6700-series-processor-with-e-cores-specification-update/errata-details/
*
* For the Atom platforms before Sierra Forest (including Sierra Forest),
* both the "Instruction Retired" and "Branch Instruction Retired" events
* would be overcounted on the affected instructions, but for Clearwater Forest
* only "Instruction Retired" event is overcounted on these instructions.
*/
static uint64_t get_pmu_errata(void)
{
if (!this_cpu_is_intel())
return 0;
if (this_cpu_family() != 0x6)
return 0;
switch (this_cpu_model()) {
case 0xDD: /* Clearwater Forest */
return BIT_ULL(INSTRUCTIONS_RETIRED_OVERCOUNT);
case 0xAF: /* Sierra Forest */
case 0x4D: /* Avaton, Rangely */
case 0x5F: /* Denverton */
case 0x86: /* Jacobsville */
return BIT_ULL(INSTRUCTIONS_RETIRED_OVERCOUNT) |
BIT_ULL(BRANCHES_RETIRED_OVERCOUNT);
default:
return 0;
}
}
uint64_t pmu_errata_mask;
/*
 * One-time setup: compute the PMU errata mask for the host CPU so that
 * this_pmu_has_errata() works (the mask is also synced into guests).
 */
void kvm_init_pmu_errata(void)
{
pmu_errata_mask = get_pmu_errata();
}
@@ -6,6 +6,7 @@
#include "linux/bitmap.h"
#include "test_util.h"
#include "kvm_util.h"
#include "pmu.h"
#include "processor.h"
#include "sev.h"
@@ -23,6 +24,39 @@ bool host_cpu_is_intel;
bool is_forced_emulation_enabled;
uint64_t guest_tsc_khz;
/*
 * Return a human-readable string for an exception vector, e.g. to pretty
 * print expected vs. actual exceptions in assertion messages.  Vector '0'
 * means "no exception": exception fixup morphs #DE (vector 0) to
 * KVM_MAGIC_DE_VECTOR precisely so that '0' can signal success.
 */
const char *ex_str(int vector)
{
	switch (vector) {
	case DE_VECTOR:			return "no exception";
	case KVM_MAGIC_DE_VECTOR:	return "#DE";
	case DB_VECTOR:			return "#DB";
	case NMI_VECTOR:		return "#NMI";
	case BP_VECTOR:			return "#BP";
	case OF_VECTOR:			return "#OF";
	case BR_VECTOR:			return "#BR";
	case UD_VECTOR:			return "#UD";
	case NM_VECTOR:			return "#NM";
	case DF_VECTOR:			return "#DF";
	case TS_VECTOR:			return "#TS";
	case NP_VECTOR:			return "#NP";
	case SS_VECTOR:			return "#SS";
	case GP_VECTOR:			return "#GP";
	case PF_VECTOR:			return "#PF";
	case MF_VECTOR:			return "#MF";
	case AC_VECTOR:			return "#AC";
	case MC_VECTOR:			return "#MC";
	case XM_VECTOR:			return "#XM";
	case VE_VECTOR:			return "#VE";
	case CP_VECTOR:			return "#CP";
	case HV_VECTOR:			return "#HV";
	case VC_VECTOR:			return "#VC";
	case SX_VECTOR:			return "#SX";
	default:			return "#??";
	}
}
static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
{
fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
@@ -557,7 +591,7 @@ static bool kvm_fixup_exception(struct ex_regs *regs)
return false;
if (regs->vector == DE_VECTOR)
return false;
regs->vector = KVM_MAGIC_DE_VECTOR;
regs->rip = regs->r11;
regs->r9 = regs->vector;
@@ -638,6 +672,7 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
sync_global_to_guest(vm, host_cpu_is_intel);
sync_global_to_guest(vm, host_cpu_is_amd);
sync_global_to_guest(vm, is_forced_emulation_enabled);
sync_global_to_guest(vm, pmu_errata_mask);
if (is_sev_vm(vm)) {
struct kvm_sev_init init = { 0 };
@@ -1269,6 +1304,8 @@ void kvm_selftest_arch_init(void)
host_cpu_is_intel = this_cpu_is_intel();
host_cpu_is_amd = this_cpu_is_amd();
is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();
kvm_init_pmu_errata();
}
bool sys_clocksource_is_based_on_tsc(void)
@@ -291,7 +291,7 @@ int main(int argc, char *argv[])
ksft_test_result_pass("%s\n", testlist[idx].subfunc_name);
free(array);
} else {
ksft_test_result_skip("%s feature is not avaialable\n",
ksft_test_result_skip("%s feature is not available\n",
testlist[idx].subfunc_name);
}
}
+63 -19
View File
@@ -8,14 +8,21 @@
* to set RFLAGS.CF based on whether or not the input is even or odd, so that
* instructions like ADC and SBB are deterministic.
*/
/*
 * Emit a "fastop" sequence: seed RFLAGS.CF via BT from bit 0 of the input
 * (so that instructions like ADC and SBB are deterministic), execute the
 * instruction under test, then capture the resulting RFLAGS via PUSHFQ+POP.
 */
#define fastop(__insn) \
"bt $0, %[bt_val]\n\t" \
__insn "\n\t" \
"pushfq\n\t" \
"pop %[flags]\n\t"
/* Output constraint capturing RFLAGS from the fastop() sequence. */
#define flags_constraint(flags_val) [flags]"=r"(flags_val)
/* Input constraint for the value whose bit 0 seeds CF via the BT. */
#define bt_constraint(__bt_val) [bt_val]"rm"((uint32_t)__bt_val)
#define guest_execute_fastop_1(FEP, insn, __val, __flags) \
({ \
__asm__ __volatile__("bt $0, %[val]\n\t" \
FEP insn " %[val]\n\t" \
"pushfq\n\t" \
"pop %[flags]\n\t" \
: [val]"+r"(__val), [flags]"=r"(__flags) \
: : "cc", "memory"); \
__asm__ __volatile__(fastop(FEP insn " %[val]") \
: [val]"+r"(__val), flags_constraint(__flags) \
: bt_constraint(__val) \
: "cc", "memory"); \
})
#define guest_test_fastop_1(insn, type_t, __val) \
@@ -36,12 +43,10 @@
#define guest_execute_fastop_2(FEP, insn, __input, __output, __flags) \
({ \
__asm__ __volatile__("bt $0, %[output]\n\t" \
FEP insn " %[input], %[output]\n\t" \
"pushfq\n\t" \
"pop %[flags]\n\t" \
: [output]"+r"(__output), [flags]"=r"(__flags) \
: [input]"r"(__input) : "cc", "memory"); \
__asm__ __volatile__(fastop(FEP insn " %[input], %[output]") \
: [output]"+r"(__output), flags_constraint(__flags) \
: [input]"r"(__input), bt_constraint(__output) \
: "cc", "memory"); \
})
#define guest_test_fastop_2(insn, type_t, __val1, __val2) \
@@ -63,12 +68,10 @@
#define guest_execute_fastop_cl(FEP, insn, __shift, __output, __flags) \
({ \
__asm__ __volatile__("bt $0, %[output]\n\t" \
FEP insn " %%cl, %[output]\n\t" \
"pushfq\n\t" \
"pop %[flags]\n\t" \
: [output]"+r"(__output), [flags]"=r"(__flags) \
: "c"(__shift) : "cc", "memory"); \
__asm__ __volatile__(fastop(FEP insn " %%cl, %[output]") \
: [output]"+r"(__output), flags_constraint(__flags) \
: "c"(__shift), bt_constraint(__output) \
: "cc", "memory"); \
})
#define guest_test_fastop_cl(insn, type_t, __val1, __val2) \
@@ -89,6 +92,42 @@
ex_flags, insn, shift, (uint64_t)input, flags); \
})
/*
 * Execute a division-style instruction (implicit RAX:RDX operands) under
 * exception fixup, either natively (KVM_ASM_SAFE) or via forced emulation
 * (KVM_ASM_SAFE_FEP).  "Returns" the exception vector taken, '0' on clean
 * execution; the error code is ignored.  RFLAGS is captured in __flags.
 */
#define guest_execute_fastop_div(__KVM_ASM_SAFE, insn, __a, __d, __rm, __flags) \
({ \
uint64_t ign_error_code; \
uint8_t vector; \
\
__asm__ __volatile__(fastop(__KVM_ASM_SAFE(insn " %[denom]")) \
: "+a"(__a), "+d"(__d), flags_constraint(__flags), \
KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
: [denom]"rm"(__rm), bt_constraint(__rm) \
: "cc", "memory", KVM_ASM_SAFE_CLOBBERS); \
vector; \
})
/*
 * Compare forced-emulation vs. native execution of a division instruction:
 * the taken exception vector, the RAX:RDX outputs, and (if neither run
 * faulted) RFLAGS must all match.
 *
 * Fix: drop the redundant GUEST_ASSERT_EQ(v, ex_v) that preceded the
 * __GUEST_ASSERT on the same condition; it fired first and reported only the
 * raw vector values, hiding the instruction/operand diagnostics below.
 */
#define guest_test_fastop_div(insn, type_t, __val1, __val2)	\
({ \
	type_t _a = __val1, _d = __val1, rm = __val2; \
	type_t a = _a, d = _d, ex_a = _a, ex_d = _d; \
	uint64_t flags, ex_flags; \
	uint8_t v, ex_v; \
	\
	ex_v = guest_execute_fastop_div(KVM_ASM_SAFE, insn, ex_a, ex_d, rm, ex_flags); \
	v = guest_execute_fastop_div(KVM_ASM_SAFE_FEP, insn, a, d, rm, flags); \
	\
	__GUEST_ASSERT(v == ex_v, \
		       "Wanted vector 0x%x for '%s 0x%lx:0x%lx/0x%lx', got 0x%x", \
		       ex_v, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, v); \
	__GUEST_ASSERT(a == ex_a && d == ex_d, \
		       "Wanted 0x%lx:0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx:0x%lx",\
		       (uint64_t)ex_a, (uint64_t)ex_d, insn, (uint64_t)_a, \
		       (uint64_t)_d, (uint64_t)rm, (uint64_t)a, (uint64_t)d); \
	__GUEST_ASSERT(v || ex_v || (flags == ex_flags), \
		       "Wanted flags 0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx", \
		       ex_flags, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, flags);\
})
static const uint64_t vals[] = {
0,
1,
@@ -115,14 +154,16 @@ do { \
guest_test_fastop_2("add" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("adc" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("and" suffix, type_t, vals[i], vals[j]); \
if (sizeof(type_t) != 1) { \
guest_test_fastop_2("bsf" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("bsr" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("bt" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("btc" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("btr" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("bts" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("cmp" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("imul" suffix, type_t, vals[i], vals[j]); \
} \
guest_test_fastop_2("cmp" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("or" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("sbb" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("sub" suffix, type_t, vals[i], vals[j]); \
@@ -136,12 +177,15 @@ do { \
guest_test_fastop_cl("sar" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_cl("shl" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_cl("shr" suffix, type_t, vals[i], vals[j]); \
\
guest_test_fastop_div("div" suffix, type_t, vals[i], vals[j]); \
} \
} \
} while (0)
static void guest_code(void)
{
guest_test_fastops(uint8_t, "b");
guest_test_fastops(uint16_t, "w");
guest_test_fastops(uint32_t, "l");
guest_test_fastops(uint64_t, "q");
@@ -45,7 +45,7 @@ static void test_hv_cpuid(struct kvm_vcpu *vcpu, bool evmcs_expected)
TEST_ASSERT((entry->function >= 0x40000000) &&
(entry->function <= 0x40000082),
"function %x is our of supported range",
"function %x is out of supported range",
entry->function);
TEST_ASSERT(entry->index == 0,
@@ -54,12 +54,12 @@ static void guest_msr(struct msr_data *msr)
if (msr->fault_expected)
__GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP on %sMSR(0x%x), got vector '0x%x'",
msr->write ? "WR" : "RD", msr->idx, vector);
"Expected #GP on %sMSR(0x%x), got %s",
msr->write ? "WR" : "RD", msr->idx, ex_str(vector));
else
__GUEST_ASSERT(!vector,
"Expected success on %sMSR(0x%x), got vector '0x%x'",
msr->write ? "WR" : "RD", msr->idx, vector);
"Expected success on %sMSR(0x%x), got %s",
msr->write ? "WR" : "RD", msr->idx, ex_str(vector));
if (vector || is_write_only_msr(msr->idx))
goto done;
@@ -102,12 +102,12 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
vector = __hyperv_hypercall(hcall->control, input, output, &res);
if (hcall->ud_expected) {
__GUEST_ASSERT(vector == UD_VECTOR,
"Expected #UD for control '%lu', got vector '0x%x'",
hcall->control, vector);
"Expected #UD for control '%lu', got %s",
hcall->control, ex_str(vector));
} else {
__GUEST_ASSERT(!vector,
"Expected no exception for control '%lu', got vector '0x%x'",
hcall->control, vector);
"Expected no exception for control '%lu', got %s",
hcall->control, ex_str(vector));
GUEST_ASSERT_EQ(res, hcall->expect);
}
@@ -30,12 +30,12 @@ do { \
\
if (fault_wanted) \
__GUEST_ASSERT((vector) == UD_VECTOR, \
"Expected #UD on " insn " for testcase '0x%x', got '0x%x'", \
testcase, vector); \
"Expected #UD on " insn " for testcase '0x%x', got %s", \
testcase, ex_str(vector)); \
else \
__GUEST_ASSERT(!(vector), \
"Expected success on " insn " for testcase '0x%x', got '0x%x'", \
testcase, vector); \
"Expected success on " insn " for testcase '0x%x', got %s", \
testcase, ex_str(vector)); \
} while (0)
static void guest_monitor_wait(void *arg)
@@ -75,6 +75,11 @@ static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx)
[INTEL_ARCH_BRANCHES_RETIRED_INDEX] = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
[INTEL_ARCH_TOPDOWN_SLOTS_INDEX] = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
[INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX] = { X86_PMU_FEATURE_TOPDOWN_BE_BOUND, X86_PMU_FEATURE_NULL },
[INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX] = { X86_PMU_FEATURE_TOPDOWN_BAD_SPEC, X86_PMU_FEATURE_NULL },
[INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX] = { X86_PMU_FEATURE_TOPDOWN_FE_BOUND, X86_PMU_FEATURE_NULL },
[INTEL_ARCH_TOPDOWN_RETIRING_INDEX] = { X86_PMU_FEATURE_TOPDOWN_RETIRING, X86_PMU_FEATURE_NULL },
[INTEL_ARCH_LBR_INSERTS_INDEX] = { X86_PMU_FEATURE_LBR_INSERTS, X86_PMU_FEATURE_NULL },
};
kvm_static_assert(ARRAY_SIZE(__intel_event_to_feature) == NR_INTEL_ARCH_EVENTS);
@@ -158,10 +163,18 @@ static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr
switch (idx) {
case INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX:
GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED);
/* Relax precise count check due to VM-EXIT/VM-ENTRY overcount issue */
if (this_pmu_has_errata(INSTRUCTIONS_RETIRED_OVERCOUNT))
GUEST_ASSERT(count >= NUM_INSNS_RETIRED);
else
GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED);
break;
case INTEL_ARCH_BRANCHES_RETIRED_INDEX:
GUEST_ASSERT_EQ(count, NUM_BRANCH_INSNS_RETIRED);
/* Relax precise count check due to VM-EXIT/VM-ENTRY overcount issue */
if (this_pmu_has_errata(BRANCHES_RETIRED_OVERCOUNT))
GUEST_ASSERT(count >= NUM_BRANCH_INSNS_RETIRED);
else
GUEST_ASSERT_EQ(count, NUM_BRANCH_INSNS_RETIRED);
break;
case INTEL_ARCH_LLC_REFERENCES_INDEX:
case INTEL_ARCH_LLC_MISSES_INDEX:
@@ -171,9 +184,12 @@ static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr
fallthrough;
case INTEL_ARCH_CPU_CYCLES_INDEX:
case INTEL_ARCH_REFERENCE_CYCLES_INDEX:
case INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX:
case INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX:
GUEST_ASSERT_NE(count, 0);
break;
case INTEL_ARCH_TOPDOWN_SLOTS_INDEX:
case INTEL_ARCH_TOPDOWN_RETIRING_INDEX:
__GUEST_ASSERT(count >= NUM_INSNS_RETIRED,
"Expected top-down slots >= %u, got count = %lu",
NUM_INSNS_RETIRED, count);
@@ -313,7 +329,7 @@ static void guest_test_arch_events(void)
}
static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
uint8_t length, uint8_t unavailable_mask)
uint8_t length, uint32_t unavailable_mask)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -322,6 +338,9 @@ static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
if (!pmu_version)
return;
unavailable_mask &= GENMASK(X86_PROPERTY_PMU_EVENTS_MASK.hi_bit,
X86_PROPERTY_PMU_EVENTS_MASK.lo_bit);
vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_arch_events,
pmu_version, perf_capabilities);
@@ -346,8 +365,8 @@ static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
#define GUEST_ASSERT_PMC_MSR_ACCESS(insn, msr, expect_gp, vector) \
__GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector, \
"Expected %s on " #insn "(0x%x), got vector %u", \
expect_gp ? "#GP" : "no fault", msr, vector) \
"Expected %s on " #insn "(0x%x), got %s", \
expect_gp ? "#GP" : "no fault", msr, ex_str(vector)) \
#define GUEST_ASSERT_PMC_VALUE(insn, msr, val, expected) \
__GUEST_ASSERT(val == expected, \
@@ -576,6 +595,26 @@ static void test_intel_counters(void)
PMU_CAP_FW_WRITES,
};
/*
* To keep the total runtime reasonable, test only a handful of select,
* semi-arbitrary values for the mask of unavailable PMU events. Test
* 0 (all events available) and all ones (no events available) as well
* as alternating bit sequences, e.g. to detect if KVM is checking the
* wrong bit(s).
*/
const uint32_t unavailable_masks[] = {
0x0,
0xffffffffu,
0xaaaaaaaau,
0x55555555u,
0xf0f0f0f0u,
0x0f0f0f0fu,
0xa0a0a0a0u,
0x0a0a0a0au,
0x50505050u,
0x05050505u,
};
/*
* Test up to PMU v5, which is the current maximum version defined by
* Intel, i.e. is the last version that is guaranteed to be backwards
@@ -613,16 +652,7 @@ static void test_intel_counters(void)
pr_info("Testing arch events, PMU version %u, perf_caps = %lx\n",
v, perf_caps[i]);
/*
* To keep the total runtime reasonable, test every
* possible non-zero, non-reserved bitmap combination
* only with the native PMU version and the full bit
* vector length.
*/
if (v == pmu_version) {
for (k = 1; k < (BIT(NR_INTEL_ARCH_EVENTS) - 1); k++)
test_arch_events(v, perf_caps[i], NR_INTEL_ARCH_EVENTS, k);
}
/*
* Test single bits for all PMU version and lengths up
* the number of events +1 (to verify KVM doesn't do
@@ -631,11 +661,8 @@ static void test_intel_counters(void)
* ones i.e. all events being available and unavailable.
*/
for (j = 0; j <= NR_INTEL_ARCH_EVENTS + 1; j++) {
test_arch_events(v, perf_caps[i], j, 0);
test_arch_events(v, perf_caps[i], j, 0xff);
for (k = 0; k < NR_INTEL_ARCH_EVENTS; k++)
test_arch_events(v, perf_caps[i], j, BIT(k));
for (k = 1; k < ARRAY_SIZE(unavailable_masks); k++)
test_arch_events(v, perf_caps[i], j, unavailable_masks[k]);
}
pr_info("Testing GP counters, PMU version %u, perf_caps = %lx\n",
@@ -214,8 +214,10 @@ static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
do { \
uint64_t br = pmc_results.branches_retired; \
uint64_t ir = pmc_results.instructions_retired; \
bool br_matched = this_pmu_has_errata(BRANCHES_RETIRED_OVERCOUNT) ? \
br >= NUM_BRANCHES : br == NUM_BRANCHES; \
\
if (br && br != NUM_BRANCHES) \
if (br && !br_matched) \
pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \
__func__, br, NUM_BRANCHES); \
TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)", \
@@ -29,7 +29,7 @@ static union perf_capabilities {
u64 pebs_baseline:1;
u64 perf_metrics:1;
u64 pebs_output_pt_available:1;
u64 anythread_deprecated:1;
u64 pebs_timing_info:1;
};
u64 capabilities;
} host_cap;
@@ -44,6 +44,7 @@ static const union perf_capabilities immutable_caps = {
.pebs_arch_reg = 1,
.pebs_format = -1,
.pebs_baseline = 1,
.pebs_timing_info = 1,
};
static const union perf_capabilities format_caps = {
@@ -56,8 +57,8 @@ static void guest_test_perf_capabilities_gp(uint64_t val)
uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val);
__GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP for value '0x%lx', got vector '0x%x'",
val, vector);
"Expected #GP for value '0x%lx', got %s",
val, ex_str(vector));
}
static void guest_code(uint64_t current_val)
@@ -120,8 +120,8 @@ static void test_icr(struct xapic_vcpu *x)
__test_icr(x, icr | i);
/*
* Send all flavors of IPIs to non-existent vCPUs. TODO: use number of
* vCPUs, not vcpu.id + 1. Arbitrarily use vector 0xff.
* Send all flavors of IPIs to non-existent vCPUs. Arbitrarily use
* vector 0xff.
*/
icr = APIC_INT_ASSERT | 0xff;
for (i = 0; i < 0xff; i++) {
@@ -81,13 +81,13 @@ static void guest_code(void)
vector = xsetbv_safe(0, XFEATURE_MASK_FP);
__GUEST_ASSERT(!vector,
"Expected success on XSETBV(FP), got vector '0x%x'",
vector);
"Expected success on XSETBV(FP), got %s",
ex_str(vector));
vector = xsetbv_safe(0, supported_xcr0);
__GUEST_ASSERT(!vector,
"Expected success on XSETBV(0x%lx), got vector '0x%x'",
supported_xcr0, vector);
"Expected success on XSETBV(0x%lx), got %s",
supported_xcr0, ex_str(vector));
for (i = 0; i < 64; i++) {
if (supported_xcr0 & BIT_ULL(i))
@@ -95,8 +95,8 @@ static void guest_code(void)
vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i));
__GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP on XSETBV(0x%llx), supported XCR0 = %lx, got vector '0x%x'",
BIT_ULL(i), supported_xcr0, vector);
"Expected #GP on XSETBV(0x%llx), supported XCR0 = %lx, got %s",
BIT_ULL(i), supported_xcr0, ex_str(vector));
}
GUEST_DONE();