KVM: arm64: Calculate cptr_el2 traps on activating traps
Similar to VHE, calculate the value of cptr_el2 from scratch when
activating traps. This removes the need to store cptr_el2 in every
vcpu structure. Moreover, some traps, such as those that depend on
whether the guest owns the FP registers, need to be recomputed on
every vcpu run.
Reported-by: James Clark <james.clark@linaro.org>
Fixes: 5294afdbf4 ("KVM: arm64: Exclude FP ownership from kvm_vcpu_arch")
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20241216105057.579031-13-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
@@ -708,7 +708,6 @@ struct kvm_vcpu_arch {
|
||||
u64 hcr_el2;
|
||||
u64 hcrx_el2;
|
||||
u64 mdcr_el2;
|
||||
u64 cptr_el2;
|
||||
|
||||
/* Exception Information */
|
||||
struct kvm_vcpu_fault_info fault;
|
||||
|
||||
@@ -1546,7 +1546,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
|
||||
}
|
||||
|
||||
vcpu_reset_hcr(vcpu);
|
||||
vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
|
||||
|
||||
/*
|
||||
* Handle the "start in power-off" case.
|
||||
|
||||
@@ -83,44 +83,6 @@ static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
|
||||
vcpu->arch.hcr_el2 = val;
|
||||
}
|
||||
|
||||
/*
 * pvm_init_traps_cptr() - Compute the CPTR_EL2 trap configuration for a
 * protected-VM vcpu from the features exposed to the guest, starting from
 * the value already stored in vcpu->arch.cptr_el2, and write the result
 * back to vcpu->arch.cptr_el2.
 *
 * NOTE(review): this is the pre-commit version of the function; the commit
 * this page renders deletes it in favour of computing the value on each
 * trap activation.
 */
static void pvm_init_traps_cptr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
u64 val = vcpu->arch.cptr_el2;
|
||||
|
||||
/* In nVHE (non-hVHE) mode, force the architectural RES1/RES0 bits. */
if (!has_hvhe()) {
|
||||
val |= CPTR_NVHE_EL2_RES1;
|
||||
val &= ~(CPTR_NVHE_EL2_RES0);
|
||||
}
|
||||
|
||||
/* No AMU exposed to the guest: trap Activity Monitor accesses. */
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
|
||||
val |= CPTR_EL2_TAM;
|
||||
|
||||
/* SVE can be disabled by userspace even if supported. */
|
||||
if (!vcpu_has_sve(vcpu)) {
|
||||
/* hVHE uses CPACR_ELx ZEN enable bits; nVHE uses the TZ trap bit. */
if (has_hvhe())
|
||||
val &= ~(CPACR_ELx_ZEN);
|
||||
else
|
||||
val |= CPTR_EL2_TZ;
|
||||
}
|
||||
|
||||
/* No SME support in KVM. */
|
||||
BUG_ON(kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP));
|
||||
if (has_hvhe())
|
||||
val &= ~(CPACR_ELx_SMEN);
|
||||
else
|
||||
val |= CPTR_EL2_TSM;
|
||||
|
||||
/* No trace support for the guest: trap trace-register accesses. */
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP)) {
|
||||
if (has_hvhe())
|
||||
val |= CPACR_EL1_TTA;
|
||||
else
|
||||
val |= CPTR_EL2_TTA;
|
||||
}
|
||||
|
||||
vcpu->arch.cptr_el2 = val;
|
||||
}
|
||||
|
||||
static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
@@ -191,7 +153,6 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
|
||||
int ret;
|
||||
|
||||
vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
|
||||
vcpu->arch.mdcr_el2 = 0;
|
||||
|
||||
pkvm_vcpu_reset_hcr(vcpu);
|
||||
@@ -204,7 +165,6 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
return ret;
|
||||
|
||||
pvm_init_traps_hcr(vcpu);
|
||||
pvm_init_traps_cptr(vcpu);
|
||||
pvm_init_traps_mdcr(vcpu);
|
||||
|
||||
return 0;
|
||||
@@ -644,8 +604,6 @@ unlock:
|
||||
return ret;
|
||||
}
|
||||
|
||||
hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -35,33 +35,46 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
|
||||
|
||||
extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
|
||||
|
||||
/*
 * NOTE(review): the lines below are a unified-diff rendering whose +/-
 * markers were lost in extraction — the OLD __activate_traps() body and
 * the NEW __activate_cptr_traps() body are interleaved line-by-line.
 * This is not compilable C as-is; recover either version from the
 * original commit before editing. The new function appears to build the
 * CPTR_EL2/CPACR value from scratch (TAM always set, TTA per mode, FP/SVE
 * enables gated on guest_owns_fp_regs()) and ends in kvm_write_cptr_el2().
 */
static void __activate_traps(struct kvm_vcpu *vcpu)
|
||||
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 val;
|
||||
u64 val = CPTR_EL2_TAM;	/* Same bit irrespective of E2H */
|
||||
|
||||
___activate_traps(vcpu, vcpu->arch.hcr_el2);
|
||||
__activate_traps_common(vcpu);
|
||||
if (has_hvhe()) {
|
||||
val |= CPACR_ELx_TTA;
|
||||
|
||||
val = vcpu->arch.cptr_el2;
|
||||
val |= CPTR_EL2_TAM;	/* Same bit irrespective of E2H */
|
||||
val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
|
||||
if (cpus_have_final_cap(ARM64_SME)) {
|
||||
if (has_hvhe())
|
||||
val &= ~CPACR_ELx_SMEN;
|
||||
else
|
||||
val |= CPTR_EL2_TSM;
|
||||
if (guest_owns_fp_regs()) {
|
||||
val |= CPACR_ELx_FPEN;
|
||||
if (vcpu_has_sve(vcpu))
|
||||
val |= CPACR_ELx_ZEN;
|
||||
}
|
||||
} else {
|
||||
val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
|
||||
|
||||
/*
|
||||
* Always trap SME since it's not supported in KVM.
|
||||
* TSM is RES1 if SME isn't implemented.
|
||||
*/
|
||||
val |= CPTR_EL2_TSM;
|
||||
|
||||
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
|
||||
val |= CPTR_EL2_TZ;
|
||||
|
||||
if (!guest_owns_fp_regs())
|
||||
val |= CPTR_EL2_TFP;
|
||||
}
|
||||
|
||||
if (!guest_owns_fp_regs()) {
|
||||
if (has_hvhe())
|
||||
val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
|
||||
else
|
||||
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
|
||||
|
||||
if (!guest_owns_fp_regs())
|
||||
__activate_traps_fpsimd32(vcpu);
|
||||
}
|
||||
|
||||
kvm_write_cptr_el2(val);
|
||||
}
|
||||
|
||||
static void __activate_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
___activate_traps(vcpu, vcpu->arch.hcr_el2);
|
||||
__activate_traps_common(vcpu);
|
||||
__activate_cptr_traps(vcpu);
|
||||
|
||||
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
|
||||
|
||||
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
|
||||
|
||||
Reference in New Issue
Block a user