linux/tools/testing/selftests/kvm/lib/aarch64/ucall.c
Peter Gonda 426729b2cf KVM: selftests: Add ucall pool based implementation
To play nice with guests whose stack memory is encrypted, e.g. AMD SEV,
introduce a new "ucall pool" implementation that passes the ucall struct
via dedicated memory (which can be mapped shared, a.k.a. plain text).
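
Concretely, the pool is a chunk of dedicated guest memory holding one ucall
slot per vCPU plus a little bookkeeping. A rough sketch of the layout (the
struct and field names here are illustrative, not necessarily the exact ones
in the selftests headers):

	struct ucall_header {
		DECLARE_BITMAP(in_use, KVM_MAX_VCPUS);	/* which slots are taken */
		struct ucall ucalls[KVM_MAX_VCPUS];	/* one slot per possible vCPU */
	};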

Because not all architectures have access to the vCPU index in the guest,
use a bitmap with atomic accesses to track which entries in the pool are
free/used.  A list+lock could also work in theory, but synchronizing the
individual pointers to the guest would be a mess.
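
A minimal sketch of the guest-side allocator this implies, assuming the
layout above and a guest-visible ucall_pool pointer (again, names are
illustrative): each vCPU claims the first free slot with an atomic
test-and-set, always walking from bit 0, and releases it by clearing the bit.

	static struct ucall *ucall_alloc(void)
	{
		int i;

		/* Always walk the bitmap 0 => N; see the reasoning below. */
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			if (!test_and_set_bit(i, ucall_pool->in_use))
				return &ucall_pool->ucalls[i];
		}

		GUEST_ASSERT(0);
		return NULL;
	}

	static void ucall_free(struct ucall *uc)
	{
		/* Pointer arithmetic recovers the slot index within the pool. */
		clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use);
	}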

Note, there's no need to rewalk the bitmap to ensure success.  If all
vCPUs are simply allocating, success is guaranteed because there are
enough entries for all vCPUs.  If one or more vCPUs are freeing and then
reallocating, success is guaranteed because vCPUs _always_ walk the
bitmap from 0=>N; if a vCPU frees an entry and then wins a race to
re-allocate, then either it will consume the entry it just freed (bit is
the first free bit), or the losing vCPU is guaranteed to see the freed
bit (winner consumes an earlier bit, which the loser hasn't yet visited).

Reviewed-by: Andrew Jones <andrew.jones@linux.dev>
Signed-off-by: Peter Gonda <pgonda@google.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221006003409.649993-8-seanjc@google.com
2022-11-16 16:58:53 -08:00


// SPDX-License-Identifier: GPL-2.0
/*
 * ucall support. A ucall is a "hypercall to userspace".
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
#include "kvm_util.h"

/*
 * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
 * VM); it must not be accessed from host code.
 */
static vm_vaddr_t *ucall_exit_mmio_addr;

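/*
 * Host-side setup: identity map the ucall MMIO page into the guest and patch
 * the guest's copy of ucall_exit_mmio_addr with its address.
 */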
void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
	virt_pg_map(vm, mmio_gpa, mmio_gpa);

	vm->ucall_mmio_addr = mmio_gpa;

	write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gpa);
}

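/*
 * Guest-side: trigger an exit to userspace (KVM_EXIT_MMIO) by writing the
 * address of the ucall struct to the MMIO page.
 */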
void ucall_arch_do_ucall(vm_vaddr_t uc)
{
	WRITE_ONCE(*ucall_exit_mmio_addr, uc);
}

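/*
 * Host-side: if the vCPU's exit was a write to the ucall MMIO address, return
 * the pointer the guest wrote; otherwise return NULL (not a ucall exit).
 */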
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (run->exit_reason == KVM_EXIT_MMIO &&
	    run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {
		TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),
			    "Unexpected ucall exit mmio address access");
		return (void *)(*((uint64_t *)run->mmio.data));
	}

	return NULL;
}
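
For reference, the generic ucall API built on top of these arch hooks is
consumed roughly as follows. This is a sketch only, not part of this file; it
assumes the usual selftests helpers (GUEST_SYNC()/GUEST_DONE() in the guest,
vcpu_run()/get_ucall() in the host).

	static void guest_code(void)
	{
		GUEST_SYNC(1);		/* expands to a ucall(UCALL_SYNC, ...) */
		GUEST_DONE();
	}

	static void run_vcpu(struct kvm_vcpu *vcpu)
	{
		struct ucall uc;

		for (;;) {
			vcpu_run(vcpu);

			switch (get_ucall(vcpu, &uc)) {
			case UCALL_SYNC:
				/* uc.args[] carries the GUEST_SYNC() arguments. */
				break;
			case UCALL_DONE:
				return;
			default:
				TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
			}
		}
	}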