refactor: move arch specific code from vm.rs into modules #5031

Merged
merged 12 commits on Feb 21, 2025
113 changes: 15 additions & 98 deletions src/vmm/src/builder.rs
@@ -175,10 +175,6 @@
.map_err(VmmError::Vm)
.map_err(StartMicrovmError::Internal)?;

let vcpus_exit_evt = EventFd::new(libc::EFD_NONBLOCK)
.map_err(VmmError::EventFd)
.map_err(Internal)?;

let resource_allocator = ResourceAllocator::new()?;

// Instantiate the MMIO device manager.
@@ -187,13 +183,13 @@
// Instantiate ACPI device manager.
let acpi_device_manager = ACPIDeviceManager::new();

// For x86_64 we need to create the interrupt controller before calling `KVM_CREATE_VCPUS`
// while on aarch64 we need to do it the other way around.
#[cfg(target_arch = "x86_64")]
let (vcpus, pio_device_manager) = {
setup_interrupt_controller(&mut vm)?;
let vcpus = create_vcpus(&kvm, &vm, vcpu_count, &vcpus_exit_evt).map_err(Internal)?;
let (vcpus, vcpus_exit_evt) = vm
.create_vcpus(vcpu_count)
.map_err(VmmError::Vm)
.map_err(Internal)?;

#[cfg(target_arch = "x86_64")]
let pio_device_manager = {
// Make stdout non blocking.
set_stdout_nonblocking();

@@ -208,25 +204,10 @@
.map_err(Internal)?;

// create pio dev manager with legacy devices
let pio_device_manager = {
// TODO Remove these unwraps.
let mut pio_dev_mgr = PortIODeviceManager::new(serial_device, reset_evt).unwrap();
pio_dev_mgr.register_devices(vm.fd()).unwrap();
pio_dev_mgr
};

(vcpus, pio_device_manager)
};

// On aarch64, the vCPUs need to be created (i.e call KVM_CREATE_VCPU) before setting up the
// IRQ chip because the `KVM_CREATE_VCPU` ioctl will return error if the IRQCHIP
// was already initialized.
// Search for `kvm_arch_vcpu_create` in arch/arm/kvm/arm.c.
#[cfg(target_arch = "aarch64")]
let vcpus = {
let vcpus = create_vcpus(&kvm, &vm, vcpu_count, &vcpus_exit_evt).map_err(Internal)?;
setup_interrupt_controller(&mut vm, vcpu_count)?;
vcpus
// TODO Remove these unwraps.
let mut pio_dev_mgr = PortIODeviceManager::new(serial_device, reset_evt).unwrap();
pio_dev_mgr.register_devices(vm.fd()).unwrap();
pio_dev_mgr
};

let vmm = Vmm {
@@ -436,7 +417,7 @@
/// Could not set TSC scaling within the snapshot: {0}
SetTsc(#[from] crate::vstate::vcpu::SetTscError),
/// Failed to restore microVM state: {0}
RestoreState(#[from] crate::vstate::vm::RestoreStateError),
RestoreState(#[from] crate::vstate::vm::ArchVmError),
/// Failed to update microVM configuration: {0}
VmUpdateConfig(#[from] MachineConfigError),
/// Failed to restore MMIO device: {0}
@@ -671,22 +652,6 @@
})
}

/// Sets up the irqchip for a x86_64 microVM.
#[cfg(target_arch = "x86_64")]
pub fn setup_interrupt_controller(vm: &mut Vm) -> Result<(), StartMicrovmError> {
vm.setup_irqchip()
.map_err(VmmError::Vm)
.map_err(StartMicrovmError::Internal)
}

/// Sets up the irqchip for a aarch64 microVM.
#[cfg(target_arch = "aarch64")]
pub fn setup_interrupt_controller(vm: &mut Vm, vcpu_count: u8) -> Result<(), StartMicrovmError> {
vm.setup_irqchip(vcpu_count)
.map_err(VmmError::Vm)
.map_err(StartMicrovmError::Internal)
}

/// Sets up the serial device.
pub fn setup_serial_device(
event_manager: &mut EventManager,
@@ -744,21 +709,6 @@
.map_err(VmmError::RegisterMMIODevice)
}

fn create_vcpus(
kvm: &Kvm,
vm: &Vm,
vcpu_count: u8,
exit_evt: &EventFd,
) -> Result<Vec<Vcpu>, VmmError> {
let mut vcpus = Vec::with_capacity(vcpu_count as usize);
for cpu_idx in 0..vcpu_count {
let exit_evt = exit_evt.try_clone().map_err(VmmError::EventFd)?;
let vcpu = Vcpu::new(cpu_idx, vm, kvm, exit_evt).map_err(VmmError::VcpuCreate)?;
vcpus.push(vcpu);
}
Ok(vcpus)
}

/// Configures the system for booting Linux.
#[cfg_attr(target_arch = "aarch64", allow(unused))]
pub fn configure_system_for_boot(
@@ -1065,14 +1015,15 @@
use crate::devices::virtio::{TYPE_BALLOON, TYPE_BLOCK, TYPE_RNG};
use crate::mmds::data_store::{Mmds, MmdsVersion};
use crate::mmds::ns::MmdsNetworkStack;
use crate::test_utils::{arch_mem, single_region_mem, single_region_mem_at};
use crate::test_utils::{single_region_mem, single_region_mem_at};
use crate::vmm_config::balloon::{BalloonBuilder, BalloonDeviceConfig, BALLOON_DEV_ID};
use crate::vmm_config::boot_source::DEFAULT_KERNEL_CMDLINE;
use crate::vmm_config::drive::{BlockBuilder, BlockDeviceConfig};
use crate::vmm_config::entropy::{EntropyDeviceBuilder, EntropyDeviceConfig};
use crate::vmm_config::net::{NetBuilder, NetworkInterfaceConfig};
use crate::vmm_config::vsock::tests::default_config;
use crate::vmm_config::vsock::{VsockBuilder, VsockDeviceConfig};
use crate::vstate::vm::tests::setup_vm_with_memory;

#[derive(Debug)]
pub(crate) struct CustomBlockConfig {
@@ -1128,16 +1079,8 @@
}

pub(crate) fn default_vmm() -> Vmm {
let guest_memory = arch_mem(128 << 20);
let (kvm, mut vm, guest_memory) = setup_vm_with_memory(128 << 20);

let vcpus_exit_evt = EventFd::new(libc::EFD_NONBLOCK)
.map_err(VmmError::EventFd)
.map_err(StartMicrovmError::Internal)
.unwrap();

let kvm = Kvm::new(vec![]).unwrap();
let mut vm = Vm::new(&kvm).unwrap();
vm.memory_init(&guest_memory).unwrap();
let mmio_device_manager = MMIODeviceManager::new();
let acpi_device_manager = ACPIDeviceManager::new();
#[cfg(target_arch = "x86_64")]
@@ -1156,15 +1099,7 @@
)
.unwrap();

#[cfg(target_arch = "x86_64")]
setup_interrupt_controller(&mut vm).unwrap();

#[cfg(target_arch = "aarch64")]
{
let exit_evt = EventFd::new(libc::EFD_NONBLOCK).unwrap();
let _vcpu = Vcpu::new(1, &vm, &kvm, exit_evt).unwrap();
setup_interrupt_controller(&mut vm, 1).unwrap();
}
let (_, vcpus_exit_evt) = vm.create_vcpus(1).unwrap();

Vmm {
events_observer: Some(std::io::stdin()),
@@ -1383,24 +1318,6 @@
);
}

#[test]
fn test_create_vcpus() {
let vcpu_count = 2;
let guest_memory = arch_mem(128 << 20);

let kvm = Kvm::new(vec![]).expect("Cannot create Kvm");
#[allow(unused_mut)]
let mut vm = Vm::new(&kvm).unwrap();
vm.memory_init(&guest_memory).unwrap();
let evfd = EventFd::new(libc::EFD_NONBLOCK).unwrap();

#[cfg(target_arch = "x86_64")]
setup_interrupt_controller(&mut vm).unwrap();

let vcpu_vec = create_vcpus(&kvm, &vm, vcpu_count, &evfd).unwrap();
assert_eq!(vcpu_vec.len(), vcpu_count as usize);
}

#[test]
fn test_attach_net_devices() {
let mut event_manager = EventManager::new().expect("Unable to create EventManager");
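Taken together with the removed `create_vcpus` free function and the removed standalone `vcpus_exit_evt` setup earlier in this file, the new call site `let (vcpus, vcpus_exit_evt) = vm.create_vcpus(vcpu_count)` suggests both pieces now live behind a single `Vm` method. A minimal sketch of what such a method could look like, assuming the error variants and `Vcpu::new` shape noted in the comments; the real implementation sits in the new per-arch vm modules and may differ.

// Sketch only: the removed `create_vcpus` free function plus the removed
// standalone exit-EventFd creation, expressed as one Vm method that matches
// the new call site. The error variants and the Vcpu::new signature used
// here are assumptions, not the PR's literal code.
impl Vm {
    pub fn create_vcpus(&mut self, vcpu_count: u8) -> Result<(Vec<Vcpu>, EventFd), VmError> {
        // One nonblocking EventFd that every vCPU signals on exit.
        let exit_evt = EventFd::new(libc::EFD_NONBLOCK).map_err(VmError::EventFd)?;

        let mut vcpus = Vec::with_capacity(vcpu_count as usize);
        for cpu_idx in 0..vcpu_count {
            // Each vCPU gets its own clone of the shared exit event.
            let per_vcpu_evt = exit_evt.try_clone().map_err(VmError::EventFd)?;
            // Assumes Vcpu::new can now reach the KVM handle through `self`
            // instead of taking a separate `&Kvm` argument.
            let vcpu = Vcpu::new(cpu_idx, self, per_vcpu_evt).map_err(VmError::CreateVcpu)?;
            vcpus.push(vcpu);
        }

        Ok((vcpus, exit_evt))
    }
}

The arch-specific irqchip ordering (x86_64 before vCPU creation, aarch64 after, per the comment removed above) then becomes an internal concern of the vm modules rather than of the builder, whether it is handled at the call site or inside the per-arch code.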
4 changes: 2 additions & 2 deletions src/vmm/src/device_manager/legacy.rs
@@ -248,8 +248,8 @@ mod tests {

#[test]
fn test_register_legacy_devices() {
let (_, mut vm, _) = setup_vm_with_memory(0x1000);
crate::builder::setup_interrupt_controller(&mut vm).unwrap();
let (_, vm, _) = setup_vm_with_memory(0x1000);
vm.setup_irqchip().unwrap();
let mut ldm = PortIODeviceManager::new(
Arc::new(Mutex::new(BusDevice::Serial(SerialDevice {
serial: Serial::with_events(
17 changes: 10 additions & 7 deletions src/vmm/src/device_manager/mmio.rs
@@ -549,7 +549,7 @@ mod tests {
use crate::test_utils::multi_region_mem;
use crate::vstate::kvm::Kvm;
use crate::vstate::memory::{GuestAddress, GuestMemoryMmap};
use crate::{builder, Vm};
use crate::Vm;

const QUEUE_SIZES: &[u16] = &[64];

@@ -660,6 +660,7 @@
}

#[test]
#[cfg_attr(target_arch = "x86_64", allow(unused_mut))]
fn test_register_virtio_device() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x1000);
@@ -673,9 +674,9 @@
let mut cmdline = kernel_cmdline::Cmdline::new(4096).unwrap();
let dummy = Arc::new(Mutex::new(DummyDevice::new()));
#[cfg(target_arch = "x86_64")]
builder::setup_interrupt_controller(&mut vm).unwrap();
vm.setup_irqchip().unwrap();
#[cfg(target_arch = "aarch64")]
builder::setup_interrupt_controller(&mut vm, 1).unwrap();
vm.setup_irqchip(1).unwrap();

device_manager
.register_virtio_test_device(
@@ -690,6 +691,7 @@
}

#[test]
#[cfg_attr(target_arch = "x86_64", allow(unused_mut))]
fn test_register_too_many_devices() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x1000);
@@ -702,9 +704,9 @@

let mut cmdline = kernel_cmdline::Cmdline::new(4096).unwrap();
#[cfg(target_arch = "x86_64")]
builder::setup_interrupt_controller(&mut vm).unwrap();
vm.setup_irqchip().unwrap();
#[cfg(target_arch = "aarch64")]
builder::setup_interrupt_controller(&mut vm, 1).unwrap();
vm.setup_irqchip(1).unwrap();

for _i in crate::arch::IRQ_BASE..=crate::arch::IRQ_MAX {
device_manager
@@ -745,6 +747,7 @@
}

#[test]
#[cfg_attr(target_arch = "x86_64", allow(unused_mut))]
fn test_device_info() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x1000);
@@ -756,9 +759,9 @@
let mem_clone = guest_mem.clone();

#[cfg(target_arch = "x86_64")]
builder::setup_interrupt_controller(&mut vm).unwrap();
vm.setup_irqchip().unwrap();
#[cfg(target_arch = "aarch64")]
builder::setup_interrupt_controller(&mut vm, 1).unwrap();
vm.setup_irqchip(1).unwrap();

let mut device_manager = MMIODeviceManager::new();
let mut resource_allocator = ResourceAllocator::new().unwrap();
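These tests now call `vm.setup_irqchip()` on x86_64 and `vm.setup_irqchip(1)` on aarch64 instead of routing through the deleted `builder::setup_interrupt_controller` wrappers. Judging only from those call sites and the old wrapper signatures, the inherent methods presumably split per architecture roughly as sketched below; the error variant and the bodies are assumptions, not the PR's code.

// Hypothetical shape of the arch-gated methods, inferred from the call sites.
// Only the signatures are taken from the diff; bodies are simplified.
#[cfg(target_arch = "x86_64")]
impl Vm {
    /// No vCPU count needed: the irqchip is created before the vCPUs on x86_64.
    pub fn setup_irqchip(&self) -> Result<(), ArchVmError> {
        // KVM_CREATE_IRQCHIP; PIT setup and the error variant are assumed.
        self.fd().create_irq_chip().map_err(ArchVmError::CreateIrqChip)
    }
}

#[cfg(target_arch = "aarch64")]
impl Vm {
    /// Takes the vCPU count because the GIC is sized per vCPU, which is why
    /// aarch64 creates the vCPUs first and the irqchip second.
    pub fn setup_irqchip(&mut self, vcpu_count: u8) -> Result<(), ArchVmError> {
        // GIC device creation elided in this sketch.
        let _ = vcpu_count;
        Ok(())
    }
}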
15 changes: 1 addition & 14 deletions src/vmm/src/persist.rs
@@ -114,22 +114,14 @@ pub struct GuestRegionUffdMapping {
/// Errors related to saving and restoring Microvm state.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum MicrovmStateError {
/// Compatibility checks failed: {0}
IncompatibleState(String),
/// Provided MicroVM state is invalid.
InvalidInput,
/// Operation not allowed: {0}
NotAllowed(String),
/// Cannot restore devices: {0}
RestoreDevices(DevicePersistError),
/// Cannot restore Vcpu state: {0}
RestoreVcpuState(vstate::vcpu::VcpuError),
/// Cannot restore Vm state: {0}
RestoreVmState(vstate::vm::VmError),
/// Cannot save Vcpu state: {0}
SaveVcpuState(vstate::vcpu::VcpuError),
/// Cannot save Vm state: {0}
SaveVmState(vstate::vm::VmError),
SaveVmState(vstate::vm::ArchVmError),
/// Cannot signal Vcpu: {0}
SignalVcpu(VcpuSendEventError),
/// Vcpu is in unexpected state.
@@ -142,9 +134,6 @@ pub enum MicrovmStateError {
pub enum CreateSnapshotError {
/// Cannot get dirty bitmap: {0}
DirtyBitmap(VmmError),
#[rustfmt::skip]
/// Cannot translate microVM version to snapshot data version
UnsupportedVersion,
/// Cannot write memory file: {0}
Memory(MemoryError),
/// Cannot perform {0} on the memory backing file: {1}
@@ -155,8 +144,6 @@ pub enum CreateSnapshotError {
SerializeMicrovmState(crate::snapshot::SnapshotError),
/// Cannot perform {0} on the snapshot backing file: {1}
SnapshotBackingFile(&'static str, io::Error),
/// Size mismatch when writing diff snapshot on top of base layer: base layer size is {0} but diff layer is size {1}.
SnapshotBackingFileLengthMismatch(u64, u64),
}

/// Snapshot version
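Because these variants take their payload via `#[from]`, changing `SaveVmState(vstate::vm::VmError)` to `SaveVmState(vstate::vm::ArchVmError)` stays contained to the enum itself: `?` at the call sites keeps converting through the derived `From` impl, and the displaydoc comment remains the Display text. A small self-contained illustration of the pattern, using simplified stand-in types rather than the real ones:

use displaydoc::Display;
use thiserror::Error;

/// Stand-in for crate::vstate::vm::ArchVmError.
#[derive(Debug, Error, Display)]
pub enum ArchVmError {
    /// Arch-specific KVM ioctl failed: {0}
    Ioctl(i32),
}

/// Trimmed stand-in for MicrovmStateError.
#[derive(Debug, Error, Display)]
pub enum MicrovmStateError {
    /// Cannot save Vm state: {0}
    SaveVmState(#[from] ArchVmError),
}

fn save_vm_state() -> Result<(), MicrovmStateError> {
    let arch_result: Result<(), ArchVmError> = Err(ArchVmError::Ioctl(-22));
    arch_result?; // auto-converted to MicrovmStateError::SaveVmState via From
    Ok(())
}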
15 changes: 6 additions & 9 deletions src/vmm/src/vstate/kvm.rs
@@ -23,9 +23,6 @@ pub enum KvmError {
configured on the /dev/kvm file's ACL. */
Kvm(kvm_ioctls::Error),
#[cfg(target_arch = "x86_64")]
/// Failed to get MSR index list to save into snapshots: {0}
GetMsrsToSave(crate::arch::x86_64::msr::MsrError),
#[cfg(target_arch = "x86_64")]
/// Failed to get supported cpuid: {0}
GetSupportedCpuId(kvm_ioctls::Error),
/// The number of configured slots is bigger than the maximum reported by KVM
@@ -45,9 +42,6 @@ pub struct Kvm {
#[cfg(target_arch = "x86_64")]
/// Supported CpuIds.
pub supported_cpuid: CpuId,
#[cfg(target_arch = "x86_64")]
/// Msrs needed to be saved on snapshot creation.
pub msrs_to_save: MsrList,
}

impl Kvm {
@@ -82,19 +76,22 @@ impl Kvm {
let supported_cpuid = kvm_fd
.get_supported_cpuid(KVM_MAX_CPUID_ENTRIES)
.map_err(KvmError::GetSupportedCpuId)?;
let msrs_to_save = crate::arch::x86_64::msr::get_msrs_to_save(&kvm_fd)
.map_err(KvmError::GetMsrsToSave)?;

Ok(Kvm {
fd: kvm_fd,
max_memslots,
kvm_cap_modifiers,
supported_cpuid,
msrs_to_save,
})
}
}

/// Msrs needed to be saved on snapshot creation.
#[cfg(target_arch = "x86_64")]
pub fn msrs_to_save(&self) -> Result<MsrList, crate::arch::x86_64::msr::MsrError> {
crate::arch::x86_64::msr::get_msrs_to_save(&self.fd)
}

/// Check guest memory does not have more regions than kvm allows.
pub fn check_memory(&self, guest_mem: &GuestMemoryMmap) -> Result<(), KvmError> {
if guest_mem.num_regions() > self.max_memslots {
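With the cached `msrs_to_save` field gone, the MSR list is produced on demand by the new `msrs_to_save()` method, which also removes the only fallible MSR step from `Kvm::new` (hence the dropped `GetMsrsToSave` variant). A usage sketch; only `Kvm::msrs_to_save` comes from the diff, the helper around it is invented for illustration.

// Consumer-side sketch: fetch the MSR list at snapshot time instead of
// reading a field populated in Kvm::new.
#[cfg(target_arch = "x86_64")]
fn msrs_for_snapshot(kvm: &Kvm) -> Result<MsrList, crate::arch::x86_64::msr::MsrError> {
    // Previously: Ok(kvm.msrs_to_save.clone())
    kvm.msrs_to_save()
}

The trade-off appears to be one extra KVM query per snapshot in exchange for a simpler, less fallible `Kvm` constructor.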
4 changes: 0 additions & 4 deletions src/vmm/src/vstate/memory.rs
@@ -36,10 +36,6 @@ pub type GuestMmapRegion = vm_memory::MmapRegion<Option<AtomicBitmap>>;
/// Errors associated with dumping guest memory to file.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum MemoryError {
/// Cannot create memory: {0}
CreateMemory(VmMemoryError),
/// Cannot create memory region: {0}
CreateRegion(MmapRegionError),
/// Cannot fetch system's page size: {0}
PageSize(errno::Error),
/// Cannot dump memory: {0}