diff --git a/petri/src/vm/hyperv/hyperv.psm1 b/petri/src/vm/hyperv/hyperv.psm1
index 0cd4bb9d8d..8a9325cbd8 100644
--- a/petri/src/vm/hyperv/hyperv.psm1
+++ b/petri/src/vm/hyperv/hyperv.psm1
@@ -222,6 +222,21 @@ function New-CustomVM
     # }
     [hashtable] $ScsiControllers = $null,

+    # must be a hashtable with format:
+    # NvmeControllers => {
+    #     Vsid => {
+    #         Vtl,
+    #         Drives => @(
+    #             @{ Nsid; DiskPath },
+    #             ...
+    #         )
+    #     },
+    #     ...
+    # }
+    # Drives are pre-sorted by NSID. The emulator assigns NSIDs 1..N
+    # by argument order.
+    [hashtable] $NvmeControllers = $null,
+
     # must be a hashtable with format:
     # IdeControllers => {
     #     ControllerNumber => {
@@ -352,6 +367,27 @@ function New-CustomVM
         }
     }

+    if ($NvmeControllers) {
+        if (-not (Get-Module -ListAvailable HvlDeviceHost)) {
+            throw ("NVMe emulator support requires the HvlDeviceHost " +
+                "PowerShell module. Ensure hvldevicehost.dll is installed " +
+                "and the module is available on this host.")
+        }
+        Import-Module HvlDeviceHost -ErrorAction Stop
+        foreach ($controller in $NvmeControllers.GetEnumerator()) {
+            $vsid = $controller.Name
+            $targetVtl = $controller.Value["Vtl"]
+            $drives = $controller.Value["Drives"]
+            # Drives arrive pre-sorted by NSID from the Rust layer.
+            $vhdPaths = @($drives | ForEach-Object { $_["DiskPath"] })
+            $resourceSettings += New-NvmeEmulatorRasd `
+                -VhdPaths $vhdPaths `
+                -TargetVtl $targetVtl `
+                -Vsid ([Guid]$vsid) `
+                | ConvertTo-CimEmbeddedString
+        }
+    }
+
     $vm = ($vmms | Invoke-CimMethod -Name "DefineSystem" -Arguments @{
         "SystemSettings" = ($vssd | ConvertTo-CimEmbeddedString);
         "ResourceSettings" = $resourceSettings
@@ -1418,4 +1454,4 @@ function Get-CimInstancePath {
     )

     return $path
-}
\ No newline at end of file
+}
diff --git a/petri/src/vm/hyperv/mod.rs b/petri/src/vm/hyperv/mod.rs
index 67b5af1959..00ff1266cf 100644
--- a/petri/src/vm/hyperv/mod.rs
+++ b/petri/src/vm/hyperv/mod.rs
@@ -221,8 +221,8 @@ impl PetriVmmBackend for HyperVPetriBackend {
             }
         }

-        // Map SCSI
-        let mut scsi_controllers = HashMap::new();
+        // Map VMBus storage controllers (SCSI and NVMe).
+        let mut storage_controllers = HashMap::new();
         for (
             vsid,
             VmbusStorageController {
@@ -232,10 +232,6 @@ impl PetriVmmBackend for HyperVPetriBackend {
             },
         ) in config.vmbus_storage_controllers.iter()
         {
-            if !matches!(controller_type, crate::VmbusStorageType::Scsi) {
-                todo!("other storage types for hyper-v")
-            }
-
             let mut hyperv_drives = HashMap::new();
             for (lun, Drive { disk, is_dvd }) in drives {
                 hyperv_drives.insert(
@@ -246,9 +242,32 @@ impl PetriVmmBackend for HyperVPetriBackend {
                     },
                 );
             }
-            scsi_controllers.insert(
+
+            let vmbus_controller_type = match controller_type {
+                crate::VmbusStorageType::Scsi => powershell::HyperVVmbusStorageType::Scsi,
+                crate::VmbusStorageType::Nvme => {
+                    for (nsid, drive) in &hyperv_drives {
+                        if drive.is_dvd {
+                            anyhow::bail!("NVMe emulator does not support DVD drives");
+                        }
+                        if drive.disk.is_none() {
+                            anyhow::bail!("NVMe drive cannot be empty (NSID {})", nsid);
+                        }
+                    }
+                    powershell::HyperVVmbusStorageType::Nvme
+                }
+                _ => {
+                    todo!(
+                        "storage type {:?} not yet supported for hyper-v",
+                        controller_type
+                    )
+                }
+            };
+
+            storage_controllers.insert(
                 *vsid,
-                powershell::HyperVScsiController {
+                powershell::HyperVVmbusStorageController {
+                    controller_type: vmbus_controller_type,
                     target_vtl: *target_vtl,
                     drives: hyperv_drives,
                 },
@@ -338,8 +357,7 @@ impl PetriVmmBackend for HyperVPetriBackend {
                 firmware_file: igvm_file.clone(),
                 firmware_parameters: openhcl_command_line,
                 guest_state_path,
-                scsi_controllers,
-                ide_controllers,
+                storage_controllers,
                 com_3: supports_com3,
                 imc_hiv,
                 management_vtl_settings,
diff --git a/petri/src/vm/hyperv/powershell.rs b/petri/src/vm/hyperv/powershell.rs
index e666f21828..c9275566ad 100644
--- a/petri/src/vm/hyperv/powershell.rs
+++ b/petri/src/vm/hyperv/powershell.rs
@@ -8,7 +8,6 @@ use crate::OpenHclServicingFlags;
 use crate::PetriVmConfig;
 use crate::PetriVmProperties;
 use crate::VmScreenshotMeta;
-use crate::Vtl;
 use crate::run_host_cmd;
 use crate::vm::append_cmdline;
 use anyhow::Context;
@@ -290,8 +289,8 @@ pub struct HyperVNewCustomVMArgs {
     pub hw_threads_per_core: Option<u32>,
     /// Processors per socket
     pub max_processors_per_numa_node: Option<u32>,
-    /// SCSI controllers and associated drives/disks
-    pub scsi_controllers: HashMap<Guid, HyperVScsiController>,
+    /// VMBus storage controllers (SCSI and NVMe), keyed by VSID
+    pub storage_controllers: HashMap<Guid, HyperVVmbusStorageController>,
     /// IDE controllers and associated drives/disks
     pub ide_controllers: HashMap<u8, HashMap<u8, HyperVDrive>>,
     /// Temporary file containing initial machine configuration data
@@ -306,11 +305,21 @@ pub struct HyperVNewCustomVMArgs {
     pub management_vtl_settings: Option<String>,
 }

-/// Hyper-V SCSI controller
-pub struct HyperVScsiController {
-    /// The VTL to assign the storage controller to
-    pub target_vtl: Vtl,
-    /// Drives (with any inserted disks) attached to this storage controller
+/// VMBus storage controller type
+pub enum HyperVVmbusStorageType {
+    /// SCSI controller (Msvm_ResourceAllocationSettingData)
+    Scsi,
+    /// NVMe emulator controller (created via closed-source HvlDeviceHost module)
+    Nvme,
+}
+
+/// VMBus storage controller configuration (SCSI or NVMe), keyed by VSID.
+pub struct HyperVVmbusStorageController {
+    /// Controller type
+    pub controller_type: HyperVVmbusStorageType,
+    /// Target VTL
+    pub target_vtl: crate::Vtl,
+    /// Drives attached to this controller, keyed by LUN (SCSI) or namespace ID (NVMe).
     pub drives: HashMap<u32, HyperVDrive>,
 }

@@ -565,7 +574,7 @@ impl HyperVNewCustomVMArgs {
             firmware_file: None,
             firmware_parameters: None,
             guest_state_path: None,
-            scsi_controllers: HashMap::new(),
+            storage_controllers: HashMap::new(),
             ide_controllers: HashMap::new(),
             com_3: false,
             imc_hiv: None,
@@ -596,9 +605,28 @@ pub async fn run_new_customvm(ps_mod: &Path, args: HyperVNewCustomVMArgs) -> any
         }
     });

-    let scsi_controllers = (!args.scsi_controllers.is_empty()).then(|| {
-        ps::HashTable::new(args.scsi_controllers.into_iter().map(
-            |(vsid, HyperVScsiController { target_vtl, drives })| {
+    // Partition storage controllers into SCSI and NVMe.
+    let mut scsi_map: HashMap<Guid, HyperVVmbusStorageController> = HashMap::new();
+    let mut nvme_map: HashMap<Guid, HyperVVmbusStorageController> = HashMap::new();
+    for (vsid, controller) in args.storage_controllers {
+        match controller.controller_type {
+            HyperVVmbusStorageType::Scsi => {
+                scsi_map.insert(vsid, controller);
+            }
+            HyperVVmbusStorageType::Nvme => {
+                nvme_map.insert(vsid, controller);
+            }
+        }
+    }
+
+    let scsi_controllers = (!scsi_map.is_empty()).then(|| {
+        ps::HashTable::new(scsi_map.into_iter().map(
+            |(
+                vsid,
+                HyperVVmbusStorageController {
+                    target_vtl, drives, ..
+                },
+            )| {
                 (
                     format!("\"{vsid}\""),
                     ps::Value::new(ps::HashTable::new([
@@ -645,11 +673,66 @@ pub async fn run_new_customvm(ps_mod: &Path, args: HyperVNewCustomVMArgs) -> any
         ))
     });

+    // Serialize NVMe controllers as a hashtable keyed by VSID.
+    // Each value: @{ Vtl = N; Drives = @(@{Nsid = 1; DiskPath = "..."}, ...) }
+    // New-CustomVM imports HvlDeviceHost internally and calls New-NvmeEmulatorRasd.
+    let nvme_controllers = if nvme_map.is_empty() {
+        None
+    } else {
+        let mut nvme_entries = Vec::new();
+        for (
+            vsid,
+            HyperVVmbusStorageController {
+                target_vtl, drives, ..
+            },
+        ) in nvme_map
+        {
+            // Sort drives by namespace ID and validate they are exactly
+            // 1..N — the emulator assigns NSIDs sequentially by VHD
+            // argument order.
+            let mut sorted_drives: Vec<_> = drives.into_iter().collect();
+            sorted_drives.sort_by_key(|(nsid, _)| *nsid);
+            let expected: Vec<u32> = (1..=sorted_drives.len() as u32).collect();
+            let actual: Vec<u32> = sorted_drives.iter().map(|(nsid, _)| *nsid).collect();
+            anyhow::ensure!(
+                actual == expected,
+                "NVMe namespace IDs must be 1..{}, got {:?}",
+                expected.len(),
+                actual
+            );
+            nvme_entries.push((
+                format!("\"{vsid}\""),
+                ps::Value::new(ps::HashTable::new([
+                    ("Vtl", ps::Value::new(target_vtl as u32)),
+                    (
+                        "Drives",
+                        ps::Value::new(ps::Array::new(sorted_drives.into_iter().map(
+                            |(nsid, HyperVDrive { disk, .. })| {
+                                ps::HashTable::new([
+                                    ("Nsid", ps::Value::new(nsid)),
+                                    (
+                                        "DiskPath",
+                                        ps::Value::new(
+                                            disk.expect("NVMe drives must have disk paths"),
+                                        ),
+                                    ),
+                                ])
+                            },
+                        ))),
+                    ),
+                ])),
+            ));
+        }
+        Some(ps::HashTable::new(nvme_entries))
+    };
+
+    let builder = PowerShellBuilder::new()
+        .cmdlet("Import-Module")
+        .positional(ps_mod)
+        .next();
+
     let vmid = run_host_cmd(
-        PowerShellBuilder::new()
-            .cmdlet("Import-Module")
-            .positional(ps_mod)
-            .next()
+        builder
             .cmdlet("New-CustomVM")
             .arg("VMName", args.name)
             .arg_opt("Generation", args.generation)
@@ -686,6 +769,7 @@ pub async fn run_new_customvm(ps_mod: &Path, args: HyperVNewCustomVMArgs) -> any
             )
             .arg_opt("ScsiControllers", scsi_controllers)
             .arg_opt("IdeControllers", ide_controllers)
+            .arg_opt("NvmeControllers", nvme_controllers)
             .arg_opt("ImcHive", args.imc_hiv.as_ref().map(|f| f.path()))
             .arg("Com1", args.com_1)
             .arg("Com3", args.com_3)
diff --git a/vmm_tests/vmm_tests/tests/tests/x86_64/storage.rs b/vmm_tests/vmm_tests/tests/tests/x86_64/storage.rs
index 227ebab123..b142d0cefa 100644
--- a/vmm_tests/vmm_tests/tests/tests/x86_64/storage.rs
+++ b/vmm_tests/vmm_tests/tests/tests/x86_64/storage.rs
@@ -381,7 +381,83 @@ async fn storvsp_hyperv(
     Ok(())
 }

-/// Test an OpenHCL Linux Stripe VM with two SCSI disk assigned to VTL2 via NVMe Emulator
+/// Test a Hyper-V OpenHCL Linux VM with an NVMe emulator device assigned to
+/// VTL2, relayed to VTL0 via SCSI. Validates that the guest can discover and
+/// perform IO on the disk.
+#[cfg(windows)]
+#[vmm_test(unstable_hyperv_openhcl_uefi_x64(vhd(ubuntu_2504_server_x64)))]
+async fn storvsp_nvme_hyperv(
+    config: PetriVmBuilder<HyperVPetriBackend>,
+) -> Result<(), anyhow::Error> {
+    let vtl0_nvme_lun = 0;
+    let nvme_nsid = 1;
+    let nvme_vsid = Guid::new_random();
+    let scsi_instance = Guid::new_random();
+    const NVME_DISK_SECTORS: u64 = 0x5_0000;
+    const SECTOR_SIZE: u64 = 512;
+    const EXPECTED_NVME_DISK_SIZE_BYTES: u64 = NVME_DISK_SECTORS * SECTOR_SIZE;
+
+    // Assumptions made by test infra & routines:
+    //
+    // 1. Some test-infra added disks are 64MiB in size. Since we find disks by size,
+    //    ensure that our test disks are a different size.
+    // 2. Disks under test need to be at least 100MiB for the IO tests (see [`test_storage_linux`]),
+    //    with some arbitrary buffer (5MiB in this case).
+    static_assertions::const_assert_ne!(EXPECTED_NVME_DISK_SIZE_BYTES, 64 * 1024 * 1024);
+    static_assertions::const_assert!(EXPECTED_NVME_DISK_SIZE_BYTES > 105 * 1024 * 1024);
+
+    let mut vhd =
+        tempfile::NamedTempFile::with_suffix("nvme.vhd").context("create temp nvme vhd")?;
+    vhd.as_file()
+        .set_len(EXPECTED_NVME_DISK_SIZE_BYTES)
+        .context("set file length")?;
+
+    disk_vhd1::Vhd1Disk::make_fixed(vhd.as_file_mut()).context("make fixed")?;
+
+    // Close the handle without deleting the file, so Hyper-V can open it.
+    let vhd_path = vhd.into_temp_path();
+
+    let (vm, agent) = config
+        .with_vmbus_redirect(true)
+        .add_vmbus_storage_controller(&nvme_vsid, petri::Vtl::Vtl2, petri::VmbusStorageType::Nvme)
+        .add_vmbus_drive(
+            petri::Drive::new(Some(petri::Disk::Persistent(vhd_path.to_path_buf())), false),
+            &nvme_vsid,
+            Some(nvme_nsid),
+        )
+        .add_vtl2_storage_controller(
+            Vtl2StorageControllerBuilder::new(ControllerType::Scsi)
+                .with_instance_id(scsi_instance)
+                .add_lun(
+                    Vtl2LunBuilder::disk()
+                        .with_location(vtl0_nvme_lun)
+                        .with_physical_device(Vtl2StorageBackingDeviceBuilder::new(
+                            ControllerType::Nvme,
+                            nvme_vsid,
+                            nvme_nsid,
+                        )),
+                )
+                .build(),
+        )
+        .run()
+        .await?;
+
+    test_storage_linux(
+        &agent,
+        scsi_instance,
+        vec![ExpectedGuestDevice {
+            lun: vtl0_nvme_lun,
+            disk_size_sectors: NVME_DISK_SECTORS as usize,
+            friendly_name: "nvme".to_string(),
+        }],
+    )
+    .await?;
+
+    agent.power_off().await?;
+    vm.wait_for_clean_teardown().await?;
+
+    Ok(())
+}
 #[openvmm_test(
     openhcl_linux_direct_x64,
     openhcl_uefi_x64(vhd(ubuntu_2504_server_x64))
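
For reference, the NvmeControllers argument that run_new_customvm builds for New-CustomVM follows the hashtable format documented in hyperv.psm1 above. A hand-written equivalent for a single VTL2 controller with one namespace would look roughly like the sketch below; the VSID, the VHD path, and the elided required New-CustomVM parameters are placeholders, not values taken from this change:

    # One controller (keyed by VSID), assigned to VTL2, with a single namespace.
    $nvme = @{
        "11111111-2222-3333-4444-555555555555" = @{
            Vtl    = 2
            Drives = @(
                @{ Nsid = 1; DiskPath = "C:\disks\nvme.vhd" }
            )
        }
    }
    New-CustomVM -VMName "example-vm" ... -NvmeControllers $nvme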