diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.c b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.c index 8b7af26c8a..a22713dab9 100644 --- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.c +++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.c @@ -172,14 +172,20 @@ EnumerateNvmeDevNamespace ( Device->BlockIo.WriteBlocks = NvmeBlockIoWriteBlocks; Device->BlockIo.FlushBlocks = NvmeBlockIoFlushBlocks; - // - // Create BlockIo2 Protocol instance - // - Device->BlockIo2.Media = &Device->Media; - Device->BlockIo2.Reset = NvmeBlockIoResetEx; - Device->BlockIo2.ReadBlocksEx = NvmeBlockIoReadBlocksEx; - Device->BlockIo2.WriteBlocksEx = NvmeBlockIoWriteBlocksEx; - Device->BlockIo2.FlushBlocksEx = NvmeBlockIoFlushBlocksEx; + // MU_CHANGE [BEGIN] - Request Number of Queues from Controller + if (Private->NumberOfDataQueuePairs > 1) { + // We have multiple data queues, so we can support the BlockIo2 protocol + + // Create BlockIo2 Protocol instance + Device->BlockIo2.Media = &Device->Media; + Device->BlockIo2.Reset = NvmeBlockIoResetEx; + Device->BlockIo2.ReadBlocksEx = NvmeBlockIoReadBlocksEx; + Device->BlockIo2.WriteBlocksEx = NvmeBlockIoWriteBlocksEx; + Device->BlockIo2.FlushBlocksEx = NvmeBlockIoFlushBlocksEx; + } + + // MU_CHANGE [END] - Request Number of Queues from Controller + InitializeListHead (&Device->AsyncQueue); // MU_CHANGE Start - Add Media Sanitize @@ -254,14 +260,13 @@ EnumerateNvmeDevNamespace ( // Device->DeviceHandle = NULL; + // MU_CHANGE [BEGIN] - Request Number of Queues from Controller Status = gBS->InstallMultipleProtocolInterfaces ( &Device->DeviceHandle, &gEfiDevicePathProtocolGuid, Device->DevicePath, &gEfiBlockIoProtocolGuid, &Device->BlockIo, - &gEfiBlockIo2ProtocolGuid, - &Device->BlockIo2, &gEfiDiskInfoProtocolGuid, &Device->DiskInfo, NULL @@ -271,6 +276,21 @@ EnumerateNvmeDevNamespace ( goto Exit; } + if (Private->NumberOfDataQueuePairs > 1) { + // We have multiple data queues, so we can support the BlockIo2 protocol + Status = gBS->InstallMultipleProtocolInterfaces ( + &Device->DeviceHandle, + &gEfiBlockIo2ProtocolGuid, + &Device->BlockIo2 + ); + if (EFI_ERROR (Status)) { + DEBUG ((DEBUG_ERROR, "%a: Failed to install BlockIo2 protocol\n", __func__)); + goto Exit; + } + } + + // MU_CHANGE [END] - Request Number of Queues from Controller + // // Check if the NVMe controller supports the Security Send and Security Receive commands // @@ -288,12 +308,23 @@ EnumerateNvmeDevNamespace ( Device->DevicePath, &gEfiBlockIoProtocolGuid, &Device->BlockIo, - &gEfiBlockIo2ProtocolGuid, - &Device->BlockIo2, &gEfiDiskInfoProtocolGuid, &Device->DiskInfo, NULL ); + + // MU_CHANGE [BEGIN] - Request Number of Queues from Controller + if (Private->NumberOfDataQueuePairs > 1) { + // We have multiple data queues, so we need to uninstall the BlockIo2 protocol + gBS->UninstallMultipleProtocolInterfaces ( + Device->DeviceHandle, + &gEfiBlockIo2ProtocolGuid, + &Device->BlockIo2 + ); + } + + // MU_CHANGE [END] - Request Number of Queues from Controller + goto Exit; } } @@ -477,6 +508,29 @@ UnregisterNvmeNamespace ( Handle ); + // MU_CHANGE [BEGIN] - Request Number of Queues from Controller + // + // If BlockIo2 is installed, uninstall it. 
+  //
+  if (Device->Controller->NumberOfDataQueuePairs > 1) {
+    Status = gBS->UninstallProtocolInterface (
+                    Handle,
+                    &gEfiBlockIo2ProtocolGuid,
+                    &Device->BlockIo2
+                    );
+    if (EFI_ERROR (Status)) {
+      gBS->OpenProtocol (
+             Controller,
+             &gEfiNvmExpressPassThruProtocolGuid,
+             (VOID **)&DummyInterface,
+             This->DriverBindingHandle,
+             Handle,
+             EFI_OPEN_PROTOCOL_BY_CHILD_CONTROLLER
+             );
+      return Status;
+    }
+  }
+
   //
   // The Nvm Express driver installs the BlockIo and DiskInfo in the DriverBindingStart().
   // Here should uninstall both of them.
   //
@@ -487,12 +541,11 @@ UnregisterNvmeNamespace (
                   Device->DevicePath,
                   &gEfiBlockIoProtocolGuid,
                   &Device->BlockIo,
-                  &gEfiBlockIo2ProtocolGuid,
-                  &Device->BlockIo2,
                   &gEfiDiskInfoProtocolGuid,
                   &Device->DiskInfo,
                   NULL
                   );
+  // MU_CHANGE [END] - Request Number of Queues from Controller

   if (EFI_ERROR (Status)) {
     gBS->OpenProtocol (
@@ -957,9 +1010,11 @@ NvmExpressDriverBindingStart (
   EFI_PHYSICAL_ADDRESS                MappedAddr;
   UINTN                               Bytes;
   EFI_NVM_EXPRESS_PASS_THRU_PROTOCOL  *Passthru;
-  // MU_CHANGE - Support alternative hardware queue sizes in NVME driver
-  UINTN                               QueuePageCount = PcdGetBool (PcdSupportAlternativeQueueSize) ?
-                                                       NVME_ALTERNATIVE_TOTAL_QUEUE_BUFFER_IN_PAGES : 6;
+  // MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+  NVME_AQA                            *Aqa;
+  UINTN                               AdminQueuePageCount;
+
+  // MU_CHANGE [END] - Allocate IO Queue Buffer

   DEBUG ((DEBUG_INFO, "NvmExpressDriverBindingStart: start\n"));

@@ -1031,11 +1086,42 @@ NvmExpressDriverBindingStart (
     DEBUG ((DEBUG_WARN, "NvmExpressDriverBindingStart: failed to enable 64-bit DMA (%r)\n", Status));
   }

-  // MU_CHANGE - Support alternative hardware queue sizes in NVME driver
+  // MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+  //
+  // Set the Admin Queue Attributes
+  //
+  Aqa = AllocateZeroPool (sizeof (NVME_AQA));
+
+  if (Aqa == NULL) {
+    DEBUG ((DEBUG_ERROR, "NvmExpressDriverBindingStart: allocating pool for Nvme Aqa Data failed!\n"));
+    Status = EFI_OUT_OF_RESOURCES;
+    goto Exit;
+  }
+
+  // Set the sizes of the admin submission & completion queues in number of entries
+  // MU_CHANGE [BEGIN] - Support alternative hardware queue sizes in NVME driver
+  Aqa->Asqs  = PcdGetBool (PcdSupportAlternativeQueueSize) ? MIN (NVME_ALTERNATIVE_MAX_QUEUE_SIZE, Private->Cap.Mqes) : NVME_ASQ_SIZE;
+  Aqa->Rsvd1 = 0;
+  Aqa->Acqs  = PcdGetBool (PcdSupportAlternativeQueueSize) ? MIN (NVME_ALTERNATIVE_MAX_QUEUE_SIZE, Private->Cap.Mqes) : NVME_ACQ_SIZE;
+  Aqa->Rsvd2 = 0;
+  // MU_CHANGE [END] - Support alternative hardware queue sizes in NVME driver
+
+  //
+  // Save Queue Pair Data for admin queues in controller data structure
+  //
+  Private->SqData[0].NumberOfEntries = Aqa->Asqs;
+  Private->CqData[0].NumberOfEntries = Aqa->Acqs;

   //
-  // Depending on PCD disablement, either support the default or alternative
-  // queue sizes.
+  // Set admin queue entry size to default
+  //
+  Private->SqData[0].EntrySize = NVME_IOSQES_MIN;
+  Private->CqData[0].EntrySize = NVME_IOCQES_MIN;
+
+  // Calculate the number of pages required for the admin queues
+  AdminQueuePageCount = EFI_SIZE_TO_PAGES (Private->SqData[0].NumberOfEntries * LShiftU64 (2, Private->SqData[0].EntrySize)) +
+                        EFI_SIZE_TO_PAGES (Private->CqData[0].NumberOfEntries * LShiftU64 (2, Private->CqData[0].EntrySize));
+  // MU_CHANGE [END] - Allocate IO Queue Buffer
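For reference, a standalone sketch of the admin-queue sizing math above. The constants and field names come from this patch; the worked values in the comments assume the legacy defaults.

```c
#include <Uefi.h>
#include <Library/BaseLib.h>

//
// Minimal sketch mirroring the AdminQueuePageCount computation above: each
// queue gets its own page-rounded region, sized NumberOfEntries * LShiftU64 (2, EntrySize).
// With the legacy defaults (NVME_ASQ_SIZE/NVME_ACQ_SIZE == 1, entry-size
// exponents 6 and 4), both regions round up to one page each, so the admin
// area shrinks from the old fixed 6 pages to 2.
//
UINTN
ExampleAdminQueuePages (
  IN UINT32  Asqs,        // AQA.ASQS, 0-based entry count
  IN UINT32  Acqs,        // AQA.ACQS, 0-based entry count
  IN UINT8   SqEntrySize, // log2 of SQ entry size: 6 -> 64 bytes
  IN UINT8   CqEntrySize  // log2 of CQ entry size: 4 -> 16 bytes
  )
{
  return EFI_SIZE_TO_PAGES ((UINTN)(Asqs * LShiftU64 (2, SqEntrySize))) +
         EFI_SIZE_TO_PAGES ((UINTN)(Acqs * LShiftU64 (2, CqEntrySize)));
}
```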
   //
   // Default:
   // 6 x 4kB aligned buffers will be carved out of this buffer.
   // 1st 4kB boundary is the start of the admin submission queue.
   // 2nd 4kB boundary is the start of the admin completion queue.
   // 3rd 4kB boundary is the start of I/O submission queue #1.
   // 4th 4kB boundary is the start of I/O completion queue #1.
   // 5th 4kB boundary is the start of I/O submission queue #2.
   // 6th 4kB boundary is the start of I/O completion queue #2.
   //
   // Allocate 6 pages of memory, then map it for bus master read and write.
   //
+  // MU_CHANGE [BEGIN] - Support alternative hardware queue sizes in NVME driver
   // Alternative:
   // 15 x 4kB aligned buffers will be carved out of this buffer.
   // 1st 4kB boundary is the start of the admin submission queue.
   // 5th 4kB boundary is the start of the admin completion queue.
   // 6th 4kB boundary is the start of I/O submission queue #1.
   // 10th 4kB boundary is the start of I/O completion queue #1.
   // 11th 4kB boundary is the start of I/O submission queue #2.
   // 15th 4kB boundary is the start of I/O completion queue #2.
   //
   // Allocate 15 pages of memory, then map it for bus master read and write.
+  // MU_CHANGE [END] - Support alternative hardware queue sizes in NVME driver
   //
   Status = PciIo->AllocateBuffer (
                     PciIo,
                     AllocateAnyPages,
                     EfiBootServicesData,
-                    QueuePageCount,
+                    AdminQueuePageCount, // MU_CHANGE - Allocate IO Queue Buffer
                     (VOID **)&Private->Buffer,
                     0
                     );
   if (EFI_ERROR (Status)) {
     goto Exit;
   }

-  // MU_CHANGE - Support alternative hardware queue sizes in NVME driver
-  Bytes  = EFI_PAGES_TO_SIZE (QueuePageCount);
+  Bytes  = EFI_PAGES_TO_SIZE (AdminQueuePageCount); // MU_CHANGE - Allocate IO Queue Buffer
   Status = PciIo->Map (
                     PciIo,
                     EfiPciIoOperationBusMasterCommonBuffer,
                     Private->Buffer,
                     &Bytes,
                     &MappedAddr,
                     &Private->Mapping
                     );
-
-  // MU_CHANGE - Support alternative hardware queue sizes in NVME driver
-  if (EFI_ERROR (Status) || (Bytes != EFI_PAGES_TO_SIZE (QueuePageCount))) {
+  // MU_CHANGE - Allocate IO Queue Buffer
+  if (EFI_ERROR (Status) || (Bytes != EFI_PAGES_TO_SIZE (AdminQueuePageCount))) {
     goto Exit;
   }

@@ -1104,34 +1191,42 @@ NvmExpressDriverBindingStart (
   InitializeListHead (&Private->AsyncPassThruQueue);
   InitializeListHead (&Private->UnsubmittedSubtasks);

-  Status = NvmeControllerInit (Private);
+  Status = NvmeControllerInit (Private, Aqa); // MU_CHANGE - Allocate IO Queue Buffer
   if (EFI_ERROR (Status)) {
     goto Exit;
   }

+  // MU_CHANGE [BEGIN] - Request Number of Queues from Controller
+  //
   // Start the asynchronous I/O completion monitor
+  // The ProcessAsyncTaskList event and NVME_HC_ASYNC_TIMER timer are only used for the BlockIo2 protocol,
+  // which is only installed when the number of IO queues is greater than 1
   //
-  Status = gBS->CreateEvent (
-                  EVT_TIMER | EVT_NOTIFY_SIGNAL,
-                  TPL_NOTIFY,
-                  ProcessAsyncTaskList,
-                  Private,
-                  &Private->TimerEvent
-                  );
-  if (EFI_ERROR (Status)) {
-    goto Exit;
-  }
+  if (Private->NumberOfDataQueuePairs > 1) {
+    Status = gBS->CreateEvent (
+                    EVT_TIMER | EVT_NOTIFY_SIGNAL,
+                    TPL_NOTIFY,
+                    ProcessAsyncTaskList,
+                    Private,
+                    &Private->TimerEvent
+                    );
+    if (EFI_ERROR (Status)) {
+      goto Exit;
+    }

-  Status = gBS->SetTimer (
-                  Private->TimerEvent,
-                  TimerPeriodic,
-                  NVME_HC_ASYNC_TIMER
-                  );
-  if (EFI_ERROR (Status)) {
-    goto Exit;
+    Status = gBS->SetTimer (
+                    Private->TimerEvent,
+                    TimerPeriodic,
+                    NVME_HC_ASYNC_TIMER
+                    );
+    if (EFI_ERROR (Status)) {
+      goto Exit;
+    }
   }

+  // MU_CHANGE [END] - Request Number of Queues from Controller
+
   Status = gBS->InstallMultipleProtocolInterfaces (
                   &Controller,
                   &gEfiNvmExpressPassThruProtocolGuid,
@@ -1193,8 +1288,14 @@ NvmExpressDriverBindingStart (
   }

   if ((Private != NULL) && (Private->Buffer != NULL)) {
-    // MU_CHANGE - Support alternative hardware queue sizes in NVME driver
-    PciIo->FreeBuffer (PciIo, QueuePageCount, Private->Buffer);
+    // MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+    Status = PciIo->FreeBuffer (PciIo, AdminQueuePageCount, Private->Buffer);
+
+    if (EFI_ERROR (Status)) {
+      DEBUG ((DEBUG_ERROR, "%a: FreeBuffer failed with %r\n", __func__, Status));
+    }
+
+    // MU_CHANGE [END] - Allocate IO Queue Buffer
   }

   if ((Private != NULL) && (Private->ControllerData != NULL)) {
@@ -1270,9 +1371,7 @@ NvmExpressDriverBindingStop (
   EFI_NVM_EXPRESS_PASS_THRU_PROTOCOL  *PassThru;
   BOOLEAN                             IsEmpty;
   EFI_TPL                             OldTpl;
-  // MU_CHANGE - Support alternative hardware queue sizes in NVME driver
-  UINT16                              QueuePageCount = PcdGetBool (PcdSupportAlternativeQueueSize) ?
-                                                       NVME_ALTERNATIVE_TOTAL_QUEUE_BUFFER_IN_PAGES : 6;
+  UINTN                               QueuePageCount; // MU_CHANGE - Allocate IO Queue Buffer

   if (NumberOfChildren == 0) {
     Status = gBS->OpenProtocol (
@@ -1318,11 +1417,36 @@ NvmExpressDriverBindingStop (
       Private->PciIo->Unmap (Private->PciIo, Private->Mapping);
     }

+    // MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+    QueuePageCount = EFI_SIZE_TO_PAGES (Private->SqData[0].NumberOfEntries * LShiftU64 (2, Private->SqData[0].EntrySize)) +
+                     EFI_SIZE_TO_PAGES (Private->CqData[0].NumberOfEntries * LShiftU64 (2, Private->CqData[0].EntrySize));
+
     if (Private->Buffer != NULL) {
-      // MU_CHANGE - Support alternative hardware queue sizes in NVME driver
-      Private->PciIo->FreeBuffer (Private->PciIo, QueuePageCount, Private->Buffer);
+      Status = Private->PciIo->FreeBuffer (Private->PciIo, QueuePageCount, Private->Buffer);
+
+      if (EFI_ERROR (Status)) {
+        DEBUG ((DEBUG_ERROR, "%a: FreeBuffer Buffer failed %r\n", __func__, Status));
+      }
+    }
+
+    if (Private->DataQueueMapping != NULL) {
+      Status = Private->PciIo->Unmap (Private->PciIo, Private->DataQueueMapping);
+
+      if (EFI_ERROR (Status)) {
+        DEBUG ((DEBUG_ERROR, "%a: Unmap DataQueueMapping failed %r\n", __func__, Status));
+      }
     }

+    if (Private->DataQueueBuffer != NULL) {
+      Status = Private->PciIo->FreeBuffer (Private->PciIo, QueuePageCount * Private->NumberOfDataQueuePairs, Private->DataQueueBuffer);
+
+      if (EFI_ERROR (Status)) {
+        DEBUG ((DEBUG_ERROR, "%a: FreeBuffer DataQueueBuffer failed %r\n", __func__, Status));
+      }
+    }
+
+    // MU_CHANGE [END] - Allocate IO Queue Buffer
+
     FreePool (Private->ControllerData);
     FreePool (Private);
   }
diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h
index 02cf123052..2e343c0c81 100644
--- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h
+++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpress.h
@@ -47,6 +47,7 @@

 typedef struct _NVME_CONTROLLER_PRIVATE_DATA NVME_CONTROLLER_PRIVATE_DATA;
 typedef struct _NVME_DEVICE_PRIVATE_DATA     NVME_DEVICE_PRIVATE_DATA;
+typedef struct _NVME_QUEUE_SIZE_DATA         NVME_QUEUE_SIZE_DATA; // MU_CHANGE - Allocate IO Queue Buffer

 #include "NvmExpressBlockIo.h"
 #include "NvmExpressDiskInfo.h"
@@ -67,6 +68,11 @@ extern EFI_DRIVER_SUPPORTED_EFI_VERSION_PROTOCOL  gNvmExpressDriverSupportedEfiVersion;
 #define NVME_CSQ_SIZE  1 // Number of I/O submission queue entries, which is 0-based
 #define NVME_CCQ_SIZE  1 // Number of I/O completion queue entries, which is 0-based

+// MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+#define NVME_IOSQES_MIN  6 // Minimum I/O submission queue entry size (log2: 2^6 = 64 bytes)
+#define NVME_IOCQES_MIN  4 // Minimum I/O completion queue entry size (log2: 2^4 = 16 bytes)
+// MU_CHANGE [END] - Allocate IO Queue Buffer
+
 //
 // Number of asynchronous I/O submission queue entries, which is 0-based.
 // The asynchronous I/O submission queue size is 4kB in total.
@@ -78,7 +84,13 @@ extern EFI_DRIVER_SUPPORTED_EFI_VERSION_PROTOCOL  gNvmExpressDriverSupportedEfiVersion;
 //
 #define NVME_ASYNC_CCQ_SIZE  255

-#define NVME_MAX_QUEUES  3 // Number of queues supported by the driver
+// MU_CHANGE [BEGIN] - Request Number of Queues from Controller
+// Maximum number of queue pairs supported by the driver, including the admin queue pair.
+// Queue 0 - Admin
+// Queue 1 - Blocking I/O (BlockIo Protocol)
+// Queue 2 - Asynchronous I/O (BlockIo2 Protocol)
+// MU_CHANGE [END] - Request Number of Queues from Controller
+#define NVME_MAX_QUEUES  3
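Because BlockIo2 is now conditional on the queue allocation, consumers of NVMe child handles can no longer assume it is present. A hypothetical consumer-side sketch (ExampleProbeBlockIo is not part of this patch; the protocols and GUIDs are standard UEFI):

```c
#include <Uefi.h>
#include <Library/UefiBootServicesTableLib.h>
#include <Protocol/BlockIo.h>
#include <Protocol/BlockIo2.h>

EFI_STATUS
ExampleProbeBlockIo (
  IN EFI_HANDLE  Handle
  )
{
  EFI_BLOCK_IO2_PROTOCOL  *BlockIo2;
  EFI_BLOCK_IO_PROTOCOL   *BlockIo;
  EFI_STATUS              Status;

  Status = gBS->HandleProtocol (Handle, &gEfiBlockIo2ProtocolGuid, (VOID **)&BlockIo2);
  if (!EFI_ERROR (Status)) {
    return EFI_SUCCESS;  // Asynchronous I/O path is available.
  }

  // Only one data queue pair was allocated; synchronous BlockIo is still installed.
  return gBS->HandleProtocol (Handle, &gEfiBlockIoProtocolGuid, (VOID **)&BlockIo);
}
```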
 // MU_CHANGE Start - Add Media Sanitize

@@ -137,6 +149,16 @@ extern EFI_DRIVER_SUPPORTED_EFI_VERSION_PROTOCOL  gNvmExpressDriverSupportedEfiVersion;
 // Unique signature for private data structure.
 //
 #define NVME_CONTROLLER_PRIVATE_DATA_SIGNATURE  SIGNATURE_32 ('N','V','M','E')
+// MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+//
+// Nvme queue data
+//
+struct _NVME_QUEUE_SIZE_DATA {
+  UINT32    NumberOfEntries; // in number of entries
+  UINT8     EntrySize;       // log2 of the entry size in bytes
+};
+
+// MU_CHANGE [END] - Allocate IO Queue Buffer

 //
 // Nvme private data structure.
@@ -161,6 +183,24 @@ struct _NVME_CONTROLLER_PRIVATE_DATA {
   //
   NVME_ADMIN_CONTROLLER_DATA          *ControllerData;

+  // MU_CHANGE [BEGIN] - Request Number of Queues from Controller
+  //
+  // Number of Queues Allocated by the controller
+  // UEFI always uses a 1:1 submission:completion queue allocation, so we
+  // use NumberOfDataQueuePairs to represent the number of data queue pairs allocated.
+  // NumberOfDataQueuePairs = Nsqa = Ncqa
+  //
+  UINT32                              NumberOfDataQueuePairs;
+  // MU_CHANGE [END] - Request Number of Queues from Controller
+
+  // MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+  //
+  // Queue Size Data
+  //
+  NVME_QUEUE_SIZE_DATA                SqData[NVME_MAX_QUEUES];
+  NVME_QUEUE_SIZE_DATA                CqData[NVME_MAX_QUEUES];
+  // MU_CHANGE [END] - Allocate IO Queue Buffer
+
   //
   // 6 x 4kB aligned buffers will be carved out of this buffer.
   // 1st 4kB boundary is the start of the admin submission queue.
@@ -173,6 +213,11 @@ struct _NVME_CONTROLLER_PRIVATE_DATA {
   UINT8                               *Buffer;
   UINT8                               *BufferPciAddr;

+  // MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+  UINT8                               *DataQueueBuffer;
+  UINT8                               *DataQueueBufferPciAddr;
+  // MU_CHANGE [END] - Allocate IO Queue Buffer
+
   //
   // Pointers to 4kB aligned submission & completion queues.
   //
@@ -202,6 +247,7 @@ struct _NVME_CONTROLLER_PRIVATE_DATA {
   NVME_CAP                            Cap;
   VOID                                *Mapping;
+  VOID                                *DataQueueMapping; // MU_CHANGE - Allocate IO Queue Buffer

   //
   // For Non-blocking operations.
diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressBlockIo.c b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressBlockIo.c
index e0a85c1cb3..3f1b9a12ca 100644
--- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressBlockIo.c
+++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressBlockIo.c
@@ -169,6 +169,13 @@ NvmeRead (
   BOOLEAN                  IsEmpty;
   EFI_TPL                  OldTpl;

+  // MU_CHANGE [BEGIN] - Request Number of Queues from Controller
+  // Device->AsyncQueue is initialized unconditionally, so when BlockIo2 is
+  // not installed the list below is always empty and the wait falls through
+  // immediately.
+  // MU_CHANGE [END] - Request Number of Queues from Controller
+
   //
   // Wait for the device's asynchronous I/O queue to become empty.
   //
@@ -255,6 +262,13 @@ NvmeWrite (
   BOOLEAN                  IsEmpty;
   EFI_TPL                  OldTpl;

+  // MU_CHANGE [BEGIN] - Request Number of Queues from Controller
+  // Device->AsyncQueue is initialized unconditionally, so when BlockIo2 is
+  // not installed the list below is always empty and the wait falls through
+  // immediately.
+  // MU_CHANGE [END] - Request Number of Queues from Controller
+
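The "wait for the asynchronous I/O queue" step referenced above polls the device's LIST_ENTRY at raised TPL. A minimal sketch of that pattern, assuming a Device->AsyncQueue list as in this driver; the TPL level and stall interval are illustrative:

```c
#include <Uefi.h>
#include <Library/BaseLib.h>
#include <Library/UefiBootServicesTableLib.h>

VOID
ExampleDrainAsyncQueue (
  IN LIST_ENTRY  *AsyncQueue
  )
{
  EFI_TPL  OldTpl;
  BOOLEAN  IsEmpty;

  do {
    // Raise TPL so the async completion timer cannot modify the list mid-check.
    OldTpl  = gBS->RaiseTPL (TPL_CALLBACK);
    IsEmpty = IsListEmpty (AsyncQueue);
    gBS->RestoreTPL (OldTpl);

    if (!IsEmpty) {
      gBS->Stall (100);  // Give queued subtasks time to complete.
    }
  } while (!IsEmpty);
}
```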
   //
   // Wait for the device's asynchronous I/O queue to become empty.
   //
@@ -992,7 +1006,7 @@ NvmeBlockIoReset (

   Private = Device->Controller;

-  Status = NvmeControllerInit (Private);
+  Status = NvmeControllerReset (Private); // MU_CHANGE - Allocate IO Queue Buffer

   if (EFI_ERROR (Status)) {
     Status = EFI_DEVICE_ERROR;
@@ -1260,7 +1274,7 @@ NvmeBlockIoResetEx (

   OldTpl = gBS->RaiseTPL (TPL_CALLBACK);

-  Status = NvmeControllerInit (Private);
+  Status = NvmeControllerReset (Private); // MU_CHANGE - Allocate IO Queue Buffer

   if (EFI_ERROR (Status)) {
     Status = EFI_DEVICE_ERROR;
@@ -1540,6 +1554,13 @@ NvmeBlockIoFlushBlocksEx (

   Device = NVME_DEVICE_PRIVATE_DATA_FROM_BLOCK_IO2 (This);

+  // MU_CHANGE [BEGIN] - Request Number of Queues from Controller
+  // BlockIo2 is only installed when more than one data queue pair exists.
+  if (Device->Controller->NumberOfDataQueuePairs <= 1) {
+    return EFI_UNSUPPORTED;
+  }
+
+  // MU_CHANGE [END] - Request Number of Queues from Controller
+
   //
   // Wait for the asynchronous I/O queue to become empty.
   //
diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c
index 62d0dff491..afba008e6d 100644
--- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c
+++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.c
@@ -180,6 +180,52 @@ ReadNvmeControllerStatus (
   return EFI_SUCCESS;
 }

+// MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+
+/**
+  Read Nvm Express admin queue attributes register.
+
+  @param  Private          The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure.
+  @param  Aqa              The buffer used to store the content read from the admin queue attributes register.
+
+  @return EFI_SUCCESS      Successfully read data from the admin queue attributes register.
+  @return EFI_DEVICE_ERROR Fail to read data from the admin queue attributes register.
+
+**/
+EFI_STATUS
+ReadNvmeAdminQueueAttributes (
+  IN  NVME_CONTROLLER_PRIVATE_DATA  *Private,
+  OUT NVME_AQA                      *Aqa
+  )
+{
+  EFI_PCI_IO_PROTOCOL  *PciIo;
+  EFI_STATUS           Status;
+  UINT32               Data;
+
+  PciIo  = Private->PciIo;
+  Status = PciIo->Mem.Read (
+                        PciIo,
+                        EfiPciIoWidthUint32,
+                        NVME_BAR,
+                        NVME_AQA_OFFSET,
+                        1,
+                        &Data
+                        );
+
+  if (EFI_ERROR (Status)) {
+    return Status;
+  }
+
+  WriteUnaligned32 ((UINT32 *)Aqa, Data);
+
+  DEBUG ((DEBUG_INFO, "%a: Admin Submission Queue Size (Number of Entries): %d\n", __func__, Aqa->Asqs));
+  DEBUG ((DEBUG_INFO, "%a: Admin Completion Queue Size (Number of Entries): %d\n", __func__, Aqa->Acqs));
+
+  return EFI_SUCCESS;
+}
+
+// MU_CHANGE [END] - Allocate IO Queue Buffer
+
 /**
   Write Nvm Express admin queue attributes register.

@@ -200,6 +246,15 @@ WriteNvmeAdminQueueAttributes (
   EFI_STATUS           Status;
   UINT32               Data;

+  // MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+  //
+  // Save Aqa to Private data for later use.
+  // Note: these entry counts pair with the spec-defined minimum admin queue
+  // entry sizes used elsewhere in this driver.
+  //
+  Private->SqData[0].NumberOfEntries = Aqa->Asqs;
+  Private->CqData[0].NumberOfEntries = Aqa->Acqs;
+  // MU_CHANGE [END] - Allocate IO Queue Buffer
+
   PciIo  = Private->PciIo;
   Data   = ReadUnaligned32 ((UINT32 *)Aqa);
   Status = PciIo->Mem.Write (
@@ -381,10 +436,14 @@ NvmeDisableController (
   return Status;
 }
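A note on the new IoSqEs/IoCqEs parameters introduced below: the CC.IOSQES and CC.IOCQES fields are log2 exponents, not byte counts. A worked example with the spec-minimum values this patch uses:

```c
#include <Uefi.h>
#include <Library/DebugLib.h>

VOID
ExampleEntrySizeExponents (
  VOID
  )
{
  // NVME_IOSQES_MIN == 6 and NVME_IOCQES_MIN == 4 are exponents:
  //   SQ entry = 2^6 = 64 bytes, CQ entry = 2^4 = 16 bytes,
  // which are the NVMe-defined minimum (and standard) entry sizes.
  DEBUG ((DEBUG_INFO, "SQ entry = %d bytes\n", 1 << 6));
  DEBUG ((DEBUG_INFO, "CQ entry = %d bytes\n", 1 << 4));
}
```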
+// MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+
 /**
-  Enable the Nvm Express controller.
+  Enable the Nvm Express controller and write the Controller Configuration register.

   @param  Private          The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure.
+  @param  IoSqEs           The I/O Submission Queue Entry Size.
+  @param  IoCqEs           The I/O Completion Queue Entry Size.

   @return EFI_SUCCESS      Successfully enable the controller.
   @return EFI_DEVICE_ERROR Fail to enable the controller.

@@ -393,9 +452,12 @@ NvmeDisableController (
 **/
 EFI_STATUS
 NvmeEnableController (
-  IN NVME_CONTROLLER_PRIVATE_DATA  *Private
+  IN NVME_CONTROLLER_PRIVATE_DATA  *Private,
+  IN UINT8                         IoSqEs,
+  IN UINT8                         IoCqEs
   )
 {
+  // MU_CHANGE [END] - Allocate IO Queue Buffer
   NVME_CC     Cc;
   NVME_CSTS   Csts;
   EFI_STATUS  Status;
@@ -409,9 +471,11 @@ NvmeEnableController (
   // CC.AMS, CC.MPS and CC.CSS are all set to 0.
   //
   ZeroMem (&Cc, sizeof (NVME_CC));
+  // MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
   Cc.En     = 1;
-  Cc.Iosqes = 6;
-  Cc.Iocqes = 4;
+  Cc.Iosqes = IoSqEs;
+  Cc.Iocqes = IoCqEs;
+  // MU_CHANGE [END] - Allocate IO Queue Buffer

   Status = WriteNvmeControllerConfiguration (Private, &Cc);
   if (EFI_ERROR (Status)) {
@@ -566,8 +630,96 @@ NvmeIdentifyNamespace (
   return Status;
 }

+// MU_CHANGE [BEGIN] - Request Number of Queues from Controller
+
+/**
+  Send the Set Features Command to the controller for the number of queues requested.
+  Note that the number of queues allocated may differ from the number of queues requested.
+  The number of data queue pairs allocated is stored in the NumberOfDataQueuePairs field
+  of the controller private data structure.
+
+  @param  Private          The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure.
+  @param  Ndqpr            The number of data queue pairs requested.
+
+  @return EFI_SUCCESS      Successfully set the number of queues.
+  @return EFI_DEVICE_ERROR Fail to set the number of queues.
+
+**/
+EFI_STATUS
+NvmeSetFeaturesNumberOfQueues (
+  IN OUT NVME_CONTROLLER_PRIVATE_DATA  *Private,
+  IN     UINT16                        Ndqpr
+  )
+{
+  EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET  CommandPacket;
+  EFI_NVM_EXPRESS_COMMAND                   Command;
+  EFI_NVM_EXPRESS_COMPLETION                Completion;
+  EFI_STATUS                                Status;
+  NVME_ADMIN_SET_FEATURES_CDW10             SetFeatures;
+  NVME_ADMIN_SET_FEATURES_NUM_QUEUES        NumberOfQueuesRequested;
+  NVME_ADMIN_SET_FEATURES_NUM_QUEUES        NumberOfQueuesAllocated;
+
+  Status = EFI_SUCCESS;
+
+  ZeroMem (&CommandPacket, sizeof (EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET));
+  ZeroMem (&Command, sizeof (EFI_NVM_EXPRESS_COMMAND));
+  ZeroMem (&Completion, sizeof (EFI_NVM_EXPRESS_COMPLETION));
+  ZeroMem (&SetFeatures, sizeof (NVME_ADMIN_SET_FEATURES_CDW10));
+  ZeroMem (&NumberOfQueuesRequested, sizeof (NVME_ADMIN_SET_FEATURES_NUM_QUEUES));
+  ZeroMem (&NumberOfQueuesAllocated, sizeof (NVME_ADMIN_SET_FEATURES_NUM_QUEUES));
+
+  CommandPacket.NvmeCmd        = &Command;
+  CommandPacket.NvmeCompletion = &Completion;
+  CommandPacket.CommandTimeout = NVME_GENERIC_TIMEOUT;
+  CommandPacket.QueueType      = NVME_ADMIN_QUEUE;
+  Command.Nsid                 = 0; // NSID must be set to 0h or FFFFFFFFh for an admin command
+  Command.Cdw0.Opcode          = NVME_ADMIN_SET_FEATURES_CMD;
+
+  // Populate the Set Features Cdw10 and Cdw11 according to the Nvm Express 1.3d Spec
+  // Note we subtract 1 from the requested number of queues to get the 0-based value
+  SetFeatures.Bits.Fid             = NVME_FEATURE_NUMBER_OF_QUEUES;
+  NumberOfQueuesRequested.Bits.Ncq = Ndqpr - 1;
+  NumberOfQueuesRequested.Bits.Nsq = Ndqpr - 1;
+  CommandPacket.NvmeCmd->Cdw10     = SetFeatures.Uint32;
+  CommandPacket.NvmeCmd->Cdw11     = NumberOfQueuesRequested.Uint32;
+
+  CommandPacket.NvmeCmd->Flags = CDW10_VALID | CDW11_VALID;
+
+  DEBUG ((DEBUG_INFO, "Number of Data Queue Pairs Requested: %d\n", Ndqpr));
+
+  // Send the Set Features Command for Number of Queues
+  Status = Private->Passthru.PassThru (
+                               &Private->Passthru,
+                               0,
+                               &CommandPacket,
+                               NULL
+                               );
+
+  if (EFI_ERROR (Status)) {
+    DEBUG ((DEBUG_ERROR, "Set Features Command for Number of Queues failed with Status %r\n", Status));
+    return Status;
+  }
+
+  //
+  // Save the number of queues allocated, adding 1 to account for it being a 0-based value.
+  // E.g. if 1 pair of data queues is allocated, Nsq=0, Ncq=0 and NumberOfDataQueuePairs=1.
+  // These numbers do not include the admin queues.
+  // This driver supports at most 2 pairs of data queues, so take the minimum of the
+  // requested and allocated values in case the controller allocates more than requested.
+  //
+  NumberOfQueuesAllocated.Uint32  = CommandPacket.NvmeCompletion->DW0;
+  Private->NumberOfDataQueuePairs = MIN ((UINT32)Ndqpr, NumberOfQueuesAllocated.Bits.Nsq + 1);
+
+  DEBUG ((DEBUG_INFO, "Number of Data Queue Pairs Allocated: %d\n", Private->NumberOfDataQueuePairs));
+  return Status;
+}
+
+// MU_CHANGE [END] - Request Number of Queues from Controller
+
 /**
-  Create io completion queue.
+  Create io completion queue(s).

   @param  Private          The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure.

@@ -591,7 +743,11 @@ NvmeCreateIoCompletionQueue (
   Status                 = EFI_SUCCESS;
   Private->CreateIoQueue = TRUE;

-  for (Index = 1; Index < NVME_MAX_QUEUES; Index++) {
+  // MU_CHANGE [BEGIN] - Request Number of Queues from Controller
+  // Start from Index 1 because Index 0 is reserved for the admin queue
+  for (Index = 1; Index <= Private->NumberOfDataQueuePairs; Index++) {
+    // MU_CHANGE [END] - Request Number of Queues from Controller
+
     ZeroMem (&CommandPacket, sizeof (EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET));
     ZeroMem (&Command, sizeof (EFI_NVM_EXPRESS_COMMAND));
     ZeroMem (&Completion, sizeof (EFI_NVM_EXPRESS_COMPLETION));
@@ -606,18 +762,18 @@ NvmeCreateIoCompletionQueue (
     CommandPacket.CommandTimeout = NVME_GENERIC_TIMEOUT;
     CommandPacket.QueueType      = NVME_ADMIN_QUEUE;

+    // MU_CHANGE [BEGIN] - Use the Mqes value from the Cap register
     // MU_CHANGE [BEGIN] - Support alternative hardware queue sizes in NVME driver
     if (PcdGetBool (PcdSupportAlternativeQueueSize)) {
       QueueSize = MIN (NVME_ALTERNATIVE_MAX_QUEUE_SIZE, Private->Cap.Mqes);
     } else if (Index == 1) {
-      QueueSize = NVME_CCQ_SIZE;
-    } else if (Private->Cap.Mqes > NVME_ASYNC_CCQ_SIZE) {
-      QueueSize = NVME_ASYNC_CCQ_SIZE;
+      // MU_CHANGE [END] - Support alternative hardware queue sizes in NVME driver
+      QueueSize = MIN (NVME_CCQ_SIZE, Private->Cap.Mqes);
     } else {
-      QueueSize = Private->Cap.Mqes;
+      QueueSize = MIN (NVME_ASYNC_CCQ_SIZE, Private->Cap.Mqes);
     }

-    // MU_CHANGE [END]
+    // MU_CHANGE [END] - Use the Mqes value from the Cap register

     CrIoCq.Qid   = Index;
     CrIoCq.Qsize = QueueSize;
@@ -632,6 +788,9 @@ NvmeCreateIoCompletionQueue (
                                  NULL
                                  );
     if (EFI_ERROR (Status)) {
+      // MU_CHANGE [BEGIN] - Request Number of Queues from Controller
+      DEBUG ((DEBUG_ERROR, "%a: Create Completion Queue Command %d failed with Status %r\n", __func__, Index, Status));
+      // MU_CHANGE [END] - Request Number of Queues from Controller
       break;
     }
   }
@@ -666,7 +825,10 @@ NvmeCreateIoSubmissionQueue (
   Status                 = EFI_SUCCESS;
   Private->CreateIoQueue = TRUE;

-  for (Index = 1; Index < NVME_MAX_QUEUES; Index++) {
+  // MU_CHANGE [BEGIN] - Request Number of Queues from Controller
+  // Start from Index 1 because Index 0 is reserved for the admin queue
+  for (Index = 1; Index <= Private->NumberOfDataQueuePairs; Index++) {
+    // MU_CHANGE [END] - Request Number of Queues from Controller
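For reference, a sketch of the CDW11/DW0 encoding handled above, using the unions this patch adds to Nvme.h. Requesting N data queue pairs sends N-1 in both fields; the controller answers with 0-based allocated counts in completion Dword 0. The clamp mirrors the driver's handling, since a controller may grant more queues than were asked for:

```c
#include <IndustryStandard/Nvme.h>

UINT32
ExampleDecodeAllocatedPairs (
  IN UINT32  CompletionDw0,
  IN UINT16  PairsRequested
  )
{
  NVME_ADMIN_SET_FEATURES_NUM_QUEUES  Allocated;

  Allocated.Uint32 = CompletionDw0;

  // 0-based: Nsq == 0 means one submission queue was allocated.
  return MIN ((UINT32)PairsRequested, (UINT32)Allocated.Bits.Nsq + 1);
}
```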
     ZeroMem (&CommandPacket, sizeof (EFI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET));
     ZeroMem (&Command, sizeof (EFI_NVM_EXPRESS_COMMAND));
     ZeroMem (&Completion, sizeof (EFI_NVM_EXPRESS_COMPLETION));
@@ -681,18 +843,18 @@ NvmeCreateIoSubmissionQueue (
     CommandPacket.CommandTimeout = NVME_GENERIC_TIMEOUT;
     CommandPacket.QueueType      = NVME_ADMIN_QUEUE;

+    // MU_CHANGE [BEGIN] - Use the Mqes value from the Cap register
     // MU_CHANGE [BEGIN] - Support alternative hardware queue sizes in NVME driver
     if (PcdGetBool (PcdSupportAlternativeQueueSize)) {
       QueueSize = MIN (NVME_ALTERNATIVE_MAX_QUEUE_SIZE, Private->Cap.Mqes);
     } else if (Index == 1) {
-      QueueSize = NVME_CSQ_SIZE;
-    } else if (Private->Cap.Mqes > NVME_ASYNC_CSQ_SIZE) {
-      QueueSize = NVME_ASYNC_CSQ_SIZE;
+      // MU_CHANGE [END] - Support alternative hardware queue sizes in NVME driver
+      QueueSize = MIN (NVME_CSQ_SIZE, Private->Cap.Mqes);
     } else {
-      QueueSize = Private->Cap.Mqes;
+      QueueSize = MIN (NVME_ASYNC_CSQ_SIZE, Private->Cap.Mqes);
     }

-    // MU_CHANGE [END]
+    // MU_CHANGE [END] - Use the Mqes value from the Cap register

     CrIoSq.Qid   = Index;
     CrIoSq.Qsize = QueueSize;
@@ -709,6 +871,9 @@ NvmeCreateIoSubmissionQueue (
                                  NULL
                                  );
     if (EFI_ERROR (Status)) {
+      // MU_CHANGE [BEGIN] - Request Number of Queues from Controller
+      DEBUG ((DEBUG_ERROR, "%a: Create Submission Queue Command %d failed with Status %r\n", __func__, Index, Status));
+      // MU_CHANGE [END] - Request Number of Queues from Controller
       break;
     }
   }
@@ -718,10 +883,146 @@ NvmeCreateIoSubmissionQueue (

   return Status;
 }
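A sketch of the per-pair buffer carving performed by NvmeControllerInitIoQueues below. Each data pair occupies QueuePairPageCount pages: its SQ sits at the pair's base and its CQ starts AsqsPages pages in, mirroring the pointer math in the patch:

```c
#include <Uefi.h>

VOID
ExamplePairOffsets (
  IN  UINT8  *DataQueueBuffer,    // base of the shared data-queue allocation
  IN  UINTN  QueuePairPageCount,  // pages per SQ+CQ pair
  IN  UINTN  AsqsPages,           // pages covered by the SQ region
  IN  UINTN  Index,               // 0-based data pair (maps to queue Index + 1)
  OUT UINT8  **Sq,
  OUT UINT8  **Cq
  )
{
  *Sq = DataQueueBuffer + Index * QueuePairPageCount * EFI_PAGE_SIZE;
  *Cq = DataQueueBuffer + (Index * QueuePairPageCount + AsqsPages) * EFI_PAGE_SIZE;
}
```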
+// MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+
+/**
+  Initialize the Nvm Express controller data (I/O) queues.
+
+  @param[in] Private          The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure.
+
+  @retval EFI_SUCCESS         The NVM Express Controller is initialized successfully.
+  @retval Others              A device error occurred while initializing the controller.
+
+**/
+EFI_STATUS
+NvmeControllerInitIoQueues (
+  IN NVME_CONTROLLER_PRIVATE_DATA  *Private
+  )
+{
+  UINTN       AsqsPages;
+  UINTN       QueuePairPageCount;
+  UINTN       Index;
+  EFI_STATUS  Status;
+
+  // Offset the completion queue by the submission queue size
+  AsqsPages = EFI_SIZE_TO_PAGES (Private->SqData[1].NumberOfEntries * LShiftU64 (2, Private->SqData[1].EntrySize));
+
+  // Calculate the number of pages required for one data queue pair
+  QueuePairPageCount = AsqsPages + EFI_SIZE_TO_PAGES (Private->CqData[1].NumberOfEntries * LShiftU64 (2, Private->CqData[1].EntrySize));
+
+  //
+  // Address of data I/O submission & completion queue(s).
+  // The data queues share the same table of buffer pointers as the admin queues,
+  // so the table is indexed from Index + 1; the data queues live in a separate
+  // buffer, so their offsets start at the beginning of that buffer.
+  //
+  ZeroMem (Private->DataQueueBuffer, EFI_PAGES_TO_SIZE (QueuePairPageCount) * Private->NumberOfDataQueuePairs);
+  for (Index = 0; Index < Private->NumberOfDataQueuePairs; Index++) {
+    Private->SqBuffer[Index + 1]        = (NVME_SQ *)(UINTN)(Private->DataQueueBuffer + Index * QueuePairPageCount * EFI_PAGE_SIZE);
+    Private->SqBufferPciAddr[Index + 1] = (NVME_SQ *)(UINTN)(Private->DataQueueBufferPciAddr + Index * QueuePairPageCount * EFI_PAGE_SIZE);
+    Private->CqBuffer[Index + 1]        = (NVME_CQ *)(UINTN)(Private->DataQueueBuffer + (Index * QueuePairPageCount + AsqsPages) * EFI_PAGE_SIZE);
+    Private->CqBufferPciAddr[Index + 1] = (NVME_CQ *)(UINTN)(Private->DataQueueBufferPciAddr + (Index * QueuePairPageCount + AsqsPages) * EFI_PAGE_SIZE);
+
+    DEBUG ((DEBUG_INFO, "Data IO Submission Queue (SqBuffer[%d]) = [%016X]\n", Index + 1, Private->SqBuffer[Index + 1]));
+    DEBUG ((DEBUG_INFO, "Data IO Completion Queue (CqBuffer[%d]) = [%016X]\n", Index + 1, Private->CqBuffer[Index + 1]));
+  }
+
+  //
+  // Create I/O completion queue(s).
+  //
+  Status = NvmeCreateIoCompletionQueue (Private);
+  if (EFI_ERROR (Status)) {
+    return Status;
+  }
+
+  //
+  // Create I/O submission queue(s).
+  //
+  Status = NvmeCreateIoSubmissionQueue (Private);

+  return Status;
+}
+
+// MU_CHANGE [END]
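The ASQ/ACQ base address registers require 4KB-aligned addresses; the masking in NvmeControllerInitAdminQueues below follows this pattern. PciIo->AllocateBuffer already returns page-aligned memory, so the mask is defensive. A small sketch:

```c
#include <Uefi.h>

UINT64
ExampleAlignedQueueBase (
  IN UINT8  *BufferPciAddr,
  IN UINTN  PageOffset
  )
{
  // EFI_PAGE_MASK is 0xFFF; clearing those bits yields a 4KB boundary.
  return ((UINT64)(UINTN)(BufferPciAddr + PageOffset * EFI_PAGE_SIZE)) & ~(UINT64)EFI_PAGE_MASK;
}
```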
+// MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+
+/**
+  Initialize the Nvm Express controller admin queues.
+
+  @param[in] Private          The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure.
+
+  @retval EFI_SUCCESS         The NVM Express Controller is initialized successfully.
+  @retval Others              A device error occurred while initializing the controller.
+
+**/
+EFI_STATUS
+NvmeControllerInitAdminQueues (
+  IN NVME_CONTROLLER_PRIVATE_DATA  *Private
+  )
+{
+  NVME_ASQ    Asq;
+  NVME_ACQ    Acq;
+  UINTN       AsqsPages;
+  UINTN       QueuePairPageCount;
+  EFI_STATUS  Status;
+
+  // Offset the completion queue by the submission queue size
+  AsqsPages = EFI_SIZE_TO_PAGES (Private->SqData[0].NumberOfEntries * LShiftU64 (2, Private->SqData[0].EntrySize));
+
+  //
+  // Address of admin submission queue.
+  //
+  Asq = (UINT64)(UINTN)(Private->BufferPciAddr) & ~EFI_PAGE_MASK;
+
+  //
+  // Address of admin completion queue.
+  //
+  Acq = (UINT64)(UINTN)(Private->BufferPciAddr + AsqsPages * EFI_PAGE_SIZE) & ~EFI_PAGE_MASK;
+
+  // Calculate the number of pages required for the admin queues
+  QueuePairPageCount = AsqsPages + EFI_SIZE_TO_PAGES (Private->CqData[0].NumberOfEntries * LShiftU64 (2, Private->CqData[0].EntrySize));
+
+  //
+  // Addresses of the admin submission & completion queues.
+  //
+  ZeroMem (Private->Buffer, EFI_PAGES_TO_SIZE (QueuePairPageCount));
+  Private->SqBuffer[0]        = (NVME_SQ *)(UINTN)(Private->Buffer);
+  Private->SqBufferPciAddr[0] = (NVME_SQ *)(UINTN)(Private->BufferPciAddr);
+  Private->CqBuffer[0]        = (NVME_CQ *)(UINTN)(Private->Buffer + AsqsPages * EFI_PAGE_SIZE);
+  Private->CqBufferPciAddr[0] = (NVME_CQ *)(UINTN)(Private->BufferPciAddr + AsqsPages * EFI_PAGE_SIZE);
+
+  DEBUG ((DEBUG_INFO, "Private->Buffer = [%016X]\n", (UINT64)(UINTN)Private->Buffer));
+  DEBUG ((DEBUG_INFO, "Admin Submission Queue size (Number of Entries) = [%08X]\n", Private->SqData[0].NumberOfEntries));
+  DEBUG ((DEBUG_INFO, "Admin Completion Queue size (Number of Entries) = [%08X]\n", Private->CqData[0].NumberOfEntries));
+  DEBUG ((DEBUG_INFO, "Admin Submission Queue (SqBuffer[0]) = [%016X]\n", Private->SqBuffer[0]));
+  DEBUG ((DEBUG_INFO, "Admin Completion Queue (CqBuffer[0]) = [%016X]\n", Private->CqBuffer[0]));
+
+  //
+  // Program admin submission queue address.
+  //
+  Status = WriteNvmeAdminSubmissionQueueBaseAddress (Private, &Asq);
+
+  if (EFI_ERROR (Status)) {
+    return Status;
+  }
+
+  //
+  // Program admin completion queue address.
+  //
+  Status = WriteNvmeAdminCompletionQueueBaseAddress (Private, &Acq);
+
+  return Status;
+}
+
+// MU_CHANGE [END]
+
 /**
   Initialize the Nvm Express controller.

   @param[in] Private          The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure.
+  @param[in] Aqa              The pointer to the NVME_AQA data structure. // MU_CHANGE - Allocate IO Queue Buffer

   @retval EFI_SUCCESS         The NVM Express Controller is initialized successfully.
   @retval Others              A device error occurred while initializing the controller.
@@ -729,18 +1030,20 @@ NvmeCreateIoSubmissionQueue (
 **/
 EFI_STATUS
 NvmeControllerInit (
-  IN NVME_CONTROLLER_PRIVATE_DATA  *Private
+  IN NVME_CONTROLLER_PRIVATE_DATA  *Private,
+  IN NVME_AQA                      *Aqa // MU_CHANGE - Allocate IO Queue Buffer
   )
 {
-  EFI_STATUS           Status;
-  EFI_PCI_IO_PROTOCOL  *PciIo;
-  UINT64               Supports;
-  NVME_AQA             Aqa;
-  NVME_ASQ             Asq;
-  NVME_ACQ             Acq;
-  UINT16               VidDid[2]; // MU_CHANGE - Improve NVMe controller init robustness
-  UINT8                Sn[21];
-  UINT8                Mn[41];
+  EFI_STATUS            Status;
+  EFI_PCI_IO_PROTOCOL   *PciIo;
+  UINT64                Supports;
+  UINT16                VidDid[2]; // MU_CHANGE - Improve NVMe controller init robustness
+  UINT8                 Sn[21];
+  UINT8                 Mn[41];
+  UINTN                 QueuePairPageCount;
+  UINTN                 Bytes;
+  UINTN                 Index;
+  EFI_PHYSICAL_ADDRESS  MappedAddr;

   // MU_CHANGE [BEGIN] - Improve NVMe controller init robustness
   PciIo = Private->PciIo;
@@ -764,10 +1067,11 @@ NvmeControllerInit (
     return EFI_DEVICE_ERROR;
   }

+  // MU_CHANGE [END] - Improve NVMe controller init robustness
+
   //
   // Enable this controller.
   //
-  // MU_CHANGE [END] - Improve NVMe controller init robustness
   Status = PciIo->Attributes (
                     PciIo,
                     EfiPciIoAttributeOperationSupported,
@@ -806,7 +1110,6 @@ NvmeControllerInit (
   //
   // Currently the driver only supports 4k page size.
   //
-  // MU_CHANGE [BEGIN] - Improve NVMe controller init robustness
   // Currently, this means Cap.Mpsmin must be zero for an EFI_PAGE_SHIFT size of 12.
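A worked example of the CAP.MPSMIN check referenced above: the controller's minimum memory page size is 2^(12 + MPSMIN) bytes, so MPSMIN == 0 is the only value compatible with the driver's fixed 4KB (EFI_PAGE_SHIFT == 12) pages:

```c
#include <Uefi.h>

BOOLEAN
ExamplePageSizeSupported (
  IN UINT8  Mpsmin  // CAP.MPSMIN field
  )
{
  UINTN  MinControllerPage;

  // NVMe encodes the minimum host memory page size as 2^(12 + MPSMIN).
  MinControllerPage = (UINTN)1 << (12 + Mpsmin);
  return (BOOLEAN)(MinControllerPage == EFI_PAGE_SIZE);
}
```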
@@ -818,19 +1121,16 @@ NvmeControllerInit ( // MU_CHANGE [END] - Improve NVMe controller init robustness - Private->Cid[0] = 0; - Private->Cid[1] = 0; - Private->Cid[2] = 0; - Private->Pt[0] = 0; - Private->Pt[1] = 0; - Private->Pt[2] = 0; - Private->SqTdbl[0].Sqt = 0; - Private->SqTdbl[1].Sqt = 0; - Private->SqTdbl[2].Sqt = 0; - Private->CqHdbl[0].Cqh = 0; - Private->CqHdbl[1].Cqh = 0; - Private->CqHdbl[2].Cqh = 0; - Private->AsyncSqHead = 0; + // MU_CHANGE [BEGIN] - Allocate IO Queue Buffer + for (Index = 0; Index < NVME_MAX_QUEUES; Index++) { + Private->Cid[Index] = 0; + Private->Pt[Index] = 0; + Private->SqTdbl[Index].Sqt = 0; + Private->CqHdbl[Index].Cqh = 0; + } + + Private->AsyncSqHead = 0; + // MU_CHANGE [END] - Allocate IO Queue Buffer Status = NvmeDisableController (Private); @@ -839,103 +1139,211 @@ NvmeControllerInit ( } // - // set number of entries admin submission & completion queues. + // Program admin queue attributes. // - // MU_CHANGE [BEGIN] - Support alternative hardware queue sizes in NVME driver - Aqa.Asqs = PcdGetBool (PcdSupportAlternativeQueueSize) ? MIN (NVME_ALTERNATIVE_MAX_QUEUE_SIZE, Private->Cap.Mqes) : NVME_ASQ_SIZE; - Aqa.Rsvd1 = 0; - Aqa.Acqs = PcdGetBool (PcdSupportAlternativeQueueSize) ? MIN (NVME_ALTERNATIVE_MAX_QUEUE_SIZE, Private->Cap.Mqes) : NVME_ACQ_SIZE; - Aqa.Rsvd2 = 0; - // MU_CHANGE [END] + Status = WriteNvmeAdminQueueAttributes (Private, Aqa); // MU_CHANGE - Allocate IO Queue Buffer - // - // Address of admin submission queue. - // - Asq = (UINT64)(UINTN)(Private->BufferPciAddr) & ~0xFFF; + if (EFI_ERROR (Status)) { + return Status; + } + + // MU_CHANGE [BEGIN] - Allocate IO Queue Buffer + // Define the admin queue entry sizes + Private->SqData[0].EntrySize = NVME_IOSQES_MIN; + Private->CqData[0].EntrySize = NVME_IOCQES_MIN; + + Status = NvmeControllerInitAdminQueues (Private); + if (EFI_ERROR (Status)) { + return Status; + } + + // MU_CHANGE [END] - Allocate IO Queue Buffer + Status = NvmeEnableController (Private, Private->SqData[0].EntrySize, Private->CqData[0].EntrySize); + if (EFI_ERROR (Status)) { + return Status; + } // - // Address of admin completion queue. + // Allocate buffer for Identify Controller data // - // MU_CHANGE [BEGIN] - Support alternative hardware queue sizes in NVME driver - if (PcdGetBool (PcdSupportAlternativeQueueSize)) { - Acq = (UINT64)(UINTN)(Private->BufferPciAddr + 4 * EFI_PAGE_SIZE) & ~0xFFF; - } else { - Acq = (UINT64)(UINTN)(Private->BufferPciAddr + EFI_PAGE_SIZE) & ~0xFFF; + if (Private->ControllerData == NULL) { + Private->ControllerData = (NVME_ADMIN_CONTROLLER_DATA *)AllocateZeroPool (sizeof (NVME_ADMIN_CONTROLLER_DATA)); + + if (Private->ControllerData == NULL) { + return EFI_OUT_OF_RESOURCES; + } } // - // Address of I/O submission & completion queue. 
- // - if (PcdGetBool (PcdSupportAlternativeQueueSize)) { - ZeroMem (Private->Buffer, EFI_PAGES_TO_SIZE (NVME_ALTERNATIVE_TOTAL_QUEUE_BUFFER_IN_PAGES)); - Private->SqBuffer[0] = (NVME_SQ *)(UINTN)(Private->Buffer); - Private->SqBufferPciAddr[0] = (NVME_SQ *)(UINTN)(Private->BufferPciAddr); - Private->CqBuffer[0] = (NVME_CQ *)(UINTN)(Private->Buffer + 4 * EFI_PAGE_SIZE); - Private->CqBufferPciAddr[0] = (NVME_CQ *)(UINTN)(Private->BufferPciAddr + 4 * EFI_PAGE_SIZE); - Private->SqBuffer[1] = (NVME_SQ *)(UINTN)(Private->Buffer + 5 * EFI_PAGE_SIZE); - Private->SqBufferPciAddr[1] = (NVME_SQ *)(UINTN)(Private->BufferPciAddr + 5 * EFI_PAGE_SIZE); - Private->CqBuffer[1] = (NVME_CQ *)(UINTN)(Private->Buffer + 9 * EFI_PAGE_SIZE); - Private->CqBufferPciAddr[1] = (NVME_CQ *)(UINTN)(Private->BufferPciAddr + 9 * EFI_PAGE_SIZE); - Private->SqBuffer[2] = (NVME_SQ *)(UINTN)(Private->Buffer + 10 * EFI_PAGE_SIZE); - Private->SqBufferPciAddr[2] = (NVME_SQ *)(UINTN)(Private->BufferPciAddr + 10 * EFI_PAGE_SIZE); - Private->CqBuffer[2] = (NVME_CQ *)(UINTN)(Private->Buffer + 14 * EFI_PAGE_SIZE); - Private->CqBufferPciAddr[2] = (NVME_CQ *)(UINTN)(Private->BufferPciAddr + 14 * EFI_PAGE_SIZE); - } else { - ZeroMem (Private->Buffer, EFI_PAGES_TO_SIZE (6)); - Private->SqBuffer[0] = (NVME_SQ *)(UINTN)(Private->Buffer); - Private->SqBufferPciAddr[0] = (NVME_SQ *)(UINTN)(Private->BufferPciAddr); - Private->CqBuffer[0] = (NVME_CQ *)(UINTN)(Private->Buffer + 1 * EFI_PAGE_SIZE); - Private->CqBufferPciAddr[0] = (NVME_CQ *)(UINTN)(Private->BufferPciAddr + 1 * EFI_PAGE_SIZE); - Private->SqBuffer[1] = (NVME_SQ *)(UINTN)(Private->Buffer + 2 * EFI_PAGE_SIZE); - Private->SqBufferPciAddr[1] = (NVME_SQ *)(UINTN)(Private->BufferPciAddr + 2 * EFI_PAGE_SIZE); - Private->CqBuffer[1] = (NVME_CQ *)(UINTN)(Private->Buffer + 3 * EFI_PAGE_SIZE); - Private->CqBufferPciAddr[1] = (NVME_CQ *)(UINTN)(Private->BufferPciAddr + 3 * EFI_PAGE_SIZE); - Private->SqBuffer[2] = (NVME_SQ *)(UINTN)(Private->Buffer + 4 * EFI_PAGE_SIZE); - Private->SqBufferPciAddr[2] = (NVME_SQ *)(UINTN)(Private->BufferPciAddr + 4 * EFI_PAGE_SIZE); - Private->CqBuffer[2] = (NVME_CQ *)(UINTN)(Private->Buffer + 5 * EFI_PAGE_SIZE); - Private->CqBufferPciAddr[2] = (NVME_CQ *)(UINTN)(Private->BufferPciAddr + 5 * EFI_PAGE_SIZE); - } + // Get current Identify Controller Data + // + Status = NvmeIdentifyController (Private, Private->ControllerData); - // MU_CHANGE [END] + if (EFI_ERROR (Status)) { + FreePool (Private->ControllerData); + Private->ControllerData = NULL; + return EFI_NOT_FOUND; + } - DEBUG ((DEBUG_INFO, "Private->Buffer = [%016X]\n", (UINT64)(UINTN)Private->Buffer)); - DEBUG ((DEBUG_INFO, "Admin Submission Queue size (Aqa.Asqs) = [%08X]\n", Aqa.Asqs)); - DEBUG ((DEBUG_INFO, "Admin Completion Queue size (Aqa.Acqs) = [%08X]\n", Aqa.Acqs)); - DEBUG ((DEBUG_INFO, "Admin Submission Queue (SqBuffer[0]) = [%016X]\n", Private->SqBuffer[0])); - DEBUG ((DEBUG_INFO, "Admin Completion Queue (CqBuffer[0]) = [%016X]\n", Private->CqBuffer[0])); - DEBUG ((DEBUG_INFO, "Sync I/O Submission Queue (SqBuffer[1]) = [%016X]\n", Private->SqBuffer[1])); - DEBUG ((DEBUG_INFO, "Sync I/O Completion Queue (CqBuffer[1]) = [%016X]\n", Private->CqBuffer[1])); - DEBUG ((DEBUG_INFO, "Async I/O Submission Queue (SqBuffer[2]) = [%016X]\n", Private->SqBuffer[2])); - DEBUG ((DEBUG_INFO, "Async I/O Completion Queue (CqBuffer[2]) = [%016X]\n", Private->CqBuffer[2])); + // + // Dump NvmExpress Identify Controller Data + // + CopyMem (Sn, Private->ControllerData->Sn, sizeof (Private->ControllerData->Sn)); + 
+  Sn[20] = 0;
+  CopyMem (Mn, Private->ControllerData->Mn, sizeof (Private->ControllerData->Mn));
+  Mn[40] = 0;
+  DEBUG ((DEBUG_INFO, " == NVME IDENTIFY CONTROLLER DATA ==\n"));
+  DEBUG ((DEBUG_INFO, "    PCI VID   : 0x%x\n", Private->ControllerData->Vid));
+  DEBUG ((DEBUG_INFO, "    PCI SSVID : 0x%x\n", Private->ControllerData->Ssvid));
+  DEBUG ((DEBUG_INFO, "    SN        : %a\n", Sn));
+  DEBUG ((DEBUG_INFO, "    MN        : %a\n", Mn));
+  DEBUG ((DEBUG_INFO, "    FR        : 0x%x\n", *((UINT64 *)Private->ControllerData->Fr)));
+  DEBUG ((DEBUG_INFO, "    TNVMCAP (high 8-byte) : 0x%lx\n", *((UINT64 *)(Private->ControllerData->Tnvmcap + 8))));
+  DEBUG ((DEBUG_INFO, "    TNVMCAP (low 8-byte)  : 0x%lx\n", *((UINT64 *)Private->ControllerData->Tnvmcap)));
+  DEBUG ((DEBUG_INFO, "    RAB       : 0x%x\n", Private->ControllerData->Rab));
+  DEBUG ((DEBUG_INFO, "    IEEE      : 0x%x\n", *(UINT32 *)Private->ControllerData->Ieee_oui));
+  DEBUG ((DEBUG_INFO, "    AERL      : 0x%x\n", Private->ControllerData->Aerl));
+  DEBUG ((DEBUG_INFO, "    SQES      : 0x%x\n", Private->ControllerData->Sqes));
+  DEBUG ((DEBUG_INFO, "    CQES      : 0x%x\n", Private->ControllerData->Cqes));
+  DEBUG ((DEBUG_INFO, "    NN        : 0x%x\n", Private->ControllerData->Nn));
+
+  // MU_CHANGE [BEGIN] - Request Number of Queues from Controller
+  //
+  // Send the Set Features Command to request the maximum number of data queues.
+  // The controller is free to allocate a different number of queues from the number requested.
+  // The number of queues allocated is returned and stored in the controller private data structure
+  // using the NumberOfDataQueuePairs field.
+  //
+  Status = NvmeSetFeaturesNumberOfQueues (Private, NVME_MAX_QUEUES - 1);
+
+  if (EFI_ERROR (Status)) {
+    return Status;
+  }
+
+  // MU_CHANGE [END] - Request Number of Queues from Controller
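The allocation comment below leans on the Identify Controller SQES/CQES bytes. Per the NVMe spec, each is a byte whose low nibble is the required (minimum) entry size and whose high nibble is the maximum, both as log2 exponents. A decoding sketch:

```c
#include <Uefi.h>

VOID
ExampleDecodeSqes (
  IN  UINT8  Sqes,     // Identify Controller SQES (or CQES) byte
  OUT UINT8  *MinLog2, // required minimum entry size, e.g. 6 -> 64 bytes
  OUT UINT8  *MaxLog2  // maximum supported entry size
  )
{
  *MinLog2 = Sqes & 0x0F;
  *MaxLog2 = (Sqes >> 4) & 0x0F;
}
```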
+
+  // MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+  //
+  // Allocate the data queues. Note: for memory allocation we assume the data queue
+  // entry sizes match the admin queue entry sizes. The Identify Controller data
+  // reports in SQES and CQES the controller's minimum and maximum queue entry sizes,
+  // which this driver has not needed so far because it uses the spec-defined minimum
+  // entry sizes. Allocation is also based on the admin-defined queue sizes in number
+  // of entries. Some scenarios may use different queue sizes; currently the driver
+  // only needs I/O queue sizes <= admin queue sizes, so this allocation is sufficient.
+  // We may want to explore a more dynamic allocation in the future.
+  //
+  for (Index = 1; Index <= Private->NumberOfDataQueuePairs; Index++) {
+    Private->SqData[Index].NumberOfEntries = Private->SqData[0].NumberOfEntries;
+    Private->CqData[Index].NumberOfEntries = Private->CqData[0].NumberOfEntries;
+    Private->SqData[Index].EntrySize       = Private->SqData[0].EntrySize;
+    Private->CqData[Index].EntrySize       = Private->CqData[0].EntrySize;
+  }
+
+  // Use the first data queue size to compute the number of pages required per data queue pair
+  QueuePairPageCount = EFI_SIZE_TO_PAGES (Private->SqData[1].NumberOfEntries * LShiftU64 (2, Private->SqData[1].EntrySize)) +
+                       EFI_SIZE_TO_PAGES (Private->CqData[1].NumberOfEntries * LShiftU64 (2, Private->CqData[1].EntrySize));
+  Status = PciIo->AllocateBuffer (
+                    PciIo,
+                    AllocateAnyPages,
+                    EfiBootServicesData,
+                    QueuePairPageCount * Private->NumberOfDataQueuePairs,
+                    (VOID **)&Private->DataQueueBuffer,
+                    0
+                    );
+
+  if (EFI_ERROR (Status)) {
+    return Status;
+  }
+
+  Bytes  = EFI_PAGES_TO_SIZE (QueuePairPageCount * Private->NumberOfDataQueuePairs);
+  Status = PciIo->Map (
+                    PciIo,
+                    EfiPciIoOperationBusMasterCommonBuffer,
+                    Private->DataQueueBuffer,
+                    &Bytes,
+                    &MappedAddr,
+                    &Private->DataQueueMapping
+                    );
+
+  if (EFI_ERROR (Status) || (Bytes != EFI_PAGES_TO_SIZE (QueuePairPageCount * Private->NumberOfDataQueuePairs))) {
+    return Status;
+  }
+
+  Private->DataQueueBufferPciAddr = (UINT8 *)(UINTN)MappedAddr;
+
+  Status = NvmeControllerInitIoQueues (Private);
+  // MU_CHANGE [END] - Allocate IO Queue Buffer
+
+  return Status;
+}
+
+// MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+
+/**
+  Reset the Nvm Express controller.
+
+  @param[in] Private          The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure.
+
+  @retval EFI_SUCCESS         The NVM Express Controller is reset successfully.
+  @retval Others              A device error occurred while resetting the controller.
+
+**/
+EFI_STATUS
+NvmeControllerReset (
+  IN NVME_CONTROLLER_PRIVATE_DATA  *Private
+  )
+{
+  EFI_STATUS           Status;
+  EFI_PCI_IO_PROTOCOL  *PciIo;
+  UINTN                Index;
+  UINT16               VidDid[2];
+  UINT8                Sn[21];
+  UINT8                Mn[41];
+
+  DEBUG ((DEBUG_INFO, "%a: Begin Controller Reset\n", __func__));
+
+  PciIo = Private->PciIo;
+
+  //
+  // Verify the controller is still accessible
+  //
+  Status = PciIo->Pci.Read (
+                        PciIo,
+                        EfiPciIoWidthUint16,
+                        PCI_VENDOR_ID_OFFSET,
+                        ARRAY_SIZE (VidDid),
+                        VidDid
+                        );
+  if (EFI_ERROR (Status)) {
+    ASSERT_EFI_ERROR (Status);
+    return EFI_DEVICE_ERROR;
+  }
+
+  // Reads from an absent PCI function return all ones.
+  if ((VidDid[0] == 0xFFFF) || (VidDid[1] == 0xFFFF)) {
+    return EFI_DEVICE_ERROR;
+  }
+
+  for (Index = 0; Index < NVME_MAX_QUEUES; Index++) {
+    Private->Cid[Index]        = 0;
+    Private->Pt[Index]         = 0;
+    Private->SqTdbl[Index].Sqt = 0;
+    Private->CqHdbl[Index].Cqh = 0;
+  }
+
+  Private->AsyncSqHead = 0;
+
+  Status = NvmeDisableController (Private);
+
+  if (EFI_ERROR (Status)) {
+    return Status;
+  }
+
+  Status = NvmeControllerInitAdminQueues (Private);
+
+  if (EFI_ERROR (Status)) {
+    return Status;
+  }
+
+  Status = NvmeEnableController (Private, Private->SqData[0].EntrySize, Private->CqData[0].EntrySize);
+  if (EFI_ERROR (Status)) {
+    return Status;
+  }
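For reference, a sketch of the device-presence test used in the reset path: a read from a removed or powered-down PCI function returns all ones, so 0xFFFF in either 16-bit ID word means the controller is gone and the reset should bail out:

```c
#include <Uefi.h>

BOOLEAN
ExamplePciFunctionPresent (
  IN CONST UINT16  VidDid[2]  // Vendor ID and Device ID as read from config space
  )
{
  // All-ones in either word indicates the function did not respond.
  return (BOOLEAN)((VidDid[0] != 0xFFFF) && (VidDid[1] != 0xFFFF));
}
```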
@@ -966,9 +1374,11 @@ NvmeControllerInit (
   // Dump NvmExpress Identify Controller Data
   //
   CopyMem (Sn, Private->ControllerData->Sn, sizeof (Private->ControllerData->Sn));
+  // Serial Number and Model Number are not null-terminated strings, but will be printed as ones,
+  // so here we add a null terminator to the end of their arrays.
   Sn[20] = 0;
   CopyMem (Mn, Private->ControllerData->Mn, sizeof (Private->ControllerData->Mn));
-  Mn[40] = 0;
+  Mn[40] = 0; // Model Number is not a null-terminated string
   DEBUG ((DEBUG_INFO, " == NVME IDENTIFY CONTROLLER DATA ==\n"));
   DEBUG ((DEBUG_INFO, "    PCI VID   : 0x%x\n", Private->ControllerData->Vid));
   DEBUG ((DEBUG_INFO, "    PCI SSVID : 0x%x\n", Private->ControllerData->Ssvid));
@@ -984,6 +1394,16 @@ NvmeControllerInit (
   DEBUG ((DEBUG_INFO, "    CQES      : 0x%x\n", Private->ControllerData->Cqes));
   DEBUG ((DEBUG_INFO, "    NN        : 0x%x\n", Private->ControllerData->Nn));

+  //
+  // Send the Set Features Command to request the maximum number of data queues.
+  // The controller is free to allocate a different number of queues from the number requested.
+  //
+  Status = NvmeSetFeaturesNumberOfQueues (Private, NVME_MAX_QUEUES - 1);
+
+  if (EFI_ERROR (Status)) {
+    return Status;
+  }
+
   //
   // Create two I/O completion queues.
   // One for blocking I/O, one for non-blocking I/O.
@@ -1002,6 +1422,8 @@ NvmeControllerInit (
   return Status;
 }

+// MU_CHANGE [END] - Allocate IO Queue Buffer
+
 /**
   This routine is called to properly shutdown the Nvm Express controller per NVMe spec.
diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.h b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.h
index a08c4e974e..287265e50e 100644
--- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.h
+++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressHci.h
@@ -18,10 +18,27 @@
 //
 #define NVME_ASQ_BUF_OFFSET  EFI_PAGE_SIZE

+// MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+
+/**
+  Reset the Nvm Express controller.
+
+  @param[in] Private          The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure.
+
+  @retval EFI_SUCCESS         The NVM Express Controller is reset successfully.
+  @retval Others              A device error occurred while resetting the controller.
+
+**/
+EFI_STATUS
+NvmeControllerReset (
+  IN NVME_CONTROLLER_PRIVATE_DATA  *Private
+  );
+
 /**
   Initialize the Nvm Express controller.

   @param[in] Private          The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure.
+  @param[in] Aqa              The pointer to the NVME_AQA data structure.

   @retval EFI_SUCCESS         The NVM Express Controller is initialized successfully.
   @retval Others              A device error occurred while initializing the controller.
@@ -29,9 +46,12 @@
 **/
 EFI_STATUS
 NvmeControllerInit (
-  IN NVME_CONTROLLER_PRIVATE_DATA  *Private
+  IN NVME_CONTROLLER_PRIVATE_DATA  *Private,
+  IN NVME_AQA                      *Aqa
   );

+// MU_CHANGE [END] - Allocate IO Queue Buffer
+
 /**
   Get identify controller data.

@@ -66,4 +86,24 @@ NvmeIdentifyNamespace (
   IN VOID                          *Buffer
   );

+// MU_CHANGE [BEGIN] - Allocate IO Queue Buffer
+
+/**
+  Read Nvm Express admin queue attributes register.
+
+  @param  Private          The pointer to the NVME_CONTROLLER_PRIVATE_DATA data structure.
+  @param  Aqa              The buffer used to store the content read from the admin queue attributes register.
+
+  @return EFI_SUCCESS      Successfully read data from the admin queue attributes register.
+  @return EFI_DEVICE_ERROR Fail to read data from the admin queue attributes register.
+ +**/ +EFI_STATUS +ReadNvmeAdminQueueAttributes ( + IN NVME_CONTROLLER_PRIVATE_DATA *Private, + OUT NVME_AQA *Aqa + ); + +// MU_CHANGE [END] - Allocate IO Queue Buffer + #endif diff --git a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressPassthru.c b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressPassthru.c index d1c1e19602..28b36b64c4 100644 --- a/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressPassthru.c +++ b/MdeModulePkg/Bus/Pci/NvmExpressDxe/NvmExpressPassthru.c @@ -866,7 +866,7 @@ NvmExpressPassThru ( // // Reset the NVMe controller. // - Status = NvmeControllerInit (Private); + Status = NvmeControllerReset (Private); // MU_CHANGE - Allocate IO Queue Buffer if (!EFI_ERROR (Status)) { Status = AbortAsyncPassThruTasks (Private); if (!EFI_ERROR (Status)) { diff --git a/MdePkg/Include/IndustryStandard/Nvme.h b/MdePkg/Include/IndustryStandard/Nvme.h index 6a4dc69c61..90829bf919 100644 --- a/MdePkg/Include/IndustryStandard/Nvme.h +++ b/MdePkg/Include/IndustryStandard/Nvme.h @@ -762,6 +762,27 @@ typedef struct { UINT32 Sv : 1; /* Save */ } NVME_ADMIN_SET_FEATURES; +// MU_CHANGE [BEGIN] - Add Set Features Command - Number of Queues +typedef union { + NVME_ADMIN_SET_FEATURES Bits; + UINT32 Uint32; +} NVME_ADMIN_SET_FEATURES_CDW10; + +// +// NvmExpress Admin Set Features Command - Number of Queues +// +typedef union { + struct { + // + // CDW 11 for Requested, DW0 for Allocated + // + UINT32 Nsq : 16; /* Number of Submission Queues */ + UINT32 Ncq : 16; /* Number of Completion Queues */ + } Bits; + UINT32 Uint32; +} NVME_ADMIN_SET_FEATURES_NUM_QUEUES; + +// MU_CHANGE [END] - Add Set Features Command - Number of Queues // // NvmExpress Admin Sanitize Command // @@ -984,6 +1005,30 @@ typedef enum { SanitizeOpcode = NVME_ADMIN_SANITIZE_CMD } NVME_ADMIN_COMMAND_OPCODE; +// MU_CHANGE [BEGIN] - Add Nvm Express Admin Feature Identifiers +// +// Nvm Express Admin Feature Identifiers +// Nvm Express Spec v1.3d Figure 129 +// +#define NVME_FEATURE_ARBITRATION 0x01 +#define NVME_FEATURE_POWER_MANAGEMENT 0x02 +#define NVME_FEATURE_LBA_RANGE_TYPE 0x03 +#define NVME_FEATURE_TEMPERATURE_THRESHOLD 0x04 +#define NVME_FEATURE_ERROR_RECOVERY 0x05 +#define NVME_FEATURE_VOLATILE_WRITE_CACHE 0x06 +#define NVME_FEATURE_NUMBER_OF_QUEUES 0x07 +#define NVME_FEATURE_INTERRUPT_COALESCING 0x08 +#define NVME_FEATURE_INTERRUPT_VECTOR_CONF 0x09 +#define NVME_FEATURE_WRITE_ATOMICITY 0x0A +#define NVME_FEATURE_ASYNC_EVENT_CONFIG 0x0B +#define NVME_FEATURE_AUTONOMOUS_POWER_STATE_TRANSITION 0x0C +#define NVME_FEATURE_HOST_MEMORY_BUFFER 0x0D +#define NVME_FEATURE_TIMESTAMP 0x0E +#define NVME_FEATURE_KEEP_ALIVE_TIMER 0x0F +#define NVME_FEATURE_HOST_CONTROLLED_THERMAL_MANAGEMENT 0x10 +#define NVME_FEATURE_NON_OPERATIONAL_POWER_STATE_CONFIG 0x11 + +// MU_CHANGE [END] - Add Nvm Express Admin Feature Identifiers // // Controller or Namespace Structure (CNS) field // (ref. spec. v1.1 figure 82).
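To tie the Nvme.h additions together, a minimal sketch of how the new definitions compose into the Set Features (Number of Queues) admin command that NvmExpressHci.c issues. The packet wiring mirrors NvmeSetFeaturesNumberOfQueues; ExampleBuildNumQueuesCdws itself is illustrative and not part of this patch:

```c
#include <IndustryStandard/Nvme.h>

VOID
ExampleBuildNumQueuesCdws (
  IN  UINT16  PairsRequested,  // data queue pairs wanted (admin pair excluded)
  OUT UINT32  *Cdw10,
  OUT UINT32  *Cdw11
  )
{
  NVME_ADMIN_SET_FEATURES_CDW10       SetFeatures;
  NVME_ADMIN_SET_FEATURES_NUM_QUEUES  Requested;

  SetFeatures.Uint32   = 0;
  SetFeatures.Bits.Fid = NVME_FEATURE_NUMBER_OF_QUEUES;  // FID 07h

  Requested.Uint32   = 0;
  Requested.Bits.Nsq = PairsRequested - 1;  // 0-based per the spec
  Requested.Bits.Ncq = PairsRequested - 1;

  *Cdw10 = SetFeatures.Uint32;
  *Cdw11 = Requested.Uint32;
}
```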