diff --git a/source/adapters/level_zero/context.cpp b/source/adapters/level_zero/context.cpp
index 0b56af6f4f..52e6a6e168 100644
--- a/source/adapters/level_zero/context.cpp
+++ b/source/adapters/level_zero/context.cpp
@@ -190,13 +190,6 @@ ur_result_t urContextSetExtendedDeleter(
 } // namespace ur::level_zero
 
 ur_result_t ur_context_handle_t_::initialize() {
-
-  // We may allocate memory to this root device so create allocators.
-  if (SingleRootDevice &&
-      DeviceMemPools.find(SingleRootDevice->ZeDevice) == DeviceMemPools.end()) {
-    createUSMAllocators(SingleRootDevice);
-  }
-
   // Create the immediate command list to be used for initializations.
   // Created as synchronous so level-zero performs implicit synchronization and
   // there is no need to query for completion in the plugin
@@ -207,7 +200,7 @@ ur_result_t ur_context_handle_t_::initialize() {
   // D2D migartion, if no P2P, is broken since it should use
   // immediate command-list for the specfic devices, and this single one.
   //
-  ur_device_handle_t Device = SingleRootDevice ? SingleRootDevice : Devices[0];
+  ur_device_handle_t Device = Devices[0];
 
   // Prefer to use copy engine for initialization copies,
   // if available and allowed (main copy engine with index 0).
diff --git a/source/adapters/level_zero/context.hpp b/source/adapters/level_zero/context.hpp
index abc019de35..04443e4f97 100644
--- a/source/adapters/level_zero/context.hpp
+++ b/source/adapters/level_zero/context.hpp
@@ -98,13 +98,6 @@ struct ur_context_handle_t_ : _ur_object {
   // compute and copy command list caches.
   ur_mutex ZeCommandListCacheMutex;
 
-  // If context contains one device or sub-devices of the same device, we want
-  // to save this device.
-  // This field is only set at ur_context_handle_t creation time, and cannot
-  // change. Therefore it can be accessed without holding a lock on this
-  // ur_context_handle_t.
-  ur_device_handle_t SingleRootDevice = nullptr;
-
   // Cache of all currently available/completed command/copy lists.
   // Note that command-list can only be re-used on the same device.
   //
diff --git a/source/adapters/level_zero/memory.cpp b/source/adapters/level_zero/memory.cpp
index 5283ea4da3..0a7f489eab 100644
--- a/source/adapters/level_zero/memory.cpp
+++ b/source/adapters/level_zero/memory.cpp
@@ -1524,9 +1524,7 @@ ur_result_t urMemImageCreate(
   // own the image.
   // TODO: Implement explicit copying for acessing the image from other devices
   // in the context.
-  ur_device_handle_t Device = Context->SingleRootDevice
-                                  ? Context->SingleRootDevice
-                                  : Context->Devices[0];
+  ur_device_handle_t Device = Context->Devices[0];
   ze_image_handle_t ZeImage;
   ZE2UR_CALL(zeImageCreate,
              (Context->ZeContext, Device->ZeDevice, &ZeImageDesc, &ZeImage));
@@ -2073,58 +2071,22 @@ ur_result_t _ur_buffer::getZeHandle(char *&ZeHandle, access_mode_t AccessMode,
     LastDeviceWithValidAllocation = Device;
     return UR_RESULT_SUCCESS;
   }
-  // Reads user setting on how to deal with buffers in contexts where
-  // all devices have the same root-device. Returns "true" if the
-  // preference is to have allocate on each [sub-]device and migrate
-  // normally (copy) to other sub-devices as needed. Returns "false"
-  // if the preference is to have single root-device allocations
-  // serve the needs of all [sub-]devices, meaning potentially more
-  // cross-tile traffic.
-  //
-  static const bool SingleRootDeviceBufferMigration = [] {
-    const char *UrRet =
-        std::getenv("UR_L0_SINGLE_ROOT_DEVICE_BUFFER_MIGRATION");
-    const char *PiRet =
-        std::getenv("SYCL_PI_LEVEL_ZERO_SINGLE_ROOT_DEVICE_BUFFER_MIGRATION");
-    const char *EnvStr = UrRet ? UrRet : (PiRet ? PiRet : nullptr);
-    if (EnvStr)
-      return (std::stoi(EnvStr) != 0);
-    // The default is to migrate normally, which may not always be the
-    // best option (depends on buffer access patterns), but is an
-    // overall win on the set of the available benchmarks.
-    return true;
-  }();
 
   // Peform actual device allocation as needed.
   if (!Allocation.ZeHandle) {
-    if (!SingleRootDeviceBufferMigration && UrContext->SingleRootDevice &&
-        UrContext->SingleRootDevice != Device) {
-      // If all devices in the context are sub-devices of the same device
-      // then we reuse root-device allocation by all sub-devices in the
-      // context.
-      // TODO: we can probably generalize this and share root-device
-      // allocations by its own sub-devices even if not all other
-      // devices in the context have the same root.
-      UR_CALL(getZeHandle(ZeHandle, AccessMode, UrContext->SingleRootDevice,
-                          phWaitEvents, numWaitEvents));
-      Allocation.ReleaseAction = allocation_t::keep;
-      Allocation.ZeHandle = ZeHandle;
-      Allocation.Valid = true;
-      return UR_RESULT_SUCCESS;
-    } else { // Create device allocation
-      if (DisjointPoolConfigInstance.EnableBuffers) {
-        Allocation.ReleaseAction = allocation_t::free;
-        ur_usm_desc_t USMDesc{};
-        USMDesc.align = getAlignment();
-        ur_usm_pool_handle_t Pool{};
-        UR_CALL(ur::level_zero::urUSMDeviceAlloc(
-            UrContext, Device, &USMDesc, Pool, Size,
-            reinterpret_cast<void **>(&ZeHandle)));
-      } else {
-        Allocation.ReleaseAction = allocation_t::free_native;
-        UR_CALL(ZeDeviceMemAllocHelper(reinterpret_cast<void **>(&ZeHandle),
-                                       UrContext, Device, Size));
-      }
+    // Create device allocation
+    if (DisjointPoolConfigInstance.EnableBuffers) {
+      Allocation.ReleaseAction = allocation_t::free;
+      ur_usm_desc_t USMDesc{};
+      USMDesc.align = getAlignment();
+      ur_usm_pool_handle_t Pool{};
+      UR_CALL(ur::level_zero::urUSMDeviceAlloc(
+          UrContext, Device, &USMDesc, Pool, Size,
+          reinterpret_cast<void **>(&ZeHandle)));
+    } else {
+      Allocation.ReleaseAction = allocation_t::free_native;
+      UR_CALL(ZeDeviceMemAllocHelper(reinterpret_cast<void **>(&ZeHandle),
+                                     UrContext, Device, Size));
     }
     Allocation.ZeHandle = ZeHandle;
   } else {
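For reference, the SingleRootDeviceBufferMigration toggle removed above resolved its value once per process: UR_L0_SINGLE_ROOT_DEVICE_BUFFER_MIGRATION takes precedence over the legacy SYCL_PI_LEVEL_ZERO_SINGLE_ROOT_DEVICE_BUFFER_MIGRATION name, and the default is true (allocate on each device and migrate by copy). Below is a minimal standalone sketch of that resolution logic, separate from the adapter sources; the helper name is illustrative only.

// Standalone sketch of the env-var resolution used by the removed
// SingleRootDeviceBufferMigration toggle; not adapter code.
#include <cstdio>
#include <cstdlib>
#include <string>

static bool readBufferMigrationToggle() {
  // The UR_L0_* variable wins over the legacy SYCL_PI_LEVEL_ZERO_* one.
  const char *UrRet = std::getenv("UR_L0_SINGLE_ROOT_DEVICE_BUFFER_MIGRATION");
  const char *PiRet =
      std::getenv("SYCL_PI_LEVEL_ZERO_SINGLE_ROOT_DEVICE_BUFFER_MIGRATION");
  const char *EnvStr = UrRet ? UrRet : PiRet;
  if (EnvStr)
    return std::stoi(EnvStr) != 0; // any non-zero value enabled migration
  return true; // default: per-device allocations, migrate (copy) as needed
}

int main() {
  std::printf("SingleRootDeviceBufferMigration = %d\n",
              readBufferMigrationToggle() ? 1 : 0);
  return 0;
}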