Skip to content

Commit 213b203

Browse files
authored
MX GEMM - Parameterized Test Template (#2088)
* Tests for MX FP8 GEMM * Improve documentation
1 parent da54464 commit 213b203

File tree

12 files changed

+948
-7
lines changed

12 files changed

+948
-7
lines changed

include/ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle_v3_mx.hpp

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ namespace ck {
2222
namespace tensor_operation {
2323
namespace device {
2424

25+
// clang-format off
2526
/**
2627
* \brief WIP: Implements XDL CShuffle V3 GEMM for microscale-compliant data types
2728
*
@@ -31,8 +32,8 @@ namespace device {
3132
* Assumptions:
3233
* - A and B data types are compliant with the OCP Microscaling Formats (MX) Specification
3334
* - Each scale applies to ScaleBlockSize elements in K direction
34-
* - A scale matrix is row-major
35-
* - B scale matrix is column-major
35+
* - A scale matrix is row-major
36+
* - B scale matrix is column-major
3637
* - Scale data types must have get_exponent_value() specialization, whereas lowest 8 bits of the
3738
* exponent will be interpreted as conventional biased Float32 exponent (E8M0)
3839
*
@@ -72,10 +73,10 @@ namespace device {
7273
* for(int mw = m0; mw < m0 + MWaves * MPerXDL; mw += MPerXDL){
7374
* for(int nw = n0; nw < n0 + NWaves * NPerXDL; nw += NPerXDL){
7475
* for(int k0 = kb; k0 < kb + KPerBlock; k0 += mfma.num_input_blks*KPack){
75-
* // MFMA accumulation for multirate instructions
76-
* for(int k_pack = k0; k_pack < k0 + mfma.num_input_blks*KPack; k_pack += KPack){
77-
* for(int k_mfma = k_pack; k_mfma < k_pack + KPack; k_mfma += mfma.k_per_blk){
78-
* // MFMA instruction
76+
* // MFMA accumulation
77+
* for(int k_pack = k0; k_pack < k0 + mfma.num_input_blks*KPack; k_pack += KPerXdlops){
78+
* // MFMA instruction
79+
* for(int k_mfma = k_pack; k_mfma < k_pack + KPerXdlops; k_mfma += mfma.k_per_blk){
7980
* for(int m = mw; m < mw + MPerXDL; m++){
8081
* for(int n = nw; n < nw + NPerXDL; n++){
8182
* for(int k = k_mfma; k < k_mfma + mfma.k_per_blk; k++){
@@ -96,6 +97,7 @@ namespace device {
9697
* \endcode
9798
*
9899
*/
100+
// clang-format on
99101
template <typename ALayout,
100102
typename BLayout,
101103
typename CLayout,
@@ -104,7 +106,7 @@ template <typename ALayout,
104106
typename BDataType,
105107
typename BScaleDataType,
106108
typename CDataType,
107-
typename GemmAccDataType,
109+
typename GemmAccDataType, // TODO: always float
108110
typename CShuffleDataType,
109111
typename AElementwiseOperation,
110112
typename BElementwiseOperation,
Lines changed: 111 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,111 @@
1+
// SPDX-License-Identifier: MIT
2+
// Copyright (c) 2025, Advanced Micro Devices, Inc. All rights reserved.
3+
4+
#pragma once
5+
6+
#include <vector>
7+
#include <memory>
8+
#include "ck/ck.hpp"
9+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
10+
#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle_v3_mx.hpp"
11+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
12+
13+
#include "ck/library/tensor_operation_instance/device_operation_instance_factory.hpp"
14+
15+
namespace ck {
16+
namespace tensor_operation {
17+
namespace device {
18+
namespace instance {
19+
20+
void add_device_gemm_mx_xdl_f8_f8_f16_mk_nk_mn_default_instances(
21+
std::vector<std::unique_ptr<DeviceGemmMX<Row,
22+
Col,
23+
Row,
24+
F8,
25+
e8m0_bexp_t,
26+
F8,
27+
e8m0_bexp_t,
28+
F16,
29+
32,
30+
PassThrough,
31+
PassThrough,
32+
PassThrough>>>& instances);
33+
34+
void add_device_gemm_mx_xdl_f8_f8_bf16_mk_nk_mn_default_instances(
35+
std::vector<std::unique_ptr<DeviceGemmMX<Row,
36+
Col,
37+
Row,
38+
F8,
39+
e8m0_bexp_t,
40+
F8,
41+
e8m0_bexp_t,
42+
BF16,
43+
32,
44+
PassThrough,
45+
PassThrough,
46+
PassThrough>>>& instances);
47+
48+
template <typename ADataType,
49+
typename AScaleDataType,
50+
typename BDataType,
51+
typename BScaleDataType,
52+
typename CDataType,
53+
index_t ScaleBlockSize,
54+
typename ALayout,
55+
typename BLayout,
56+
typename CLayout>
57+
struct DeviceOperationInstanceFactory<
58+
ck::tensor_operation::device::DeviceGemmMX<ALayout,
59+
BLayout,
60+
CLayout,
61+
ADataType,
62+
AScaleDataType,
63+
BDataType,
64+
BScaleDataType,
65+
CDataType,
66+
ScaleBlockSize,
67+
ck::tensor_operation::element_wise::PassThrough,
68+
ck::tensor_operation::element_wise::PassThrough,
69+
ck::tensor_operation::element_wise::PassThrough>>
70+
{
71+
using DeviceOp = DeviceGemmMX<ALayout,
72+
BLayout,
73+
CLayout,
74+
ADataType,
75+
AScaleDataType,
76+
BDataType,
77+
BScaleDataType,
78+
CDataType,
79+
ScaleBlockSize,
80+
ck::tensor_operation::element_wise::PassThrough,
81+
ck::tensor_operation::element_wise::PassThrough,
82+
ck::tensor_operation::element_wise::PassThrough>;
83+
84+
static auto GetInstances()
85+
{
86+
std::vector<std::unique_ptr<DeviceOp>> op_ptrs;
87+
88+
if constexpr(is_same_v<ALayout, Row> && is_same_v<BLayout, Col> && is_same_v<CLayout, Row>)
89+
{
90+
if constexpr(is_same_v<ADataType, F8> && is_same_v<BDataType, F8> &&
91+
is_same_v<CDataType, F16>)
92+
{
93+
94+
add_device_gemm_mx_xdl_f8_f8_f16_mk_nk_mn_default_instances(op_ptrs);
95+
}
96+
if constexpr(is_same_v<ADataType, F8> && is_same_v<BDataType, F8> &&
97+
is_same_v<CDataType, BF16>)
98+
{
99+
100+
add_device_gemm_mx_xdl_f8_f8_bf16_mk_nk_mn_default_instances(op_ptrs);
101+
}
102+
}
103+
104+
return op_ptrs;
105+
}
106+
};
107+
108+
} // namespace instance
109+
} // namespace device
110+
} // namespace tensor_operation
111+
} // namespace ck

library/src/tensor_operation_instance/gpu/CMakeLists.txt

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,13 @@ function(add_instance_library INSTANCE_NAME)
6060
list(REMOVE_ITEM ARGN "${source}")
6161
endif()
6262
endforeach()
63+
# Do not build MX instances if gfx950 targets are not on the target list
64+
foreach(source IN LISTS ARGN)
65+
if(NOT INST_TARGETS MATCHES "gfx950" AND source MATCHES "_mx")
66+
message("removing MX instance ${source} ")
67+
list(REMOVE_ITEM ARGN "${source}")
68+
endif()
69+
endforeach()
6370
# Do not build WMMA instances if gfx11 targets are not on the target list
6471
foreach(source IN LISTS ARGN)
6572
if(NOT INST_TARGETS MATCHES "gfx11" AND NOT INST_TARGETS MATCHES "gfx12" AND source MATCHES "_wmma")
@@ -100,6 +107,8 @@ function(add_instance_library INSTANCE_NAME)
100107
list(REMOVE_ITEM INST_TARGETS gfx900 gfx906 gfx906:xnack- gfx908:xnack+ gfx908:xnack- gfx90a:xnack+ gfx90a:xnack- gfx908 gfx90a gfx942 gfx1030 gfx950)
101108
elseif(source MATCHES "mha")
102109
list(REMOVE_ITEM INST_TARGETS gfx900 gfx906 gfx906:xnack- gfx908:xnack- gfx908:xnack+ gfx908 gfx1030 gfx1100 gfx1101 gfx1102 gfx1103 gfx1150 gfx1151 gfx1152 gfx1200 gfx1201 gfx10.3-generic gfx11-generic gfx12-generic)
110+
elseif(source MATCHES "_mx")
111+
list(REMOVE_ITEM INST_TARGETS gfx900 gfx906 gfx906:xnack- gfx908:xnack- gfx908:xnack+ gfx90a:xnack+ gfx90a:xnack- gfx908 gfx90a gfx942 gfx1030 gfx1100 gfx1101 gfx1102 gfx1103 gfx1150 gfx1151 gfx1152 gfx1200 gfx1201 gfx10.3-generic gfx11-generic gfx12-generic)
103112
endif()
104113
#only build the fp8 gemm instances for gfx90a if the build argument is set, otherwise only build for gfx942/gfx950
105114
if(NOT CK_USE_FP8_ON_UNSUPPORTED_ARCH)
@@ -234,6 +243,10 @@ FOREACH(subdir_path ${dir_list})
234243
if(("${cmake_instance}" MATCHES "ONLY XDL_KERNELS") AND (NOT INST_TARGETS MATCHES "gfx9"))
235244
message("Found only xdl instances, but gfx9 is not on the targets list. Skipping.")
236245
set(add_inst 0)
246+
endif()
247+
if(("${cmake_instance}" MATCHES "ONLY MX_KERNELS") AND (NOT INST_TARGETS MATCHES "gfx950"))
248+
message("Found only MX instances, but gfx950 is not on the targets list. Skipping.")
249+
set(add_inst 0)
237250
endif()
238251
if(("${cmake_instance}" MATCHES "ONLY WMMA_KERNELS") AND (NOT INST_TARGETS MATCHES "gfx11") AND (NOT INST_TARGETS MATCHES "gfx12"))
239252
message("Found only wmma instances, but gfx11 is not on the targets list. Skipping.")
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
# ONLY MX_KERNELS
# Source list for the MX (microscaling) GEMM instance library; the marker
# comment above is matched by the parent CMakeLists to skip this directory
# when no gfx950 target is requested.
set(GEMM_MX_INSTANCES)

list(APPEND GEMM_MX_INSTANCES
    device_gemm_mx_xdl_f8_f8_f16/device_gemm_mx_xdl_f8_f8_f16_mk_nk_mn_default_instance.cpp
    device_gemm_mx_xdl_f8_f8_bf16/device_gemm_mx_xdl_f8_f8_bf16_mk_nk_mn_default_instance.cpp
)

# Apply the register-allocation workaround flag to every MX instance source in
# one place, so new sources appended above automatically pick it up instead of
# repeating a set_source_files_properties() line per file.
foreach(mx_source IN LISTS GEMM_MX_INSTANCES)
    set_source_files_properties(${mx_source} PROPERTIES COMPILE_OPTIONS ";-mllvm;-greedy-reverse-local-assignment=1")
endforeach()

add_instance_library(device_gemm_mx_instance ${GEMM_MX_INSTANCES})
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
// SPDX-License-Identifier: MIT
2+
// Copyright (c) 2025, Advanced Micro Devices, Inc. All rights reserved.
3+
4+
#include "ck/ck.hpp"
5+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
6+
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
7+
#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle_v3_mx.hpp"
8+
9+
#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"
10+
11+
namespace ck {
12+
namespace tensor_operation {
13+
namespace device {
14+
namespace instance {
15+
16+
using F8 = f8_t;
17+
using F16 = half_t;
18+
using BF16 = bhalf_t;
19+
using F32 = float;
20+
using E8M0 = ck::e8m0_bexp_t;
21+
22+
using Row = tensor_layout::gemm::RowMajor;
23+
using Col = tensor_layout::gemm::ColumnMajor;
24+
25+
template <index_t... Is>
26+
using S = Sequence<Is...>;
27+
28+
using PassThrough = element_wise::PassThrough;
29+
30+
static constexpr auto GemmDefault = GemmSpecialization::Default;
31+
static constexpr auto GemmKPadding = GemmSpecialization::KPadding;
32+
static constexpr auto GemmMNPadding = GemmSpecialization::MNPadding;
33+
static constexpr auto GemmMNKPadding = GemmSpecialization::MNKPadding;
34+
35+
static constexpr auto Intrawave = BlockGemmPipelineScheduler::Intrawave;
36+
static constexpr auto Interwave = BlockGemmPipelineScheduler::Interwave;
37+
38+
static constexpr auto ScaleBlockSize = 32;
39+
40+
/// Tuple of MX FP8 x FP8 -> BF16 GEMM instance types for row-major A,
/// column-major B, row-major C, parameterized on the block-GEMM pipeline
/// scheduler and the GEMM padding specialization. Only compiled for gfx950
/// or when CK_USE_NATIVE_MX_SUPPORT is defined; otherwise the tuple is empty.
template <BlockGemmPipelineScheduler BlkGemmPipeSched, GemmSpecialization GemmSpec>
using device_gemm_mx_xdl_f8_f8_bf16_mk_nk_mn_instances = std::tuple<
    // clang-format off
        //#########################| ALayout| BLayout| CLayout|AData|AScale|BData|BScale| CData| AccData| Cshuffle| A| B| C| GEMM| Scale Block| Block| MPer| NPer| KPer| AK1| BK1|MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer| Block-wiseGemm| Block-wiseGemm|
        //#########################| | | | Type| Data| Type| Data| Type| Type| Type| Elementwise| Elementwise| Elementwise|Specialization| Size| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MXdlPerWave_MWaveMPerXdl| ScalarPerVector| Pipeline| Pipeline|
        //#########################| | | | | Type| | Type| | | | Operation| Operation| Operation| | | | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NXdlPerWave_NWaveNPerXdl| _NWaveNPerXdl| Scheduler| Version|
        //#########################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
#if defined(__gfx950__) || defined(CK_USE_NATIVE_MX_SUPPORT)
        // NOTE(review): boolean LDS-padding arguments are written as 0 consistently
        // below; the second instance previously used `false` while the rest used `0`.
        DeviceGemmMX_Xdl_CShuffleV3< Row, Col, Row, F8, E8M0, F8, E8M0, BF16, F32, BF16, PassThrough, PassThrough, PassThrough, GemmSpec, ScaleBlockSize, 128, 128, 16, 128, 16, 16, 16, 16, 4, 1, S<8, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 0, S<8, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 0, 1, 1, S<1, 16, 1, 8>, 2, BlkGemmPipeSched, BlockGemmPipelineVersion::v1>,
        DeviceGemmMX_Xdl_CShuffleV3< Row, Col, Row, F8, E8M0, F8, E8M0, BF16, F32, BF16, PassThrough, PassThrough, PassThrough, GemmSpec, ScaleBlockSize, 256, 128, 128, 256, 16, 16, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 0, 1, 1, S<1, 32, 1, 8>, 8, BlkGemmPipeSched, BlockGemmPipelineVersion::v1>,
        DeviceGemmMX_Xdl_CShuffleV3< Row, Col, Row, F8, E8M0, F8, E8M0, BF16, F32, BF16, PassThrough, PassThrough, PassThrough, GemmSpec, ScaleBlockSize, 256, 256, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 0, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 0, 1, 1, S<1, 32, 1, 8>, 8, BlkGemmPipeSched, BlockGemmPipelineVersion::v1>,
        DeviceGemmMX_Xdl_CShuffleV3< Row, Col, Row, F8, E8M0, F8, E8M0, BF16, F32, BF16, PassThrough, PassThrough, PassThrough, GemmSpec, ScaleBlockSize, 256, 128, 128, 128, 16, 16, 32, 32, 2, 2, S<8, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 0, S<8, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 0, 1, 1, S<1, 32, 1, 8>, 8, BlkGemmPipeSched, BlockGemmPipelineVersion::v1>,
        DeviceGemmMX_Xdl_CShuffleV3< Row, Col, Row, F8, E8M0, F8, E8M0, BF16, F32, BF16, PassThrough, PassThrough, PassThrough, GemmSpec, ScaleBlockSize, 64, 16, 16, 512, 16, 16, 16, 16, 1, 1, S<8, 8, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 0, S<8, 8, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 0, 1, 1, S<1, 16, 1, 4>, 4, BlkGemmPipeSched, BlockGemmPipelineVersion::v1>

        // Requires verification
        //DeviceGemmMX_Xdl_CShuffleV3< Row, Col, Row, F8, E8M0, F8, E8M0, BF16, F32, BF16, PassThrough, PassThrough, PassThrough, GemmSpec, ScaleBlockSize, 256, 256, 256, 128, 16, 16, 16, 16, 8, 8, S<8, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 0, S<8, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 0, 1, 2, S<1, 32, 1, 8>, 8, BlkGemmPipeSched, BlockGemmPipelineVersion::v1>
#endif
    // clang-format on
    >;
59+
60+
} // namespace instance
61+
} // namespace device
62+
} // namespace tensor_operation
63+
} // namespace ck
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
// SPDX-License-Identifier: MIT
2+
// Copyright (c) 2025, Advanced Micro Devices, Inc. All rights reserved.
3+
4+
#include "device_gemm_mx_xdl_f8_f8_bf16_mk_nk_mn.hpp"
5+
6+
namespace ck {
7+
namespace tensor_operation {
8+
namespace device {
9+
namespace instance {
10+
11+
// Appends the default MX FP8 x FP8 -> BF16 (row-major A, column-major B,
// row-major C, 32-element scale block) GEMM instances to `instances`,
// using the Intrawave pipeline scheduler and the Default (no padding)
// GEMM specialization.
void add_device_gemm_mx_xdl_f8_f8_bf16_mk_nk_mn_default_instances(
    std::vector<std::unique_ptr<DeviceGemmMX<Row,
                                             Col,
                                             Row,
                                             F8,
                                             E8M0,
                                             F8,
                                             E8M0,
                                             BF16,
                                             32,
                                             PassThrough,
                                             PassThrough,
                                             PassThrough>>>& instances)
{
    add_device_operation_instances(
        instances, device_gemm_mx_xdl_f8_f8_bf16_mk_nk_mn_instances<Intrawave, GemmDefault>{});
}
28+
29+
} // namespace instance
30+
} // namespace device
31+
} // namespace tensor_operation
32+
} // namespace ck

0 commit comments

Comments
 (0)