[frontend] Add linalg conv2d and maxpool_2d implementation #454

Open · wants to merge 3 commits into main
14 changes: 7 additions & 7 deletions examples/BuddyLeNet/buddy-lenet-import.py
@@ -27,17 +27,17 @@

 from buddy.compiler.frontend import DynamoCompiler
 from buddy.compiler.graph import GraphDriver
-from buddy.compiler.graph.transform import simply_fuse, apply_classic_fusion
-from buddy.compiler.ops import tosa
+from buddy.compiler.graph.transform import simply_fuse
+from buddy.compiler.ops import linalg
 from model import LeNet

 # Parse command-line arguments.
 parser = argparse.ArgumentParser(description="LeNet model AOT importer")
 parser.add_argument(
-    "--output-dir",
-    type=str,
-    default="./",
-    help="Directory to save output files."
+    "--output-dir",
+    type=str,
+    default="./",
+    help="Directory to save output files.",
 )
 args = parser.parse_args()

@@ -54,7 +54,7 @@

 # Initialize Dynamo Compiler with specific configurations as an importer.
 dynamo_compiler = DynamoCompiler(
-    primary_registry=tosa.ops_registry,
+    primary_registry=linalg.ops_registry, verbose=True
 )

 data = torch.randn([1, 1, 28, 28])
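With the primary registry switched from `tosa` to `linalg`, the import flow itself is unchanged. Below is a minimal sketch of the full driver for context; it mirrors the surrounding example code, and the `importer` entry point returning the captured graphs is an assumption based on other Buddy examples, not part of this diff.

```python
# Sketch only: mirrors the BuddyLeNet example, not part of this diff.
import torch
from buddy.compiler.frontend import DynamoCompiler
from buddy.compiler.ops import linalg
from model import LeNet

dynamo_compiler = DynamoCompiler(
    primary_registry=linalg.ops_registry,  # lower aten ops via linalg, not tosa
    verbose=True,  # log each op as it is imported
)

model = LeNet()
data = torch.randn([1, 1, 28, 28])
with torch.no_grad():
    graphs = dynamo_compiler.importer(model, data)  # assumed entry point
```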
2 changes: 0 additions & 2 deletions examples/BuddyLeNet/buddy-lenet-main.cpp
@@ -21,9 +21,7 @@
 #include <cstdlib>
 #include <filesystem>
 #include <fstream>
-#include <limits>
 #include <string>
-#include <utility>
 #include <vector>

 constexpr size_t ParamsSize = 44426;
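For context, `ParamsSize` is the number of scalar parameters in the packed LeNet weights file. Assuming the classic LeNet-5 topology this example uses (two 5×5 convolutions and three fully connected layers on a 1×1×28×28 input), the constant checks out:

```python
# Weights + biases per layer; spatial sizes: 28 -> 24 -> 12 -> 8 -> 4.
conv1 = 6 * (1 * 5 * 5) + 6        # 156
conv2 = 16 * (6 * 5 * 5) + 16      # 2416
fc1 = 120 * (16 * 4 * 4) + 120     # 30840
fc2 = 84 * 120 + 84                # 10164
fc3 = 10 * 84 + 10                 # 850
assert conv1 + conv2 + fc1 + fc2 + fc3 == 44426
```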
14 changes: 8 additions & 6 deletions frontend/Python/graph/type.py
@@ -35,7 +35,7 @@ class TensorDType(Enum):
     - Bool: str
       Represents the boolean data type.
     """
-
+
     Int8 = "int8"
     Int32 = "int32"
     Int64 = "int64"
@@ -47,7 +47,7 @@ class TensorDType(Enum):

 class TensorMeta:
     """
-    Store tensor metadata, including shape and data type, while overlooking raw
+    Store tensor metadata, including shape and data type, while overlooking raw
     data.

     Attributes:
@@ -58,7 +58,7 @@ class TensorMeta:

     Methods:
     - __init__(shape: tuple, dtype: str) -> None:
-        Initializes a new instance of the TensorMeta class with the specified
+        Initializes a new instance of the TensorMeta class with the specified
         shape and data type.

     Example:
@@ -79,6 +79,7 @@ def __init__(self, shape, dtype) -> None:
         self.shape = shape
         self.dtype = dtype

+
 class DeviceType(Enum):
     """
     Enumeration class representing different types of devices.
@@ -91,6 +92,7 @@ class DeviceType(Enum):
     Each attribute represents a specific device type and is associated with a
     string value.
     """
-    CPU = 'cpu'
-    GPU = 'gpu'
-    UNKNOW = 'unknow'
+
+    CPU = "cpu"
+    GPU = "gpu"
+    UNKNOWN = "unknown"
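For orientation, a hypothetical use of the two classes above. The call here is illustrative only (the real call sites live in the graph importer), and `TensorDType.Float32` is assumed to be one of the enum members elided from this hunk:

```python
# Hypothetical usage of TensorMeta and DeviceType from this file.
meta = TensorMeta(shape=(1, 1, 28, 28), dtype=TensorDType.Float32)
device = DeviceType.CPU

assert meta.shape == (1, 1, 28, 28)
assert device.value == "cpu"
```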
105 changes: 102 additions & 3 deletions frontend/Python/ops/linalg.py
@@ -18,13 +18,12 @@
 #
 # ===---------------------------------------------------------------------------

-from typing import Dict, Tuple, List
+from typing import Dict, Tuple

 import mlir.ir as ir
 from mlir.dialects import tosa, linalg, arith, tensor, math
-import copy, array, sys
+import copy, array
 import numpy
-import functools

 from ..graph import *
 from ..graph.graph import TensorDType
@@ -2531,6 +2530,104 @@ def slice_scatter_op(node: SliceScatterOp, symbol_table):
     )

     return insert_op.result
+def convolution2d_op(
+    node: Conv2dOp, symbol_table: Dict[Tuple[str, int], ir.Operation]
+):
+    """
+    Import the convolution operation.
+    From Buddy Conv2dOp to MLIR linalg `conv_2d_*` operation.
+    arg[0]: Tensor input
+    arg[1]: Tensor weight
+    arg[2]: Tensor? bias
+    arg[3]: SymInt[] stride
+    arg[4]: SymInt[] padding
+    arg[5]: SymInt[] dilation
+    arg[6]: bool transposed
+    arg[7]: SymInt[] output_padding
+    arg[8]: SymInt groups
+    """
+    assert len(node.args) == 9
+    input_ = node.args[0]
+    filter_ = node.args[1]
+    bias = node.args[2]
+    strides = node.args[3]
+    dilations = node.args[5]
+
+    input_val = symbol_table.get((str(input_), 0))
+    filter_val = symbol_table.get((str(filter_), 0))
+    dtype = node.tensor_meta["dtype"]
+    result_element_type = mlir_element_type_get(dtype)
+    out_shape = node.tensor_meta["shape"]
+    strides_attr = ir._denseI64ArrayAttr(strides, None)
+    dilations_attr = ir._denseI64ArrayAttr(dilations, None)
+    conv2d_result = tensor.EmptyOp(out_shape, result_element_type).result
+    f32 = ir.F32Type.get()
+    zero = arith.ConstantOp(value=ir.FloatAttr.get(f32, 0.0), result=f32).result
+    conv2d_result = linalg.fill(zero, outs=[conv2d_result])
+    conv2d_nchw_op = linalg.conv_2d_nchw_fchw(
+        input_val,
+        filter_val,
+        outs=[conv2d_result],
+        strides=strides_attr,
+        dilations=dilations_attr,
+    )
+
+    op_to_return = conv2d_nchw_op
+    # A third parent node means the optional bias tensor is present.
+    if len(node._parents) > 2:
+        bias_tensor = symbol_table.get((str(bias), 0))
+        init = tensor.EmptyOp(out_shape, result_element_type)
+        broadcasted = linalg.broadcast(
+            bias_tensor, outs=[init], dimensions=[0, 2, 3]
+        )
+        add_result = tensor.EmptyOp(out_shape, result_element_type)
+        op_to_return = linalg.add(op_to_return, broadcasted, outs=[add_result])
+
+    return op_to_return
+
+
+def maxpool2d_op(
+    node: MaxPool2dOp, symbol_table: Dict[Tuple[str, int], ir.Operation]
+):
+    """
+    Import the max pooling operation.
+    From Buddy MaxPool2dOp to MLIR linalg `pooling_nchw_max` operation.
+    arg[0]: Tensor input
+    arg[1]: SymInt[] kernel_size
+    arg[2]: SymInt[] stride
+    arg[3] (optional): treated as SymInt[] dilation below
+    """
+    input_ = node.args[0]
+    kernel_size = node.args[1]
+    strides = node.args[2]
+    dtype = node.tensor_meta["dtype"]
+    result_element_type = mlir_element_type_get(dtype)
+    result_shape = node.tensor_meta["shape"]
+
+    input_value = symbol_table.get((str(input_), 0))
+    kernel_size_value = tensor.EmptyOp(kernel_size, result_element_type)
+
+    strides_attr = ir._denseI64ArrayAttr(strides, None)
+
+    result = tensor.EmptyOp(result_shape, result_element_type)
+    f32 = ir.F32Type.get()
+
+    # FIXME: fix this magic value!
+    largest = arith.ConstantOp(
+        value=ir.FloatAttr.get(f32, numpy.finfo(numpy.float32).min), result=f32
+    )
+    result = linalg.fill(largest, outs=[result])
+
+    if len(node.args) > 3:
+        dilations = node.args[3]
+        dilations_attr = ir._denseI64ArrayAttr(dilations, None)
+        op = linalg.pooling_nchw_max(
+            input_value,
+            kernel_size_value,
+            outs=[result],
+            strides=strides_attr,
+            dilations=dilations_attr,
+        )
+    else:
+        op = linalg.pooling_nchw_max(
+            input_value,
+            kernel_size_value,
+            outs=[result],
+            strides=strides_attr,
+        )
+
+    return op


 ops_registry = {
@@ -2575,4 +2672,6 @@ def slice_scatter_op(node: SliceScatterOp, symbol_table):
"EqualOp": equal_op,
"CopyOp": copy_op,
"SliceScatterOp": slice_scatter_op,
"Conv2dOp": convolution2d_op,
"MaxPool2dOp": maxpool2d_op,
}
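As a reference for reviewers, the sketch below spells out in NumPy what the `conv_2d_nchw_fchw` lowering plus the optional bias broadcast (over dimensions 0, 2, 3) computes. It is illustrative, not part of the change, and it applies no padding, matching `convolution2d_op`, which reads the stride (`args[3]`) and dilation (`args[5]`) but never `args[4]` (padding):

```python
import numpy as np

def conv2d_nchw_fchw_ref(x, w, bias=None, strides=(1, 1), dilations=(1, 1)):
    """Reference semantics: x is (N, C, H, W), w is (F, C, KH, KW)."""
    n, c, h, w_in = x.shape
    f, _, kh, kw = w.shape
    sh, sw = strides
    dh, dw = dilations
    oh = (h - dh * (kh - 1) - 1) // sh + 1  # output height, no padding
    ow = (w_in - dw * (kw - 1) - 1) // sw + 1
    out = np.zeros((n, f, oh, ow), dtype=x.dtype)
    for i in range(oh):
        for j in range(ow):
            # Dilated receptive field, contracted over C, KH, KW.
            patch = x[:, :, i * sh : i * sh + dh * (kh - 1) + 1 : dh,
                            j * sw : j * sw + dw * (kw - 1) + 1 : dw]
            out[:, :, i, j] = np.einsum("nchw,fchw->nf", patch, w)
    if bias is not None:
        out += bias[None, :, None, None]  # broadcast (F,) over N, H, W
    return out
```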
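Likewise for the `pooling_nchw_max` lowering. The sketch also shows why `maxpool2d_op` pre-fills the output with the smallest finite float32 before pooling: the running maximum has to start at or below any possible input value. Same no-padding assumption as above:

```python
import numpy as np

def maxpool2d_nchw_ref(x, kernel_size, strides, dilations=(1, 1)):
    """Reference semantics for max pooling on an (N, C, H, W) tensor."""
    n, c, h, w = x.shape
    kh, kw = kernel_size
    sh, sw = strides
    dh, dw = dilations
    oh = (h - dh * (kh - 1) - 1) // sh + 1
    ow = (w - dw * (kw - 1) - 1) // sw + 1
    # Mirror linalg.fill(largest_negative, ...) in the lowering above.
    out = np.full((n, c, oh, ow), np.finfo(np.float32).min, dtype=np.float32)
    for i in range(oh):
        for j in range(ow):
            window = x[:, :, i * sh : i * sh + dh * (kh - 1) + 1 : dh,
                             j * sw : j * sw + dw * (kw - 1) + 1 : dw]
            out[:, :, i, j] = np.maximum(out[:, :, i, j], window.max(axis=(2, 3)))
    return out
```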