diff --git a/app/CMakeLists.txt b/app/CMakeLists.txt
index a5c4dce..b704bc8 100644
--- a/app/CMakeLists.txt
+++ b/app/CMakeLists.txt
@@ -1 +1,2 @@
 add_subdirectory(example)
+add_subdirectory(layer_example)
\ No newline at end of file
diff --git a/app/example/CMakeLists.txt b/app/example/CMakeLists.txt
index 033d285..5ac1ad8 100644
--- a/app/example/CMakeLists.txt
+++ b/app/example/CMakeLists.txt
@@ -1 +1 @@
-add_executable(example main.cpp)
+add_executable(example main.cpp)
\ No newline at end of file
diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt
new file mode 100644
index 0000000..f47ced7
--- /dev/null
+++ b/app/layer_example/CMakeLists.txt
@@ -0,0 +1,11 @@
+set(ARM_DIR "${CMAKE_SOURCE_DIR}/3rdparty/ComputeLibrary")
+
+add_executable(Concat ConcatLayer.cpp)
+
+include_directories(${ARM_DIR})
+include_directories(${ARM_DIR}/include)
+target_link_directories(Concat PUBLIC ${ARM_DIR}/build)
+
+target_link_libraries(Concat arm_compute)
+
+add_dependencies(Concat build_compute_library)
\ No newline at end of file
diff --git a/app/layer_example/ConcatLayer.cpp b/app/layer_example/ConcatLayer.cpp
new file mode 100644
index 0000000..3ff90d7
--- /dev/null
+++ b/app/layer_example/ConcatLayer.cpp
@@ -0,0 +1,36 @@
+#include <iostream>
+#include "arm_compute/runtime/NEON/NEFunctions.h"
+#include "utils/Utils.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+int main() {
+    Tensor input1, input2;
+    Tensor output;
+    std::vector<const ITensor *> input;
+
+    const int input_width = 3;
+    const int input_height = 3;
+    const int axis = 2;
+
+    input1.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32));
+    input2.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32));
+
+    input1.allocator()->allocate();
+    input2.allocator()->allocate();
+
+    fill_random_tensor(input1, 0.f, 1.f);
+    fill_random_tensor(input2, 0.f, 1.f);
+
+    input.push_back(&input1);
+    input.push_back(&input2);
+
+    NEConcatenateLayer concat;
+    concat.configure(input, &output, axis);
+    output.allocator()->allocate();
+
+    concat.run();
+
+    output.print(std::cout);
+}
\ No newline at end of file
diff --git a/include/layer/layer.h b/include/layer/layer.h
new file mode 100644
index 0000000..cc4797f
--- /dev/null
+++ b/include/layer/layer.h
@@ -0,0 +1,37 @@
+#ifndef LAYER_H
+#define LAYER_H
+
+#include <list>
+
+#include "arm_compute/runtime/NEON/NEFunctions.h"
+#include "utils/Utils.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+struct LayerAttributes {
+  int id = -1;
+};
+
+class Layer {
+ protected:
+  int id_;
+
+ public:
+  Layer() = default;
+  explicit Layer(const LayerAttributes& attrs) : id_(attrs.id) {}
+  virtual ~Layer() = default;
+  void setID(int id) { id_ = id; }
+  int getID() const { return id_; }
+  virtual std::string getInfoString() const;
+  virtual void exec(Tensor& input, Tensor& output) = 0;
+  virtual void exec(Tensor& input1, Tensor& input2, Tensor& output) = 0;
+  virtual void exec() = 0;
+  //virtual Shape get_output_shape() = 0;
+
+  virtual std::string get_type_name() const = 0;
+  void addNeighbor(Layer* neighbor);
+  void removeNeighbor(Layer* neighbor);
+  std::list<Layer*> neighbors_;
+};
+#endif
\ No newline at end of file
diff --git a/src/layer/ConcatenateLayer.cpp b/src/layer/ConcatenateLayer.cpp
new file mode 100644
index 0000000..3d44103
--- /dev/null
+++ b/src/layer/ConcatenateLayer.cpp
@@ -0,0 +1,52 @@
+#ifndef ACL_CONCATENATE_LAYER_H
+#define ACL_CONCATENATE_LAYER_H
+
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "include/layer/layer.h"
+
+class ConcatenateLayer : public Layer {
+private:
+    NEConcatenateLayer concat;
+    bool configured_ = false;
+
+public:
+    ConcatenateLayer(int id) { setID(id); }
+
+    void configure(const std::vector<TensorShape>& inputs_shapes, unsigned int axis, TensorShape& output_shape,
+                   std::vector<Tensor*>& input, Tensor& output) {
+
+        if (inputs_shapes.empty()) {
+            throw std::runtime_error("Concat: Input shapes list cannot be empty.");
+        }
+        if (inputs_shapes.size() != input.size()) {
+            throw std::runtime_error("Concat: vector size mismatch.");
+        }
+        std::vector<const ITensor *> inpcopy;
+        for (int i = 0; i < input.size(); i++) {
+            input[i]->allocator()->init(TensorInfo(inputs_shapes[i], 1, DataType::F32));
+            input[i]->allocator()->allocate();
+            inpcopy.push_back(input[i]);
+        }
+        output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+        concat.configure(inpcopy, &output, axis);
+        output.allocator()->allocate();
+        configured_ = true;
+    }
+
+    void exec() override {
+        if (!configured_) {
+            throw std::runtime_error("ConcatenateLayer: Layer not configured.");
+        }
+        concat.run();
+    }
+
+    std::string get_type_name() const override {
+        return "ConcatenateLayer";
+    }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/ConvLayer.cpp b/src/layer/ConvLayer.cpp
new file mode 100644
index 0000000..5771eb6
--- /dev/null
+++ b/src/layer/ConvLayer.cpp
@@ -0,0 +1,57 @@
+#ifndef ACL_CONVOLUTION_LAYER_SIMPLIFIED_H
+#define ACL_CONVOLUTION_LAYER_SIMPLIFIED_H
+
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "include/layer/layer.h"
+
+class ConvolutionLayer : public Layer {
+private:
+    NEConvolutionLayer conv;
+    bool configured_ = false;
+
+public:
+    ConvolutionLayer(int id) { setID(id); }
+
+    void configure(
+        const TensorShape& input_shape,
+        const TensorShape& weights_shape,
+        const TensorShape& biases_shape,
+        TensorShape& output_shape,
+        const PadStrideInfo& info,
+        Tensor& input,
+        Tensor& weights,
+        Tensor& biases,
+        Tensor& output
+    ) {
+
+        input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+        weights.allocator()->init(TensorInfo(weights_shape, 1, DataType::F32));
+        biases.allocator()->init(TensorInfo(biases_shape, 1, DataType::F32));
+        output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+        input.allocator()->allocate();
+        weights.allocator()->allocate();
+        biases.allocator()->allocate();
+        output.allocator()->allocate();
+
+        conv.configure(&input, &weights, &biases, &output, info);
+        configured_ = true;
+    }
+
+    void exec() override {
+        if (!configured_) {
+            throw std::runtime_error("ConvolutionLayer: Layer not configured.");
+        }
+        conv.run();
+    }
+
+    std::string get_type_name() const override {
+        return "ConvolutionLayer";
+    }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/ElementwiseLayer.cpp b/src/layer/ElementwiseLayer.cpp
new file mode 100644
index 0000000..619ff36
--- /dev/null
+++ b/src/layer/ElementwiseLayer.cpp
@@ -0,0 +1,142 @@
+#ifndef ACL_ELEMENTWISE_LAYER_H
+#define ACL_ELEMENTWISE_LAYER_H
+
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "include/layer/layer.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+enum class ElementwiseOp {
+    ADD,
+    DIV,
+    ABS,
+    SIGM,
+    SWISH,
+    SQUARED_DIFF
+};
+
+class ElementwiseLayer : public Layer {
+private:
+    ElementwiseOp op_type;
+    NEActivationLayer act;
+    NEArithmeticAddition add;
+    NEElementwiseDivision div;
+    NEElementwiseSquaredDiff sqdiff;
+    bool configured_ = false;
+
+public:
+    ElementwiseLayer(int id, ElementwiseOp op) : op_type(op) { setID(id); }
+
+    ElementwiseLayer() : ElementwiseLayer(0, ElementwiseOp::ADD) { }
+
+    void configure(const TensorShape& input_shape, TensorShape& output_shape, Tensor& input, Tensor& output) {
+        input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+        output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+        input.allocator()->allocate();
+        output.allocator()->allocate();
+
+        switch (op_type) {
+            case ElementwiseOp::ABS: {
+                act.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::ABS));
+                act.run();
+                break;
+            }
+            case ElementwiseOp::SIGM: {
+                act.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
+                act.run();
+                break;
+            }
+            case ElementwiseOp::SWISH: {
+                act.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SWISH));
+                act.run();
+                break;
+            }
+            default:
+                throw std::runtime_error("ElementwiseLayer: This operation requires two inputs");
+        }
+        configured_ = true;
+    }
+
+    void configure(const TensorShape& input1_shape, const TensorShape& input2_shape, TensorShape& output_shape,
+                   Tensor& input1, Tensor& input2, Tensor& output) {
+        if (input1_shape.total_size() != input2_shape.total_size()) {
+            throw std::runtime_error(
+                "ElementwiseLayer: Input shapes must have same total size");
+        }
+        input1.allocator()->init(TensorInfo(input1_shape, 1, DataType::F32));
+        input2.allocator()->init(TensorInfo(input2_shape, 1, DataType::F32));
+        output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+        input1.allocator()->allocate();
+        input2.allocator()->allocate();
+        output.allocator()->allocate();
+
+        switch (op_type) {
+            case ElementwiseOp::ADD: {
+                add.configure(&input1, &input2, &output, ConvertPolicy::WRAP);
+                add.run();
+                break;
+            }
+            case ElementwiseOp::DIV: {
+                div.configure(&input1, &input2, &output);
+                div.run();
+                break;
+            }
+            case ElementwiseOp::SQUARED_DIFF: {
+                sqdiff.configure(&input1, &input2, &output);
+                sqdiff.run();
+                break;
+            }
+            default:
+                throw std::runtime_error("ElementwiseLayer: This operation requires single input");
+        }
+        configured_ = true;
+    }
+
+    void exec() override {
+        if (!configured_) {
+            throw std::runtime_error("ElementwiseLayer: Layer not configured before exec.");
+        }
+        switch (op_type) {
+            case ElementwiseOp::ABS:
+            case ElementwiseOp::SIGM:
+            case ElementwiseOp::SWISH:
+                act.run();
+                break;
+            case ElementwiseOp::ADD: {
+                add.run();
+                break;
+            }
+            case ElementwiseOp::DIV: {
+                div.run();
+                break;
+            }
+            case ElementwiseOp::SQUARED_DIFF: {
+                sqdiff.run();
+                break;
+            }
+            default:
+                throw std::runtime_error("ElementwiseLayer: This operation requires single input");
+        }
+    }
+
+    std::string get_type_name() const override {
+        switch (op_type) {
+            case ElementwiseOp::ADD: return "ElementwiseAddLayer";
+            case ElementwiseOp::DIV: return "ElementwiseDivLayer";
+            case ElementwiseOp::ABS: return "ElementwiseAbsLayer";
+            case ElementwiseOp::SIGM: return "ElementwiseSigmoidLayer";
+            case ElementwiseOp::SWISH: return "ElementwiseSwishLayer";
+            case ElementwiseOp::SQUARED_DIFF: return "ElementwiseSquaredDiffLayer";
+            default:return "ElementwiseUnknownLayer";
+        }
+    }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/MatMulLayer.cpp b/src/layer/MatMulLayer.cpp
new file mode 100644
index 0000000..67bb4fa
--- /dev/null
+++ b/src/layer/MatMulLayer.cpp
@@ -0,0 +1,48 @@
+#ifndef ACL_MATMUL_LAYER_H
+#define ACL_MATMUL_LAYER_H
+
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "include/layer/layer.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+class MatMulLayer : public Layer {
+private:
+    NEMatMul m;
+    bool configured_ = false;
+
+public:
+    MatMulLayer(int id){
+        setID(id);
+    }
+
+    void configure(TensorShape& input_x_shape, TensorShape& input_y_shape, TensorShape& output_shape,
+                   Tensor& input_x, Tensor& input_y, Tensor& output) {
+
+        input_x.allocator()->init(TensorInfo(input_x_shape, 1, DataType::F32));
+        input_y.allocator()->init(TensorInfo(input_y_shape, 1, DataType::F32));
+        output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+        input_x.allocator()->allocate();
+        input_y.allocator()->allocate();
+        output.allocator()->allocate();
+        m.configure(&input_x, &input_y, &output, MatMulInfo(), CpuMatMulSettings(), ActivationLayerInfo());
+        configured_ = true;
+    }
+
+    void exec() override {
+        if (!configured_) {
+            throw std::runtime_error("MatMulLayer: Layer not configured before exec.");
+        }
+        m.run();
+    }
+
+    std::string get_type_name() const override { return "MatMulLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/PoolingLayer.cpp b/src/layer/PoolingLayer.cpp
new file mode 100644
index 0000000..906abf7
--- /dev/null
+++ b/src/layer/PoolingLayer.cpp
@@ -0,0 +1,47 @@
+#ifndef ACL_POOLING_LAYER_H
+#define ACL_POOLING_LAYER_H
+
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "include/layer/layer.h"
+
+class PoolingLayer : public Layer {
+private:
+    NEPoolingLayer pool;
+    bool configured_ = false;
+
+public:
+    PoolingLayer(int id) {
+        setID(id);
+    }
+
+    void configure(TensorShape& input_shape,
+                   TensorShape& output_shape, Tensor& input, Tensor& output) {
+        if (input_shape.num_dimensions() < 2) {
+            throw std::runtime_error("PoolingLayer: Input must be at least 2D");
+        }
+        input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+        output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+        input.allocator()->allocate();
+        output.allocator()->allocate();
+
+        pool.configure(&input, &output, PoolingLayerInfo(PoolingType::MAX, DataLayout::NHWC));
+
+        configured_ = true;
+    }
+
+    void exec() override {
+        if (!configured_) {
+            throw std::runtime_error("PoolingLayer: Layer not configured before exec.");
+        }
+        pool.run();
+    }
+
+    std::string get_type_name() const override { return "PoolingLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/ReshapeLayer.cpp b/src/layer/ReshapeLayer.cpp
new file mode 100644
index 0000000..b2af53a
--- /dev/null
+++ b/src/layer/ReshapeLayer.cpp
@@ -0,0 +1,40 @@
+#ifndef ACL_RESHAPE_LAYER_H
+#define ACL_RESHAPE_LAYER_H
+
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "include/layer/layer.h"
+
+class ReshapeLayer : public Layer {
+private:
+    NEReshapeLayer reshape;
+    bool configured_ = false;
+
+public:
+    ReshapeLayer(int id) { setID(id); }
+
+    void configure(const TensorShape& input_shape, TensorShape& output_shape, Tensor& input, Tensor& output) {
+        input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+        output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+        input.allocator()->allocate();
+        output.allocator()->allocate();
+
+        reshape.configure(&input, &output);
+        configured_ = true;
+    }
+
+    void exec() override {
+        if (!configured_) {
+            throw std::runtime_error("ReshapeLayer: Layer not configured.");
+        }
+        reshape.run();
+    }
+
+    std::string get_type_name() const override { return "ReshapeLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/ResizeLayer.cpp b/src/layer/ResizeLayer.cpp
new file mode 100644
index 0000000..ed3c934
--- /dev/null
+++ b/src/layer/ResizeLayer.cpp
@@ -0,0 +1,48 @@
+#ifndef ACL_RESIZE_LAYER_H
+#define ACL_RESIZE_LAYER_H
+
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "include/layer/layer.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+class ResizeLayer : public Layer {
+private:
+    NEScale resize;
+    bool configured_ = false;
+
+public:
+    ResizeLayer(int id) { setID(id); }
+    void configure(TensorShape& input_shape, TensorShape& output_shape, Tensor& input, Tensor& output) {
+        input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+        output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+        input.allocator()->allocate();
+        output.allocator()->allocate();
+
+        resize.configure(&input, &output,
+                         ScaleKernelInfo{
+                             InterpolationPolicy::NEAREST_NEIGHBOR,
+                             BorderMode::REPLICATE,
+                             PixelValue(),
+                             SamplingPolicy::CENTER,
+                         });
+        configured_ = true;
+    }
+
+    void exec() override {
+        if (!configured_) {
+            throw std::runtime_error("ResizeLayer: Layer not configured before exec.");
+        }
+        resize.run();
+    }
+
+    std::string get_type_name() const override { return "ResizeLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/SliceLayer.cpp b/src/layer/SliceLayer.cpp
new file mode 100644
index 0000000..8c9f9ab
--- /dev/null
+++ b/src/layer/SliceLayer.cpp
@@ -0,0 +1,41 @@
+#ifndef ACL_SLICE_LAYER_H
+#define ACL_SLICE_LAYER_H
+
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "include/layer/layer.h"
+
+class SliceLayer : public Layer {
+private:
+    NESlice slice;
+    bool configured_ = false;
+
+public:
+    SliceLayer(int id) { setID(id); }
+
+    void configure(const TensorShape& input_shape, Coordinates starts, Coordinates ends,
+                   TensorShape& output_shape, Tensor& input, Tensor& output) {
+
+        input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+        output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+        input.allocator()->allocate();
+        output.allocator()->allocate();
+
+        slice.configure(&input, &output, starts, ends);
+        configured_ = true;
+    }
+
+    void exec() override {
+        if (!configured_) {
+            throw std::runtime_error("SliceLayer: Layer not configured.");
+        }
+        slice.run();
+    }
+    std::string get_type_name() const override { return "SliceLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/SoftmaxLayer.cpp b/src/layer/SoftmaxLayer.cpp
new file mode 100644
index 0000000..2475bf7
--- /dev/null
+++ b/src/layer/SoftmaxLayer.cpp
@@ -0,0 +1,43 @@
+#ifndef ACL_SOFTMAX_LAYER_H
+#define ACL_SOFTMAX_LAYER_H
+
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "include/layer/layer.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+class SoftmaxLayer : public Layer {
+private:
+    NESoftmaxLayer m;
+    bool configured_ = false;
+
+public:
+    SoftmaxLayer(int id) { setID(id); }
+
+    void configure(TensorShape& input_shape, TensorShape& output_shape, Tensor& input, Tensor& output) {
+        input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+        output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+        input.allocator()->allocate();
+        output.allocator()->allocate();
+
+        m.configure(&input, &output);
+        configured_ = true;
+    }
+
+    void exec() override {
+        if (!configured_) {
+            throw std::runtime_error("SoftmaxLayer: Layer not configured before exec.");
+        }
+        m.run();
+    }
+
+    std::string get_type_name() const override { return "SoftmaxLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/SplitLayer.cpp b/src/layer/SplitLayer.cpp
new file mode 100644
index 0000000..45ea295
--- /dev/null
+++ b/src/layer/SplitLayer.cpp
@@ -0,0 +1,37 @@
+#ifndef ACL_SPLIT_LAYER_H
+#define ACL_SPLIT_LAYER_H
+
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "include/layer/layer.h"
+
+class SplitLayer : public Layer {
+private:
+    NESplit split;
+    bool configured_ = false;
+
+public:
+    SplitLayer(int id) { setID(id); }
+
+    void configure(const TensorShape& input_shape, unsigned int axis, Tensor& input, std::vector<ITensor*>& outputs) {
+        input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+        input.allocator()->allocate();
+
+        split.configure(&input, outputs, axis);
+        configured_ = true;
+    }
+
+    void exec() override {
+        if (!configured_) {
+            throw std::runtime_error("SplitLayer: Layer not configured.");
+        }
+        split.run();
+    }
+
+    std::string get_type_name() const override { return "SplitLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/TransposeLayer.cpp b/src/layer/TransposeLayer.cpp
new file mode 100644
index 0000000..9d997bb
--- /dev/null
+++ b/src/layer/TransposeLayer.cpp
@@ -0,0 +1,45 @@
+#ifndef ACL_TRANSPOSE_LAYER_H
+#define ACL_TRANSPOSE_LAYER_H
+
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "include/layer/layer.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+class TransposeLayer : public Layer {
+private:
+    NETranspose t;
+    bool configured_ = false;
+
+public:
+    TransposeLayer(int id) {
+        setID(id);
+    }
+
+    void configure(TensorShape& input_shape, TensorShape& output_shape, Tensor& input, Tensor& output) {
+        input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+        output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+        input.allocator()->allocate();
+        output.allocator()->allocate();
+
+        t.configure(&input, &output);
+        configured_ = true;
+    }
+
+    void exec() override {
+        if (!configured_) {
+            throw std::runtime_error("TransposeLayer: Layer not configured before exec.");
+        }
+        t.run();
+    }
+
+    std::string get_type_name() const override { return "TransposeLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/layer.cpp b/src/layer/layer.cpp
new file mode 100644
index 0000000..ee9261b
--- /dev/null
+++ b/src/layer/layer.cpp
@@ -0,0 +1,13 @@
+#include "include/layer/layer.h"
+
+void Layer::addNeighbor(Layer* neighbor) {
+  if (neighbor != nullptr) {
+    neighbors_.push_back(neighbor);
+  }
+}
+
+void Layer::removeNeighbor(Layer* neighbor) { neighbors_.remove(neighbor); }
+
+std::string Layer::getInfoString() const {
+  return "Layer (ID: " + std::to_string(id_) + ")";
+}
\ No newline at end of file