Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
5f1561b
Add layer examples
chekalexey Jan 8, 2025
58a016c
Add layer examples
chekalexey Jan 8, 2025
a19ca04
fixed
chekalexey Feb 11, 2025
6f75b73
correction of inclusions
chekalexey Feb 25, 2025
968c92a
Update cmakelist
chekalexey Feb 25, 2025
7c87d3d
Add an ElementwiseLayer example
chekalexey Mar 11, 2025
9bfa382
Add a pooling layer example
chekalexey Mar 18, 2025
f02551d
add a MatmulLayer example
chekalexey Mar 25, 2025
f81c4bc
Delete ElementwiseLayer.cpp
chekalexey Mar 25, 2025
87764d6
Delete ConvolutionLayer.cpp
chekalexey Mar 25, 2025
2561b5e
Delete PoolingLayer.cpp
chekalexey Mar 25, 2025
daaa472
Update CMakeLists
chekalexey Mar 25, 2025
aa8d4f1
Update CMakeLists
chekalexey Mar 25, 2025
2e2fe44
Update Cmakelist
chekalexey Mar 25, 2025
e874928
correction
chekalexey Mar 25, 2025
0cd5425
Update CMakeLists
chekalexey Apr 15, 2025
d7cf926
Add dependencies in CMakeLists
chekalexey Apr 22, 2025
51b0a08
Add a Reshape Layer example
chekalexey Apr 29, 2025
3bab99a
Add a Slice Layer example
chekalexey Apr 29, 2025
fa78bd3
Added a split layer example
chekalexey May 6, 2025
07de634
Added a Concat Layer example
chekalexey May 6, 2025
349c865
Implement Layer abstraction for all layers
chekalexey May 17, 2025
d0a66bd
Update matmul and pool layers
chekalexey Jul 22, 2025
bcf4536
Update other layers
chekalexey Jul 29, 2025
d0fffee
Moved SetId call
chekalexey Oct 13, 2025
c6fffd6
implement error handling in layer configuration
chekalexey Oct 20, 2025
d0ce2f7
fix clang-format
chekalexey Dec 1, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions app/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
# Build the original demo app and the per-layer ACL examples.
add_subdirectory(example)
add_subdirectory(layer_example)
2 changes: 1 addition & 1 deletion app/example/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
add_executable(example main.cpp)
add_executable(example main.cpp)
11 changes: 11 additions & 0 deletions app/layer_example/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Location of the bundled Arm Compute Library checkout.
set(ARM_DIR "${CMAKE_SOURCE_DIR}/3rdparty/ComputeLibrary")

add_executable(Concat ConcatLayer.cpp)

# Scope the include paths to the Concat target instead of the
# directory-level include_directories(), which leaks the paths into
# every target declared after this point.
target_include_directories(Concat PUBLIC ${ARM_DIR} ${ARM_DIR}/include)
target_link_directories(Concat PUBLIC ${ARM_DIR}/build)

target_link_libraries(Concat arm_compute)

# Ensure ComputeLibrary is compiled before Concat links against it.
add_dependencies(Concat build_compute_library)
42 changes: 42 additions & 0 deletions app/layer_example/ConcatLayer.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
#include <iostream>
#include <vector>

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/NEFunctions.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "utils/Utils.h"

using namespace arm_compute;
using namespace utils;

int main() {
Tensor input1, input2;
Tensor output;
std::vector<const ITensor*> input;

const int input_width = 3;
const int input_height = 3;
const int axis = 2;

input1.allocator()->init(
TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32));
input2.allocator()->init(
TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32));

input1.allocator()->allocate();
input2.allocator()->allocate();

fill_random_tensor(input1, 0.F, 1.F);
fill_random_tensor(input2, 0.F, 1.F);

input.push_back(&input1);
input.push_back(&input2);

NEConcatenateLayer concat;
concat.configure(input, &output, axis);
output.allocator()->allocate();

concat.run();

output.print(std::cout);
}
35 changes: 35 additions & 0 deletions include/layer/layer.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
#ifndef LAYER_H
#define LAYER_H

#include <list>

#include "arm_compute/runtime/NEON/NEFunctions.h"
#include "utils/Utils.h"

using namespace arm_compute;
using namespace utils;

// Construction-time settings for a Layer; -1 marks an unassigned id.
struct LayerAttributes {
  int id = -1;
};

/// Abstract base class for the example layers.
///
/// Holds a numeric identifier and an adjacency list of neighboring
/// layers; concrete subclasses implement exec() to run their wrapped
/// ACL function.
class Layer {
 protected:
  // Initialized to the same sentinel as LayerAttributes::id so a
  // default-constructed Layer never exposes an indeterminate value
  // through getID().
  int id_ = -1;

 public:
  Layer() = default;
  explicit Layer(const LayerAttributes& attrs) : id_(attrs.id) {}
  explicit Layer(int id) : id_(id) {}
  virtual ~Layer() = default;
  void setID(int id) { id_ = id; }
  int getID() const { return id_; }
  // Human-readable description of the layer; defined out of line.
  virtual std::string getInfoString() const;
  // Executes the layer's operation; implementations throw when the
  // layer has not been configured.
  virtual void exec() = 0;

  virtual std::string get_type_name() const = 0;
  void addNeighbor(Layer* neighbor);
  void removeNeighbor(Layer* neighbor);
  // Adjacency list; public so graph-traversal code can iterate it.
  std::list<Layer*> neighbors_;
};
#endif
60 changes: 60 additions & 0 deletions src/layer/ConcatenateLayer.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
#ifndef ACL_CONCATENATE_LAYER_H
#define ACL_CONCATENATE_LAYER_H

#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

#include "layer/layer.h"

/// Wraps arm_compute::NEConcatenateLayer behind the Layer interface.
///
/// NOTE(review): this .cpp file carries an include guard and only a
/// class definition — it is used as a header and should be renamed
/// to .h/.hpp.
class ConcatenateLayer : public Layer {
 private:
  NEConcatenateLayer concat_;
  bool configured_ = false;  // exec() refuses to run until configure() succeeds

 public:
  explicit ConcatenateLayer(int id) : Layer(id) {}

  /// Initializes and allocates every tensor, validates the operation,
  /// then configures the underlying ACL function.
  ///
  /// @param inputs_shapes one shape per input tensor (F32 assumed)
  /// @param axis          dimension along which the inputs are joined
  /// @param output_shape  expected shape of the concatenated result
  /// @param input         tensors to concatenate; initialized here
  /// @param output        destination tensor; initialized here
  ///
  /// On failure the error is logged to stderr and the layer stays
  /// unconfigured (exec() will then throw).
  void configure(const std::vector<TensorShape>& inputs_shapes,
                 unsigned int axis, TensorShape& output_shape,
                 std::vector<Tensor*>& input, Tensor& output) {
    try {
      // Guard against indexing inputs_shapes out of bounds below.
      if (inputs_shapes.size() != input.size()) {
        throw std::runtime_error(
            "ConcatenateLayer: inputs_shapes/input size mismatch");
      }

      std::vector<const ITensor*> inpcopy;
      std::vector<const ITensorInfo*> inp_info;
      inpcopy.reserve(input.size());
      inp_info.reserve(input.size());

      for (std::size_t i = 0; i < input.size(); ++i) {
        input[i]->allocator()->init(
            TensorInfo(inputs_shapes[i], 1, DataType::F32));
        input[i]->allocator()->allocate();
        inp_info.push_back(input[i]->info());
        inpcopy.push_back(input[i]);
      }

      output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));

      // Keep ACL's own diagnostic instead of a bare "Validation failed".
      const Status status =
          NEConcatenateLayer::validate(inp_info, output.info(), axis);
      if (!status) {
        throw std::runtime_error("ConcatenateLayer: Validation failed: " +
                                 status.error_description());
      }

      output.allocator()->allocate();
      concat_.configure(inpcopy, &output, axis);

      configured_ = true;
    } catch (const std::exception& e) {
      configured_ = false;
      std::cerr << "ConcatenateLayer configuration error: " << e.what()
                << std::endl;
    }
  }

  /// Runs the concatenation.
  /// @throws std::runtime_error if configure() has not succeeded.
  void exec() override {
    if (!configured_) {
      throw std::runtime_error("ConcatenateLayer: Layer not configured.");
    }
    concat_.run();
  }

  std::string get_type_name() const override { return "ConcatenateLayer"; }
};

#endif
58 changes: 58 additions & 0 deletions src/layer/ConvLayer.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
#ifndef ACL_CONVOLUTION_LAYER_SIMPLIFIED_H
#define ACL_CONVOLUTION_LAYER_SIMPLIFIED_H

#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>

#include "layer/layer.h"

class ConvolutionLayer : public Layer {
private:
NEConvolutionLayer conv_;
bool configured_ = false;

public:
ConvolutionLayer(int id) : Layer(id) {}

void configure(const TensorShape& input_shape,
const TensorShape& weights_shape,
const TensorShape& biases_shape, TensorShape& output_shape,
const PadStrideInfo& info, Tensor& input, Tensor& weights,
Tensor& biases, Tensor& output) {
try {
input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
weights.allocator()->init(TensorInfo(weights_shape, 1, DataType::F32));
biases.allocator()->init(TensorInfo(biases_shape, 1, DataType::F32));
output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));

if (!NEConvolutionLayer::validate(input.info(), weights.info(),
biases.info(), output.info(), info)) {
throw std::runtime_error("ConvolutionLayer: Validation failed");
}

input.allocator()->allocate();
weights.allocator()->allocate();
biases.allocator()->allocate();
output.allocator()->allocate();

conv_.configure(&input, &weights, &biases, &output, info);
configured_ = true;
} catch (const std::exception& e) {
configured_ = false;
std::cerr << "ConvolutionLayer configuration error: " << e.what()
<< std::endl;
}
}

void exec() override {
if (!configured_) {
throw std::runtime_error("ConvolutionLayer: Layer not configured.");
}
conv_.run();
}

std::string get_type_name() const override { return "ConvolutionLayer"; }
};

#endif
Loading
Loading