subgraph-unary-tester.h
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <limits>
#include <numeric>
#include <random>
#include <vector>

#include <gtest/gtest.h>

#include "xnnpack.h"
#include "xnnpack/node-type.h"
#include "xnnpack/operator.h"
#include "xnnpack/requantization.h"
#include "xnnpack/subgraph.h"
#include "replicable_random_device.h"
template <
    typename InputType,
    typename OutputType = InputType,
    size_t min_dim = 0,
    size_t max_dim = XNN_MAX_TENSOR_DIMS,
    bool pad_output = false>
class UnaryTest : public ::testing::Test {
 protected:
  UnaryTest() {
    shape_dist = std::uniform_int_distribution<size_t>(min_dim, max_dim);
    dim_dist = std::uniform_int_distribution<size_t>(1, 9);
    i8dist = std::uniform_int_distribution<int32_t>(
        std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max());
    u8dist = std::uniform_int_distribution<int32_t>(
        std::numeric_limits<uint8_t>::min(),
        std::numeric_limits<uint8_t>::max());
    u32dist = std::uniform_int_distribution<uint32_t>();
    scale_dist = std::uniform_real_distribution<float>(0.1f, 10.0f);
    f32dist = std::uniform_real_distribution<float>(0.01f, 1.0f);
    dims = RandomShape();
    AllocateInputsAndOutputs();
  }

  // Derives the batch size and channel count from the random shape, draws
  // random quantization parameters, and sizes the input and output buffers.
  void AllocateInputsAndOutputs() {
    channels = dims.empty() ? 1 : dims.back();
    xnn_shape shape = {};
    shape.num_dims = dims.size();
    memcpy(shape.dim, dims.data(), dims.size() * sizeof(size_t));
    batch_size = xnn_shape_multiply_non_channel_dims(&shape);
    num_output_elements = batch_size * channels;
    scale = scale_dist(rng);
    signed_zero_point = i8dist(rng);
    unsigned_zero_point = u8dist(rng);
    input = std::vector<InputType>(num_output_elements + XNN_EXTRA_BYTES / sizeof(InputType));
    const size_t output_padding = pad_output ? (XNN_EXTRA_BYTES / sizeof(InputType)) : 0;
    operator_output = std::vector<OutputType>(num_output_elements + output_padding);
    subgraph_output = std::vector<OutputType>(num_output_elements + output_padding);
  }

  // Returns a shape with a random number of dimensions in [min_dim, max_dim],
  // each dimension drawn from [1, 9].
  std::vector<size_t> RandomShape() {
    std::vector<size_t> dims(shape_dist(rng));
    std::generate(dims.begin(), dims.end(), [&] { return dim_dist(rng); });
    return dims;
  }

  static size_t NumElements(const std::vector<size_t>& dims) {
    return std::accumulate(dims.begin(), dims.end(), size_t(1), std::multiplies<size_t>());
  }

  xnnpack::ReplicableRandomDevice rng;
  std::uniform_int_distribution<size_t> shape_dist;
  std::uniform_int_distribution<size_t> dim_dist;
  std::uniform_real_distribution<float> scale_dist;
  std::uniform_int_distribution<int32_t> i8dist;
  std::uniform_int_distribution<int32_t> u8dist;
  std::uniform_int_distribution<uint32_t> u32dist;
  std::uniform_real_distribution<float> f32dist;

  std::vector<size_t> dims;
  uint32_t input_id;
  uint32_t output_id;
  size_t channels;
  size_t batch_size;
  size_t num_output_elements;
  float scale;
  int32_t signed_zero_point;
  int32_t unsigned_zero_point;
  std::vector<InputType> input;
  std::vector<OutputType> operator_output;
  std::vector<OutputType> subgraph_output;
};
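
// Illustrative usage sketch (not part of the original header; the fixture
// name, element types, and test body below are assumptions for demonstration
// only). A concrete unary-op test suite could reuse UnaryTest with gtest
// typed tests, then build an xnn_subgraph for its specific unary node and
// compare the subgraph output against the operator API.
template <typename T>
class ExampleUnaryTest : public UnaryTest<T> {};

using ExampleUnaryTestTypes = ::testing::Types<float, int8_t>;
TYPED_TEST_SUITE(ExampleUnaryTest, ExampleUnaryTestTypes);

TYPED_TEST(ExampleUnaryTest, FixtureProvidesConsistentlySizedBuffers) {
  // The UnaryTest constructor already drew a random shape and sized all
  // buffers, so a derived test can rely on these invariants before building
  // and running a subgraph.
  EXPECT_EQ(this->num_output_elements, this->batch_size * this->channels);
  EXPECT_EQ(this->operator_output.size(), this->subgraph_output.size());
}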