Access Matrix #19 (Open)

liaopeiyuan wants to merge 286 commits into main from access_matrix
Changes from all commits

Commits (286, Jul 12 to Aug 31, 2021, authored by liaopeiyuan; the listing repeats the Jul 12 to Aug 16 history once, apparently from a rebase, so it is condensed here):

- Jul 12 to 13: updates to make_dataset.py, autogluon_model.py, module.py, and feature.cc; "[Ansor] print computeDAG"
- Jul 27 to Aug 1: feature-extraction work in feature.cc; resolve merge conflicts
- Aug 4 to 5: clean-up; updates to lgbm_model.py, __init__.py, and train_model.py; LightGBM hyperparameter optimization (lightgbm_hyperparameter_opt.py, hyperopt runs, debugging of initial parameters)
- Aug 8: new parameters for TabNet; suppress verbosity
- Aug 9: graph embedding (graph_embedding.py); "compute access matrix in feature"; heavy iteration on compute_dag.cc, including "collect loop variables", "extract linear combinator of vars", "access matrix", "Access matrix demo", and "increase buffer"
- Aug 16: "remove parallel for to debug"; "3D outputs"; "suppress debug"; further compute_dag.cc and feature.cc updates
- Aug 19 to 20: resolve conflicts; refactoring; updates to task_scheduler.py, feature.py, and xgb_model.py; "Create sparsemax.py"; "reduce loopvars"
- Aug 23 to 24: "accmat" and "access matrix flag" in feature.cc/feature.h; Python-side wiring (feature.py, make_dataset.py, dataset.py); "feature names"; updates to train_model.py, xgb_model.py, and mlp_model.py; "final clean-up"
- Aug 31: "Merge branch 'main' into access_matrix"
Files changed
7 changes: 7 additions & 0 deletions include/tvm/auto_scheduler/compute_dag.h

@@ -262,6 +262,13 @@ class ComputeDAG : public ObjectRef {
   */
  String PrintStepsAsPython(const Array<Step>& transform_steps) const;

+ /*!
+  * \brief Compute the access matrix of compute DAG.
+  * \param enabled Enables feature extraction.
+  * \return The flattened access matrix.
+  */
+ std::vector<int> ComputeAccessMatrix(bool enabled = true) const;
+
  /*!
   * \brief Print the compute DAG to a string. This is also used to generate the ComputeDAG hash.
   * \param simple_mode Simple mode will only include the op names and brief compute.
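For orientation: an access matrix encodes, for one buffer access, the coefficient of each loop variable in each index expression; this is what the "collect loop variables" and "extract linear combinator of vars" commits above build up. A minimal Python sketch of the encoding (illustrative only, not the TVM C++ implementation; the helper name and the row-major flattening order are our assumptions):

    # Each index expression is a {loop_var: coefficient} dict; rows are buffer
    # dimensions, columns are loop variables, flattened like the
    # std::vector<int> that ComputeAccessMatrix returns.
    def flat_access_matrix(index_exprs, loop_vars):
        flat = []
        for expr in index_exprs:
            flat.extend(expr.get(v, 0) for v in loop_vars)
        return flat

    # For C[i, j] += A[i, k] * B[k, j] with loop variables (i, j, k):
    loop_vars = ["i", "j", "k"]
    print(flat_access_matrix([{"i": 1}, {"k": 1}], loop_vars))  # A -> [1, 0, 0, 0, 0, 1]
    print(flat_access_matrix([{"k": 1}, {"j": 1}], loop_vars))  # B -> [0, 0, 1, 0, 1, 0]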
13 changes: 7 additions & 6 deletions include/tvm/auto_scheduler/feature.h

@@ -46,16 +46,17 @@ namespace auto_scheduler {
  * \param cache_line_size The size of cache line in bytes
  * \param max_n_bufs The maximum number of extracted buffers for one statement
  * \param ret The returned feature vector
+ * \param access_matrix Access matrix feature vector
  */
 void GetPerStoreFeature(const Stmt& stmt, int cache_line_size, int max_n_bufs,
-                        std::vector<float>* ret);
+                        std::vector<float>* ret, std::vector<int>* access_matrix, bool access_matrix_enabled);

 /*
  * \brief Get the names of elements in the feature vector. Use this for debug and inspection.
  * \param max_n_bufs The maximum number of extracted buffers for one statement
  * \param ret The returned names.
  */
-void GetPerStoreFeatureName(int max_n_bufs, std::vector<std::string>* ret);
+void GetPerStoreFeatureName(int max_n_bufs, std::vector<std::string>* ret, bool access_matrix);

 /*!
  * \brief Get per-store feature from states of the same task
@@ -68,7 +69,7 @@ void GetPerStoreFeatureName(int max_n_bufs, std::vector<std::string>* ret);
  */
 void GetPerStoreFeaturesFromStates(const Array<State>& states, const SearchTask& task,
                                    int skip_first_n_feature_extraction, int max_n_bufs,
-                                   std::vector<std::vector<float> >* features);
+                                   std::vector<std::vector<float> >* features, bool access_matrix);

 /*!
  * \brief Get per-store feature from states of different tasks
@@ -81,7 +82,7 @@ void GetPerStoreFeaturesFromStates(const Array<State>& states, const SearchTask&
  */
 void GetPerStoreFeaturesFromStates(const Array<State>& states, const std::vector<SearchTask>& tasks,
                                    int skip_first_n_feature_extraction, int max_n_bufs,
-                                   std::vector<std::vector<float> >* features);
+                                   std::vector<std::vector<float> >* features, bool access_matrix);

 /*!
  * \brief Get per-store features from a log file
@@ -96,7 +97,7 @@ void GetPerStoreFeaturesFromStates(const Array<State>& states, const std::vector
 void GetPerStoreFeaturesFromFile(const std::string& filename, int max_lines, int max_n_bufs,
                                  std::vector<std::vector<float> >* features,
                                  std::vector<float>* normalized_throughputs,
-                                 std::vector<int>* task_ids);
+                                 std::vector<int>* task_ids, bool access_matrix);

 /*!
  * \brief Get per-store features from measurement input/result pairs
@@ -114,7 +115,7 @@ void GetPerStoreFeaturesFromMeasurePairs(const Array<MeasureInput>& inputs,
                                          int skip_first_n_feature_extraction, int max_n_bufs,
                                          std::vector<std::vector<float> >* features,
                                          std::vector<float>* normalized_throughputs,
-                                         std::vector<int>* task_ids);
+                                         std::vector<int>* task_ids, bool access_matrix);

 }  // namespace auto_scheduler
 }  // namespace tvm
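The same flag is threaded through the Python wrappers in tvm.auto_scheduler.feature, which is how the cost models below consume it. A usage sketch, assuming a SearchTask `task` and candidate `states` from an ordinary auto-scheduler search (setup not shown) and the call signatures as they appear in this diff:

    from tvm.auto_scheduler.feature import (
        get_per_store_feature_names,
        get_per_store_features_from_states,
    )

    # Per-store feature matrices, with access-matrix columns appended when enabled.
    features = get_per_store_features_from_states(states, task, access_matrix=True)

    # Names grow accordingly; lgbm_model.py uses these for feature importances below.
    names = get_per_store_feature_names(True)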
33 changes: 19 additions & 14 deletions python/tvm/auto_scheduler/cost_model/lgbm_model.py

@@ -113,7 +113,8 @@ def __init__(
             use_gpu=False,
             few_shot_learning="base_only",
             verbose_eval=25,
-            seed=None):
+            seed=None,
+            access_matrix=True):

         global lgbm
         try:
@@ -135,18 +136,21 @@ def __init__(
         self.few_shot_learning = few_shot_learning
         self.verbose_eval = verbose_eval
         self.workload_embed_dict = dict()
+        self.access_matrix = access_matrix

         # lgbm params
         if params is None:
             self.lgbm_params = {
                 'boosting_type': 'gbdt',
-                'num_leaves': 72,
-                'learning_rate': 0.1632095,
-                'feature_fraction': 0.84375,
-                'bagging_fraction': 0.89435,
-                'bagging_freq': 4,
+                'num_leaves': 31,
+                'learning_rate': 0.05,
+                'feature_fraction': 0.9,
+                'bagging_fraction': 0.8,
+                'bagging_freq': 5,
                 'verbose': 0,
-                'min_sum_hessian_in_leaf': 4,
+                'min_child_weight': 2,
+                'in_sum_in_hessian': 0,
+                'min_data_in_leaf': 0
             }
         else:
             self.lgbm_params = params
@@ -189,7 +193,7 @@ def fit_local(self, train_set, valid_set=None):
         elif self.few_shot_learning == "plus_per_task":
             base_preds = self._predict_a_dataset(self.base_model, train_set)
             for task in train_set.tasks():
-                diff_train_set = Dataset()
+                diff_train_set = Dataset(self.access_matrix)
                 diff_train_set.load_task_data(
                     task,
                     train_set.features[task],
@@ -249,7 +253,7 @@ def _fit_a_model(self, train_set, valid_set=None, valid_train_set=None):
             verbose_eval=self.verbose_eval
         )

-        feature_names = list(get_per_store_feature_names()) + ['max', 'min', 'add',
+        feature_names = list(get_per_store_feature_names(self.access_matrix)) + ['max', 'min', 'add',
             'Conv2dOutput', 'conv2d_winograd', 'DepthwiseConv2d',
             'dense', 'softmax', 'compute(b, i, j)']
         feature_importances = bst.feature_importance()
@@ -285,7 +289,7 @@ def register_new_task(self, task):

     def make_diff_set(self, base_model, dataset):
         base_preds = self._predict_a_dataset(base_model, dataset)
-        diff_set = Dataset()
+        diff_set = Dataset(self.access_matrix)
         for task in dataset.tasks():
             diff_set.load_task_data(
                 task,
@@ -365,15 +369,16 @@ def save(self, filename):
 class LGBModel(PythonBasedModel):
     """The wrapper of LGBModelInternal. So we can use it in end-to-end search."""
     def __init__(self, few_shot_learning="base_only", verbose_eval=25,
-                 num_warmup_sample=100, seed=None, disable_update=False):
+                 num_warmup_sample=100, seed=None, disable_update=False, access_matrix=True):
         super().__init__()

         self.num_warmup_sample = num_warmup_sample
         self.disable_update = disable_update
         self.model = LGBModelInternal(few_shot_learning=few_shot_learning,
                                       verbose_eval=verbose_eval,
-                                      seed=seed)
-        self.dataset = Dataset()
+                                      seed=seed, access_matrix=access_matrix)
+        self.access_matrix = access_matrix
+        self.dataset = Dataset(self.access_matrix)

     def update(self, inputs, results):
         if self.disable_update or len(inputs) <= 0:
@@ -384,7 +389,7 @@ def update(self, inputs, results):
         logger.info("LGBModel Training time: %.2f s", time.time() - tic)

     def predict(self, task, states):
-        features = get_per_store_features_from_states(states, task)
+        features = get_per_store_features_from_states(states, task, access_matrix=self.access_matrix)
         if self.model is not None and len(self.dataset) > self.num_warmup_sample:
             learning_task = LearningTask(task.workload_key, str(task.target))
             eval_dataset = Dataset.create_one_task(learning_task, features, None)
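Because the new keyword defaults to True in all three cost models, existing callers pick up the wider feature vector automatically and must opt out explicitly. A construction sketch (class and keyword names are taken from this diff; everything around them is standard tenset/auto-scheduler usage and is assumed):

    from tvm.auto_scheduler.cost_model.lgbm_model import LGBModel

    model = LGBModel(few_shot_learning="base_only", access_matrix=True)
    baseline = LGBModel(access_matrix=False)  # stock 164-dim per-store features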
19 changes: 10 additions & 9 deletions python/tvm/auto_scheduler/cost_model/mlp_model.py

@@ -320,7 +320,7 @@ def moving_average(average, update):

 class MLPModelInternal:
     def __init__(self, device=None, few_shot_learning="base_only", use_workload_embedding=True, use_target_embedding=False,
-                 loss_type='lambdaRankLoss'):
+                 loss_type='lambdaRankLoss', access_matrix=True):
         if device is None:
             if torch.cuda.device_count():
                 device = 'cuda:0'
@@ -330,7 +330,7 @@ def __init__(self, device=None, few_shot_learning="base_only", use_workload_embedding=True, use_target_embedding=False,
         # Common parameters
         self.net_params = {
             "type": "SegmentSumMLP",
-            "in_dim": 164 + (10 if use_workload_embedding else 0),
+            "in_dim": 164 + (250 if access_matrix else 0) + (10 if use_workload_embedding else 0),
             "hidden_dim": 256,
             "out_dim": 1,
         }
@@ -429,7 +429,7 @@ def fit_local(self, train_set, valid_set=None):
             self.loss_func = torch.nn.MSELoss()
             self.net_params['add_sigmoid'] = True
             base_preds = self._predict_a_dataset(self.base_model, train_set)
-            diff_train_set = Dataset()
+            diff_train_set = Dataset(self.access_matrix)
             for task in train_set.tasks():
                 diff_train_set.load_task_data(
                     task,
@@ -439,7 +439,7 @@ def fit_local(self, train_set, valid_set=None):

             if valid_set:
                 base_preds = self._predict_a_dataset(self.base_model, valid_set)
-                diff_valid_set = Dataset()
+                diff_valid_set = Dataset(self.access_matrix)
                 for task in valid_set.tasks():
                     diff_valid_set.load_task_data(
                         task,
@@ -456,7 +456,7 @@ def fit_local(self, train_set, valid_set=None):
         elif self.few_shot_learning == "plus_per_task":
             base_preds = self._predict_a_dataset(self.base_model, train_set)
             for task in train_set.tasks():
-                diff_train_set = Dataset()
+                diff_train_set = Dataset(self.access_matrix)
                 diff_train_set.load_task_data(
                     task,
                     train_set.features[task],
@@ -785,12 +785,13 @@ def find_class(self, module, name):
 class MLPModel(PythonBasedModel):
     """The wrapper of MLPModelInternal. So we can use it in end-to-end search."""

-    def __init__(self, few_shot_learning="base_only", disable_update=False):
+    def __init__(self, few_shot_learning="base_only", disable_update=False, access_matrix=True):
         super().__init__()

         self.disable_update = disable_update
-        self.model = MLPModelInternal(few_shot_learning=few_shot_learning)
-        self.dataset = Dataset()
+        self.access_matrix = access_matrix
+        self.model = MLPModelInternal(few_shot_learning=few_shot_learning, access_matrix=access_matrix)
+        self.dataset = Dataset(self.access_matrix)

     def update(self, inputs, results):
         if self.disable_update or len(inputs) <= 0:
@@ -801,7 +802,7 @@ def update(self, inputs, results):
         logger.info("MLPModel Training time: %.2f s", time.time() - tic)

     def predict(self, task, states):
-        features = get_per_store_features_from_states(states, task)
+        features = get_per_store_features_from_states(states, task, access_matrix=self.access_matrix)
         if self.model is not None:
             learning_task = LearningTask(task.workload_key, str(task.target))
             eval_dataset = Dataset.create_one_task(learning_task, features, None)
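The in_dim change above is the one place the access matrix's size is stated concretely: 164 stock per-store features, plus 250 for the flattened access matrix, plus 10 for the optional workload embedding. A sketch of the bookkeeping (the helper name is ours; the constants come from the diff):

    def mlp_in_dim(access_matrix=True, use_workload_embedding=True):
        return 164 + (250 if access_matrix else 0) + (10 if use_workload_embedding else 0)

    assert mlp_in_dim(False, False) == 164  # original feature width
    assert mlp_in_dim(True, False) == 414   # + flattened access matrix
    assert mlp_in_dim(True, True) == 424    # + workload embedding (the default)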
19 changes: 10 additions & 9 deletions python/tvm/auto_scheduler/cost_model/tabnet_model.py

@@ -784,7 +784,7 @@ def moving_average(average, update):

 class TabNetModelInternal:
     def __init__(self, use_gpu=True, device=None, few_shot_learning="base_only", use_workload_embedding=True, use_target_embedding=False,
-                 loss_type='lambdaRankLoss'):
+                 loss_type='lambdaRankLoss', access_matrix=True):
         print('tabnet')
         if device is None:
             if torch.cuda.device_count() and use_gpu:
@@ -795,7 +795,7 @@ def __init__(self, use_gpu=True, device=None, few_shot_learning="base_only", use_workload_embedding=True, use_target_embedding=False,
         # Common parameters
         self.net_params = {
             "type": "SegmentSumMLP",
-            "in_dim": 164 + (10 if use_workload_embedding else 0),
+            "in_dim": 164 + (250 if access_matrix else 0) + (10 if use_workload_embedding else 0),
             "hidden_dim": 256,
             "out_dim": 1,
         }
@@ -873,7 +873,7 @@ def fit_local(self, train_set, valid_set=None):
             self.loss_func = torch.nn.MSELoss()
             self.net_params['add_sigmoid'] = True
             base_preds = self._predict_a_dataset(self.base_model, train_set)
-            diff_train_set = Dataset()
+            diff_train_set = Dataset(self.access_matrix)
             for task in train_set.tasks():
                 diff_train_set.load_task_data(
                     task,
@@ -883,7 +883,7 @@ def fit_local(self, train_set, valid_set=None):

             if valid_set:
                 base_preds = self._predict_a_dataset(self.base_model, valid_set)
-                diff_valid_set = Dataset()
+                diff_valid_set = Dataset(self.access_matrix)
                 for task in valid_set.tasks():
                     diff_valid_set.load_task_data(
                         task,
@@ -900,7 +900,7 @@ def fit_local(self, train_set, valid_set=None):
         elif self.few_shot_learning == "plus_per_task":
             base_preds = self._predict_a_dataset(self.base_model, train_set)
             for task in train_set.tasks():
-                diff_train_set = Dataset()
+                diff_train_set = Dataset(self.access_matrix)
                 diff_train_set.load_task_data(
                     task,
                     train_set.features[task],
@@ -1125,12 +1125,13 @@ def find_class(self, module, name):
 class TabNetModel(PythonBasedModel):
     """The wrapper of TabNetModelInternal. So we can use it in end-to-end search."""

-    def __init__(self, few_shot_learning="base_only", disable_update=False):
+    def __init__(self, few_shot_learning="base_only", disable_update=False, access_matrix=True):
         super().__init__()

         self.disable_update = disable_update
-        self.model = TabNetModelInternal(few_shot_learning=few_shot_learning)
-        self.dataset = Dataset()
+        self.access_matrix = access_matrix
+        self.model = TabNetModelInternal(few_shot_learning=few_shot_learning, access_matrix=access_matrix)
+        self.dataset = Dataset(self.access_matrix)

     def update(self, inputs, results):
         if self.disable_update or len(inputs) <= 0:
@@ -1141,7 +1142,7 @@ def update(self, inputs, results):
         logger.info("TabNetModel Training time: %.2f s", time.time() - tic)

     def predict(self, task, states):
-        features = get_per_store_features_from_states(states, task)
+        features = get_per_store_features_from_states(states, task, access_matrix=self.access_matrix)
         if self.model is not None:
             learning_task = LearningTask(task.workload_key, str(task.target))
             eval_dataset = Dataset.create_one_task(learning_task, features, None)