Commit: Update data gen

henry-yeh committed Nov 5, 2023
1 parent ab47194 commit fe91d1a

Showing 20 changed files with 29 additions and 21 deletions.
2 changes: 1 addition & 1 deletion bpp/README.md
@@ -1 +1 @@
-Run utils.py to generate test datasets.
+Run `python utils.py` to generate test datasets.
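Note (this applies to every generator touched below): the `./data` to `../data` path changes in this commit appear to assume each `utils.py` is launched from inside its own subdirectory, e.g. `cd bpp && python utils.py`, so that the generated files land in the shared top-level `data/` folder via the `../data/...` relative paths.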
2 changes: 1 addition & 1 deletion cvrp/README.md
@@ -1 +1 @@
-Run utils.py to generate test datasets.
+Run `python utils.py` to generate test datasets.
4 changes: 2 additions & 2 deletions cvrp/utils.py
@@ -41,7 +41,7 @@ def load_test_dataset(problem_size, device):

 if __name__ == '__main__':
     import pathlib
-    pathlib.Path('./data/cvrp').mkdir(parents=False, exist_ok=True)
+    pathlib.Path('../data/cvrp').mkdir(parents=False, exist_ok=True)
     torch.manual_seed(123456)
     for n in [20, 100, 500]:
         inst_list = []
@@ -50,5 +50,5 @@ def load_test_dataset(problem_size, device):
             inst = torch.cat((demands.unsqueeze(0), distances), dim=0)  # (n+2, n+1)
             inst_list.append(inst)
         testDataset = torch.stack(inst_list)
-        torch.save(testDataset, f'./data/cvrp/testDataset-{n}.pt')
+        torch.save(testDataset, f'../data/cvrp/testDataset-{n}.pt')
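For context, a minimal sketch of reading the saved CVRP files back; the `(n+2, n+1)` instance layout follows the comment in the diff above, while the helper name and the split into demands and distances are assumptions for illustration, not code from this commit:

```python
import torch

# hypothetical loader sketch; the repo's own load_test_dataset may differ
def load_cvrp_instances(n, device='cpu'):
    dataset = torch.load(f'../data/cvrp/testDataset-{n}.pt', map_location=device)
    # dataset: (num_instances, n + 2, n + 1); row 0 holds the demands,
    # the remaining rows hold the distance matrix (inverting the torch.cat above)
    demands, distances = dataset[:, 0, :], dataset[:, 1:, :]
    return demands, distances
```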

Binary file removed data/op/testDataset-100.pt
Binary file removed data/op/valDataset-100.pt
Binary file removed data/sop/test100.pkl
Binary file removed data/sop/test20.pkl
Binary file removed data/sop/test50.pkl
2 changes: 1 addition & 1 deletion mkp/README.md
@@ -1 +1 @@
-Run utils.py to generate datasets.
+Run `python utils.py` to generate datasets.
8 changes: 5 additions & 3 deletions mkp/utils.py
@@ -49,7 +49,9 @@ def load_test_dataset(problem_size, device):
     return val_list

 if __name__ == '__main__':
-    # generate test dataset
+    # generate val and test dataset
+    import pathlib
+    pathlib.Path('../data/mkp').mkdir(parents=False, exist_ok=True)
     M = 5
     torch.manual_seed(12345)
     for problem_size in [50]:
@@ -58,7 +60,7 @@ def load_test_dataset(problem_size, device):
             prize, weight = gen_instance(problem_size, M, 'cpu')
             valDataset.append(torch.cat((prize.unsqueeze(1), weight), dim=1))
         valDataset = torch.stack(valDataset)
-        torch.save(valDataset, f'./data/mkp/valDataset-{problem_size}.pt')
+        torch.save(valDataset, f'../data/mkp/valDataset-{problem_size}.pt')

     torch.manual_seed(123456)
     for problem_size in [50]:
@@ -67,7 +69,7 @@ def load_test_dataset(problem_size, device):
             prize, weight = gen_instance(problem_size, M, 'cpu')
             testDataset.append(torch.cat((prize.unsqueeze(1), weight), dim=1))
         testDataset = torch.stack(testDataset)
-        torch.save(testDataset, f'./data/mkp/testDataset-{problem_size}.pt')
+        torch.save(testDataset, f'../data/mkp/testDataset-{problem_size}.pt')
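A hedged sketch of reading these MKP files back and splitting an instance into its parts; the `(problem_size, M + 1)` per-instance layout follows the `torch.cat((prize.unsqueeze(1), weight), dim=1)` call above:

```python
import torch

# assumption: column 0 is the prize vector, columns 1..M are the M knapsack weights
val = torch.load('../data/mkp/valDataset-50.pt')    # (num_instances, 50, 6) with M = 5
test = torch.load('../data/mkp/testDataset-50.pt')
prize, weight = test[0][:, 0], test[0][:, 1:]       # invert the cat for one instance
```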



2 changes: 1 addition & 1 deletion mkp_transformer/README.md
@@ -1 +1 @@
-Run utils.py to generate datasets.
+Run `python utils.py` to generate datasets.
6 changes: 3 additions & 3 deletions mkp_transformer/utils.py
@@ -46,7 +46,7 @@ def load_test_dataset(problem_size, device):
 if __name__ == '__main__':
     # generate val and test dataset
     import pathlib
-    pathlib.Path('./data/mkp_transformer').mkdir(parents=False, exist_ok=True)
+    pathlib.Path('../data/mkp_transformer').mkdir(parents=False, exist_ok=True)

     torch.manual_seed(12345)
     for problem_size in [300, 500]:
@@ -55,7 +55,7 @@ def load_test_dataset(problem_size, device):
             price, weight = gen_instance(problem_size, 5)
             testDataset.append(torch.cat((price.unsqueeze(0), weight), dim=0))
         testDataset = torch.stack(testDataset)
-        torch.save(testDataset, f'./data/mkp_transformer/valDataset-{problem_size}.pt')
+        torch.save(testDataset, f'../data/mkp_transformer/valDataset-{problem_size}.pt')

     torch.manual_seed(123456)
     for problem_size in [300, 500]:
@@ -64,7 +64,7 @@ def load_test_dataset(problem_size, device):
             price, weight = gen_instance(problem_size, 5)
             testDataset.append(torch.cat((price.unsqueeze(0), weight), dim=0))
         testDataset = torch.stack(testDataset)
-        torch.save(testDataset, f'./data/mkp_transformer/testDataset-{problem_size}.pt')
+        torch.save(testDataset, f'../data/mkp_transformer/testDataset-{problem_size}.pt')



1 change: 1 addition & 0 deletions op/README.md
@@ -0,0 +1 @@
+Run `python utils.py` to generate datasets.
6 changes: 4 additions & 2 deletions op/utils.py
@@ -71,11 +71,13 @@ def load_test_dataset(n_node, k_sparse, device):
     return val_list

 if __name__ == "__main__":
+    import pathlib
+    pathlib.Path('../data/op').mkdir(parents=False, exist_ok=True)
    torch.manual_seed(12345)
    for problem_size in [100, 200, 300]:
        coor = torch.rand(size=(30, problem_size, 2))
-       torch.save(coor, f"data/op/valDataset-{problem_size}.pt")
+       torch.save(coor, f"../data/op/valDataset-{problem_size}.pt")
    torch.manual_seed(123456)
    for problem_size in [100, 200, 300]:
        coor = torch.rand(size=(100, problem_size, 2))
-       torch.save(coor, f"data/op/testDataset-{problem_size}.pt")
+       torch.save(coor, f"../data/op/testDataset-{problem_size}.pt")
2 changes: 1 addition & 1 deletion pctsp/README.md
@@ -1 +1 @@
-Run utils.py to generate test datasets.
+Run `python utils.py` to generate test datasets.
4 changes: 2 additions & 2 deletions pctsp/utils.py
@@ -50,10 +50,10 @@ def load_test_dataset(n_node, device):
 if __name__ == "__main__":
     torch.manual_seed(123456)
     import pathlib
-    pathlib.Path('./data/pctsp').mkdir(parents=False, exist_ok=True)
+    pathlib.Path('../data/pctsp').mkdir(parents=False, exist_ok=True)
     for n in [20, 100, 500]:
         testDataset = []
         for _ in range(100):
             dist_mat, prizes, penalties = gen_inst(n, 'cpu')
             testDataset.append(torch.cat([dist_mat, prizes.unsqueeze(0), penalties.unsqueeze(0)], dim=0))
-        torch.save(torch.stack(testDataset), f"data/pctsp/testDataset-{n}.pt")
+        torch.save(torch.stack(testDataset), f"../data/pctsp/testDataset-{n}.pt")
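Because each PCTSP instance is a single tensor of stacked rows, unpacking it means inverting the `torch.cat(..., dim=0)` above; a minimal sketch, assuming only the row order that the cat call implies (distance matrix first, then prizes, then penalties):

```python
import torch

dataset = torch.load('../data/pctsp/testDataset-20.pt')  # 100 instances per size (see the loop above)
inst = dataset[0]
dist_mat, prizes, penalties = inst[:-2], inst[-2], inst[-1]  # invert the torch.cat above
```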
2 changes: 1 addition & 1 deletion smtwtp/README.md
@@ -1 +1 @@
-Run the utils.py to generate the test datasets.
+Run `python utils.py` to generate the test datasets.
4 changes: 2 additions & 2 deletions smtwtp/utils.py
@@ -32,13 +32,13 @@ def load_test_dataset(n_node, device):
 if __name__ == '__main__':
     torch.manual_seed(123456)
     import pathlib
-    pathlib.Path('./data/smtwtp').mkdir(parents=False, exist_ok=True)
+    pathlib.Path('../data/smtwtp').mkdir(parents=False, exist_ok=True)
     problem_sizes = [50, 100, 500]
     dataset_size = 100
     for p_size in problem_sizes:
         dataset = []
         for _ in range(dataset_size):
             pyg_data, due_time, weights, processing_time = instance_gen(p_size, 'cpu')
             dataset.append([pyg_data, due_time, weights, processing_time])
-        with open(f"data/smtwtp/test{p_size}.pkl", "wb") as f:
+        with open(f"../data/smtwtp/test{p_size}.pkl", "wb") as f:
             pickle.dump(dataset, f)
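Unlike the tensor datasets above, SMTWTP (and SOP below) serialize Python lists with pickle, so reading them back simply mirrors the dump; a minimal sketch:

```python
import pickle

with open('../data/smtwtp/test50.pkl', 'rb') as f:
    dataset = pickle.load(f)
# each entry matches the append above: [pyg_data, due_time, weights, processing_time]
pyg_data, due_time, weights, processing_time = dataset[0]
```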
1 change: 1 addition & 0 deletions sop/README.md
@@ -0,0 +1 @@
+Run `python utils.py` to generate the test datasets.
4 changes: 3 additions & 1 deletion sop/utils.py
@@ -66,6 +66,8 @@ def load_test_dataset(n_node, device):
     return loaded_list

 if __name__ == "__main__":
+    import pathlib
+    pathlib.Path('../data/sop').mkdir(parents=False, exist_ok=True)
     torch.manual_seed(123456)
     problem_sizes = [20, 50, 100]
     dataset_size = 100
@@ -74,7 +76,7 @@ def load_test_dataset(n_node, device):
         for _ in range(dataset_size):
             distances, adj_mat, mask = training_instance_gen(p_size, 'cpu')
             dataset.append([distances, adj_mat, mask])
-        with open(f"data/sop/test{p_size}.pkl", "wb") as f:
+        with open(f"../data/sop/test{p_size}.pkl", "wb") as f:
             pickle.dump(dataset, f)

