Commit 53a20bb

updates
1 parent bcc0071 commit 53a20bb

File tree

5 files changed (+38, -95 lines)

ML/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@
 # os.environ["WANDB_SILENT"] = "true"
 PROJECT_NAME = "NLP-Disaster Tweets"
 device = torch.device("cuda")
-BATCH_SIZE = 32
+BATCH_SIZE = 16
 torch.backends.cudnn.benchmark = True
 torch.cuda.empty_cache()
 torch.manual_seed(42)
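
Note: this halves the module-level batch size from 32 to 16, presumably trading throughput for GPU memory headroom, while `device` stays pinned to CUDA unconditionally. A minimal sketch of the same setup block with a CPU fallback (an assumption, not part of this commit):

import torch

# Sketch (not from the commit): same globals, but the module still imports
# on machines without CUDA.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
BATCH_SIZE = 16
torch.backends.cudnn.benchmark = True  # autotune kernels for fixed input shapes
if device.type == "cuda":
    torch.cuda.empty_cache()
torch.manual_seed(42)  # fixed seed for reproducible runs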

ML/helper_functions/test.py

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@ def test(self) -> Dict:
         with torch.inference_mode():
             for X, y in self.test_dataloader:
                 y = y[0]
-                X = F.to_tensor(X, padding_value=1).to("cuda")
+                X = F.to_tensor(X, padding_value=1).to(device)
                 y = torch.tensor(y).to("cuda")
                 preds = torch.argmax(torch.softmax(self.model(X), dim=1), dim=1)
                 loss = self.criterion(preds.float(), y.view(-1, 1).squeeze(1).float())
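
Note that only the input tensor is switched to the shared `device` constant here; the label tensor on the following line still hardcodes "cuda". A fully device-agnostic form would presumably read (a sketch, assuming `device` is the constant from ML/__init__.py and `F` is torchtext.functional):

# Sketch: route both inputs and labels through the shared device constant.
X = F.to_tensor(X, padding_value=1).to(device)  # pad id 1 matches RoBERTa's <pad>
y = torch.tensor(y).to(device)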

ML/helper_functions/train.py

Lines changed: 0 additions & 2 deletions
@@ -43,8 +43,6 @@ def train(self, run_name: str) -> None:
                 loss.backward()
                 self.optimizer.step()
                 iterator.set_description(f"{i}/{len(self.train_dataloader)}")
-                if self.lr_schedular:
-                    self.lr_schedular.step()
             iterator.set_description(f"Testing...")
             self.model.eval()
             wandb.log(
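
The removed branch stepped the scheduler once per mini-batch; since every run now passes `lr_schedular=None`, the optimizer runs at a fixed learning rate. If a scheduler were reintroduced, the common pattern steps it once per epoch, after the optimizer updates (a sketch, not from this commit):

import torch
from torch import nn, optim

# Sketch (assumption): per-epoch scheduler stepping with a toy model.
model = nn.Linear(768, 2)
optimizer = optim.Adam(model.parameters(), lr=1e-5)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

for epoch in range(25):
    # ... per-batch forward pass, loss.backward(), optimizer.step() ...
    scheduler.step()  # decay the LR by gamma every step_size epochs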

run.py

Lines changed: 35 additions & 90 deletions
@@ -1,94 +1,39 @@
 from ML import *
 
+print(ROBERTA_BASE_ENCODER)
 
-def train(
-    batch_size: int = 32,
-    lr: float = 0.01,
-    test_split: float = 0.25,
-    optimizer=optim.Adam,
-    epochs: int = 5,
-    name: str = "",
-    lr_schedular=None,
-    transforms=None,
-):
-    train_data_loader, test_data_loader, valid_data_loader = Load_Data(
-        Main_DL,
-        Valid_Loader,
-        [
-            "/media/user/Main/Programmer-RD-AI/Programming/Learning/JS/NLP-Disaster-Tweets/ML/data/train.csv",
-            batch_size,
-            transforms,
-        ],
-        [
-            "/media/user/Main/Programmer-RD-AI/Programming/Learning/JS/NLP-Disaster-Tweets/ML/data/test.csv",
-            1,
-        ],
-        test_split,
-        42,
-    ).ld()
-    model = TL().to(device)
-    optimizer = optimizer(model.parameters(), lr=lr)
-    criterion = nn.CrossEntropyLoss()
-    config = {
-        "model": model,
-        "criterion": criterion,
-        "optimizer": optimizer,
-        "learning_rate": lr,
-    }
-    Train(
-        model,
-        epochs,
-        config,
-        train_data_loader,
-        test_data_loader,
-        valid_data_loader,
-        criterion,
-        optimizer,
-    ).train(f"{name}")
 
-
-train(
-    transforms=Transformer().transform(),
-    batch_size=16,
-    lr=1e-3,
-    test_split=0.25,
-    optimizer=optim.Adam,
-    lr_schedular=None,
-    name=f"1e-3",
-)
-train(
-    transforms=Transformer().transform(),
-    batch_size=16,
-    lr=1e-4,
-    test_split=0.25,
-    optimizer=optim.Adam,
-    lr_schedular=None,
-    name=f"1e-4",
-)
-train(
-    transforms=Transformer().transform(),
-    batch_size=16,
-    lr=1e-5,
-    test_split=0.25,
-    optimizer=optim.Adam,
-    lr_schedular=None,
-    name=f"1e-5",
-)
-train(
-    transforms=Transformer().transform(),
-    batch_size=16,
-    lr=1e-6,
-    test_split=0.25,
-    optimizer=optim.Adam,
-    lr_schedular=None,
-    name=f"1e-6",
-)
-train(
-    transforms=Transformer().transform(),
-    batch_size=16,
-    lr=1e-7,
-    test_split=0.25,
-    optimizer=optim.Adam,
-    lr_schedular=None,
-    name=f"1e-7",
-)
+train_data_loader, test_data_loader, valid_data_loader = Load_Data(
+    Main_DL,
+    Valid_Loader,
+    [
+        "/media/user/Main/Programmer-RD-AI/Programming/Learning/JS/NLP-Disaster-Tweets/ML/data/train.csv",
+        16,
+        Transformer().transform(),
+    ],
+    [
+        "/media/user/Main/Programmer-RD-AI/Programming/Learning/JS/NLP-Disaster-Tweets/ML/data/test.csv",
+        1,
+    ],
+    0.125,
+    42,
+).ld()
+model = TL().to(device)
+optimizer = optim.Adam(model.parameters(), lr=1e-5)
+criterion = nn.CrossEntropyLoss()
+config = {
+    "model": model,
+    "criterion": criterion,
+    "optimizer": optimizer,
+    "learning_rate": 1e-5,
+}
+Train(
+    model,
+    25,
+    config,
+    train_data_loader,
+    test_data_loader,
+    valid_data_loader,
+    criterion,
+    optimizer,
+).train(f"final")
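
This rewrite deletes the parameterized `train()` helper and the five copy-pasted sweep calls over learning rates 1e-3 through 1e-7, pinning a single "final" run at lr=1e-5 for 25 epochs with a 0.125 test split. For reference, the removed sweep could have been collapsed into one loop over the old helper (a sketch that depends on the deleted `train()` above):

# Sketch: the removed five-call sweep as a single loop.
for lr in ("1e-3", "1e-4", "1e-5", "1e-6", "1e-7"):
    train(
        transforms=Transformer().transform(),
        batch_size=16,
        lr=float(lr),
        test_split=0.25,
        optimizer=optim.Adam,
        name=lr,  # run name mirrors the learning rate, as in the old calls
    )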

wandb/latest-run

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-run-20230728_100733-nffptso0
+run-20230728_113045-bthz2tjx
