From 9496f5eaab361f9c6bf4d286048baa1225dcdde0 Mon Sep 17 00:00:00 2001
From: anonymone
Date: Tue, 20 Aug 2019 08:34:53 +0000
Subject: [PATCH 1/2] fix bug: Expected object of backend CUDA but got backend
 CPU for argument #3 'index'

---
 seq2seq/evaluator/evaluator.py        | 2 +-
 seq2seq/trainer/supervised_trainer.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/seq2seq/evaluator/evaluator.py b/seq2seq/evaluator/evaluator.py
index 504539d..7f47c58 100644
--- a/seq2seq/evaluator/evaluator.py
+++ b/seq2seq/evaluator/evaluator.py
@@ -35,7 +35,7 @@ def evaluate(self, model, data):
         match = 0
         total = 0
 
-        device = None if torch.cuda.is_available() else -1
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         batch_iterator = torchtext.data.BucketIterator(
             dataset=data, batch_size=self.batch_size,
             sort=True, sort_key=lambda x: len(x.src),

diff --git a/seq2seq/trainer/supervised_trainer.py b/seq2seq/trainer/supervised_trainer.py
index 6745ce0..a5516b3 100644
--- a/seq2seq/trainer/supervised_trainer.py
+++ b/seq2seq/trainer/supervised_trainer.py
@@ -72,7 +72,7 @@ def _train_epoches(self, data, model, n_epochs, start_epoch, start_step,
         print_loss_total = 0  # Reset every print_every
         epoch_loss_total = 0  # Reset every epoch
 
-        device = None if torch.cuda.is_available() else -1
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         batch_iterator = torchtext.data.BucketIterator(
             dataset=data, batch_size=self.batch_size,
             sort=False, sort_within_batch=True,

From ad2819b322224bbf6050e564d96a3ff4c9b5a659 Mon Sep 17 00:00:00 2001
From: severuspeng
Date: Thu, 26 Dec 2019 12:26:24 +0800
Subject: [PATCH 2/2] fix bug that logging cannot be displayed in the console

---
 seq2seq/trainer/supervised_trainer.py | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/seq2seq/trainer/supervised_trainer.py b/seq2seq/trainer/supervised_trainer.py
index a5516b3..6c666cc 100644
--- a/seq2seq/trainer/supervised_trainer.py
+++ b/seq2seq/trainer/supervised_trainer.py
@@ -46,8 +46,6 @@ def __init__(self, expt_dir='experiment', loss=NLLLoss(), batch_size=64,
             os.makedirs(self.expt_dir)
         self.batch_size = batch_size
 
-        self.logger = logging.getLogger(__name__)
-
     def _train_batch(self, input_variable, input_lengths, target_variable, model, teacher_forcing_ratio):
         loss = self.loss
         # Forward propagation
@@ -67,7 +65,6 @@ def _train_epoches(self, data, model, n_epochs, start_epoch, start_step,
                        dev_data=None, teacher_forcing_ratio=0):
-        log = self.logger
 
         print_loss_total = 0  # Reset every print_every
         epoch_loss_total = 0  # Reset every epoch
 
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -85,7 +82,7 @@ def _train_epoches(self, data, model, n_epochs, start_epoch, start_step,
         step = start_step
         step_elapsed = 0
         for epoch in range(start_epoch, n_epochs + 1):
-            log.debug("Epoch: %d, Step: %d" % (epoch, step))
+            logging.debug("Epoch: %d, Step: %d" % (epoch, step))
 
             batch_generator = batch_iterator.__iter__()
             # consuming seen batches from previous training
@@ -113,7 +110,7 @@ def _train_epoches(self, data, model, n_epochs, start_epoch, start_step,
                         step / total_steps * 100,
                         self.loss.name,
                         print_loss_avg)
-                    log.info(log_msg)
+                    logging.info(log_msg)
 
                 # Checkpoint
                 if step % self.checkpoint_every == 0 or step == total_steps:
@@ -136,7 +133,7 @@ def _train_epoches(self, data, model, n_epochs, start_epoch, start_step,
             else:
                 self.optimizer.update(epoch_loss_avg, epoch)
 
-            log.info(log_msg)
+            logging.info(log_msg)
 
     def train(self, model, data, num_epochs=5,
               resume=False, dev_data=None,
@@ -179,7 +176,7 @@ def train(self, model, data, num_epochs=5,
                 optimizer = Optimizer(optim.Adam(model.parameters()), max_grad_norm=5)
             self.optimizer = optimizer
 
-        self.logger.info("Optimizer: %s, Scheduler: %s" % (self.optimizer.optimizer, self.optimizer.scheduler))
+        logging.info("Optimizer: %s, Scheduler: %s" % (self.optimizer.optimizer, self.optimizer.scheduler))
 
         self._train_epoches(data, model, num_epochs, start_epoch, step,
                             dev_data=dev_data,
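
Background on PATCH 1/2: newer torchtext releases (paired with PyTorch >= 0.4) accept a torch.device for the iterator's device argument instead of the deprecated convention the old code used (None for the current GPU, -1 for CPU). With the old values, index tensors can stay on the CPU while the model runs on CUDA, which raises the error quoted in the subject line. A minimal standalone sketch of the device-handling pattern, assuming nothing beyond stock PyTorch (the tensors below are made-up placeholders, not code from this repository):

import torch

# Pick the device the same way the patch does.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Tensors created without an explicit device live on the CPU and must be moved,
# otherwise CUDA ops fail with "Expected object of backend CUDA but got backend
# CPU" for arguments such as index tensors.
index = torch.tensor([0, 2, 1]).to(device)   # index tensor moved to the chosen device
src = torch.randn(3, 4, device=device)       # data tensor created on the chosen device
print(src.index_select(0, index).shape)      # both operands now share a device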
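
Background on PATCH 2/2: the module-level helpers logging.debug()/logging.info() call logging.basicConfig() on first use when the root logger has no handlers, so their output reaches the console; an unconfigured logger from logging.getLogger(__name__) has no such fallback, and only WARNING and above escape through the last-resort handler, which is why the trainer's INFO and DEBUG messages never showed up. An equivalent alternative to the patch, sketched below under the assumption that configuration happens once at program start (the format string and message values are illustrative only):

import logging

# Configuring the root logger once attaches a console handler that every named
# logger inherits, so per-module loggers become visible without further setup.
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s %(name)s %(levelname)s %(message)s")

logger = logging.getLogger(__name__)
logger.info("Optimizer: %s, Scheduler: %s", "Adam", None)  # now printed to the console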