Fixed params, again
tatp22 committed Jul 16, 2020
1 parent f8a47f4 commit 0e97577
Showing 1 changed file with 3 additions and 6 deletions.
examples/pretrain_tutorial_lm.py: 3 additions & 6 deletions
@@ -12,9 +12,9 @@
 from torchtext.data.utils import get_tokenizer
 
 config = OrderedDict(
-    batch_size=16,
+    batch_size=20,
     gamma=0.95,
-    log_interval=100,
+    log_interval=200,
     lr=5.0,
     no_cuda=True,
     num_epochs=30,
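For orientation, below is a minimal sketch of the batching helpers these config values feed into, assuming the script follows the standard PyTorch/torchtext language-model tutorial pattern (the actual batchify/get_batch definitions are not part of this diff, so treat the signatures here as illustrative). The detail that matters for the second hunk: the last slice returned by get_batch can be shorter than bptt, so its per-batch mean loss covers fewer tokens than the others.

    # Sketch only: the usual batchify/get_batch helpers from the PyTorch LM
    # tutorial, assumed (not shown in this diff) to be how the script builds
    # its batches from config["batch_size"] and config["bptt"].
    import torch

    def batchify(data, batch_size):
        # Trim the flat token stream so it divides evenly into batch_size columns.
        nbatch = data.size(0) // batch_size
        data = data.narrow(0, 0, nbatch * batch_size)
        return data.view(batch_size, -1).t().contiguous()  # shape: (nbatch, batch_size)

    def get_batch(source, i, bptt=35):
        # The final chunk can be shorter than bptt, hence len(data) varies.
        seq_len = min(bptt, len(source) - 1 - i)
        data = source[i:i + seq_len]
        targets = source[i + 1:i + 1 + seq_len].reshape(-1)
        return data, targets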
@@ -80,17 +80,14 @@ def main():
                 print("Epoch: {}, LR: {}, loss: {}, ppl: {}".format(epoch+1, scheduler.get_last_lr()[0], curr_loss, math.exp(curr_loss)))
                 logging_loss = 0
 
-        train_loss /= len(train_data)
-        print("Training loss: {}".format(train_loss))
-
         with torch.no_grad():
             model.eval()
             test_loss = 0
             for i in tqdm(range(0, test_data.size(0)-1, config["bptt"])):
                 data, targets = get_batch(test_data, i)
                 prediction = model(data)
                 loss = criterion(prediction.reshape(-1, config["num_tokens"]), targets)
-                test_loss += loss.item()
+                test_loss += (loss.item()*len(data))
 
         test_loss /= (len(test_data)-1)
         print("Epoch: {}, test_loss: {}, test_ppl: {}".format(epoch+1, test_loss, math.exp(test_loss)))
