Mirror of https://github.com/clearml/clearml (synced 2025-05-03 12:31:00 +00:00)
Match Torch TensorboardX example to Tensorboard
This commit is contained in:
parent cb139f2d17, commit c06f72ae3a
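Summary of the change: this commit brings the tensorboardX example in line with the Tensorboard one. In both scripts F.log_softmax gains an explicit dim=1; the tensorboardX script's nested train/test closures are replaced by module-level functions with explicit arguments; deprecated PyTorch idioms (volatile=True, size_average=False) give way to plain tensors and reduction='sum'; the test loop now logs a sample image every 100 batches; and the Tensorboard script's Task.init picks up an output_uri argument.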
First changed file, the Tensorboard example (the page shows no file paths; the two scripts are identified here by their Task.init task names):

@@ -34,10 +34,10 @@ class Net(nn.Module):
         x = F.relu(self.fc1(x))
         x = F.dropout(x, training=self.training)
         x = self.fc2(x)
-        return F.log_softmax(x)
+        return F.log_softmax(x, dim=1)
 
 
-def internal_train(model, epoch, train_loader, args, optimizer, writer):
+def train(model, epoch, train_loader, args, optimizer, writer):
     model.train()
     for batch_idx, (data, target) in enumerate(train_loader):
         if args.cuda:
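A note on the dim=1 change: since PyTorch 0.4, calling F.log_softmax without an explicit dim emits a deprecation warning, and the implicitly chosen dimension depends on the input's rank. For per-class scores shaped (batch, classes), dim=1 normalizes across classes. A minimal sketch (the shapes are illustrative, not taken from the example):

    import torch
    import torch.nn.functional as F

    logits = torch.randn(4, 10)               # illustrative (batch, classes) scores
    log_probs = F.log_softmax(logits, dim=1)  # normalize over the class dimension

    # sanity check: each row sums to 1 in probability space
    print(torch.exp(log_probs).sum(dim=1))    # ~tensor([1., 1., 1., 1.])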
@@ -63,13 +63,15 @@ def test(model, test_loader, args, optimizer, writer):
     for niter, (data, target) in enumerate(test_loader):
         if args.cuda:
             data, target = data.cuda(), target.cuda()
-        data, target = Variable(data, volatile=True), Variable(target)
+        data, target = Variable(data), Variable(target)
         output = model(data)
-        test_loss += F.nll_loss(output, target, size_average=False).data.item()  # sum up batch loss
+        test_loss += F.nll_loss(output, target, reduction='sum').data.item()  # sum up batch loss
         pred = output.data.max(1)[1]  # get the index of the max log-probability
         pred = pred.eq(target.data).cpu().sum()
         writer.add_scalar('Test/Loss', pred, niter)
         correct += pred
+        if niter % 100 == 0:
+            writer.add_image('test', data[0, :, :, :], niter)
 
     test_loss /= len(test_loader.dataset)
     print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
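Both idioms dropped in this hunk were deprecated in PyTorch 0.4: volatile=True became a no-op once Variable merged into Tensor, and the size_average/reduce loss flags were folded into a single reduction argument. The diff simply removes volatile; the fully modern equivalent would wrap evaluation in torch.no_grad(), roughly as in this sketch (the helper name is mine, not from the example):

    import torch
    import torch.nn.functional as F

    def eval_batch(model, data, target):
        # torch.no_grad() replaces volatile=True: no autograd graph is built
        with torch.no_grad():
            output = model(data)
            # reduction='sum' replaces size_average=False: batch losses are summed,
            # so dividing by len(test_loader.dataset) afterwards gives the average
            batch_loss = F.nll_loss(output, target, reduction='sum').item()
        return output, batch_loss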
@@ -97,9 +99,9 @@ def main():
     parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                         help='how many batches to wait before logging training status')
     args = parser.parse_args()
-    task = Task.init(project_name='examples', task_name='pytorch with tensorboard')
+    task = Task.init(project_name='examples', task_name='pytorch with tensorboard', output_uri='/tmp/blah')
     writer = SummaryWriter('runs')
-    writer.add_text('lstm', 'This is an lstm', 0)
+    writer.add_text('TEXT', 'This is some text', 0)
     args.cuda = not args.no_cuda and torch.cuda.is_available()
 
     torch.manual_seed(args.seed)
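The only behavioral addition in this hunk is output_uri, which tells ClearML (Trains, at the time of this commit) where to upload models saved during the run; it accepts a local path or a remote store. The '/tmp/blah' value reads like a debug leftover. A sketch of the same call with a placeholder remote destination (the bucket name is invented):

    from clearml import Task  # the package was named 'trains' in releases contemporary with this commit

    task = Task.init(project_name='examples',
                     task_name='pytorch with tensorboard',
                     output_uri='s3://my-bucket/models')  # placeholder destination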
@@ -124,7 +126,7 @@ def main():
     optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
 
     for epoch in range(1, args.epochs + 1):
-        internal_train(model, epoch, train_loader, args, optimizer, writer)
+        train(model, epoch, train_loader, args, optimizer, writer)
         torch.save(model, os.path.join(gettempdir(), 'model{}'.format(epoch)))
         test(model, test_loader, args, optimizer, writer)
 
Second changed file, the tensorboardX example:

@@ -33,7 +33,49 @@ class Net(nn.Module):
         x = F.relu(self.fc1(x))
         x = F.dropout(x, training=self.training)
         x = self.fc2(x)
-        return F.log_softmax(x)
+        return F.log_softmax(x, dim=1)
 
 
+def train(model, epoch, train_loader, args, optimizer, writer):
+    model.train()
+    for batch_idx, (data, target) in enumerate(train_loader):
+        if args.cuda:
+            data, target = data.cuda(), target.cuda()
+        data, target = Variable(data), Variable(target)
+        optimizer.zero_grad()
+        output = model(data)
+        loss = F.nll_loss(output, target)
+        loss.backward()
+        optimizer.step()
+        if batch_idx % args.log_interval == 0:
+            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
+                epoch, batch_idx * len(data), len(train_loader.dataset),
+                100. * batch_idx / len(train_loader), loss.data.item()))
+            niter = epoch*len(train_loader)+batch_idx
+            writer.add_scalar('Train/Loss', loss.data.item(), niter)
+
+
+def test(model, test_loader, args, optimizer, writer):
+    model.eval()
+    test_loss = 0
+    correct = 0
+    for niter, (data, target) in enumerate(test_loader):
+        if args.cuda:
+            data, target = data.cuda(), target.cuda()
+        data, target = Variable(data), Variable(target)
+        output = model(data)
+        test_loss += F.nll_loss(output, target, reduction='sum').data.item()  # sum up batch loss
+        pred = output.data.max(1)[1]  # get the index of the max log-probability
+        pred = pred.eq(target.data).cpu().sum()
+        writer.add_scalar('Test/Loss', pred, niter)
+        correct += pred
+        if niter % 100 == 0:
+            writer.add_image('test', data[0, :, :, :], niter)
+
+    test_loss /= len(test_loader.dataset)
+    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+        test_loss, correct, len(test_loader.dataset),
+        100. * correct / len(test_loader.dataset)))
+
+
 def main():
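One quirk carried over verbatim from the old code: writer.add_scalar('Test/Loss', pred, niter) logs the per-batch count of correct predictions under a tag named 'Test/Loss', not the loss value. The TensorBoard calls themselves use the standard tensorboardX signatures; a self-contained sketch (tags and values are illustrative):

    import torch
    from tensorboardX import SummaryWriter

    writer = SummaryWriter('runs')                      # log directory, as in the example
    writer.add_scalar('Train/Loss', 0.42, 1)            # tag, scalar value, global step
    writer.add_image('test', torch.rand(1, 28, 28), 1)  # CHW tensor, e.g. one MNIST digit
    writer.close()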
@@ -60,7 +102,7 @@ def main():
 
     task = Task.init(project_name='examples', task_name='pytorch with tensorboardX')
     writer = SummaryWriter('runs')
-    writer.add_text('lstm', 'This is an lstm', 0)
+    writer.add_text('TEXT', 'This is some text', 0)
 
     torch.manual_seed(args.seed)
     if args.cuda:
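The add_text change is housekeeping: the old tag and string ('lstm' / 'This is an lstm') had nothing to do with this model, so both scripts now log a neutral entry. The call is the standard SummaryWriter.add_text(tag, text_string, global_step).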
@@ -83,48 +125,10 @@ def main():
         model.cuda()
     optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
 
-    def train(epoch):
-        model.train()
-        for batch_idx, (data, target) in enumerate(train_loader):
-            if args.cuda:
-                data, target = data.cuda(), target.cuda()
-            data, target = Variable(data), Variable(target)
-            optimizer.zero_grad()
-            output = model(data)
-            loss = F.nll_loss(output, target)
-            loss.backward()
-            optimizer.step()
-            if batch_idx % args.log_interval == 0:
-                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
-                    epoch, batch_idx * len(data), len(train_loader.dataset),
-                    100. * batch_idx / len(train_loader), loss.data.item()))
-                niter = epoch*len(train_loader)+batch_idx
-                writer.add_scalar('Train/Loss', loss.data.item(), niter)
-
-    def test():
-        model.eval()
-        test_loss = 0
-        correct = 0
-        for niter, (data, target) in enumerate(test_loader):
-            if args.cuda:
-                data, target = data.cuda(), target.cuda()
-            data, target = Variable(data, volatile=True), Variable(target)
-            output = model(data)
-            test_loss += F.nll_loss(output, target, size_average=False).data.item()  # sum up batch loss
-            pred = output.data.max(1)[1]  # get the index of the max log-probability
-            pred = pred.eq(target.data).cpu().sum()
-            writer.add_scalar('Test/Loss', pred, niter)
-            correct += pred
-
-        test_loss /= len(test_loader.dataset)
-        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
-            test_loss, correct, len(test_loader.dataset),
-            100. * correct / len(test_loader.dataset)))
-
     for epoch in range(1, args.epochs + 1):
-        train(epoch)
+        train(model, epoch, train_loader, args, optimizer, writer)
         torch.save(model, os.path.join(gettempdir(), 'model{}'.format(epoch)))
-        test()
+        test(model, test_loader, args, optimizer, writer)
 
 
 if __name__ == "__main__":
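Net effect of this final hunk: train and test stop being closures that capture main()'s locals and become the module-level functions added above, taking model, loaders, optimizer, and writer explicitly, so the two example scripts now share identical training and test code. Schematically:

    # before: nested closures, implicitly reading main()'s locals
    def main():
        def train(epoch):  # uses model, train_loader, optimizer, writer from main()
            ...

    # after: module-level functions with explicit parameters, as in the Tensorboard example
    def train(model, epoch, train_loader, args, optimizer, writer):
        ...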