diff --git a/examples/pytorch_tensorboardX.py b/examples/pytorch_tensorboardX.py
index adc01310..13133b53 100644
--- a/examples/pytorch_tensorboardX.py
+++ b/examples/pytorch_tensorboardX.py
@@ -15,47 +15,6 @@ from torch.autograd import Variable
 from tensorboardX import SummaryWriter
 
 from trains import Task
-task = Task.init(project_name='examples', task_name='pytorch with tensorboardX')
-
-
-writer = SummaryWriter('runs')
-writer.add_text('lstm', 'This is an lstm', 0)
-# Training settings
-parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
-parser.add_argument('--batch-size', type=int, default=64, metavar='N',
-                    help='input batch size for training (default: 64)')
-parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
-                    help='input batch size for testing (default: 1000)')
-parser.add_argument('--epochs', type=int, default=2, metavar='N',
-                    help='number of epochs to train (default: 10)')
-parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
-                    help='learning rate (default: 0.01)')
-parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
-                    help='SGD momentum (default: 0.5)')
-parser.add_argument('--no-cuda', action='store_true', default=False,
-                    help='disables CUDA training')
-parser.add_argument('--seed', type=int, default=1, metavar='S',
-                    help='random seed (default: 1)')
-parser.add_argument('--log-interval', type=int, default=10, metavar='N',
-                    help='how many batches to wait before logging training status')
-args = parser.parse_args()
-args.cuda = not args.no_cuda and torch.cuda.is_available()
-
-torch.manual_seed(args.seed)
-if args.cuda:
-    torch.cuda.manual_seed(args.seed)
-
-kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {}
-train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=True, download=True,
-                                                          transform=transforms.Compose([
-                                                              transforms.ToTensor(),
-                                                              transforms.Normalize((0.1307,), (0.3081,))])),
-                                           batch_size=args.batch_size, shuffle=True, **kwargs)
-test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=False,
-                                                         transform=transforms.Compose([
-                                                             transforms.ToTensor(),
-                                                             transforms.Normalize((0.1307,), (0.3081,))])),
-                                          batch_size=args.batch_size, shuffle=True, **kwargs)
 
 
 class Net(nn.Module):
@@ -77,53 +36,103 @@ class Net(nn.Module):
         return F.log_softmax(x)
 
 
-model = Net()
-if args.cuda:
-    model.cuda()
-optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
+def main():
+    # Training settings
+    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
+    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
+                        help='input batch size for training (default: 64)')
+    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
+                        help='input batch size for testing (default: 1000)')
+    parser.add_argument('--epochs', type=int, default=2, metavar='N',
+                        help='number of epochs to train (default: 2)')
+    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
+                        help='learning rate (default: 0.01)')
+    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
+                        help='SGD momentum (default: 0.5)')
+    parser.add_argument('--no-cuda', action='store_true', default=False,
+                        help='disables CUDA training')
+    parser.add_argument('--seed', type=int, default=1, metavar='S',
+                        help='random seed (default: 1)')
+    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
+                        help='how many batches to wait before logging training status')
+    args = parser.parse_args()
+    args.cuda = not args.no_cuda and torch.cuda.is_available()
+
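+    # Connecting the script to trains: once the task is initialized, argparse arguments,
+    # tensorboardX scalars and torch.save checkpoints are captured automatically.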
+    task = Task.init(project_name='examples', task_name='pytorch with tensorboardX')
+    writer = SummaryWriter('runs')
+    writer.add_text('model', 'Training a small CNN on MNIST', 0)
+
+    torch.manual_seed(args.seed)
+    if args.cuda:
+        torch.cuda.manual_seed(args.seed)
+
+    kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {}
+    train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=True, download=True,
+                                                              transform=transforms.Compose([
+                                                                  transforms.ToTensor(),
+                                                                  transforms.Normalize((0.1307,), (0.3081,))])),
+                                               batch_size=args.batch_size, shuffle=True, **kwargs)
+    test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data', train=False,
+                                                             transform=transforms.Compose([
+                                                                 transforms.ToTensor(),
+                                                                 transforms.Normalize((0.1307,), (0.3081,))])),
+                                              batch_size=args.test_batch_size, shuffle=True, **kwargs)
+
+    model = Net()
+    if args.cuda:
+        model.cuda()
+    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
+
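+    # train/test are defined as closures so they can use args, model, optimizer and the
+    # summary writer directly, now that everything is created inside main().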
+    def train(epoch):
+        model.train()
+        for batch_idx, (data, target) in enumerate(train_loader):
+            if args.cuda:
+                data, target = data.cuda(), target.cuda()
+            data, target = Variable(data), Variable(target)
+            optimizer.zero_grad()
+            output = model(data)
+            loss = F.nll_loss(output, target)
+            loss.backward()
+            optimizer.step()
+            if batch_idx % args.log_interval == 0:
+                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
+                    epoch, batch_idx * len(data), len(train_loader.dataset),
+                    100. * batch_idx / len(train_loader), loss.data.item()))
+                niter = epoch * len(train_loader) + batch_idx
+                writer.add_scalar('Train/Loss', loss.data.item(), niter)
+
+    def test():
+        model.eval()
+        test_loss = 0
+        correct = 0
+        for niter, (data, target) in enumerate(test_loader):
+            if args.cuda:
+                data, target = data.cuda(), target.cuda()
+            data, target = Variable(data, volatile=True), Variable(target)
+            output = model(data)
+            batch_loss = F.nll_loss(output, target, size_average=False).data.item()  # sum of losses over the batch
+            test_loss += batch_loss
+            pred = output.data.max(1)[1]  # get the index of the max log-probability
+            writer.add_scalar('Test/Loss', batch_loss, niter)
+            correct += pred.eq(target.data).cpu().sum()
+
+        test_loss /= len(test_loader.dataset)
+        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+            test_loss, correct, len(test_loader.dataset),
+            100. * correct / len(test_loader.dataset)))
+
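+    # Train for the requested number of epochs, saving a model snapshot to the system
+    # temp directory after each epoch, then evaluate once on the test set.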
+    for epoch in range(1, args.epochs + 1):
+        train(epoch)
+        torch.save(model, os.path.join(gettempdir(), 'model{}'.format(epoch)))
+    test()
 
 
-def train(epoch):
-    model.train()
-    for batch_idx, (data, target) in enumerate(train_loader):
-        if args.cuda:
-            data, target = data.cuda(), target.cuda()
-        data, target = Variable(data), Variable(target)
-        optimizer.zero_grad()
-        output = model(data)
-        loss = F.nll_loss(output, target)
-        loss.backward()
-        optimizer.step()
-        if batch_idx % args.log_interval == 0:
-            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
-                epoch, batch_idx * len(data), len(train_loader.dataset),
-                100. * batch_idx / len(train_loader), loss.data.item()))
-            niter = epoch*len(train_loader)+batch_idx
-            writer.add_scalar('Train/Loss', loss.data.item(), niter)
-
-
-def test():
-    model.eval()
-    test_loss = 0
-    correct = 0
-    for niter, (data, target) in enumerate(test_loader):
-        if args.cuda:
-            data, target = data.cuda(), target.cuda()
-        data, target = Variable(data, volatile=True), Variable(target)
-        output = model(data)
-        test_loss += F.nll_loss(output, target, size_average=False).data.item() # sum up batch loss
-        pred = output.data.max(1)[1] # get the index of the max log-probability
-        pred = pred.eq(target.data).cpu().sum()
-        writer.add_scalar('Test/Loss', pred, niter)
-        correct += pred
-
-    test_loss /= len(test_loader.dataset)
-    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
-        test_loss, correct, len(test_loader.dataset),
-        100. * correct / len(test_loader.dataset)))
-
-
-for epoch in range(1, args.epochs + 1):
-    train(epoch)
-    torch.save(model, os.path.join(gettempdir(), 'model{}'.format(epoch)))
-test()
+if __name__ == "__main__":
+    # The __main__ guard is required for DataLoader multiprocessing on Windows - https://pytorch.org/docs/stable/notes/windows.html#usage-multiprocessing
+    main()