diff --git a/clearml/binding/jsonargs_bind.py b/clearml/binding/jsonargs_bind.py
index eff8483b..cb916f56 100644
--- a/clearml/binding/jsonargs_bind.py
+++ b/clearml/binding/jsonargs_bind.py
@@ -1,4 +1,5 @@
 import json
+import logging
 
 try:
     from jsonargparse import ArgumentParser
@@ -98,11 +99,13 @@ class PatchJsonArgParse(object):
             try:
                 PatchJsonArgParse._load_task_params()
                 params = PatchJsonArgParse.__remote_task_params_dict
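+                # executing remotely: rebuild the argument namespace from the parameters stored on the task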
                 params_namespace = Namespace()
                 for k, v in params.items():
                     params_namespace[k] = v
                 return params_namespace
-            except Exception:
+            except Exception as e:
+                logging.getLogger(__name__).warning("Failed restoring jsonargparse arguments from task: {}".format(e))
                 return original_fn(obj, **kwargs)
         parsed_args = original_fn(obj, **kwargs)
         # noinspection PyBroadException
@@ -114,10 +117,15 @@ class PatchJsonArgParse(object):
                     PatchJsonArgParse._args_type[ns_name] = PatchJsonArgParse._command_type
                     subcommand = ns_val
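+            # prefer the unified "lightning" package; fall back to the standalone "pytorch_lightning"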
             try:
-                import pytorch_lightning
+                import lightning
             except ImportError:
-                pytorch_lightning = None
-            if subcommand and subcommand in PatchJsonArgParse._args and pytorch_lightning:
+                try:
+                    import pytorch_lightning
+                    lightning = pytorch_lightning
+                except ImportError:
+                    lightning = None
+            if subcommand and subcommand in PatchJsonArgParse._args and lightning:
                 subcommand_args = flatten_dictionary(
                     PatchJsonArgParse._args[subcommand],
                     prefix=subcommand + PatchJsonArgParse._commands_sep,
@@ -127,8 +135,8 @@
                 PatchJsonArgParse._args.update(subcommand_args)
             PatchJsonArgParse._args = {k: v for k, v in PatchJsonArgParse._args.items()}
             PatchJsonArgParse._update_task_args()
-        except Exception:
-            pass
+        except Exception as e:
+            logging.getLogger(__name__).warning("Failed parsing jsonargparse arguments: {}".format(e))
         return parsed_args
 
     @staticmethod
diff --git a/examples/frameworks/jsonargparse/pytorch_lightning_cli.py b/examples/frameworks/jsonargparse/pytorch_lightning_cli.py
index d73428e1..751a37a5 100644
--- a/examples/frameworks/jsonargparse/pytorch_lightning_cli.py
+++ b/examples/frameworks/jsonargparse/pytorch_lightning_cli.py
@@ -1,103 +1,15 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Notice that this file has been modified to examplify the use of
-# ClearML when used with PyTorch Lightning
-
-import torch
-import torchvision.transforms as T
-from torch.nn import functional as F
-import torch.nn as nn
-from torchmetrics import Accuracy
-
-from torchvision.datasets.mnist import MNIST
-from pytorch_lightning import LightningModule
-from pytorch_lightning.utilities.cli import LightningCLI
+try:
+    from lightning.pytorch.cli import LightningCLI
+    from lightning.pytorch.demos.boring_classes import DemoModel, BoringDataModule
+except ImportError:
+    import sys
+    print("Module 'lightning' not installed (only available for Python 3.8+")
+    sys.exit(0)
 from clearml import Task
 
 
-class Net(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(1, 32, 3, 1)
-        self.conv2 = nn.Conv2d(32, 64, 3, 1)
-        self.dropout1 = nn.Dropout(0.25)
-        self.dropout2 = nn.Dropout(0.5)
-        self.fc1 = nn.Linear(9216, 128)
-        self.fc2 = nn.Linear(128, 10)
-
-    def forward(self, x):
-        x = self.conv1(x)
-        x = F.relu(x)
-        x = self.conv2(x)
-        x = F.relu(x)
-        x = F.max_pool2d(x, 2)
-        x = self.dropout1(x)
-        x = torch.flatten(x, 1)
-        x = self.fc1(x)
-        x = F.relu(x)
-        x = self.dropout2(x)
-        x = self.fc2(x)
-        output = F.log_softmax(x, dim=1)
-        return output
-
-
-class ImageClassifier(LightningModule):
-    def __init__(self, model=None, lr=1.0, gamma=0.7, batch_size=32):
-        super().__init__()
-        self.save_hyperparameters(ignore="model")
-        self.model = model or Net()
-        self.test_acc = Accuracy()
-
-    def forward(self, x):
-        return self.model(x)
-
-    def training_step(self, batch, batch_idx):
-        x, y = batch
-        logits = self.forward(x)
-        loss = F.nll_loss(logits, y.long())
-        return loss
-
-    def test_step(self, batch, batch_idx):
-        x, y = batch
-        logits = self.forward(x)
-        loss = F.nll_loss(logits, y.long())
-        self.test_acc(logits, y)
-        self.log("test_acc", self.test_acc)
-        self.log("test_loss", loss)
-
-    def configure_optimizers(self):
-        optimizer = torch.optim.Adadelta(self.model.parameters(), lr=self.hparams.lr)
-        return [optimizer], [torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=self.hparams.gamma)]
-
-    @property
-    def transform(self):
-        return T.Compose([T.ToTensor(), T.Normalize((0.1307,), (0.3081,))])
-
-    def prepare_data(self) -> None:
-        MNIST("./data", download=True)
-
-    def train_dataloader(self):
-        train_dataset = MNIST("./data", train=True, download=False, transform=self.transform)
-        return torch.utils.data.DataLoader(train_dataset, batch_size=self.hparams.batch_size)
-
-    def test_dataloader(self):
-        test_dataset = MNIST("./data", train=False, download=False, transform=self.transform)
-        return torch.utils.data.DataLoader(test_dataset, batch_size=self.hparams.batch_size)
-
-
 if __name__ == "__main__":
-    Task.add_requirements('requirements.txt')
-    task = Task.init(project_name="example", task_name="pytorch_lightning_jsonargparse")
-    LightningCLI(ImageClassifier, seed_everything_default=42, save_config_overwrite=True, run=True)
+    Task.add_requirements("requirements.txt")
+    Task.init(project_name="example", task_name="pytorch_lightning_jsonargparse")
+    LightningCLI(DemoModel, BoringDataModule)
diff --git a/examples/frameworks/jsonargparse/pytorch_lightning_cli.yml b/examples/frameworks/jsonargparse/pytorch_lightning_cli.yml
index fe8c31a1..ab2de927 100644
--- a/examples/frameworks/jsonargparse/pytorch_lightning_cli.yml
+++ b/examples/frameworks/jsonargparse/pytorch_lightning_cli.yml
@@ -1,12 +1,13 @@
 trainer:
   callbacks:
-    - class_path: pytorch_lightning.callbacks.LearningRateMonitor
+    - class_path: lightning.pytorch.callbacks.LearningRateMonitor
       init_args:
         logging_interval: epoch
-    - class_path: pytorch_lightning.callbacks.ModelCheckpoint
+    - class_path: lightning.pytorch.callbacks.ModelCheckpoint
       init_args:
         filename: best
         save_last: False
         save_top_k: 1
         monitor: loss
         mode: min
+  max_epochs: 10
diff --git a/examples/frameworks/jsonargparse/pytorch_lightning_cli_old.py b/examples/frameworks/jsonargparse/pytorch_lightning_cli_old.py
new file mode 100644
index 00000000..38cd91c6
--- /dev/null
+++ b/examples/frameworks/jsonargparse/pytorch_lightning_cli_old.py
@@ -0,0 +1,116 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Notice that this file has been modified to exemplify
+# the use of ClearML with PyTorch Lightning
+import sys
+
+import torch
+import torchvision.transforms as T
+from torch.nn import functional as F
+import torch.nn as nn
+from torchmetrics import Accuracy
+
+from torchvision.datasets.mnist import MNIST
+from pytorch_lightning import LightningModule
+from clearml import Task
+try:
+    from pytorch_lightning.cli import LightningCLI
+except ImportError:
+    try:
+        from pytorch_lightning.utilities.cli import LightningCLI
+    except ImportError:
+        print("Looks like you are using pytorch_lightning>=2.0. This example only works with older versions")
+        sys.exit(0)
+
+
+class Net(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 32, 3, 1)
+        self.conv2 = nn.Conv2d(32, 64, 3, 1)
+        self.dropout1 = nn.Dropout(0.25)
+        self.dropout2 = nn.Dropout(0.5)
+        self.fc1 = nn.Linear(9216, 128)
+        self.fc2 = nn.Linear(128, 10)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = F.relu(x)
+        x = self.conv2(x)
+        x = F.relu(x)
+        x = F.max_pool2d(x, 2)
+        x = self.dropout1(x)
+        x = torch.flatten(x, 1)
+        x = self.fc1(x)
+        x = F.relu(x)
+        x = self.dropout2(x)
+        x = self.fc2(x)
+        output = F.log_softmax(x, dim=1)
+        return output
+
+
+class ImageClassifier(LightningModule):
+    def __init__(self, model=None, lr=1.0, gamma=0.7, batch_size=32):
+        super().__init__()
+        self.save_hyperparameters(ignore="model")
+        self.model = model or Net()
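+        # torchmetrics >= 0.11 requires an explicit task argument; older versions take none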
+        try:
+            self.test_acc = Accuracy()
+        except TypeError:
+            self.test_acc = Accuracy("binary")
+
+    def forward(self, x):
+        return self.model(x)
+
+    def training_step(self, batch, batch_idx):
+        x, y = batch
+        logits = self.forward(x)
+        loss = F.nll_loss(logits, y.long())
+        return loss
+
+    def test_step(self, batch, batch_idx):
+        x, y = batch
+        logits = self.forward(x)
+        loss = F.nll_loss(logits, y.long())
+        self.test_acc(logits, y)
+        self.log("test_acc", self.test_acc)
+        self.log("test_loss", loss)
+
+    def configure_optimizers(self):
+        optimizer = torch.optim.Adadelta(self.model.parameters(), lr=self.hparams.lr)
+        return [optimizer], [torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=self.hparams.gamma)]
+
+    @property
+    def transform(self):
+        return T.Compose([T.ToTensor(), T.Normalize((0.1307,), (0.3081,))])
+
+    def prepare_data(self) -> None:
+        MNIST("./data", download=True)
+
+    def train_dataloader(self):
+        train_dataset = MNIST("./data", train=True, download=False, transform=self.transform)
+        return torch.utils.data.DataLoader(train_dataset, batch_size=self.hparams.batch_size)
+
+    def test_dataloader(self):
+        test_dataset = MNIST("./data", train=False, download=False, transform=self.transform)
+        return torch.utils.data.DataLoader(test_dataset, batch_size=self.hparams.batch_size)
+
+
+if __name__ == "__main__":
+    Task.add_requirements("requirements.txt")
+    Task.init(project_name="example", task_name="pytorch_lightning_jsonargparse")
+    LightningCLI(ImageClassifier, seed_everything_default=42, run=True)
diff --git a/examples/frameworks/jsonargparse/pytorch_lightning_cli_old.yml b/examples/frameworks/jsonargparse/pytorch_lightning_cli_old.yml
new file mode 100644
index 00000000..d5010ac4
--- /dev/null
+++ b/examples/frameworks/jsonargparse/pytorch_lightning_cli_old.yml
@@ -0,0 +1,12 @@
+trainer:
+  callbacks:
+    - class_path: pytorch_lightning.callbacks.LearningRateMonitor
+      init_args:
+        logging_interval: epoch
+    - class_path: pytorch_lightning.callbacks.ModelCheckpoint
+      init_args:
+        filename: best
+        save_last: False
+        save_top_k: 1
+        monitor: loss
+        mode: min
\ No newline at end of file
diff --git a/examples/frameworks/jsonargparse/requirements.txt b/examples/frameworks/jsonargparse/requirements.txt
index ee8b5de0..6b54e31b 100644
--- a/examples/frameworks/jsonargparse/requirements.txt
+++ b/examples/frameworks/jsonargparse/requirements.txt
@@ -1,7 +1,8 @@
-clearml
 jsonargparse
 pytorch_lightning
 torch
 torchmetrics
 torchvision
 docstring_parser
+pytorch-lightning[extra]
+lightning; python_version >= '3.8'
diff --git a/examples/frameworks/pytorch-lightning/pytorch_lightning_example.py b/examples/frameworks/pytorch-lightning/pytorch_lightning_example.py
index 91f2d085..337d829e 100644
--- a/examples/frameworks/pytorch-lightning/pytorch_lightning_example.py
+++ b/examples/frameworks/pytorch-lightning/pytorch_lightning_example.py
@@ -1,13 +1,13 @@
-import os
 from argparse import ArgumentParser
-import torch
+
 import pytorch_lightning as pl
+import torch
 from torch.nn import functional as F
 from torch.utils.data import DataLoader, random_split
-from clearml import Task
-
-from torchvision.datasets.mnist import MNIST
 from torchvision import transforms
+from torchvision.datasets.mnist import MNIST
+
+from clearml import Task
 
 
 class LitClassifier(pl.LightningModule):
@@ -35,12 +35,14 @@ class LitClassifier(pl.LightningModule):
         y_hat = self(x)
         loss = F.cross_entropy(y_hat, y)
         self.log('valid_loss', loss)
+        return loss
 
     def test_step(self, batch, batch_idx):
         x, y = batch
         y_hat = self(x)
         loss = F.cross_entropy(y_hat, y)
-        self.log('test_loss', loss)
+        self.log('test_loss', loss)
+        return loss
 
     def configure_optimizers(self):
         return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
@@ -54,19 +56,18 @@ class LitClassifier(pl.LightningModule):
 
 
 if __name__ == '__main__':
-    # Connecting ClearML with the current process,
-    # from here on everything is logged automatically
-    task = Task.init(project_name="examples", task_name="PyTorch lightning MNIST example")
-
     pl.seed_everything(0)
 
     parser = ArgumentParser()
     parser.add_argument('--batch_size', default=32, type=int)
-    parser = pl.Trainer.add_argparse_args(parser)
-    parser.set_defaults(max_epochs=3)
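+    # Trainer.add_argparse_args was removed in pytorch-lightning 2.0, so expose max_epochs directly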
+    parser.add_argument('--max_epochs', default=3, type=int)
     parser = LitClassifier.add_model_specific_args(parser)
     args = parser.parse_args()
 
+    Task.init(project_name="examples-internal", task_name="lightning checkpoint issue and argparser")
+
     # ------------
     # data
     # ------------
@@ -74,9 +75,9 @@ if __name__ == '__main__':
     mnist_test = MNIST('', train=False, download=True, transform=transforms.ToTensor())
     mnist_train, mnist_val = random_split(dataset, [55000, 5000])
 
-    train_loader = DataLoader(mnist_train, batch_size=args.batch_size, num_workers=os.cpu_count())
-    val_loader = DataLoader(mnist_val, batch_size=args.batch_size, num_workers=os.cpu_count())
-    test_loader = DataLoader(mnist_test, batch_size=args.batch_size, num_workers=os.cpu_count())
+    train_loader = DataLoader(mnist_train, batch_size=args.batch_size)
+    val_loader = DataLoader(mnist_val, batch_size=args.batch_size)
+    test_loader = DataLoader(mnist_test, batch_size=args.batch_size)
 
     # ------------
     # model
@@ -86,7 +87,8 @@ if __name__ == '__main__':
     # ------------
     # training
     # ------------
-    trainer = pl.Trainer.from_argparse_args(args)
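+    # Trainer.from_argparse_args is likewise gone in pytorch-lightning 2.0; construct the Trainer explicitly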
+    trainer = pl.Trainer(max_epochs=args.max_epochs)
     trainer.fit(model, train_loader, val_loader)
 
     # ------------