Added text classification example and updated image and audio examples

This commit is contained in:
danmalowany-allegro 2020-06-22 14:13:49 +03:00
parent 8cb7c8130a
commit 10f11ba038
2 changed files with 697 additions and 0 deletions

@@ -0,0 +1,380 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "e-YsQrBjzNdX"
},
"outputs": [],
"source": [
"! pip install -U pip\n",
"! pip install -U torch==1.5.0\n",
"! pip install -U torchaudio==0.5.0\n",
"! pip install -U torchvision==0.6.0\n",
"! pip install -U matplotlib==3.2.1\n",
"! pip install -U trains>=0.15.0\n",
"! pip install -U pandas==1.0.4\n",
"! pip install -U numpy==1.18.4\n",
"! pip install -U tensorboard==2.2.1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "T7T0Rf26zNdm"
},
"outputs": [],
"source": [
"import PIL\n",
"import io\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"from pathlib2 import Path\n",
"import matplotlib.pyplot as plt\n",
"\n",
"import torch\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"import torch.optim as optim\n",
"from torch.utils.data import Dataset\n",
"from torch.utils.tensorboard import SummaryWriter\n",
"\n",
"import torchaudio\n",
"from torchvision.transforms import ToTensor\n",
"\n",
"from trains import Task\n",
"\n",
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"task = Task.init(project_name='Audio Example', task_name='audio classifier')\n",
"configuration_dict = {'number_of_epochs': 10, 'batch_size': 4, 'dropout': 0.25, 'base_lr': 0.001}\n",
"configuration_dict = task.connect(configuration_dict) # enabling configuration override by trains\n",
"print(configuration_dict) # printing actual configuration (after override in remote mode)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "msiz7QdvzNeA",
"scrolled": true
},
"outputs": [],
"source": [
"# Download UrbanSound8K dataset (https://urbansounddataset.weebly.com/urbansound8k.html)\n",
"path_to_UrbanSound8K = './data/UrbanSound8K'"
]
},
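{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check (a sketch, assuming the dataset archive was extracted\n",
"# into path_to_UrbanSound8K): verify that the metadata csv and the ten fold\n",
"# directories expected by the dataset wrapper below are in place.\n",
"dataset_root = Path(path_to_UrbanSound8K)\n",
"assert (dataset_root / 'metadata' / 'UrbanSound8K.csv').exists(), 'metadata csv not found'\n",
"missing_folds = [i for i in range(1, 11) if not (dataset_root / 'audio' / 'fold{}'.format(i)).is_dir()]\n",
"print('Missing folds: {}'.format(missing_folds if missing_folds else 'none'))"
]
},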
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "wXtmZe7yzNeS"
},
"outputs": [],
"source": [
"class UrbanSoundDataset(Dataset):\n",
"#rapper for the UrbanSound8K dataset\n",
" def __init__(self, csv_path, file_path, folderList):\n",
" self.file_path = file_path\n",
" self.file_names = []\n",
" self.labels = []\n",
" self.folders = []\n",
" \n",
" #loop through the csv entries and only add entries from folders in the folder list\n",
" csvData = pd.read_csv(csv_path)\n",
" for i in range(0,len(csvData)):\n",
" if csvData.iloc[i, 5] in folderList:\n",
" self.file_names.append(csvData.iloc[i, 0])\n",
" self.labels.append(csvData.iloc[i, 6])\n",
" self.folders.append(csvData.iloc[i, 5])\n",
" \n",
" def __getitem__(self, index):\n",
" #format the file path and load the file\n",
" path = self.file_path / (\"fold\" + str(self.folders[index])) / self.file_names[index]\n",
" sound, sample_rate = torchaudio.load(path, out = None, normalization = True)\n",
"\n",
" # UrbanSound8K uses two channels, this will convert them to one\n",
" soundData = torch.mean(sound, dim=0, keepdim=True)\n",
" \n",
" #Make sure all files are the same size\n",
" if soundData.numel() < 160000:\n",
" fixedsize_data = torch.nn.functional.pad(soundData, (0, 160000 - soundData.numel()))\n",
" else:\n",
" fixedsize_data = soundData[0,:160000].reshape(1,160000)\n",
" \n",
" #downsample the audio\n",
" downsample_data = fixedsize_data[::5]\n",
" \n",
" melspectogram_transform = torchaudio.transforms.MelSpectrogram(sample_rate=sample_rate)\n",
" melspectogram = melspectogram_transform(downsample_data)\n",
" melspectogram_db = torchaudio.transforms.AmplitudeToDB()(melspectogram)\n",
"\n",
" return fixedsize_data, sample_rate, melspectogram_db, self.labels[index]\n",
" \n",
" def __len__(self):\n",
" return len(self.file_names)\n",
"\n",
"\n",
"csv_path = Path(path_to_UrbanSound8K) / 'metadata' / 'UrbanSound8K.csv'\n",
"file_path = Path(path_to_UrbanSound8K) / 'audio'\n",
"\n",
"train_set = UrbanSoundDataset(csv_path, file_path, range(1,10))\n",
"test_set = UrbanSoundDataset(csv_path, file_path, [10])\n",
"print(\"Train set size: \" + str(len(train_set)))\n",
"print(\"Test set size: \" + str(len(test_set)))\n",
"\n",
"train_loader = torch.utils.data.DataLoader(train_set, batch_size = configuration_dict.get('batch_size', 4), \n",
" shuffle = True, pin_memory=True, num_workers=1)\n",
"test_loader = torch.utils.data.DataLoader(test_set, batch_size = configuration_dict.get('batch_size', 4), \n",
" shuffle = False, pin_memory=True, num_workers=1)\n",
"\n",
"classes = ('air_conditioner', 'car_horn', 'children_playing', 'dog_bark', 'drilling', 'engine_idling', \n",
" 'gun_shot', 'jackhammer', 'siren', 'street_music')"
]
},
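{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A quick look at a single sample (illustrative only): each item is a fixed-size\n",
"# waveform of 160000 samples, its sample rate, a mel spectrogram in decibels\n",
"# and an integer class label.\n",
"waveform, sample_rate, spectrogram, label = train_set[0]\n",
"print('Waveform shape: {}, sample rate: {}'.format(waveform.shape, sample_rate))\n",
"print('Mel spectrogram shape: {}, label: {}'.format(spectrogram.shape, classes[label]))"
]
},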
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "ylblw-k1zNeZ"
},
"outputs": [],
"source": [
"class Net(nn.Module):\n",
" def __init__(self, num_classes, dropout_value):\n",
" super(Net,self).__init__()\n",
" self.num_classes = num_classes\n",
" self.dropout_value = dropout_value\n",
" \n",
" self.C1 = nn.Conv2d(1,16,3)\n",
" self.C2 = nn.Conv2d(16,32,3)\n",
" self.C3 = nn.Conv2d(32,64,3)\n",
" self.C4 = nn.Conv2d(64,128,3)\n",
" self.maxpool1 = nn.MaxPool2d(2,2) \n",
" self.fc1 = nn.Linear(128*29*197,128)\n",
" self.fc2 = nn.Linear(128,self.num_classes)\n",
" self.dropout = nn.Dropout(self.dropout_value)\n",
" \n",
" def forward(self,x):\n",
" # add sequence of convolutional and max pooling layers\n",
" x = F.relu(self.C1(x))\n",
" x = self.maxpool1(F.relu(self.C2(x)))\n",
" x = F.relu(self.C3(x))\n",
" x = self.maxpool1(F.relu(self.C4(x)))\n",
" # flatten image input\n",
" x = x.view(-1,128*29*197)\n",
" x = F.relu(self.fc1(self.dropout(x)))\n",
" x = self.fc2(self.dropout(x))\n",
" return x\n",
" \n",
" \n",
"model = Net(len(classes), configuration_dict.get('dropout', 0.25))"
]
},
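{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Where 128*29*197 comes from: a 160000-sample clip with the default MelSpectrogram\n",
"# settings (n_mels=128, hop_length=200) gives a 1x128x801 input; the 3x3 convolutions\n",
"# C1 and C2 shrink it to 124x797, pooling halves it to 62x398, C3 and C4 give 58x394\n",
"# and the final pooling yields 29x197 with 128 channels. A dummy forward pass (sketch)\n",
"# confirms the wiring:\n",
"with torch.no_grad():\n",
"    dummy_input = torch.zeros(1, 1, 128, 801)\n",
"    print('Output shape: {}'.format(model(dummy_input).shape))  # expected: [1, 10]"
]
},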
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "3yKYru14zNef"
},
"outputs": [],
"source": [
"optimizer = optim.SGD(model.parameters(), lr = configuration_dict.get('base_lr', 0.001), momentum = 0.9)\n",
"scheduler = optim.lr_scheduler.StepLR(optimizer, step_size = 3, gamma = 0.1)\n",
"criterion = nn.CrossEntropyLoss()"
]
},
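{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustration of the StepLR schedule above: with base_lr=0.001, step_size=3 and\n",
"# gamma=0.1 the learning rate drops tenfold every 3 epochs. A throwaway optimizer\n",
"# is used here so the real scheduler state is left untouched.\n",
"demo_optimizer = optim.SGD([torch.zeros(1, requires_grad=True)], lr=configuration_dict.get('base_lr', 0.001))\n",
"demo_scheduler = optim.lr_scheduler.StepLR(demo_optimizer, step_size=3, gamma=0.1)\n",
"for demo_epoch in range(configuration_dict.get('number_of_epochs', 10)):\n",
"    print('epoch {}: lr = {:.6f}'.format(demo_epoch, demo_optimizer.param_groups[0]['lr']))\n",
"    demo_scheduler.step()"
]
},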
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"device = torch.cuda.current_device() if torch.cuda.is_available() else torch.device('cpu')\n",
"print('Device to use: {}'.format(device))\n",
"model.to(device)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tensorboard_writer = SummaryWriter('./tensorboard_logs')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def plot_signal(signal, title, cmap=None):\n",
" fig = plt.figure()\n",
" if signal.ndim == 1:\n",
" plt.plot(signal)\n",
" else:\n",
" plt.imshow(signal, cmap=cmap) \n",
" plt.title(title)\n",
" \n",
" plot_buf = io.BytesIO()\n",
" plt.savefig(plot_buf, format='jpeg')\n",
" plot_buf.seek(0)\n",
" plt.close(fig)\n",
" return ToTensor()(PIL.Image.open(plot_buf))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "Vdthqz3JzNem"
},
"outputs": [],
"source": [
"def train(model, epoch):\n",
" model.train()\n",
" for batch_idx, (sounds, sample_rate, inputs, labels) in enumerate(train_loader):\n",
" inputs = inputs.to(device)\n",
" labels = labels.to(device)\n",
"\n",
" # zero the parameter gradients\n",
" optimizer.zero_grad()\n",
"\n",
" # forward + backward + optimize\n",
" outputs = model(inputs)\n",
" _, predicted = torch.max(outputs, 1)\n",
" loss = criterion(outputs, labels)\n",
" loss.backward()\n",
" optimizer.step()\n",
" \n",
" iteration = epoch * len(train_loader) + batch_idx\n",
" if batch_idx % log_interval == 0: #print training stats\n",
" print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'\n",
" .format(epoch, batch_idx * len(inputs), len(train_loader.dataset), \n",
" 100. * batch_idx / len(train_loader), loss))\n",
" tensorboard_writer.add_scalar('training loss/loss', loss, iteration)\n",
" tensorboard_writer.add_scalar('learning rate/lr', optimizer.param_groups[0]['lr'], iteration)\n",
" \n",
" \n",
" if batch_idx % debug_interval == 0: # report debug image every \"debug_interval\" mini-batches\n",
" for n, (inp, pred, label) in enumerate(zip(inputs, predicted, labels)):\n",
" series = 'label_{}_pred_{}'.format(classes[label.cpu()], classes[pred.cpu()])\n",
" tensorboard_writer.add_image('Train MelSpectrogram samples/{}_{}_{}'.format(batch_idx, n, series), \n",
" plot_signal(inp.cpu().numpy().squeeze(), series, 'hot'), iteration)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "LBWoj7u5zNes"
},
"outputs": [],
"source": [
"def test(model, epoch):\n",
" model.eval()\n",
" class_correct = list(0. for i in range(10))\n",
" class_total = list(0. for i in range(10))\n",
" with torch.no_grad():\n",
" for idx, (sounds, sample_rate, inputs, labels) in enumerate(test_loader):\n",
" inputs = inputs.to(device)\n",
" labels = labels.to(device)\n",
"\n",
" outputs = model(inputs)\n",
"\n",
" _, predicted = torch.max(outputs, 1)\n",
" c = (predicted == labels)\n",
" for i in range(len(inputs)):\n",
" label = labels[i].item()\n",
" class_correct[label] += c[i].item()\n",
" class_total[label] += 1\n",
" \n",
" iteration = (epoch + 1) * len(train_loader)\n",
" if idx % debug_interval == 0: # report debug image every \"debug_interval\" mini-batches\n",
" for n, (sound, inp, pred, label) in enumerate(zip(sounds, inputs, predicted, labels)):\n",
" series = 'label_{}_pred_{}'.format(classes[label.cpu()], classes[pred.cpu()])\n",
" tensorboard_writer.add_audio('Test audio samples/{}_{}_{}'.format(idx, n, series), \n",
" sound, iteration, int(sample_rate[n]))\n",
" tensorboard_writer.add_image('Test MelSpectrogram samples/{}_{}_{}'.format(idx, n, series), \n",
" plot_signal(inp.cpu().numpy().squeeze(), series, 'hot'), iteration)\n",
"\n",
" total_accuracy = 100 * sum(class_correct)/sum(class_total)\n",
" print('[Iteration {}] Accuracy on the {} test images: {}%\\n'.format(epoch, sum(class_total), total_accuracy))\n",
" tensorboard_writer.add_scalar('accuracy/total', total_accuracy, iteration)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "X5lx3g_5zNey",
"scrolled": false
},
"outputs": [],
"source": [
"log_interval = 100\n",
"debug_interval = 200\n",
"for epoch in range(configuration_dict.get('number_of_epochs', 10)):\n",
" train(model, epoch)\n",
" test(model, epoch)\n",
" scheduler.step()"
]
}
],
"metadata": {
"colab": {
"name": "audio_classifier_tutorial.ipynb",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.4"
}
},
"nbformat": 4,
"nbformat_minor": 1
}

@@ -0,0 +1,317 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"! pip install -U pip\n",
"! pip install -U torch==1.5.0\n",
"! pip install -U torchtext==0.6.0\n",
"! pip install -U matplotlib==3.2.1\n",
"! pip install -U trains>=0.15.0\n",
"! pip install -U tensorboard==2.2.1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import time\n",
"\n",
"import torch\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"import torchtext\n",
"from torchtext.datasets import text_classification\n",
"from torch.utils.data import DataLoader\n",
"from torch.utils.tensorboard import SummaryWriter\n",
"\n",
"from trains import Task\n",
"\n",
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"task = Task.init(project_name='Text Example', task_name='text classifier')\n",
"configuration_dict = {'number_of_epochs': 6, 'batch_size': 16, 'ngrams': 2, 'base_lr': 1.0}\n",
"configuration_dict = task.connect(configuration_dict) # enabling configuration override by trains\n",
"print(configuration_dict) # printing actual configuration (after override in remote mode)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"if not os.path.isdir('./data'):\n",
" os.mkdir('./data')\n",
"train_dataset, test_dataset = text_classification.DATASETS['AG_NEWS'](root='./data', \n",
" ngrams=configuration_dict.get('ngrams', 2))\n",
"vocabulary = train_dataset.get_vocab()"
]
},
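{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A quick look at the data (illustrative only): each example is an integer label\n",
"# plus a tensor of token ids into the ngram vocabulary built above.\n",
"example_label, example_text = train_dataset[0]\n",
"print('Vocabulary size: {}'.format(len(vocabulary)))\n",
"print('Label: {}, first tokens: {}'.format(example_label, [vocabulary.itos[token_id] for token_id in example_text[:8]]))"
]
},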
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def generate_batch(batch):\n",
" label = torch.tensor([entry[0] for entry in batch])\n",
" # original data batch input are packed into a list and concatenated as a single tensor\n",
" text = [entry[1] for entry in batch]\n",
" # offsets is a tensor of delimiters to represent the beginning index of each sequence in the text tensor.\n",
" offsets = [0] + [len(entry) for entry in text] \n",
" \n",
" # torch.Tensor.cumsum returns the cumulative sum of elements in the dimension dim.\n",
" offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)\n",
" text = torch.cat(text)\n",
" return text, offsets, label\n",
"\n",
"train_loader = torch.utils.data.DataLoader(train_dataset, batch_size = configuration_dict.get('batch_size', 16), \n",
" shuffle = True, pin_memory=True, collate_fn=generate_batch)\n",
"test_loader = torch.utils.data.DataLoader(test_dataset, batch_size = configuration_dict.get('batch_size', 16), \n",
" shuffle = False, pin_memory=True, collate_fn=generate_batch)\n",
"\n",
"classes = (\"World\", \"Sports\", \"Business\", \"Sci/Tec\")"
]
},
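{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Worked example of generate_batch on a toy batch (sketch): two entries of lengths\n",
"# 3 and 2 are concatenated into one flat tensor, and the cumulative sum of lengths\n",
"# gives the offsets [0, 3] marking where each entry starts.\n",
"toy_batch = [(0, torch.tensor([11, 12, 13])), (1, torch.tensor([21, 22]))]\n",
"toy_text, toy_offsets, toy_labels = generate_batch(toy_batch)\n",
"print('text: {}, offsets: {}, labels: {}'.format(toy_text, toy_offsets, toy_labels))"
]
},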
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class TextSentiment(nn.Module):\n",
" def __init__(self, vocab_size, embed_dim, num_class):\n",
" super().__init__()\n",
" self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True)\n",
" self.fc = nn.Linear(embed_dim, num_class)\n",
" self.init_weights()\n",
"\n",
" def init_weights(self):\n",
" initrange = 0.5\n",
" self.embedding.weight.data.uniform_(-initrange, initrange)\n",
" self.fc.weight.data.uniform_(-initrange, initrange)\n",
" self.fc.bias.data.zero_()\n",
"\n",
" def forward(self, text, offsets):\n",
" embedded = self.embedding(text, offsets)\n",
" return self.fc(embedded)"
]
},
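{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Toy forward pass (sketch, with made-up sizes): EmbeddingBag mean-pools the\n",
"# embeddings of each sequence delimited by offsets, so a flat batch holding two\n",
"# sequences yields one score vector per sequence.\n",
"toy_model = TextSentiment(vocab_size=30, embed_dim=8, num_class=4)\n",
"toy_scores = toy_model(torch.tensor([11, 12, 13, 21, 22]), torch.tensor([0, 3]))\n",
"print('Scores shape: {}'.format(toy_scores.shape))  # expected: [2, 4]"
]
},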
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"VOCAB_SIZE = len(train_dataset.get_vocab())\n",
"EMBED_DIM = 32\n",
"NUN_CLASS = len(train_dataset.get_labels())\n",
"model = TextSentiment(VOCAB_SIZE, EMBED_DIM, NUN_CLASS)\n",
"\n",
"device = torch.cuda.current_device() if torch.cuda.is_available() else torch.device('cpu')\n",
"print('Device to use: {}'.format(device))\n",
"model.to(device)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"criterion = torch.nn.CrossEntropyLoss().to(device)\n",
"optimizer = torch.optim.SGD(model.parameters(), lr=configuration_dict.get('base_lr', 1.0))\n",
"scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 2, gamma=0.9)"
]
},
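{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustration: StepLR(step_size=2, gamma=0.9) is stepped once per epoch inside\n",
"# train_func below, so the learning rate follows base_lr * 0.9 ** (epoch // 2).\n",
"demo_base_lr = configuration_dict.get('base_lr', 1.0)\n",
"print(['{:.3f}'.format(demo_base_lr * 0.9 ** (epoch // 2)) for epoch in range(configuration_dict.get('number_of_epochs', 6))])"
]
},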
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"tensorboard_writer = SummaryWriter('./tensorboard_logs')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def train_func(data, epoch):\n",
" # Train the model\n",
" train_loss = 0\n",
" train_acc = 0\n",
" for batch_idx, (text, offsets, cls) in enumerate(data):\n",
" optimizer.zero_grad()\n",
" text, offsets, cls = text.to(device), offsets.to(device), cls.to(device)\n",
" output = model(text, offsets)\n",
" loss = criterion(output, cls)\n",
" train_loss += loss.item()\n",
" loss.backward()\n",
" optimizer.step()\n",
" train_acc += (output.argmax(1) == cls).sum().item()\n",
" \n",
" iteration = epoch * len(train_loader) + batch_idx\n",
" if batch_idx % log_interval == 0: \n",
" print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'\n",
" .format(epoch, batch_idx * len(cls), len(train_dataset), \n",
" 100. * batch_idx / len(train_loader), loss))\n",
" tensorboard_writer.add_scalar('training loss/loss', loss, iteration)\n",
" tensorboard_writer.add_scalar('learning rate/lr', optimizer.param_groups[0]['lr'], iteration)\n",
"\n",
" # Adjust the learning rate\n",
" scheduler.step()\n",
"\n",
" return train_loss / len(train_dataset), train_acc / len(train_dataset)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"def test(data, epoch):\n",
" loss = 0\n",
" acc = 0\n",
" for idx, (text, offsets, cls) in enumerate(data):\n",
" text, offsets, cls = text.to(device), offsets.to(device), cls.to(device)\n",
" with torch.no_grad():\n",
" output = model(text, offsets)\n",
" predicted = output.argmax(1)\n",
" loss = criterion(output, cls)\n",
" loss += loss.item()\n",
" acc += (predicted == cls).sum().item()\n",
" \n",
" iteration = (epoch + 1) * len(train_loader)\n",
" if idx % debug_interval == 0: # report debug text every \"debug_interval\" mini-batches\n",
" offsets = offsets.tolist() + [len(text)]\n",
" for n, (pred, label) in enumerate(zip(predicted, cls)):\n",
" ids_to_text = [vocabulary.itos[id] for id in text[offsets[n]:offsets[n+1]]]\n",
" series = '{}_{}_label_{}_pred_{}'.format(idx, n, classes[label], classes[pred])\n",
" tensorboard_writer.add_text('Test text samples/{}'.format(series), \n",
" ' '.join(ids_to_text), iteration)\n",
"\n",
" return loss / len(test_dataset), acc / len(test_dataset)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"log_interval = 200\n",
"debug_interval = 500\n",
"for epoch in range(configuration_dict.get('number_of_epochs', 6)):\n",
" start_time = time.time()\n",
" \n",
" train_loss, train_acc = train_func(train_loader, epoch)\n",
" test_loss, test_acc = test(test_loader, epoch)\n",
" \n",
" secs = int(time.time() - start_time)\n",
"\n",
" print('Epoch: %d' %(epoch + 1), \" | time in %d minutes, %d seconds\" %(secs / 60, secs % 60))\n",
" print(f'\\tLoss: {train_loss:.4f}(train)\\t|\\tAcc: {train_acc * 100:.1f}%(train)')\n",
" print(f'\\tLoss: {test_loss:.4f}(test)\\t|\\tAcc: {test_acc * 100:.1f}%(test)')\n",
" tensorboard_writer.add_scalar('accuracy/train', train_acc, (epoch + 1) * len(train_loader))\n",
" tensorboard_writer.add_scalar('accuracy/test', test_acc, (epoch + 1) * len(train_loader))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"import re\n",
"from torchtext.data.utils import ngrams_iterator\n",
"from torchtext.data.utils import get_tokenizer\n",
"\n",
"def predict(text, model, vocab, ngrams):\n",
" tokenizer = get_tokenizer(\"basic_english\")\n",
" with torch.no_grad():\n",
" text = torch.tensor([vocab[token]\n",
" for token in ngrams_iterator(tokenizer(text), ngrams)])\n",
" output = model(text, torch.tensor([0]))\n",
" return output.argmax(1).item()\n",
"\n",
"ex_text_str = \"MEMPHIS, Tenn. Four days ago, Jon Rahm was \\\n",
" enduring the seasons worst weather conditions on Sunday at The \\\n",
" Open on his way to a closing 75 at Royal Portrush, which \\\n",
" considering the wind and the rain was a respectable showing. \\\n",
" Thursdays first round at the WGC-FedEx St. Jude Invitational \\\n",
" was another story. With temperatures in the mid-80s and hardly any \\\n",
" wind, the Spaniard was 13 strokes better in a flawless round. \\\n",
" Thanks to his best putting performance on the PGA Tour, Rahm \\\n",
" finished with an 8-under 62 for a three-stroke lead, which \\\n",
" was even more impressive considering hed never played the \\\n",
" front nine at TPC Southwind.\"\n",
"\n",
"ans = predict(ex_text_str, model.to(\"cpu\"), vocabulary, configuration_dict.get('ngrams', 2))\n",
"print(\"This is a %s news\" %classes[ans])"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.4"
}
},
"nbformat": 4,
"nbformat_minor": 1
}