Fix kernel-size issue in the PyTorch example. Update trains version.

allegroai 2020-09-01 18:01:39 +03:00
parent 5beecbb078
commit c234837ce2
2 changed files with 25 additions and 25 deletions


@@ -12,8 +12,8 @@
     "\n",
     "# pip install with locked versions\n",
     "! pip install -U pandas==1.0.3\n",
-    "! pip install -U trains>=0.15.0\n",
-    "! pip install -U optuna==2.0.0rc0"
+    "! pip install -U trains>=0.16.1\n",
+    "! pip install -U optuna==2.0.0"
    ]
   },
   {
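A side note on the pip lines in this hunk: when run through a notebook's ! shell escape, an unquoted >= is parsed by the shell as an output redirect, so the version constraint is silently dropped. A safer equivalent form (illustrative only, not part of this commit):

    ! pip install -U "trains>=0.16.1"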
@@ -35,7 +35,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "task = Task.init(project_name='Hyper-Parameter Search', task_name='Hyper-Parameter Optimization')\n"
+    "task = Task.init(project_name='Hyperparameter Optimization with Optuna', task_name='Hyperparameter Search')\n"
    ]
   },
   {
@@ -47,7 +47,7 @@
     "#####################################################################\n",
     "### Don't forget to replace this default id with your own task id ###\n",
     "#####################################################################\n",
-    "TEMPLATE_TASK_ID = 'd551a9990cb5451c9c744cc58201c612'"
+    "TEMPLATE_TASK_ID = 'b634a59993f8477f9e22167bae662be4'"
    ]
   },
   {
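For anyone reproducing this notebook, the template task id can also be looked up programmatically rather than copied by hand. A minimal sketch using the trains SDK; the project and task names below are placeholders, not values from this commit:

    from trains import Task

    # Look up the base (template) experiment by name and take its id;
    # replace the names with your own project/experiment
    template_task = Task.get_task(project_name='examples', task_name='pytorch cifar10')
    TEMPLATE_TASK_ID = template_task.id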
@@ -60,25 +60,26 @@
     "    base_task_id=TEMPLATE_TASK_ID,  # This is the experiment we want to optimize\n",
     "    # here we define the hyper-parameters to optimize\n",
     "    hyper_parameters=[\n",
-    "        UniformIntegerParameterRange('number_of_epochs', min_value=5, max_value=15, step_size=1),\n",
-    "        UniformIntegerParameterRange('batch_size', min_value=2, max_value=12, step_size=2),\n",
+    "        UniformIntegerParameterRange('number_of_epochs', min_value=2, max_value=12, step_size=2),\n",
+    "        UniformIntegerParameterRange('batch_size', min_value=2, max_value=16, step_size=2),\n",
     "        UniformParameterRange('dropout', min_value=0, max_value=0.5, step_size=0.05),\n",
-    "        UniformParameterRange('base_lr', min_value=0.0005, max_value=0.01, step_size=0.0005),\n",
+    "        UniformParameterRange('base_lr', min_value=0.00025, max_value=0.01, step_size=0.00025),\n",
     "    ],\n",
-    "    # this is the objective metric we want to maximize/minimize\n",
+    "    # setting the objective metric we want to maximize/minimize\n",
     "    objective_metric_title='accuracy',\n",
     "    objective_metric_series='total',\n",
     "    objective_metric_sign='max',  # maximize or minimize the objective metric\n",
-    "    max_number_of_concurrent_tasks=3,  # number of concurrent experiments\n",
-    "    # setting optimizer - trains supports GridSearch, RandomSearch or OptimizerBOHB\n",
-    "    optimizer_class=OptimizerOptuna,  # can be replaced with OptimizerBOHB\n",
-    "    execution_queue='default',  # queue to schedule the experiments for execution\n",
-    "    optimization_time_limit=30.,  # time limit for each experiment (optional, ignored by OptimizerBOHB)\n",
-    "    pool_period_min=1,  # check the experiments every x minutes\n",
-    "    # set the maximum number of experiments for the optimization.\n",
-    "    # OptimizerBOHB sets the total number of iterations as total_max_jobs * max_iteration_per_job\n",
-    "    total_max_jobs=12,\n",
-    "    # setting OptimizerBOHB configuration (ignored by other optimizers)\n",
+    "\n",
+    "    # setting optimizer - trains supports GridSearch, RandomSearch, OptimizerBOHB and OptimizerOptuna\n",
+    "    optimizer_class=OptimizerOptuna,\n",
+    "\n",
+    "    # Configuring optimization parameters\n",
+    "    execution_queue='dan_queue',  # queue to schedule the experiments for execution\n",
+    "    max_number_of_concurrent_tasks=2,  # number of concurrent experiments\n",
+    "    optimization_time_limit=60.,  # set the time limit for the optimization process\n",
+    "    compute_time_limit=120,  # set the compute time limit (sum of execution time on all machines)\n",
+    "    total_max_jobs=20,  # set the maximum number of experiments for the optimization;\n",
+    "    # converted to the total number of iterations for OptimizerBOHB\n",
     "    min_iteration_per_job=15000,  # minimum number of iterations per experiment, till early stopping\n",
     "    max_iteration_per_job=150000,  # maximum number of iterations per experiment\n",
     ")"
@@ -90,7 +91,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "optimizer.set_time_limit(in_minutes=90.0)  # set the time limit for the optimization process\n",
+    "optimizer.set_report_period(1)  # set the time gap between two consecutive reports\n",
     "optimizer.start()\n",
     "optimizer.wait()  # wait until the process is done\n",
     "optimizer.stop()  # make sure background optimization stopped"

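Once optimizer.wait() returns and the background process is stopped, the best runs can be pulled for inspection. A minimal sketch, assuming the get_top_experiments helper of HyperParameterOptimizer:

    # fetch the best-performing experiments found during the search
    top_experiments = optimizer.get_top_experiments(top_k=3)
    for task in top_experiments:
        # each entry is a Task object; print its id and last reported scalars
        print(task.id, task.get_last_scalar_metrics())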

@@ -15,7 +15,7 @@
     "! pip install -U torch==1.5.1\n",
     "! pip install -U torchvision==0.6.1\n",
     "! pip install -U numpy==1.18.4\n",
-    "! pip install -U trains>=0.15.0\n",
+    "! pip install -U trains>=0.16.1\n",
     "! pip install -U tensorboard==2.2.1"
    ]
   },
@@ -83,11 +83,10 @@
     "class Net(nn.Module):\n",
     "    def __init__(self):\n",
     "        super(Net, self).__init__()\n",
-    "        self.conv1 = nn.Conv2d(3, 6, 5)\n",
-    "        self.conv2 = nn.Conv2d(3, 6, 5)\n",
+    "        self.conv1 = nn.Conv2d(3, 6, 3)\n",
+    "        self.conv2 = nn.Conv2d(6, 16, 3)\n",
     "        self.pool = nn.MaxPool2d(2, 2)\n",
-    "        self.conv2 = nn.Conv2d(6, 16, 5)\n",
-    "        self.fc1 = nn.Linear(16 * 5 * 5, 120)\n",
+    "        self.fc1 = nn.Linear(16 * 6 * 6, 120)\n",
     "        self.fc2 = nn.Linear(120, 84)\n",
     "        self.dorpout = nn.Dropout(p=configuration_dict.get('dropout', 0.25))\n",
     "        self.fc3 = nn.Linear(84, 10)\n",
@@ -95,7 +94,7 @@
     "    def forward(self, x):\n",
     "        x = self.pool(F.relu(self.conv1(x)))\n",
     "        x = self.pool(F.relu(self.conv2(x)))\n",
-    "        x = x.view(-1, 16 * 5 * 5)\n",
+    "        x = x.view(-1, 16 * 6 * 6)\n",
     "        x = F.relu(self.fc1(x))\n",
     "        x = F.relu(self.fc2(x))\n",
     "        x = self.fc3(self.dorpout(x))\n",