small edits
commit 9f9400ab18
parent 0cb99b93ee
@@ -610,7 +610,7 @@ class SearchStrategy(object):
         :param int top_k: The number of Tasks (experiments) to return.
         :param all_metrics: Default False, only return the objective metric on the metrics dictionary.
             If True, return all scalar metrics of the experiment
-        :param all_hyper_parameters: Default False. If True, return all the hyper-parameters from all the sections.
+        :param all_hyper_parameters: Default False. If True, return all the hyperparameters from all the sections.
         :param only_completed: return only completed Tasks. Default False.

         :return: A list of dictionaries ({task_id: '', hyper_parameters: {}, metrics: {}}), ordered by performance,
@@ -929,7 +929,7 @@ class SearchStrategy(object):


 class GridSearch(SearchStrategy):
     """
-    Grid search strategy controller. Full grid sampling of every hyper-parameter combination.
+    Grid search strategy controller. Full grid sampling of every hyperparameter combination.
     """

     def __init__(
@@ -1001,7 +1001,7 @@ class GridSearch(SearchStrategy):


 class RandomSearch(SearchStrategy):
     """
-    Random search strategy controller. Random uniform sampling of hyper-parameters.
+    Random search strategy controller. Random uniform sampling of hyperparameters.
     """

     # Number of already chosen random samples before assuming we covered the entire hyper-parameter space
@@ -1105,7 +1105,7 @@ class HyperParameterOptimizer(object):
     ):
         # type: (...) -> ()
         """
-        Create a new hyper-parameter controller. The newly created object will launch and monitor the new experiments.
+        Create a new hyperparameter controller. The newly created object will launch and monitor the new experiments.

         :param str base_task_id: The Task ID to be used as template experiment to optimize.
         :param list hyper_parameters: The list of Parameter objects to optimize over.
@@ -1120,7 +1120,7 @@ class HyperParameterOptimizer(object):
             - ``min_global`` - Minimize the min value of *all* reported values for the specific title/series scalar.
             - ``max_global`` - Maximize the max value of *all* reported values for the specific title/series scalar.

-        :param class.SearchStrategy optimizer_class: The SearchStrategy optimizer to use for the hyper-parameter search
+        :param class.SearchStrategy optimizer_class: The SearchStrategy optimizer to use for the hyperparameter search
         :param int max_number_of_concurrent_tasks: The maximum number of concurrent Tasks (experiments) running at the
             same time.
         :param str execution_queue: The execution queue to use for launching Tasks (experiments).
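The two hunks above edit the `HyperParameterOptimizer.__init__` docstring. As a rough illustration of the parameters named in it, the sketch below constructs an optimizer; it is not part of this commit, and the template task ID, parameter ranges, and queue name are placeholders. `GridSearch` is used here, with `RandomSearch` as a drop-in alternative for `optimizer_class`.

```python
# Minimal sketch, not from this commit: constructing a HyperParameterOptimizer
# using the arguments documented in the docstring above. The task ID, parameter
# ranges, and queue name are placeholders.
from clearml.automation import (
    DiscreteParameterRange,
    GridSearch,
    HyperParameterOptimizer,
    UniformParameterRange,
)

optimizer = HyperParameterOptimizer(
    base_task_id="<template-task-id>",           # template experiment to optimize
    hyper_parameters=[
        DiscreteParameterRange("General/batch_size", values=[32, 64, 128]),
        UniformParameterRange("General/learning_rate", min_value=1e-4, max_value=1e-1),
    ],
    objective_metric_title="validation",         # scalar title to optimize
    objective_metric_series="accuracy",          # scalar series to optimize
    objective_metric_sign="max",                 # or "min", "min_global", "max_global"
    optimizer_class=GridSearch,                  # RandomSearch works the same way
    max_number_of_concurrent_tasks=2,
    execution_queue="default",
)

# Launch the search, wait for it to finish, then stop the controller
optimizer.start()
optimizer.wait()
optimizer.stop()
```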
@@ -1516,7 +1516,7 @@ class HyperParameterOptimizer(object):
         :param int top_k: The number of Tasks (experiments) to return.
         :param all_metrics: Default False, only return the objective metric on the metrics dictionary.
             If True, return all scalar metrics of the experiment
-        :param all_hyper_parameters: Default False. If True, return all the hyper-parameters from all the sections.
+        :param all_hyper_parameters: Default False. If True, return all the hyperparameters from all the sections.
         :param only_completed: return only completed Tasks. Default False.

         :return: A list of dictionaries ({task_id: '', hyper_parameters: {}, metrics: {}}), ordered by performance,
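This hunk (and the matching one near line 610) edits the docstring of the method that returns the best experiments as dictionaries. The method name is not visible in the hunk; assuming it is `get_top_experiments_details`, as in current ClearML releases, and continuing the sketch above, usage might look like this:

```python
# Sketch only: the method name is assumed (the hunk shows just the docstring),
# chosen to match the documented arguments and return value.
top = optimizer.get_top_experiments_details(
    top_k=3,
    all_metrics=False,           # objective metric only
    all_hyper_parameters=False,  # only the optimized hyperparameters
    only_completed=True,
)
for entry in top:
    print(entry["task_id"], entry["metrics"], entry["hyper_parameters"])
```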
@@ -15,7 +15,7 @@ class RandomSeed(object):
     def set_random_seed(seed=1337):
         # type: (int) -> ()
         """
-        Set global seed for all hyper-parameter strategy random number sampling.
+        Set global seed for all hyperparameter strategy random number sampling.

         :param int seed: The random seed.
         """
@@ -26,7 +26,7 @@ class RandomSeed(object):
     def get_random_seed():
         # type: () -> int
         """
-        Get the global seed for all hyper-parameter strategy random number sampling.
+        Get the global seed for all hyperparameter strategy random number sampling.

         :return: The random seed.
         """
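These two hunks touch the `RandomSeed` helper used by the search strategies. A short sketch of how the pair of static methods is typically used; the import path is assumed (it is not shown in this diff):

```python
# Sketch: import path assumed; RandomSeed is defined alongside the Parameter classes.
from clearml.automation.parameters import RandomSeed

RandomSeed.set_random_seed(1337)      # fix the seed so hyperparameter sampling is reproducible
print(RandomSeed.get_random_seed())   # read back the global seed
```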
@@ -2160,7 +2160,7 @@ class OutputModel(BaseModel):
         # type: (str) -> None
         """
         Set the URI of the storage destination for uploaded model weight files.
-        Supported storage destinations include S3, Google Cloud Storage), and file locations.
+        Supported storage destinations include S3, Google Cloud Storage, and file locations.

         Using this method, file uploads are separate and then a link to each is stored in the model object.

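This hunk fixes a stray parenthesis in the `OutputModel.set_upload_destination` docstring. A minimal sketch of the call it documents; the project, task, and bucket names are placeholders:

```python
from clearml import OutputModel, Task

# Placeholder names and bucket URI; the call records the destination so that
# subsequently uploaded weight files are stored there and linked to the model.
task = Task.init(project_name="examples", task_name="model upload destination")
output_model = OutputModel(task=task)
output_model.set_upload_destination("s3://my-bucket/models/")
```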
@@ -819,7 +819,7 @@ def report_surface(self, title, series, matrix, iteration, xlabels=None, ylabels
 ### Images

 Use to report an image and upload its contents to the bucket specified in the **ClearML** configuration file,
-or a [a default upload destination](#set-default-upload-destination), if you set a default.
+or a [default upload destination](#set-default-upload-destination), if you set a default.

 First [get the current logger](#get-the-current-logger) and then use it (see an [example script](https://github.com/allegroai/clearml/blob/master/examples/manual_reporting.py)) with the following method.

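This hunk edits the Images section of the logging documentation. A minimal sketch of the `report_image` call it refers to; the project, task, series names, and file path are placeholders:

```python
from clearml import Logger, Task

# Placeholder names and path; the image is uploaded to the configured bucket or
# to the default upload destination, as described above.
task = Task.init(project_name="examples", task_name="image reporting")
logger = Logger.current_logger()
logger.report_image(
    title="debug samples",
    series="sample image",
    iteration=1,
    local_path="./sample.png",
)
```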
@@ -931,7 +931,7 @@ def report_image(self, title, series, iteration, local_path=None, matrix=None, m

 In order for **ClearML** to log a dictionary of parameters, use the `Task.connect` method.

-For example, to log the hyper-parameters <code>learning_rate</code>, <code>batch_size</code>, <code>display_step</code>, <code>model_path</code>, <code>n_hidden_1</code>, and <code>n_hidden_2</code>:
+For example, to log the hyperparameters <code>learning_rate</code>, <code>batch_size</code>, <code>display_step</code>, <code>model_path</code>, <code>n_hidden_1</code>, and <code>n_hidden_2</code>:

 ```python
 # Create a dictionary of parameters
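The fenced example in this hunk is cut off by the diff context. A sketch of the `Task.connect` usage it introduces, using the hyperparameter names listed above; the values and the project/task names are placeholders:

```python
from clearml import Task

task = Task.init(project_name="examples", task_name="hyperparameter logging")

# Create a dictionary of parameters (values are illustrative placeholders)
parameters = {
    "learning_rate": 0.001,
    "batch_size": 100,
    "display_step": 10,
    "model_path": "/tmp/model.ckpt",
    "n_hidden_1": 256,
    "n_hidden_2": 256,
}

# Connect the dictionary to the Task; values are logged and can be overridden
# when the experiment is re-run from the ClearML UI
parameters = task.connect(parameters)
```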
@@ -65,7 +65,7 @@ if __name__ == '__main__':
     parser = LitClassifier.add_model_specific_args(parser)
     args = parser.parse_args()

-    Task.init(project_name="examples-internal", task_name="lightning checkpoint issue and argparser")
+    Task.init(project_name="examples", task_name="pytorch lightning MNIST")

     # ------------
     # data