Edit docstrings (#875)

pollfly 2023-01-10 11:12:47 +02:00 committed by GitHub
parent c1989617cf
commit b39829a011
23 changed files with 137 additions and 137 deletions


@@ -157,7 +157,7 @@ class PipelineController(object):
:param bool add_pipeline_tags: (default: False) if True, add `pipe: <pipeline_task_id>` tag to all
steps (Tasks) created by this pipeline.
:param str target_project: If provided, all pipeline steps are cloned into the target project.
- If True pipeline steps are stored into the pipeline project
+ If True, pipeline steps are stored into the pipeline project
:param bool auto_version_bump: If True (default), if the same pipeline version already exists
(with any difference from the current one), the current pipeline version will be bumped to a new version
version bump examples: 1.0.0 -> 1.0.1 , 1.2 -> 1.3, 10 -> 11 etc.
@@ -404,7 +404,7 @@ class PipelineController(object):
use the base_task_project and base_task_name combination to retrieve the base_task_id to use for the step.
:param base_task_name: If base_task_id is not given,
use the base_task_project and base_task_name combination to retrieve the base_task_id to use for the step.
- :param clone_base_task: If True (default) the pipeline will clone the base task, and modify/enqueue
+ :param clone_base_task: If True (default), the pipeline will clone the base task, and modify/enqueue
the cloned Task. If False, the base-task is used directly, notice it has to be in draft-mode (created).
:param continue_on_fail: (default False). If True, failed step will not cause the pipeline to stop
(or marked as failed). Notice, that steps that are connected (or indirectly connected)
@@ -957,7 +957,7 @@ class PipelineController(object):
- ``True`` - Delete the local copy of the artifact.
- ``False`` - Do not delete. (default)
- :param bool auto_pickle: If True (default) and the artifact_object is not one of the following types:
+ :param bool auto_pickle: If True (default), and the artifact_object is not one of the following types:
pathlib2.Path, dict, pandas.DataFrame, numpy.ndarray, PIL.Image, url (string), local_file (string)
the artifact_object will be pickled and uploaded as pickle file artifact (with file extension .pkl)
@@ -986,7 +986,7 @@ class PipelineController(object):
unless one of the steps failed, then mark the pipeline as failed.
:param timeout: Wait timeout for the optimization thread to exit (minutes).
- The default is ``None``, indicating do not wait terminate immediately.
+ The default is ``None``, indicating do not wait; terminate immediately.
:param mark_failed: If True, mark the pipeline task as failed. (default False)
:param mark_aborted: If True, mark the pipeline task as aborted. (default False)
"""
@@ -1090,7 +1090,7 @@ class PipelineController(object):
def get_processed_nodes(self):
# type: () -> Sequence[PipelineController.Node]
"""
- Return the a list of the processed pipeline nodes, each entry in the list is PipelineController.Node object.
+ Return a list of the processed pipeline nodes, each entry in the list is PipelineController.Node object.
:return: executed (excluding currently executing) nodes list
"""
@@ -1099,7 +1099,7 @@ class PipelineController(object):
def get_running_nodes(self):
# type: () -> Sequence[PipelineController.Node]
"""
- Return the a list of the currently running pipeline nodes,
+ Return a list of the currently running pipeline nodes,
each entry in the list is PipelineController.Node object.
:return: Currently running nodes list
@@ -2595,7 +2595,7 @@ class PipelineController(object):
"""
return the pipeline components target folder name/id
- :param return_project_id: if False (default) return target folder name, if True return project id
+ :param return_project_id: if False (default), return target folder name. If True, return project id
:return: project id/name (None if not valid)
"""
if not self._target_project:
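
Taken together, the PipelineController parameters edited above map onto a short controller script. A minimal sketch, assuming two pre-existing base Tasks; project and task names are illustrative:

from clearml import PipelineController

pipe = PipelineController(
    name="ingest-and-train",          # illustrative pipeline name
    project="examples",
    version="1.0.0",
    add_pipeline_tags=True,           # tag step Tasks with `pipe: <pipeline_task_id>`
    target_project="examples/steps",  # clone step Tasks into this project
)
pipe.add_step(
    name="ingest",
    base_task_project="examples",     # with base_task_name, resolves the base_task_id
    base_task_name="data ingest",     # clone_base_task=True (default) clones it
)
pipe.add_step(
    name="train",
    parents=["ingest"],
    base_task_project="examples",
    base_task_name="model train",
    continue_on_fail=False,           # a failed step marks the pipeline as failed
)
pipe.start()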


@@ -292,7 +292,7 @@ class BaseJob(object):
"""
Return True, if job is waiting for execution
- :return: True the task is currently is currently queued.
+ :return: True if the task is currently queued.
"""
return self.status() in (Task.TaskStatusEnum.queued, Task.TaskStatusEnum.created)
@@ -482,9 +482,9 @@ class ClearmlJob(BaseJob):
:param list tags: additional tags to add to the newly cloned task
:param str parent: Set newly created Task parent task field, default: base_tak_id.
:param dict kwargs: additional Task creation parameters
- :param bool disable_clone_task: if False (default) clone base task id.
+ :param bool disable_clone_task: if False (default), clone base task id.
If True, use the base_task_id directly (base-task must be in draft-mode / created),
- :param bool allow_caching: If True check if we have a previously executed Task with the same specification
+ :param bool allow_caching: If True, check if we have a previously executed Task with the same specification.
If we do, use it and set internal is_cached flag. Default False (always create new Task).
:param str target_project: Optional, Set the target project name to create the cloned Task in.
"""


@@ -607,7 +607,7 @@ class SearchStrategy(object):
:param int top_k: The number of Tasks (experiments) to return.
:param all_metrics: Default False, only return the objective metric on the metrics dictionary.
If True, return all scalar metrics of the experiment
- :param all_hyper_parameters: Default False. If True return all the hyper-parameters from all the sections.
+ :param all_hyper_parameters: Default False. If True, return all the hyper-parameters from all the sections.
:param only_completed: return only completed Tasks. Default False.
:return: A list of dictionaries ({task_id: '', hyper_parameters: {}, metrics: {}}), ordered by performance,
@@ -1339,7 +1339,7 @@ class HyperParameterOptimizer(object):
Stop the HyperParameterOptimizer controller and the optimization thread.
:param float timeout: Wait timeout for the optimization thread to exit (minutes).
- The default is ``None``, indicating do not wait terminate immediately.
+ The default is ``None``, indicating do not wait; terminate immediately.
:param wait_for_reporter: Wait for reporter to flush data.
"""
if not self._thread or not self._stop_event or not self.optimizer:
@@ -1513,7 +1513,7 @@ class HyperParameterOptimizer(object):
:param int top_k: The number of Tasks (experiments) to return.
:param all_metrics: Default False, only return the objective metric on the metrics dictionary.
If True, return all scalar metrics of the experiment
- :param all_hyper_parameters: Default False. If True return all the hyper-parameters from all the sections.
+ :param all_hyper_parameters: Default False. If True, return all the hyper-parameters from all the sections.
:param only_completed: return only completed Tasks. Default False.
:return: A list of dictionaries ({task_id: '', hyper_parameters: {}, metrics: {}}), ordered by performance,
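
The two `get_top_experiments_details` hunks above document the same call on `SearchStrategy` and `HyperParameterOptimizer`. A minimal sketch of the optimizer-side usage, assuming a pre-existing template Task ID and an illustrative objective metric:

from clearml.automation import HyperParameterOptimizer, UniformIntegerParameterRange
from clearml.automation.optuna import OptimizerOptuna

optimizer = HyperParameterOptimizer(
    base_task_id="<template-task-id>",   # hypothetical template Task
    hyper_parameters=[
        UniformIntegerParameterRange("General/epochs", min_value=5, max_value=20),
    ],
    objective_metric_title="validation",
    objective_metric_series="accuracy",
    objective_metric_sign="max",
    optimizer_class=OptimizerOptuna,
    execution_queue="default",
)
optimizer.start()
optimizer.wait()
top = optimizer.get_top_experiments_details(
    top_k=3, all_metrics=True, all_hyper_parameters=True, only_completed=True
)
optimizer.stop(timeout=None)  # None: do not wait; terminate immediately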


@@ -534,7 +534,7 @@ class TaskScheduler(BaseScheduler):
if not provided, a name is randomly generated.
When timespec parameters are specified exclusively, they define the time between task launches (see
- `year` and `weekdays` exceptions). When multiple timespec parameter are specified, the parameter representing
+ `year` and `weekdays` exceptions). When multiple timespec parameters are specified, the parameter representing
the longest duration defines the time between task launches, and the shorter timespec parameters define specific
times.
@@ -579,7 +579,7 @@ class TaskScheduler(BaseScheduler):
:param limit_execution_time: Limit the execution time (in hours) of the specific job.
:param single_instance: If True, do not launch the Task job if the previous instance is still running
(skip until the next scheduled time period). Default False.
- :param recurring: If False only launch the Task once (default: True, repeat)
+ :param recurring: If False, only launch the Task once (default: True, repeat)
:param execute_immediately: If True, schedule the Task to be executed immediately
then recurring based on the timing schedule arguments. Default False.
:param reuse_task: If True, re-enqueue the same Task (i.e. do not clone it) every time, default False.
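
A minimal sketch of `add_task` combining the timespec and flag parameters documented above (Task ID and queue names are illustrative):

from clearml.automation import TaskScheduler

scheduler = TaskScheduler()
scheduler.add_task(
    schedule_task_id="<task-id>",  # hypothetical Task to launch
    queue="default",
    hour=2, minute=30,             # every day at 02:30
    recurring=True,                # False would launch the Task only once
    single_instance=True,          # skip if the previous instance is still running
)
scheduler.start_remotely(queue="services")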


@@ -426,7 +426,7 @@ class TriggerScheduler(BaseScheduler):
:param trigger_required_tags: Trigger only on tasks with the following additional tags (must include all tags)
:param trigger_on_status: Trigger on Task status change.
expect list of status strings, e.g. ['failed', 'published']
- :param trigger_exclude_dev_tasks: If True only trigger on Tasks executed by clearml-agent (and not manually)
+ :param trigger_exclude_dev_tasks: If True, only trigger on Tasks executed by clearml-agent (and not manually)
:param trigger_on_metric: Trigger on metric/variant above/under threshold (metric=title, variant=series)
:param trigger_on_variant: Trigger on metric/variant above/under threshold (metric=title, variant=series)
:param trigger_on_threshold: Trigger on metric/variant above/under threshold (float number)
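
A minimal sketch of a status trigger using the parameters above (IDs and names are illustrative):

from clearml.automation import TriggerScheduler

trigger = TriggerScheduler(pooling_frequency_minutes=3)
trigger.add_task_trigger(
    schedule_task_id="<task-id>",     # hypothetical Task to clone and enqueue
    schedule_queue="default",
    trigger_project="examples",
    trigger_on_status=["failed", "published"],
    trigger_exclude_dev_tasks=True,   # only Tasks executed by clearml-agent
)
trigger.start_remotely(queue="services")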


@@ -1829,7 +1829,7 @@ class GetStatsRequest(Request):
:type interval: int
:param items: List of metric keys and requested statistics
:type items: Sequence[StatItem]
- :param split_by_variant: If true then break statistics by hardware sub types
+ :param split_by_variant: If True, then break statistics by hardware sub types
:type split_by_variant: bool
"""


@@ -657,7 +657,7 @@ class Reporter(InterfaceBase, AbstractContextManager, SetupUploadMixin, AsyncMan
:type ytitle: str
:param mode: 'lines' / 'markers' / 'lines+markers'
:type mode: str
- :param reverse_xaxis: If true X axis will be displayed from high to low (reversed)
+ :param reverse_xaxis: If True, X axis will be displayed from high to low (reversed)
:type reverse_xaxis: bool
:param comment: comment underneath the title
:type comment: str
@@ -814,7 +814,7 @@ class Reporter(InterfaceBase, AbstractContextManager, SetupUploadMixin, AsyncMan
:param str ytitle: optional y-axis title
:param xlabels: optional label per column of the matrix
:param ylabels: optional label per row of the matrix
- :param bool yaxis_reversed: If False 0,0 is at the bottom left corner. If True 0,0 is at the Top left corner
+ :param bool yaxis_reversed: If False, 0,0 is at the bottom left corner. If True, 0,0 is at the top left corner
:param comment: comment underneath the title
:param layout_config: optional dictionary for layout configuration, passed directly to plotly
:type layout_config: dict or None


@@ -457,7 +457,7 @@ class Model(IdObjectBase, AsyncManagerMixin, _StorageUriMixin):
:param child: Should the new model be a child of this model (default True)
:param tags: Optional tags for the cloned model
:param task: Creating Task of the Model
- :param ready: If True set the true flag for the newly created model
+ :param ready: If True, set the true flag for the newly created model
:return: The new model's ID
"""
data = self.data


@@ -71,7 +71,7 @@ class AccessMixin(object):
return self._get_task_property('execution.parameters')
def get_label_num_description(self):
""" Get a dictionary of label number to a string pairs representing all labels associated with this number
""" Get a dictionary of label number to string pairs representing all labels associated with this number
on the model labels.
"""
model_labels = self._get_task_property('execution.model_labels')


@@ -599,7 +599,7 @@ class _Arguments(object):
# type: (bool) -> Union[Type, Tuple[str]]
"""
Return the basic types supported by Argument casting
- :param as_str: if True return string cast of the types
+ :param as_str: if True, return string cast of the types
:return: List of type objects supported for auto casting (serializing to string)
"""
supported_types = (int, float, bool, str, list, tuple, Enum)


@@ -84,8 +84,8 @@ class CreateAndPopulate(object):
:param base_task_id: Use a pre-existing task in the system, instead of a local repo/script.
Essentially clones an existing task and overrides arguments/requirements.
:param add_task_init_call: If True, a 'Task.init()' call is added to the script entry point in remote execution.
- :param raise_on_missing_entries: If True raise ValueError on missing entries when populating
- :param verbose: If True print verbose logging
+ :param raise_on_missing_entries: If True, raise ValueError on missing entries when populating
+ :param verbose: If True, print verbose logging
"""
if repo and len(urlparse(repo).scheme) <= 1 and not re.compile(self._VCS_SSH_REGEX).match(repo):
folder = repo
@@ -133,7 +133,7 @@ class CreateAndPopulate(object):
"""
Create the new populated Task
- :param dry_run: Optional, If True do not create an actual Task, instead return the Task definition as dict
+ :param dry_run: Optional, If True, do not create an actual Task, instead return the Task definition as dict
:return: newly created Task object
"""
local_entry_file = None
@@ -603,7 +603,7 @@ if __name__ == '__main__':
examples: 's3://bucket/folder', 'https://server/' , 'gs://bucket/folder', 'azure://bucket', '/folder/'
:param helper_functions: Optional, a list of helper functions to make available
for the standalone function Task.
- :param dry_run: If True do not create the Task, but return a dict of the Task's definitions
+ :param dry_run: If True, do not create the Task, but return a dict of the Task's definitions
:param _sanitize_function: Sanitization function for the function string.
:param _sanitize_helper_functions: Sanitization function for the helper function string.
:return: Newly created Task object
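
A minimal sketch of the class these hunks document, assuming an illustrative repo URL and script path; `dry_run=True` would return the Task definition as a dict instead of creating it:

from clearml.backend_interface.task.populate import CreateAndPopulate

cp = CreateAndPopulate(
    project_name="examples",
    task_name="remote run",
    repo="https://github.com/user/project.git",  # illustrative repo
    branch="main",
    script="train.py",
    add_task_init_call=True,        # inject Task.init() at the script entry point
    raise_on_missing_entries=True,  # raise ValueError on missing entries
    verbose=True,
)
task = cp.create_task(dry_run=False)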


@@ -149,7 +149,7 @@ class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
:param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API.
This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND.
:type log_to_backend: bool
- :param force_create: If True a new task will always be created (task_id, if provided, will be ignored)
+ :param force_create: If True, a new task will always be created (task_id, if provided, will be ignored)
:type force_create: bool
"""
SingletonLock.instantiate()
@@ -638,7 +638,7 @@ class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
"""
Reset the task. Task will be reloaded following a successful reset.
- :param set_started_on_success: If True automatically set Task status to started after resetting it.
+ :param set_started_on_success: If True, automatically set Task status to started after resetting it.
:param force: If not true, call fails if the task status is 'completed'
"""
self.send(tasks.ResetRequest(task=self.id, force=force))
@@ -678,8 +678,8 @@ class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
to Completed (Use this function to close and change status of remotely executed tasks).
To simply change the Task's status to completed, use task.close()
- :param bool ignore_errors: If True (default), ignore any errors raised
- :param bool force: If True the task status will be changed to `stopped` regardless of the current Task state.
+ :param bool ignore_errors: If True (default), ignore any errors raised
+ :param bool force: If True, the task status will be changed to `stopped` regardless of the current Task state.
:param str status_message: Optional, add status change message to the stop request.
This message will be stored as status_message on the Task's info panel
"""
@@ -737,8 +737,8 @@ class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
:param delete_artifacts_and_models: If True, artifacts and models would also be deleted (default True).
If callback is provided, this argument is ignored.
:param skip_models_used_by_other_tasks: If True, models used by other tasks would not be deleted (default True)
- :param raise_on_error: If True an exception will be raised when encountering an error.
- If False an error would be printed and no exception will be raised.
+ :param raise_on_error: If True, an exception will be raised when encountering an error.
+ If False, an error would be printed and no exception will be raised.
:param callback: An optional callback accepting a uri type (string) and a uri (string) that will be called
for each artifact and model. If provided, the delete_artifacts_and_models is ignored.
Return True to indicate the artifact/model should be deleted or False otherwise.
@@ -917,7 +917,7 @@ class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
"""
Update the Task's output model weights file. First, ClearML uploads the file to the preconfigured output
destination (see the Task's ``output.destination`` property or call the ``setup_upload`` method),
- then ClearML updates the model object associated with the Task an API call. The API call uses with the URI
+ then ClearML updates the model object associated with the Task. The API call uses the URI
of the uploaded file, and other values provided by additional arguments.
Notice: A local model file will be uploaded to the task's `output_uri` destination,
@@ -1060,7 +1060,7 @@ class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
Notice the returned parameter dict is flat:
i.e. {'Args/param': 'value'} is the argument "param" from section "Args"
- :param backwards_compatibility: If True (default) parameters without section name
+ :param backwards_compatibility: If True (default), parameters without section name
(API version < 2.9, clearml-server < 0.16) will be at dict root level.
If False, parameters without section name, will be nested under "Args/" key.
:param cast: If True, cast the parameter to the original type. Default False,
@@ -1514,7 +1514,7 @@ class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
:param list artifact_names: list of artifact names
:param bool raise_on_errors: if True, do not suppress connectivity related exceptions
- :param bool delete_from_storage: If True try to delete the actual
+ :param bool delete_from_storage: If True, try to delete the actual
file from the external storage (e.g. S3, GS, Azure, File Server etc.)
:return: True if successful
@@ -1528,7 +1528,7 @@ class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
:param list artifact_names: list of artifact names
:param bool raise_on_errors: if True, do not suppress connectivity related exceptions
- :param bool delete_from_storage: If True try to delete the actual
+ :param bool delete_from_storage: If True, try to delete the actual
file from the external storage (e.g. S3, GS, Azure, File Server etc.)
:return: True if successful
@@ -1809,7 +1809,7 @@ class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
"""
Archive the Task or remove it from the archived folder.
- :param archive: If True archive the Task, If False make sure it is removed from the archived folder
+ :param archive: If True, archive the Task. If False, make sure it is removed from the archived folder
"""
with self._edit_lock:
system_tags = list(set(self.get_system_tags()) | {self.archived_tag}) \
@@ -1821,7 +1821,7 @@ class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
"""
Return the Archive state of the Task
- :return: If True the Task is archived, otherwise it is not.
+ :return: If True, the Task is archived, otherwise it is not.
"""
return self.archived_tag in self.get_system_tags()
@@ -2067,7 +2067,7 @@ class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
def get_reported_single_values(self):
# type: () -> Dict[str, float]
"""
- Get all repoted single values as a dictionary, where the keys are the names of the values
+ Get all reported single values as a dictionary, where the keys are the names of the values
and the values of the dictionary are the actual reported values.
:return: A dict containing the reported values
@@ -2075,7 +2075,7 @@ class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
if not Session.check_min_api_version("2.20"):
raise ValueError(
"Current 'clearml-server' does not support getting reported single values. "
"Please upgrade to the lastest version"
"Please upgrade to the latest version"
)
res = self.send(events.GetTaskSingleValueMetricsRequest(tasks=[self.id]))
res = res.wait()
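
The `get_reported_single_values` hunk pairs with `Logger.report_single_value` on the reporting side. A minimal sketch, assuming a clearml-server with API version >= 2.20 as the docstring requires (names are illustrative):

from clearml import Task

task = Task.init(project_name="examples", task_name="single values")
task.get_logger().report_single_value(name="total_files", value=137)
print(task.get_reported_single_values())  # e.g. {'total_files': 137.0}
task.close()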


@@ -219,8 +219,8 @@ class Artifact(object):
def get_local_copy(self, extract_archive=True, raise_on_error=False, force_download=False):
# type: (bool, bool, bool) -> str
"""
- :param bool extract_archive: If True and artifact is of type 'archive' (compressed folder)
- The returned path will be a temporary folder containing the archive content
+ :param bool extract_archive: If True and artifact is of type 'archive' (compressed folder),
+ the returned path will be a temporary folder containing the archive content
:param bool raise_on_error: If True and the artifact could not be downloaded,
raise ValueError, otherwise return None on failure and output log warning.
:param bool force_download: download file from remote even if exists in local cache
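
A minimal sketch of fetching an artifact with the flags above (task and artifact names are illustrative):

from clearml import Task

task = Task.get_task(project_name="examples", task_name="producer")
artifact = task.artifacts["training data"]  # hypothetical artifact name
local_path = artifact.get_local_copy(
    extract_archive=True,  # unpack 'archive' artifacts into a temporary folder
    raise_on_error=True,   # raise ValueError instead of returning None on failure
)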


@@ -53,7 +53,7 @@ class PatchedMatplotlib:
def force_report_as_image(force):
# type: (bool) -> None
"""
- Set force_report_as_image. If True all matplotlib are always converted to images
+ Set force_report_as_image. If True, all matplotlib are always converted to images.
Otherwise we try to convert them into interactive plotly plots.
:param force: True force
"""


@@ -381,8 +381,8 @@ class Dataset(object):
Wildcard matching, can be a single string or a list of wildcards.
:param local_base_folder: files will be located based on their relative path from local_base_folder
:param dataset_path: where in the dataset the folder/files should be located
- :param recursive: If True match all wildcard files recursively
- :param verbose: If True print to console files added/modified
+ :param recursive: If True, match all wildcard files recursively
+ :param verbose: If True, print to console files added/modified
:param max_workers: The number of threads to add the files with. Defaults to the number of logical cores
:return: number of files added
"""
@@ -447,8 +447,8 @@ class Dataset(object):
:param dataset_path: The location in the dataset where the file will be downloaded into.
e.g: for source_url='s3://bucket/remote_folder/image.jpg' and dataset_path='s3_files',
'image.jpg' will be downloaded to 's3_files/image.jpg' (relative path to the dataset)
- :param recursive: If True match all wildcard files recursively
- :param verbose: If True print to console files added/modified
+ :param recursive: If True, match all wildcard files recursively
+ :param verbose: If True, print to console files added/modified
:param max_workers: The number of threads to add the external files with. Useful when `source_url` is
a sequence. Defaults to the number of logical cores
:return: Number of file links added
@@ -496,8 +496,8 @@ class Dataset(object):
:param dataset_path: Remove files from the dataset.
The path is always relative to the dataset (e.g 'folder/file.bin').
External files can also be removed by their links (e.g. 's3://bucket/file')
- :param recursive: If True match all wildcard files recursively
- :param verbose: If True print to console files removed
+ :param recursive: If True, match all wildcard files recursively
+ :param verbose: If True, print to console files removed
:return: Number of files removed
"""
self._task.get_logger().report_text(
@@ -542,14 +542,14 @@ class Dataset(object):
"""
Synchronize the dataset with a local folder. The dataset is synchronized from the
relative_base_folder (default: dataset root) and deeper with the specified local path.
- Note that if a remote file is identified in as being modified when syncing, it will
+ Note that if a remote file is identified as being modified when syncing, it will
be added as a FileEntry, ready to be uploaded to the ClearML server. This version of the
file is considered "newer" and it will be downloaded instead of the one stored at its
remote address when calling Dataset.get_local_copy().
:param local_path: Local folder to sync (assumes all files and recursive)
:param dataset_path: Target dataset path to sync with (default the root of the dataset)
- :param verbose: If true print to console files added/modified/removed
+ :param verbose: If True, print to console files added/modified/removed
:return: number of files removed, number of files modified/added
"""
def filter_f(f):
@@ -609,8 +609,8 @@ class Dataset(object):
"""
Start file uploading, the function returns when all files are uploaded.
- :param show_progress: If True show upload progress bar
- :param verbose: If True print verbose progress report
+ :param show_progress: If True, show upload progress bar
+ :param verbose: If True, print verbose progress report
:param output_url: Target storage for the compressed dataset (default: file server)
Examples: `s3://bucket/data`, `gs://bucket/data` , `azure://bucket/data` , `/mnt/share/data`
:param compression: Compression algorithm for the Zipped dataset file (default: ZIP_DEFLATED)
@@ -757,11 +757,11 @@ class Dataset(object):
def finalize(self, verbose=False, raise_on_error=True, auto_upload=False):
# type: (bool, bool, bool) -> bool
"""
- Finalize the dataset publish dataset Task. upload must first called to verify there are not pending uploads.
+ Finalize the dataset publish dataset Task. Upload must first be called to verify that there are no pending uploads.
If files do need to be uploaded, it throws an exception (or return False)
- :param verbose: If True print verbose progress report
- :param raise_on_error: If True raise exception if dataset finalizing failed
+ :param verbose: If True, print verbose progress report
+ :param raise_on_error: If True, raise exception if dataset finalizing failed
:param auto_upload: Automatically upload dataset if not called yet, will upload to default location.
"""
# check we do not have files waiting for upload.
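
The create/add/upload/finalize sequence the docstrings above describe, as a minimal sketch with illustrative names and paths:

from clearml import Dataset

ds = Dataset.create(dataset_project="examples", dataset_name="images-v1")
ds.add_files(path="/data/images", wildcard="*.jpg", recursive=True, verbose=True)
ds.upload(show_progress=True, verbose=False)  # must finish before finalize()
ds.finalize(raise_on_error=True)              # raises if uploads are still pending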
@@ -849,7 +849,7 @@ class Dataset(object):
Publish the dataset
If dataset is not finalize, throw exception
- :param raise_on_error: If True raise exception if dataset publishing failed
+ :param raise_on_error: If True, raise exception if dataset publishing failed
"""
# check we can publish this dataset
if not self.is_final():
@@ -874,19 +874,19 @@ class Dataset(object):
Return a base folder with a read-only (immutable) local copy of the entire dataset
download and copy / soft-link, files from all the parent dataset versions. The dataset needs to be finalized
- :param use_soft_links: If True use soft links, default False on windows True on Posix systems
+ :param use_soft_links: If True, use soft links, default False on windows True on Posix systems
:param part: Optional, if provided only download the selected part (index) of the Dataset.
First part number is `0` and last part is `num_parts-1`
Notice, if `num_parts` is not provided, number of parts will be equal to the total number of chunks
(i.e. sum over all chunks from the specified Dataset including all parent Datasets).
This argument is passed to parent datasets, as well as the implicit `num_parts`,
allowing users to get a partial copy of the entire dataset, for multi node/step processing.
- :param num_parts: Optional, If specified normalize the number of chunks stored to the
+ :param num_parts: Optional, if specified, normalize the number of chunks stored to the
requested number of parts. Notice that the actual chunks used per part are rounded down.
Example: Assuming total 8 chunks for this dataset (including parent datasets),
and `num_parts=5`, the chunk index used per parts would be:
part=0 -> chunks[0,5], part=1 -> chunks[1,6], part=2 -> chunks[2,7], part=3 -> chunks[3, ]
- :param raise_on_error: If True raise exception if dataset merging failed on any file
+ :param raise_on_error: If True, raise exception if dataset merging failed on any file
:param max_workers: Number of threads to be spawned when getting the dataset copy. Defaults
to the number of logical cores.
@@ -926,12 +926,12 @@ class Dataset(object):
(i.e. sum over all chunks from the specified Dataset including all parent Datasets).
This argument is passed to parent datasets, as well as the implicit `num_parts`,
allowing users to get a partial copy of the entire dataset, for multi node/step processing.
- :param num_parts: Optional, If specified normalize the number of chunks stored to the
+ :param num_parts: Optional, if specified, normalize the number of chunks stored to the
requested number of parts. Notice that the actual chunks used per part are rounded down.
Example: Assuming total 8 chunks for this dataset (including parent datasets),
and `num_parts=5`, the chunk index used per parts would be:
part=0 -> chunks[0,5], part=1 -> chunks[1,6], part=2 -> chunks[2,7], part=3 -> chunks[3, ]
- :param raise_on_error: If True raise exception if dataset merging failed on any file
+ :param raise_on_error: If True, raise exception if dataset merging failed on any file
:param max_workers: Number of threads to be spawned when getting the dataset copy. Defaults
to the number of logical cores.
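
A minimal sketch of the `part`/`num_parts` mechanics described above, where each of five workers pulls a disjoint slice of the dataset (indices and names are illustrative):

from clearml import Dataset

ds = Dataset.get(dataset_project="examples", dataset_name="images-v1")
worker_index, worker_count = 2, 5  # hypothetical node coordinates
folder = ds.get_local_copy(part=worker_index, num_parts=worker_count)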
@@ -966,7 +966,7 @@ class Dataset(object):
:param dataset_path: Only match files matching the dataset_path (including wildcards).
Example: 'folder/sub/*.json'
- :param recursive: If True (default) matching dataset_path recursively
+ :param recursive: If True (default), matching dataset_path recursively
:param dataset_id: Filter list based on the dataset id containing the latest version of the file.
Default: None, do not filter files based on parent dataset.
@@ -1109,7 +1109,7 @@ class Dataset(object):
:param local_copy_path: Specify local path containing a copy of the dataset,
If not provide use the cached folder
:param skip_hash: If True, skip hash checks and verify file size only
- :param verbose: If True print errors while testing dataset files hash
+ :param verbose: If True, print errors while testing dataset files hash
:return: List of files with unmatched hashes
"""
local_path = local_copy_path or self.get_local_copy()
@@ -1354,7 +1354,7 @@ class Dataset(object):
:param dataset_version: The version of the corresponding dataset. If set to `None` (default),
then get the dataset with the latest version
:param entire_dataset: If True, get all datasets that match the given `dataset_project`,
- `dataset_name`, `dataset_version`. Note that `force` has to be True if this paramer is True
+ `dataset_name`, `dataset_version`. Note that `force` has to be True if this parameter is True
:param action: Corresponding action, used for logging/building error texts
:param shallow_search: If True, search only the first 500 results (first page)
@@ -1417,9 +1417,9 @@ class Dataset(object):
:param dataset_name: The name of the dataset(s) to be deleted
:param force: If True, deleted the dataset(s) even when being used. Also required to be set to
True when `entire_dataset` is set.
- :param dataset_version: The version of the dataset(s) to be deletedd
+ :param dataset_version: The version of the dataset(s) to be deleted
:param entire_dataset: If True, delete all datasets that match the given `dataset_project`,
- `dataset_name`, `dataset_version`. Note that `force` has to be True if this paramer is True
+ `dataset_name`, `dataset_version`. Note that `force` has to be True if this parameter is True
:param shallow_search: If True, search only the first 500 results (first page)
"""
if not any([dataset_id, dataset_project, dataset_name]):
@@ -1518,7 +1518,7 @@ class Dataset(object):
):
# type: (...) -> ()
"""
- Move the dataset to a another project.
+ Move the dataset to another project.
:param new_dataset_project: New project to move the dataset(s) to
:param dataset_project: Project of the dataset(s) to move to new project
@@ -1574,7 +1574,7 @@ class Dataset(object):
# type: (...) -> "Dataset"
"""
Get a specific Dataset. If multiple datasets are found, the dataset with the
- highest semantic version is returned. If no semantic version if found, the most recently
+ highest semantic version is returned. If no semantic version is found, the most recently
updated dataset is returned. This functions raises an Exception in case no dataset
can be found and the ``auto_create=True`` flag is not set
@@ -1587,7 +1587,7 @@ class Dataset(object):
:param include_archived: Include archived tasks and datasets also
:param auto_create: Create a new dataset if it does not exist yet
:param writable_copy: Get a newly created mutable dataset with the current one as its parent,
- so new files can added to the instance.
+ so new files can be added to the instance.
:param dataset_version: Requested version of the Dataset
:param alias: Alias of the dataset. If set, the 'alias : dataset ID' key-value pair
will be set under the hyperparameters section 'Datasets'
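
A minimal sketch of `Dataset.get` with `writable_copy=True`, creating a mutable child version of the latest dataset (names are illustrative):

from clearml import Dataset

child = Dataset.get(
    dataset_project="examples",
    dataset_name="images-v1",
    writable_copy=True,  # new mutable dataset, parented to the latest version
)
child.add_files("/data/new_images")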
@@ -1831,7 +1831,7 @@ class Dataset(object):
:param partial_name: Specify partial match to a dataset name
:param tags: Specify user tags
:param ids: List specific dataset based on IDs list
- :param only_completed: If False return dataset that are still in progress (uploading/edited etc.)
+ :param only_completed: If False return datasets that are still in progress (uploading/edited etc.)
:param recursive_project_search: If True and the `dataset_project` argument is set,
search inside subprojects as well.
If False, don't search inside subprojects (except for the special `.datasets` subproject)
@@ -1896,8 +1896,8 @@ class Dataset(object):
Wildcard matching, can be a single string or a list of wildcards)
:param local_base_folder: files will be located based on their relative path from local_base_folder
:param dataset_path: where in the dataset the folder/files should be located
- :param recursive: If True match all wildcard files recursively
- :param verbose: If True print to console added files
+ :param recursive: If True, match all wildcard files recursively
+ :param verbose: If True, print to console added files
:param max_workers: The number of threads to add the files with. Defaults to the number of logical cores
"""
max_workers = max_workers or psutil.cpu_count()
@@ -2138,13 +2138,13 @@ class Dataset(object):
it won't be downloaded from the remote storage unless it is added again using
Dataset.add_external_files().
- :param force: If True extract dataset content even if target folder exists and is not empty
+ :param force: If True, extract dataset content even if target folder exists and is not empty
:param selected_chunks: Optional, if provided only download the selected chunks (index) of the Dataset.
Example: Assuming 8 chunks on this version
selected_chunks=[0,1,2]
:param lock_target_folder: If True, local the target folder so the next cleanup will not delete
Notice you should unlock it manually, or wait for the process to finish for auto unlocking.
- :param cleanup_target_folder: If True remove target folder recursively
+ :param cleanup_target_folder: If True, remove target folder recursively
:param target_folder: If provided use the specified target folder, default, auto generate from Dataset ID.
:param max_workers: Number of threads to be spawned when getting dataset files. Defaults
to the number of virtual cores.
@@ -2239,7 +2239,7 @@ class Dataset(object):
Download the dataset archive, and extract the zip content to a cached folder.
Notice no merging is done.
- :param force: If True extract dataset content even if target folder exists and is not empty
+ :param force: If True, extract dataset content even if target folder exists and is not empty
:param selected_chunks: Optional, if provided only download the selected chunks (index) of the Dataset.
Example: Assuming 8 chunks on this version
selected_chunks=[0,1,2]
@@ -2359,7 +2359,7 @@ class Dataset(object):
Notice, if `num_parts` is not provided, number of parts will be equal to the number of chunks.
This argument is passed to parent versions, as well as the implicit `num_parts`,
allowing users to get a partial copy of the entire dataset, for multi node/step processing.
- :param num_parts: Optional, If specified normalize the number of chunks stored to the
+ :param num_parts: Optional, if specified, normalize the number of chunks stored to the
requested number of parts. Notice that the actual chunks used per part are rounded down.
Example: Assuming 8 chunks on this version, and `num_parts=5`, the chunk index used per parts would be:
part=0 -> chunks[0,5], part=1 -> chunks[1,6], part=2 -> chunks[2,7], part=3 -> chunks[3, ]


@@ -207,7 +207,7 @@ class Logger(object):
logger.report_vector(title='vector example', series='vector series', values=vector_series, iteration=0,
labels=['A','B'], xaxis='X axis label', yaxis='Y axis label')
- You can view the vectors plots in the **ClearML Web-App (UI)**, **RESULTS** tab, **PLOTS** sub-tab.
+ You can view the vectors plot in the **ClearML Web-App (UI)**, **RESULTS** tab, **PLOTS** sub-tab.
:param title: The title (metric) of the plot.
:param series: The series name (variant) of the reported histogram.
@@ -647,7 +647,7 @@ class Logger(object):
:param str yaxis: The y-axis title. (Optional)
:param list(str) xlabels: Labels for each column of the matrix. (Optional)
:param list(str) ylabels: Labels for each row of the matrix. (Optional)
- :param bool yaxis_reversed: If False 0,0 is at the bottom left corner. If True 0,0 is at the Top left corner
+ :param bool yaxis_reversed: If False, 0,0 is at the bottom left corner. If True, 0,0 is at the top left corner
:param str comment: A comment displayed with the plot, underneath the title.
:param dict extra_layout: optional dictionary for layout configuration, passed directly to plotly
See full details on the supported configuration: https://plotly.com/javascript/reference/heatmap/
@@ -702,7 +702,7 @@ class Logger(object):
:param str yaxis: The y-axis title. (Optional)
:param list(str) xlabels: Labels for each column of the matrix. (Optional)
:param list(str) ylabels: Labels for each row of the matrix. (Optional)
- :param bool yaxis_reversed: If False 0,0 is at the bottom left corner. If True 0,0 is at the Top left corner
+ :param bool yaxis_reversed: If False, 0,0 is at the bottom left corner. If True, 0,0 is at the top left corner
:param dict extra_layout: optional dictionary for layout configuration, passed directly to plotly
See full details on the supported configuration: https://plotly.com/javascript/reference/heatmap/
example: extra_layout={'xaxis': {'type': 'date', 'range': ['2020-01-01', '2020-01-31']}}
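
A minimal sketch of `report_confusion_matrix` with `yaxis_reversed=True`, which puts 0,0 at the top left corner as documented above (labels are illustrative):

import numpy as np
from clearml import Task

task = Task.init(project_name="examples", task_name="confusion")
task.get_logger().report_confusion_matrix(
    title="confusion", series="val", iteration=0,
    matrix=np.array([[9, 1], [2, 8]]),
    xlabels=["pred 0", "pred 1"], ylabels=["true 0", "true 1"],
    yaxis_reversed=True,  # 0,0 at the top left corner
)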
@@ -914,12 +914,12 @@ class Logger(object):
:param str title: The title (metric) of the media.
:param str series: The series name (variant) of the reported media.
:param int iteration: The reported iteration / step.
- :param str local_path: A path to an media file.
+ :param str local_path: A path to a media file.
:param stream: BytesIO stream to upload. If provided, ``file_extension`` must also be provided.
:param str url: A URL to the location of a pre-uploaded media.
:param file_extension: A file extension to use when ``stream`` is passed.
- :param int max_history: The maximum number of media files to store per metric/variant combination
- use negative value for unlimited. default is set in global configuration (default=5)
+ :param int max_history: The maximum number of media files to store per metric/variant combination.
+ Use negative value for unlimited. Default is set in global configuration (default=5)
:param bool delete_after_upload: After the file is uploaded, delete the local copy
- ``True`` - Delete
@@ -984,7 +984,7 @@ class Logger(object):
:param str title: The title (metric) of the plot.
:param str series: The series name (variant) of the reported plot.
:param int iteration: The reported iteration / step.
- :param dict figure: A ``plotly`` Figure object or a ``poltly`` dictionary
+ :param dict figure: A ``plotly`` Figure object or a ``plotly`` dictionary
"""
# if task was not started, we have to start it
self._start_task_if_needed()
@@ -1023,11 +1023,11 @@ class Logger(object):
:param str series: The series name (variant) of the reported plot.
:param int iteration: The reported iteration / step.
:param MatplotlibFigure figure: A ``matplotlib`` Figure object
- :param report_image: Default False. If True the plot will be uploaded as a debug sample (png image),
+ :param report_image: Default False. If True, the plot will be uploaded as a debug sample (png image),
and will appear under the debug samples tab (instead of the Plots tab).
- :param report_interactive: If True (default) it will try to convert the matplotlib into interactive
- plot in the UI. If False the matplotlib is saved as is and will
- be non-interactive (with the exception of zooming in/out)
+ :param report_interactive: If True (default), it will try to convert the matplotlib into interactive
+ plot in the UI. If False, the matplotlib is saved as is and will
+ be non-interactive (except zooming in/out)
"""
# if task was not started, we have to start it
self._start_task_if_needed()
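
A minimal sketch of `report_matplotlib_figure` with the two flags documented above (project and series names are illustrative):

import matplotlib.pyplot as plt
from clearml import Task

task = Task.init(project_name="examples", task_name="plots")
fig = plt.figure()
plt.plot([1, 2, 3], [4, 5, 6])
task.get_logger().report_matplotlib_figure(
    title="loss", series="train", iteration=0, figure=fig,
    report_image=False,       # True would upload a PNG debug sample instead
    report_interactive=True,  # try to convert to an interactive plotly plot
)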
@@ -1239,11 +1239,11 @@ class Logger(object):
def matplotlib_force_report_non_interactive(cls, force):
# type: (bool) -> None
"""
- If True all matplotlib are always converted to non interactive static plots (images), appearing in under
+ If True, all matplotlib are always converted to non interactive static plots (images), appearing under
the Plots section. If False (default), matplotlib figures are converted into interactive web UI plotly
figures, in case figure conversion fails, it defaults to non-interactive plots.
- :param force: If True all matplotlib figures are converted automatically to non-interactive plots.
+ :param force: If True, all matplotlib figures are converted automatically to non-interactive plots.
"""
from clearml.backend_interface.metrics import Reporter
Reporter.matplotlib_force_report_non_interactive(force=force)


@@ -334,7 +334,7 @@ class BaseModel(object):
"""
Download the base model and return the locally stored filename.
- :param bool raise_on_error: If True and the artifact could not be downloaded,
+ :param bool raise_on_error: If True, and the artifact could not be downloaded,
raise ValueError, otherwise return None on failure and output log warning.
:return: The locally stored file.
@@ -353,7 +353,7 @@ class BaseModel(object):
- ``True`` - Download the model weights into a temporary directory, and return the temporary directory path.
- ``False`` - Return a list of the locally stored filenames. (Default)
- :param bool raise_on_error: If True and the artifact could not be downloaded,
+ :param bool raise_on_error: If True, and the artifact could not be downloaded,
raise ValueError, otherwise return None on failure and output log warning.
:return: The model weights, or a list of the locally stored filenames.
@@ -584,12 +584,12 @@ class Model(BaseModel):
"""
Retrieve a valid link to the model file(s).
If the model URL is a file system link, it will be returned directly.
- If the model URL is points to a remote location (http/s3/gs etc.),
+ If the model URL points to a remote location (http/s3/gs etc.),
it will download the file(s) and return the temporary location of the downloaded model.
- :param bool extract_archive: If True and the model is of type 'packaged' (e.g. TensorFlow compressed folder)
+ :param bool extract_archive: If True, and the model is of type 'packaged' (e.g. TensorFlow compressed folder)
The returned path will be a temporary folder containing the archive content
- :param bool raise_on_error: If True and the artifact could not be downloaded,
+ :param bool raise_on_error: If True, and the artifact could not be downloaded,
raise ValueError, otherwise return None on failure and output log warning.
:return: A local path to the model (or a downloaded copy of it).
@@ -636,8 +636,8 @@ class Model(BaseModel):
:param model_name: Optional Model name as shown in the model artifactory
:param tags: Optional filter models based on list of tags, example: ['production', 'verified', '-qa']
Notice use '-' prefix to filter out tags.
- :param only_published: If True only return published models.
- :param include_archived: If True return archived models.
+ :param only_published: If True, only return published models.
+ :param include_archived: If True, return archived models.
:param max_results: Optional return the last X models,
sorted by last update time (from the most recent to the least).
:param metadata: Filter based on metadata. This parameter is a dictionary. Notice that the type of the
@@ -707,7 +707,7 @@ class Model(BaseModel):
Optional, delete the model weights file from the remote storage.
:param model: Model ID or Model object to remove
- :param delete_weights_file: If True (default) delete the weights file from the remote storage
+ :param delete_weights_file: If True (default), delete the weights file from the remote storage
:param force: If True, remove model even if other Tasks are using this model. default False.
:param raise_on_errors: If True, throw ValueError if something went wrong, default False.
:return: True if Model was removed successfully
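
A minimal sketch combining the `query_models` filters above with `get_local_copy` (project, model name, and tags are illustrative):

from clearml import Model

models = Model.query_models(
    project_name="examples", model_name="resnet",
    tags=["production", "-qa"],   # '-' prefix filters the tag out
    only_published=True, include_archived=False,
    max_results=5,                # newest first, by last update time
)
weights_path = models[0].get_local_copy(raise_on_error=True)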
@@ -767,8 +767,8 @@ class Model(BaseModel):
class InputModel(Model):
"""
- Load an existing model in the system, search by model id.
- The Model will be read-only and can be used to pre initialize a network
+ Load an existing model in the system, search by model ID.
+ The Model will be read-only and can be used to pre initialize a network.
We can connect the model to a task as input model, then when running remotely override it with the UI.
"""
@@ -792,7 +792,7 @@ class InputModel(Model):
):
# type: (...) -> InputModel
"""
- Create an InputModel object from a pre-trained model by specifying the URL of an initial weight files.
+ Create an InputModel object from a pre-trained model by specifying the URL of an initial weight file.
Optionally, input a configuration, label enumeration, name for the model, tags describing the model,
comment as a description of the model, indicate whether the model is a package, specify the model's
framework, and indicate whether to immediately set the model's status to ``Published``.
@@ -1056,7 +1056,7 @@ class InputModel(Model):
:param name: Model name to search and load
:param project: Model project name to search model in
:param tags: Model tags list to filter by
- :param only_published: If True filter out non-published (draft) models
+ :param only_published: If True, filter out non-published (draft) models
"""
if not model_id:
found_models = self.query_models(
@@ -1391,7 +1391,7 @@ class OutputModel(BaseModel):
Set the URI of the storage destination for uploaded model weight files.
Supported storage destinations include S3, Google Cloud Storage), and file locations.
- Using this method, files uploads are separate and then a link to each is stored in the model object.
+ Using this method, file uploads are separate and then a link to each is stored in the model object.
.. note::
For storage requiring credentials, the credentials are stored in the ClearML configuration file,


@@ -273,7 +273,7 @@ class StorageHelper(object):
Download a file from remote URL to a local storage, and return path to local copy,
:param remote_url: Remote URL. Example: https://example.com/file.jpg s3://bucket/folder/file.mp4 etc.
- :param skip_zero_size_check: If True no error will be raised for files with zero bytes size.
+ :param skip_zero_size_check: If True, no error will be raised for files with zero bytes size.
:return: Path to local copy of the downloaded file. None if error occurred.
"""
helper = cls.get(remote_url)
@@ -641,7 +641,7 @@ class StorageHelper(object):
def get_object_metadata(self, obj):
# type: (object) -> dict
"""
- Get the metadata of the a remote object.
+ Get the metadata of the remote object.
The metadata is a dict containing the following keys: `name`, `size`.
:param object obj: The remote object
@@ -1008,8 +1008,8 @@ class StorageHelper(object):
:param remote_url: Remote URL. Example: https://example.com/image.jpg or s3://bucket/folder/file.mp4 etc.
:param local_path: target location for downloaded file. Example: /tmp/image.jpg
- :param overwrite_existing: If True and local_path exists, it will overwrite it, otherwise print warning
- :param skip_zero_size_check: If True no error will be raised for files with zero bytes size.
+ :param overwrite_existing: If True, and local_path exists, it will overwrite it, otherwise print warning
+ :param skip_zero_size_check: If True, no error will be raised for files with zero bytes size.
:return: local_path if download was successful.
"""
helper = cls.get(remote_url)


@@ -41,7 +41,7 @@ class StorageManager(object):
:param str remote_url: remote url link (string)
:param str cache_context: Optional caching context identifier (string), default context 'global'
- :param bool extract_archive: if True returned path will be a cached folder containing the archive's content,
+ :param bool extract_archive: if True, returned path will be a cached folder containing the archive's content,
currently only zip files are supported.
:param str name: name of the target file
:param bool force_download: download file from remote even if exists in local cache
@@ -61,7 +61,7 @@ class StorageManager(object):
cls, local_file, remote_url, wait_for_upload=True, retries=None
): # type: (str, str, bool, Optional[int]) -> str
"""
- Upload a local file to a remote location. remote url is the finale destination of the uploaded file.
+ Upload a local file to a remote location. remote url is the final destination of the uploaded file.
Examples:
@@ -284,9 +284,9 @@ class StorageManager(object):
be created under the target local_folder. Supports S3/GS/Azure and shared filesystem.
Example: 's3://bucket/data/'
:param bool overwrite: If False, and target files exist do not download.
- If True always download the remote files. Default False.
- :param bool skip_zero_size_check: If True no error will be raised for files with zero bytes size.
- :param bool silence_errors: If True, silence errors that might pop up when trying to downlaod
+ If True, always download the remote files. Default False.
+ :param bool skip_zero_size_check: If True, no error will be raised for files with zero bytes size.
+ :param bool silence_errors: If True, silence errors that might pop up when trying to download
files stored remotely. Default False
:return: Path to downloaded file or None on error
@@ -372,9 +372,9 @@ class StorageManager(object):
:param match_wildcard: If specified only download files matching the `match_wildcard`
Example: `*.json`
:param bool overwrite: If False, and target files exist do not download.
- If True always download the remote files. Default False.
- :param bool skip_zero_size_check: If True no error will be raised for files with zero bytes size.
- :param bool silence_errors: If True, silence errors that might pop up when trying to downlaod
+ If True, always download the remote files. Default False.
+ :param bool skip_zero_size_check: If True, no error will be raised for files with zero bytes size.
+ :param bool silence_errors: If True, silence errors that might pop up when trying to download
files stored remotely. Default False
:return: Target local folder
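
A minimal sketch of the download/upload calls documented above (URLs and paths are illustrative):

from clearml import StorageManager

local = StorageManager.get_local_copy(
    remote_url="s3://bucket/data/archive.zip",
    extract_archive=True,  # returns the cached extracted folder for archives
)
StorageManager.upload_file(
    local_file="/tmp/report.csv",
    remote_url="s3://bucket/reports/report.csv",  # final destination of the file
)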
@@ -466,7 +466,7 @@ class StorageManager(object):
def get_metadata(cls, remote_url, return_full_path=False):
# type: (str, bool) -> Optional[dict]
"""
- Get the metadata of the a remote object.
+ Get the metadata of the remote object.
The metadata is a dict containing the following keys: `name`, `size`.
:param str remote_url: Source remote storage location, tree structure of `remote_url` will


@@ -339,7 +339,7 @@ class Task(_Task):
You can specify a Task ID to be used with `reuse_last_task_id='task_id_here'`
:param str output_uri: The default location for output models and other artifacts.
- If True is passed, the default files_server will be used for model storage.
+ If True, the default files_server will be used for model storage.
In the default location, ClearML creates a subfolder for the output.
The subfolder structure is the following:
<output destination name> / <project name> / <task name>.<Task ID>
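
A minimal sketch of `output_uri` as described above; passing True selects the default files server, while a string selects explicit storage (names are illustrative):

from clearml import Task

task = Task.init(
    project_name="examples",
    task_name="training",
    output_uri=True,  # or e.g. "s3://bucket/models"
)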
@@ -360,7 +360,7 @@ class Task(_Task):
Reference" section.
:param auto_connect_arg_parser: Automatically connect an argparse object to the Task. Supported argument
- parsers packages are: argparse, click, python-fire, jsonargparse.
+ parser packages are: argparse, click, python-fire, jsonargparse.
The values are:
@@ -932,7 +932,7 @@ class Task(_Task):
:param list tags: Filter based on the requested list of tags (strings) (Task must have at least one of the
listed tags). To exclude a tag add "-" prefix to the tag. Example: ["best", "-debug"]
:param bool allow_archived: Only applicable if *not* using specific ``task_id``,
- If True (default) allow to return archived Tasks, if False filter out archived Tasks
+ If True (default), allow to return archived Tasks, if False filter out archived Tasks
:param bool task_filter: Only applicable if *not* using specific ``task_id``,
Pass additional query filters, on top of project/name. See details in Task.get_tasks.
@@ -984,7 +984,7 @@ class Task(_Task):
If None is passed, returns all tasks within the project
:param list tags: Filter based on the requested list of tags (strings) (Task must have all the listed tags)
To exclude a tag add "-" prefix to the tag. Example: ["best", "-debug"]
- :param bool allow_archived: If True (default) allow to return archived Tasks, if False filter out archived Tasks
+ :param bool allow_archived: If True (default), allow to return archived Tasks, if False filter out archived Tasks
:param dict task_filter: filter and order Tasks. See service.tasks.GetAllRequest for details
`parent`: (str) filter by parent task-id matching
`search_text`: (str) free text search (in task fields comment/name/id)
@@ -1159,8 +1159,8 @@ class Task(_Task):
Read-only dictionary of the Task's loaded/stored models.
:return: A dictionary-like object with "input"/"output" keys and input/output properties, pointing to a
- list-like object containing of Model objects. Each list-like object also acts as a dictionary, mapping
- model name to a appropriate model instance.
+ list-like object containing Model objects. Each list-like object also acts as a dictionary, mapping
+ model name to an appropriate model instance.
Get input/output models:
@@ -1371,7 +1371,7 @@ class Task(_Task):
- ``status_message`` - Information about the status.
- ``status_changed`` - The last status change date and time in ISO 8601 format.
- ``last_update`` - The last time the Task was created, updated,
- changed or events for this task were reported.
+ changed, or events for this task were reported.
- ``execution.queue`` - The Id of the queue where the Task is enqueued. ``null`` indicates not enqueued.
- ``updated`` - The number of Tasks updated (an integer or ``null``).
@@ -1693,7 +1693,7 @@ class Task(_Task):
"""
Manually mark a Task as started (happens automatically)
- :param bool force: If True the task status will be changed to `started` regardless of the current Task state.
+ :param bool force: If True, the task status will be changed to `started` regardless of the current Task state.
"""
# UI won't let us see metrics if we're not started
self.started(force=force)
@@ -1704,7 +1704,7 @@ class Task(_Task):
"""
Manually mark a Task as stopped (also used in :meth:`_at_exit`)
- :param bool force: If True the task status will be changed to `stopped` regardless of the current Task state.
+ :param bool force: If True, the task status will be changed to `stopped` regardless of the current Task state.
:param str status_message: Optional, add status change message to the stop request.
This message will be stored as status_message on the Task's info panel
"""
@@ -1825,7 +1825,7 @@ class Task(_Task):
:param delete_artifacts_and_models: If True, artifacts and models would also be deleted (default True).
If callback is provided, this argument is ignored.
:param skip_models_used_by_other_tasks: If True, models used by other tasks would not be deleted (default True)
- :param raise_on_error: If True an exception will be raised when encountering an error.
+ :param raise_on_error: If True, an exception will be raised when encountering an error.
If False an error would be printed and no exception will be raised.
:param callback: An optional callback accepting a uri type (string) and a uri (string) that will be called
for each artifact and model. If provided, the delete_artifacts_and_models is ignored.
@@ -1943,7 +1943,7 @@ class Task(_Task):
- ``False`` - Do not delete. (default)
:param bool auto_pickle: If True (default) and the artifact_object is not one of the following types:
- pathlib2.Path, dict, pandas.DataFrame, numpy.ndarray, PIL.Image, url (string), local_file (string)
+ pathlib2.Path, dict, pandas.DataFrame, numpy.ndarray, PIL.Image, url (string), local_file (string),
the artifact_object will be pickled and uploaded as pickle file artifact (with file extension .pkl)
:param Any preview: The artifact preview
@@ -1962,7 +1962,7 @@ class Task(_Task):
- In case the ``serialization_function`` argument is set - any extension is supported
:param Callable[Any, Union[bytes, bytearray]] serialization_function: A serialization function that takes one
- parameter of any types which is the object to be serialized. The function should return
+ parameter of any type which is the object to be serialized. The function should return
a `bytes` or `bytearray` object, which represents the serialized object. Note that the object will be
immediately serialized using this function, thus other serialization methods will not be used
(e.g. `pandas.DataFrame.to_csv`), even if possible. To deserialize this artifact when getting
@@ -2008,12 +2008,12 @@ class Task(_Task):
"""
Return a dictionary with {'input': [], 'output': []} loaded/stored models of the current Task
Input models are files loaded in the task, either manually or automatically logged
- Output models are files stored in the task, either manually or automatically logged
+ Output models are files stored in the task, either manually or automatically logged.
Automatically logged frameworks are for example: TensorFlow, Keras, PyTorch, ScikitLearn(joblib) etc.
:return: A dictionary-like object with "input"/"output" keys and input/output properties, pointing to a
- list-like object containing of Model objects. Each list-like object also acts as a dictionary, mapping
- model name to a appropriate model instance.
+ list-like object containing Model objects. Each list-like object also acts as a dictionary, mapping
+ model name to an appropriate model instance.
Example:
@@ -2931,7 +2931,7 @@ class Task(_Task):
:param str files_host: The file server url. For example, ``host='http://localhost:8081'``
:param str key: The user key (in the key/secret pair). For example, ``key='thisisakey123'``
:param str secret: The user secret (in the key/secret pair). For example, ``secret='thisisseceret123'``
- :param bool store_conf_file: If True store the current configuration into the ~/clearml.conf file.
+ :param bool store_conf_file: If True, store the current configuration into the ~/clearml.conf file.
If the configuration file exists, no change will be made (outputs a warning).
Not applicable when running remotely (i.e. clearml-agent).
"""
@@ -2984,7 +2984,7 @@ class Task(_Task):
:param task_id: Task ID to simulate, notice that all configuration will be taken from the specified
Task, regardless of the code initial values, just like it as if executed by ClearML agent
- :param reset_task: If True target Task, is automatically cleared / reset.
+ :param reset_task: If True, target Task is automatically cleared / reset.
"""
# if we are already running remotely, do nothing


@@ -36,7 +36,7 @@ def get_filename_from_file_object(file_object, flush=False, analyze_file_handle=
Return a string of the file location, extracted from any file object
:param file_object: str, file, stream, FileIO etc.
:param flush: If True, flush file object before returning (default: False)
- :param analyze_file_handle: If True try to retrieve filename from file handler object (default: False)
+ :param analyze_file_handle: If True, try to retrieve filename from file handler object (default: False)
:return: string full path of file location or None if filename cannot be extract
"""
if isinstance(file_object, six.string_types):


@@ -215,7 +215,7 @@ class ParallelZipper(object):
Else, a fresh ParallelZipper.ZipperObject will be inserted
:param zipper_results: Queue that holds ParallelZipper.ZipperObject instances. These instances
are added to this queue when chunk_size is exceeded
- :param allow_zip_64: if True ZipFile will create files with ZIP64 extensions when
+ :param allow_zip_64: if True, ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would be necessary
:param compression: ZipFile.ZIP_STORED (no compression), ZipFile.ZIP_DEFLATED (requires zlib),
ZipFile.ZIP_BZIP2 (requires bz2) or ZipFile.ZIP_LZMA (requires lzma).
@@ -352,7 +352,7 @@ class ParallelZipper(object):
:param chunk_size: Chunk size, in MB. The ParallelZipper will try its best not to exceed this size,
but that is not guaranteed
:param max_workers: The maximum number of workers spawned when zipping the files
- :param allow_zip_64: if True ZipFile will create files with ZIP64 extensions when
+ :param allow_zip_64: if True, ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would be necessary
:param compression: ZipFile.ZIP_STORED (no compression), ZipFile.ZIP_DEFLATED (requires zlib),
ZipFile.ZIP_BZIP2 (requires bz2) or ZipFile.ZIP_LZMA (requires lzma).


@@ -6,7 +6,7 @@
"metadata": {},
"outputs": [],
"source": [
"# execute this in command line on all machines to be used as workers before initiating the hyperparamer search \n",
"# execute this in command line on all machines to be used as workers before initiating the hyperparameter search \n",
"# ! pip install -U clearml-agent==0.15.0\n",
"# ! clearml-agent daemon --queue default\n",
"\n",