diff --git a/Makefile b/Makefile index 296aa057064..bb540e33a5c 100755 --- a/Makefile +++ b/Makefile @@ -172,6 +172,9 @@ pytest: prepare-pytest prepare-pytest-testdata pytest ./test/unit/v1beta1/suggestion --ignore=./test/unit/v1beta1/suggestion/test_skopt_service.py pytest ./test/unit/v1beta1/earlystopping pytest ./test/unit/v1beta1/metricscollector + cp ./pkg/apis/manager/v1beta1/python/api_pb2.py ./sdk/python/v1beta1/kubeflow/katib/katib_api_pb2.py + pytest ./sdk/python/v1beta1/kubeflow/katib + rm ./sdk/python/v1beta1/kubeflow/katib/katib_api_pb2.py # The skopt service doesn't work appropriately with Python 3.11. # So, we need to run the test with Python 3.9. diff --git a/sdk/python/v1beta1/kubeflow/katib/api/katib_client.py b/sdk/python/v1beta1/kubeflow/katib/api/katib_client.py index f6876d322e4..7988dbaa898 100644 --- a/sdk/python/v1beta1/kubeflow/katib/api/katib_client.py +++ b/sdk/python/v1beta1/kubeflow/katib/api/katib_client.py @@ -13,6 +13,7 @@ # limitations under the License. import inspect +import logging import multiprocessing import textwrap import time @@ -27,24 +28,35 @@ from kubernetes import client, config +logger = logging.getLogger(__name__) + + class KatibClient(object): def __init__( self, - config_file: str = None, - context: str = None, - client_configuration: client.Configuration = None, + config_file: Optional[str] = None, + context: Optional[str] = None, + client_configuration: Optional[client.Configuration] = None, namespace: str = utils.get_default_target_namespace(), ): - """KatibClient constructor. + """KatibClient constructor. Configure logging in your application + as follows to see detailed information from the KatibClient APIs: + .. code-block:: python + import logging + logging.basicConfig() + log = logging.getLogger("kubeflow.katib.api.katib_client") + log.setLevel(logging.DEBUG) Args: config_file: Path to the kube-config file. Defaults to ~/.kube/config. context: Set the active context. 
Defaults to current_context from the kube-config. client_configuration: Client configuration for cluster authentication. You have to provide valid configuration with Bearer token or - with username and password. - You can find an example here: https://github.com/kubernetes-client/python/blob/67f9c7a97081b4526470cad53576bc3b71fa6fcc/examples/remote_cluster.py#L31 - namespace: Target Kubernetes namespace. Can be overridden during method invocations. + with username and password. You can find an example here: + https://github.com/kubernetes-client/python/blob/67f9c7a97081b4526470cad53576bc3b71fa6fcc/examples/remote_cluster.py#L31 + namespace: Target Kubernetes namespace. By default it takes the namespace + from `/var/run/secrets/kubernetes.io/serviceaccount/namespace` location + or is set to `default`. Namespace can be overridden during method invocations. """ self.in_cluster = False @@ -131,8 +143,7 @@ def create_experiment( f"Failed to create Katib Experiment: {namespace}/{experiment_name}" ) - # TODO (andreyvelich): Use proper logger. - print(f"Experiment {namespace}/{experiment_name} has been created") + logger.debug(f"Experiment {namespace}/{experiment_name} has been created") if self._is_ipython(): if self.in_cluster: @@ -248,7 +259,7 @@ def tune( # Create Katib Experiment template. experiment = models.V1beta1Experiment( - api_version=f"{constants.KUBEFLOW_GROUP}/{constants.KATIB_VERSION}", + api_version=constants.API_VERSION, kind=constants.EXPERIMENT_KIND, metadata=models.V1ObjectMeta(name=name, namespace=namespace), spec=models.V1beta1ExperimentSpec(), @@ -743,7 +754,7 @@ def wait_for_experiment_condition( ) ): utils.print_experiment_status(experiment) - print(f"Experiment: {namespace}/{name} is {expected_condition}\n\n\n") + logger.debug(f"Experiment: {namespace}/{name} is {expected_condition}\n\n\n") return experiment # Raise exception if Experiment is Failed. 
@@ -763,7 +774,7 @@ def wait_for_experiment_condition( ) ): utils.print_experiment_status(experiment) - print(f"Experiment: {namespace}/{name} is {expected_condition}\n\n\n") + logger.debug(f"Experiment: {namespace}/{name} is {expected_condition}\n\n\n") return experiment # Check if Experiment reaches Running condition. @@ -774,7 +785,7 @@ def wait_for_experiment_condition( ) ): utils.print_experiment_status(experiment) - print(f"Experiment: {namespace}/{name} is {expected_condition}\n\n\n") + logger.debug(f"Experiment: {namespace}/{name} is {expected_condition}\n\n\n") return experiment # Check if Experiment reaches Restarting condition. @@ -785,7 +796,7 @@ def wait_for_experiment_condition( ) ): utils.print_experiment_status(experiment) - print(f"Experiment: {namespace}/{name} is {expected_condition}\n\n\n") + logger.debug(f"Experiment: {namespace}/{name} is {expected_condition}\n\n\n") return experiment # Check if Experiment reaches Succeeded condition. @@ -796,12 +807,12 @@ def wait_for_experiment_condition( ) ): utils.print_experiment_status(experiment) - print(f"Experiment: {namespace}/{name} is {expected_condition}\n\n\n") + logger.debug(f"Experiment: {namespace}/{name} is {expected_condition}\n\n\n") return experiment # Otherwise, print the current Experiment results and sleep for the pooling interval. utils.print_experiment_status(experiment) - print( + logger.debug( f"Waiting for Experiment: {namespace}/{name} to reach {expected_condition} condition\n\n\n" ) time.sleep(polling_interval) @@ -880,7 +891,7 @@ def edit_experiment_budget( except Exception: raise RuntimeError(f"Failed to edit Katib Experiment: {namespace}/{name}") - print(f"Experiment {namespace}/{name} has been updated") + logger.debug(f"Experiment {namespace}/{name} has been updated") def delete_experiment( self, @@ -919,8 +930,7 @@ def delete_experiment( except Exception: raise RuntimeError(f"Failed to delete Katib Experiment: {namespace}/{name}") - # TODO (andreyvelich): Use proper logger. 
- print(f"Experiment {namespace}/{name} has been deleted") + logger.debug(f"Experiment {namespace}/{name} has been deleted") def get_suggestion( self, diff --git a/sdk/python/v1beta1/kubeflow/katib/api/katib_client_test.py b/sdk/python/v1beta1/kubeflow/katib/api/katib_client_test.py new file mode 100644 index 00000000000..6d524f68ba9 --- /dev/null +++ b/sdk/python/v1beta1/kubeflow/katib/api/katib_client_test.py @@ -0,0 +1,268 @@ +import multiprocessing +from typing import List, Optional +from unittest.mock import patch, Mock + +import pytest +from kubernetes.client import V1ObjectMeta + +from kubeflow.katib import KatibClient +from kubeflow.katib import V1beta1AlgorithmSpec +from kubeflow.katib import V1beta1Experiment +from kubeflow.katib import V1beta1ExperimentSpec +from kubeflow.katib import V1beta1FeasibleSpace +from kubeflow.katib import V1beta1ObjectiveSpec +from kubeflow.katib import V1beta1ParameterSpec +from kubeflow.katib import V1beta1TrialParameterSpec +from kubeflow.katib import V1beta1TrialTemplate +from kubeflow.katib.constants import constants + +TEST_RESULT_SUCCESS = "success" + + +class ConflictException(Exception): + def __init__(self): + self.status = 409 + + +def create_namespaced_custom_object_response(*args, **kwargs): + if args[2] == "timeout": + raise multiprocessing.TimeoutError() + elif args[2] == "conflict": + raise ConflictException() + elif args[2] == "runtime": + raise Exception() + elif args[2] in ("test", "test-name"): + return {"metadata": {"name": "experiment-mnist-ci-test"}} + elif args[2] == "test-generate-name": + return {"metadata": {"name": "12345-experiment-mnist-ci-test"}} + + +def generate_trial_template() -> V1beta1TrialTemplate: + trial_spec={ + "apiVersion": "batch/v1", + "kind": "Job", + "spec": { + "template": { + "metadata": { + "annotations": { + "sidecar.istio.io/inject": "false" + } + }, + "spec": { + "containers": [ + { + "name": "training-container", + "image": 
"docker.io/kubeflowkatib/pytorch-mnist-cpu:v0.14.0", + "command": [ + "python3", + "/opt/pytorch-mnist/mnist.py", + "--epochs=1", + "--batch-size=64", + "--lr=${trialParameters.learningRate}", + "--momentum=${trialParameters.momentum}", + ] + } + ], + "restartPolicy": "Never" + } + } + } + } + + return V1beta1TrialTemplate( + primary_container_name="training-container", + trial_parameters=[ + V1beta1TrialParameterSpec( + name="learningRate", + description="Learning rate for the training model", + reference="lr" + ), + V1beta1TrialParameterSpec( + name="momentum", + description="Momentum for the training model", + reference="momentum" + ), + ], + trial_spec=trial_spec + ) + + +def generate_experiment( + metadata: V1ObjectMeta, + algorithm_spec: V1beta1AlgorithmSpec, + objective_spec: V1beta1ObjectiveSpec, + parameters: List[V1beta1ParameterSpec], + trial_template: V1beta1TrialTemplate, +) -> V1beta1Experiment: + return V1beta1Experiment( + api_version=constants.API_VERSION, + kind=constants.EXPERIMENT_KIND, + metadata=metadata, + spec=V1beta1ExperimentSpec( + max_trial_count=3, + parallel_trial_count=2, + max_failed_trial_count=1, + algorithm=algorithm_spec, + objective=objective_spec, + parameters=parameters, + trial_template=trial_template, + ) + ) + + +def create_experiment( + name: Optional[str] = None, + generate_name: Optional[str] = None +) -> V1beta1Experiment: + experiment_namespace = "test" + + if name is not None: + metadata = V1ObjectMeta(name=name, namespace=experiment_namespace) + elif generate_name is not None: + metadata = V1ObjectMeta(generate_name=generate_name, namespace=experiment_namespace) + else: + metadata = V1ObjectMeta(namespace=experiment_namespace) + + algorithm_spec=V1beta1AlgorithmSpec( + algorithm_name="random" + ) + + objective_spec=V1beta1ObjectiveSpec( + type="minimize", + goal= 0.001, + objective_metric_name="loss", + ) + + parameters=[ + V1beta1ParameterSpec( + name="lr", + parameter_type="double", + 
feasible_space=V1beta1FeasibleSpace( + min="0.01", + max="0.06" + ), + ), + V1beta1ParameterSpec( + name="momentum", + parameter_type="double", + feasible_space=V1beta1FeasibleSpace( + min="0.5", + max="0.9" + ), + ), + ] + + trial_template = generate_trial_template() + + experiment = generate_experiment( + metadata, + algorithm_spec, + objective_spec, + parameters, + trial_template + ) + return experiment + + +test_create_experiment_data = [ + ( + "experiment name and generate_name missing", + {"experiment": create_experiment()}, + ValueError, + ), + ( + "create_namespaced_custom_object timeout error", + { + "experiment": create_experiment(name="experiment-mnist-ci-test"), + "namespace": "timeout", + }, + TimeoutError, + ), + ( + "create_namespaced_custom_object conflict error", + { + "experiment": create_experiment(name="experiment-mnist-ci-test"), + "namespace": "conflict", + }, + Exception, + ), + ( + "create_namespaced_custom_object runtime error", + { + "experiment": create_experiment(name="experiment-mnist-ci-test"), + "namespace": "runtime", + }, + RuntimeError, + ), + ( + "valid flow with experiment type V1beta1Experiment and name", + { + "experiment": create_experiment(name="experiment-mnist-ci-test"), + "namespace": "test-name", + }, + TEST_RESULT_SUCCESS, + ), + ( + "valid flow with experiment type V1beta1Experiment and generate_name", + { + "experiment": create_experiment(generate_name="experiment-mnist-ci-test"), + "namespace": "test-generate-name", + }, + TEST_RESULT_SUCCESS, + ), + ( + "valid flow with experiment JSON and name", + { + "experiment": { + "metadata": { + "name": "experiment-mnist-ci-test", + } + }, + "namespace": "test-name", + }, + TEST_RESULT_SUCCESS, + ), + ( + "valid flow with experiment JSON and generate_name", + { + "experiment": { + "metadata": { + "generate_name": "experiment-mnist-ci-test", + } + }, + "namespace": "test-generate-name", + }, + TEST_RESULT_SUCCESS, + ), +] + + +@pytest.fixture +def katib_client(): + with patch( 
+ "kubernetes.client.CustomObjectsApi", + return_value=Mock( + create_namespaced_custom_object=Mock( + side_effect=create_namespaced_custom_object_response + ) + ), + ), patch( + "kubernetes.config.load_kube_config", + return_value=Mock() + ): + client = KatibClient() + yield client + + +@pytest.mark.parametrize("test_name,kwargs,expected_output", test_create_experiment_data) +def test_create_experiment(katib_client, test_name, kwargs, expected_output): + """ + test create_experiment function of katib client + """ + print("\n\nExecuting test:", test_name) + try: + katib_client.create_experiment(**kwargs) + assert expected_output == TEST_RESULT_SUCCESS + except Exception as e: + assert type(e) is expected_output + print("test execution complete") diff --git a/sdk/python/v1beta1/kubeflow/katib/constants/constants.py b/sdk/python/v1beta1/kubeflow/katib/constants/constants.py index 9af281524cd..74aa30e4b26 100644 --- a/sdk/python/v1beta1/kubeflow/katib/constants/constants.py +++ b/sdk/python/v1beta1/kubeflow/katib/constants/constants.py @@ -22,6 +22,7 @@ # Katib K8S constants KUBEFLOW_GROUP = "kubeflow.org" +API_VERSION = f"{KUBEFLOW_GROUP}/{KATIB_VERSION}" EXPERIMENT_KIND = "Experiment" EXPERIMENT_PLURAL = "experiments" SUGGESTION_PLURAL = "suggestions"