From e75a5570bd0efa4755f2d98a8ffc618fa6c92668 Mon Sep 17 00:00:00 2001
From: Vishal Bollu
Date: Tue, 2 Mar 2021 16:46:50 +0000
Subject: [PATCH] Update version to 0.30.0

---
 build/build-image.sh                     |  2 +-
 build/cli.sh                             |  2 +-
 build/push-image.sh                      |  2 +-
 dev/registry.sh                          |  2 +-
 docs/clients/install.md                  |  8 ++---
 docs/clients/python.md                   |  2 +-
 docs/clusters/aws/install.md             | 44 ++++++++++++------------
 docs/clusters/gcp/install.md             | 34 +++++++++----------
 docs/workloads/batch/configuration.md    |  8 ++---
 docs/workloads/batch/predictors.md       |  4 +--
 docs/workloads/dependencies/images.md    | 28 ++++++++--------
 docs/workloads/realtime/configuration.md |  8 ++---
 docs/workloads/realtime/predictors.md    |  6 ++--
 docs/workloads/task/configuration.md     |  2 +-
 get-cli.sh                               |  2 +-
 manager/check_cortex_version.sh          |  2 +-
 manager/debug.sh                         |  2 +-
 manager/debug_gcp.sh                     |  2 +-
 manager/info.sh                          |  2 +-
 manager/info_gcp.sh                      |  2 +-
 manager/install.sh                       |  4 +--
 manager/refresh.sh                       |  2 +-
 pkg/consts/consts.go                     |  4 +--
 pkg/cortex/client/cortex/client.py       |  2 +-
 pkg/cortex/client/cortex/consts.py       |  2 +-
 pkg/cortex/client/setup.py               |  2 +-
 pkg/cortex/serve/init/bootloader.sh      |  2 +-
 pkg/cortex/serve/setup.py                |  2 +-
 test/e2e/setup.py                        |  2 +-
 29 files changed, 93 insertions(+), 93 deletions(-)

diff --git a/build/build-image.sh b/build/build-image.sh
index a6d976fa86..89aebdc492 100755
--- a/build/build-image.sh
+++ b/build/build-image.sh
@@ -19,7 +19,7 @@ set -euo pipefail
 
 ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. >/dev/null && pwd)"
 
-CORTEX_VERSION=master
+CORTEX_VERSION=0.30.0
 
 image=$1
 
diff --git a/build/cli.sh b/build/cli.sh
index ab2f8b05e1..33f11cd560 100755
--- a/build/cli.sh
+++ b/build/cli.sh
@@ -19,7 +19,7 @@ set -euo pipefail
 
 ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. >/dev/null && pwd)"
 
-CORTEX_VERSION=master
+CORTEX_VERSION=0.30.0
 
 arg1=${1:-""}
 upload="false"
diff --git a/build/push-image.sh b/build/push-image.sh
index 48d4cb1e0b..88b431b097 100755
--- a/build/push-image.sh
+++ b/build/push-image.sh
@@ -17,7 +17,7 @@
 
 set -euo pipefail
 
-CORTEX_VERSION=master
+CORTEX_VERSION=0.30.0
 
 image=$1
 
diff --git a/dev/registry.sh b/dev/registry.sh
index 12d65a6ca0..9cdd658589 100755
--- a/dev/registry.sh
+++ b/dev/registry.sh
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORTEX_VERSION=master
+CORTEX_VERSION=0.30.0
 
 set -eo pipefail
 
diff --git a/docs/clients/install.md b/docs/clients/install.md
index d4c0f16444..1429372a2b 100644
--- a/docs/clients/install.md
+++ b/docs/clients/install.md
@@ -9,10 +9,10 @@
 pip install cortex
 ```
 
-To install or upgrade to a specific version (e.g. v0.29.0):
+To install or upgrade to a specific version (e.g. v0.30.0):
 
 ```bash
-pip install cortex==0.29.0
+pip install cortex==0.30.0
 ```
 
 To upgrade to the latest version:
@@ -25,8 +25,8 @@ pip install --upgrade cortex
 ```
 
 ```bash
-# For example to download CLI version 0.29.0 (Note the "v"):
-bash -c "$(curl -sS https://raw.githubusercontent.com/cortexlabs/cortex/v0.29.0/get-cli.sh)"
+# For example to download CLI version 0.30.0 (Note the "v"):
+bash -c "$(curl -sS https://raw.githubusercontent.com/cortexlabs/cortex/v0.30.0/get-cli.sh)"
 ```
 
 By default, the Cortex CLI is installed at `/usr/local/bin/cortex`. To install the executable elsewhere, export the `CORTEX_INSTALL_PATH` environment variable to your desired location before running the command above.
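Since the CLI and Python client must match the cluster's release, it can help to assert the pinned version at runtime. A minimal sketch, assuming the `cortex` package from this release is installed; `CORTEX_VERSION` comes from `pkg/cortex/client/cortex/consts.py`, which this patch updates later:

```python
# check_client_version.py - hypothetical helper, not part of this patch
from cortex.consts import CORTEX_VERSION  # set to "0.30.0" by this patch

EXPECTED = "0.30.0"

if CORTEX_VERSION != EXPECTED:
    raise SystemExit(f"expected cortex=={EXPECTED}, found {CORTEX_VERSION}")
print(f"cortex client {CORTEX_VERSION} OK")
```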
diff --git a/docs/clients/python.md b/docs/clients/python.md
index 1b6be845cd..d6b0f18ca9 100644
--- a/docs/clients/python.md
+++ b/docs/clients/python.md
@@ -88,7 +88,7 @@ Deploy an API.
 
 **Arguments**:
 
-- `api_spec` - A dictionary defining a single Cortex API. See https://docs.cortex.dev/v/master/ for schema.
+- `api_spec` - A dictionary defining a single Cortex API. See https://docs.cortex.dev/v/0.30/ for schema.
 - `predictor` - A Cortex Predictor class implementation. Not required for TaskAPI/TrafficSplitter kinds.
 - `task` - A callable class/function implementation. Not required for RealtimeAPI/BatchAPI/TrafficSplitter kinds.
 - `requirements` - A list of PyPI dependencies that will be installed before the predictor class implementation is invoked.
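To make the `create_api()` docstring above concrete, a minimal usage sketch; the environment name (`aws`) and all spec values are placeholders, not part of this patch:

```python
import cortex

class PythonPredictor:
    def __init__(self, config):
        self.greeting = config.get("greeting", "hello")  # `config` comes from the api_spec

    def predict(self, payload):
        return f"{self.greeting}, {payload['name']}"

cx = cortex.client("aws")  # assumes an environment named "aws" is configured
cx.create_api(
    api_spec={
        "name": "greeter",
        "kind": "RealtimeAPI",
        "predictor": {"type": "python", "config": {"greeting": "hi"}},
    },
    predictor=PythonPredictor,  # not required for TaskAPI/TrafficSplitter kinds
    requirements=[],            # PyPI deps installed before the predictor is invoked
)
```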
diff --git a/docs/clusters/aws/install.md b/docs/clusters/aws/install.md
index 482e8c3c2a..85322245ec 100644
--- a/docs/clusters/aws/install.md
+++ b/docs/clusters/aws/install.md
@@ -92,26 +92,26 @@ The docker images used by the Cortex cluster can also be overridden, although th
 
 ```yaml
-image_operator: quay.io/cortexlabs/operator:master
-image_manager: quay.io/cortexlabs/manager:master
-image_downloader: quay.io/cortexlabs/downloader:master
-image_request_monitor: quay.io/cortexlabs/request-monitor:master
-image_cluster_autoscaler: quay.io/cortexlabs/cluster-autoscaler:master
-image_metrics_server: quay.io/cortexlabs/metrics-server:master
-image_inferentia: quay.io/cortexlabs/inferentia:master
-image_neuron_rtd: quay.io/cortexlabs/neuron-rtd:master
-image_nvidia: quay.io/cortexlabs/nvidia:master
-image_fluent_bit: quay.io/cortexlabs/fluent-bit:master
-image_istio_proxy: quay.io/cortexlabs/istio-proxy:master
-image_istio_pilot: quay.io/cortexlabs/istio-pilot:master
-image_prometheus: quay.io/cortexlabs/prometheus:master
-image_prometheus_config_reloader: quay.io/cortexlabs/prometheus-config-reloader:master
-image_prometheus_operator: quay.io/cortexlabs/prometheus-operator:master
-image_prometheus_statsd_exporter: quay.io/cortexlabs/prometheus-statsd-exporter:master
-image_prometheus_dcgm_exporter: quay.io/cortexlabs/prometheus-dcgm-exporter:master
-image_prometheus_kube_state_metrics: quay.io/cortexlabs/prometheus-kube-state-metrics:master
-image_prometheus_node_exporter: quay.io/cortexlabs/prometheus-node-exporter:master
-image_kube_rbac_proxy: quay.io/cortexlabs/kube-rbac-proxy:master
-image_grafana: quay.io/cortexlabs/grafana:master
-image_event_exporter: quay.io/cortexlabs/event-exporter:master
+image_operator: quay.io/cortexlabs/operator:0.30.0
+image_manager: quay.io/cortexlabs/manager:0.30.0
+image_downloader: quay.io/cortexlabs/downloader:0.30.0
+image_request_monitor: quay.io/cortexlabs/request-monitor:0.30.0
+image_cluster_autoscaler: quay.io/cortexlabs/cluster-autoscaler:0.30.0
+image_metrics_server: quay.io/cortexlabs/metrics-server:0.30.0
+image_inferentia: quay.io/cortexlabs/inferentia:0.30.0
+image_neuron_rtd: quay.io/cortexlabs/neuron-rtd:0.30.0
+image_nvidia: quay.io/cortexlabs/nvidia:0.30.0
+image_fluent_bit: quay.io/cortexlabs/fluent-bit:0.30.0
+image_istio_proxy: quay.io/cortexlabs/istio-proxy:0.30.0
+image_istio_pilot: quay.io/cortexlabs/istio-pilot:0.30.0
+image_prometheus: quay.io/cortexlabs/prometheus:0.30.0
+image_prometheus_config_reloader: quay.io/cortexlabs/prometheus-config-reloader:0.30.0
+image_prometheus_operator: quay.io/cortexlabs/prometheus-operator:0.30.0
+image_prometheus_statsd_exporter: quay.io/cortexlabs/prometheus-statsd-exporter:0.30.0
+image_prometheus_dcgm_exporter: quay.io/cortexlabs/prometheus-dcgm-exporter:0.30.0
+image_prometheus_kube_state_metrics: quay.io/cortexlabs/prometheus-kube-state-metrics:0.30.0
+image_prometheus_node_exporter: quay.io/cortexlabs/prometheus-node-exporter:0.30.0
+image_kube_rbac_proxy: quay.io/cortexlabs/kube-rbac-proxy:0.30.0
+image_grafana: quay.io/cortexlabs/grafana:0.30.0
+image_event_exporter: quay.io/cortexlabs/event-exporter:0.30.0
 ```
 
diff --git a/docs/clusters/gcp/install.md b/docs/clusters/gcp/install.md
index 18ee459e1d..d5a82183e1 100644
--- a/docs/clusters/gcp/install.md
+++ b/docs/clusters/gcp/install.md
@@ -71,21 +71,21 @@ The docker images used by the Cortex cluster can also be overridden, although th
 
 ```yaml
-image_operator: quay.io/cortexlabs/operator:master
-image_manager: quay.io/cortexlabs/manager:master
-image_downloader: quay.io/cortexlabs/downloader:master
-image_request_monitor: quay.io/cortexlabs/request-monitor:master
-image_istio_proxy: quay.io/cortexlabs/istio-proxy:master
-image_istio_pilot: quay.io/cortexlabs/istio-pilot:master
-image_google_pause: quay.io/cortexlabs/google-pause:master
-image_prometheus: quay.io/cortexlabs/prometheus:master
-image_prometheus_config_reloader: quay.io/cortexlabs/prometheus-config-reloader:master
-image_prometheus_operator: quay.io/cortexlabs/prometheus-operator:master
-image_prometheus_statsd_exporter: quay.io/cortexlabs/prometheus-statsd-exporter:master
-image_prometheus_dcgm_exporter: quay.io/cortexlabs/prometheus-dcgm-exporter:master
-image_prometheus_kube_state_metrics: quay.io/cortexlabs/prometheus-kube-state-metrics:master
-image_prometheus_node_exporter: quay.io/cortexlabs/prometheus-node-exporter:master
-image_kube_rbac_proxy: quay.io/cortexlabs/kube-rbac-proxy:master
-image_grafana: quay.io/cortexlabs/grafana:master
-image_event_exporter: quay.io/cortexlabs/event-exporter:master
+image_operator: quay.io/cortexlabs/operator:0.30.0
+image_manager: quay.io/cortexlabs/manager:0.30.0
+image_downloader: quay.io/cortexlabs/downloader:0.30.0
+image_request_monitor: quay.io/cortexlabs/request-monitor:0.30.0
+image_istio_proxy: quay.io/cortexlabs/istio-proxy:0.30.0
+image_istio_pilot: quay.io/cortexlabs/istio-pilot:0.30.0
+image_google_pause: quay.io/cortexlabs/google-pause:0.30.0
+image_prometheus: quay.io/cortexlabs/prometheus:0.30.0
+image_prometheus_config_reloader: quay.io/cortexlabs/prometheus-config-reloader:0.30.0
+image_prometheus_operator: quay.io/cortexlabs/prometheus-operator:0.30.0
+image_prometheus_statsd_exporter: quay.io/cortexlabs/prometheus-statsd-exporter:0.30.0
+image_prometheus_dcgm_exporter: quay.io/cortexlabs/prometheus-dcgm-exporter:0.30.0
+image_prometheus_kube_state_metrics: quay.io/cortexlabs/prometheus-kube-state-metrics:0.30.0
+image_prometheus_node_exporter: quay.io/cortexlabs/prometheus-node-exporter:0.30.0
+image_kube_rbac_proxy: quay.io/cortexlabs/kube-rbac-proxy:0.30.0
+image_grafana: quay.io/cortexlabs/grafana:0.30.0
+image_event_exporter: quay.io/cortexlabs/event-exporter:0.30.0
 ```
 
diff --git a/docs/workloads/batch/configuration.md b/docs/workloads/batch/configuration.md
index b91f2838a1..8d9b1264a3 100644
--- a/docs/workloads/batch/configuration.md
+++ b/docs/workloads/batch/configuration.md
@@ -19,7 +19,7 @@ predictor:
   path: # path to a python file with a PythonPredictor class definition, relative to the Cortex root (required)
   config: # arbitrary dictionary passed to the constructor of the Predictor (can be overridden by config passed in job submission) (optional)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:master or quay.io/cortexlabs/python-predictor-gpu:master-cuda10.2-cudnn8 based on compute)
+  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:0.30.0 or quay.io/cortexlabs/python-predictor-gpu:0.30.0-cuda10.2-cudnn8 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   shm_size: # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
@@ -49,8 +49,8 @@ predictor:
   batch_interval: # the maximum amount of time to spend waiting for additional requests before running inference on the batch of requests
   config: # arbitrary dictionary passed to the constructor of the Predictor (can be overridden by config passed in job submission) (optional)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:master)
-  tensorflow_serving_image: # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-cpu:master or quay.io/cortexlabs/tensorflow-serving-gpu:master based on compute)
+  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:0.30.0)
+  tensorflow_serving_image: # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-cpu:0.30.0 or quay.io/cortexlabs/tensorflow-serving-gpu:0.30.0 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   shm_size: # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
@@ -75,7 +75,7 @@ predictor:
   ...
   config: # arbitrary dictionary passed to the constructor of the Predictor (can be overridden by config passed in job submission) (optional)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-cpu:master or quay.io/cortexlabs/onnx-predictor-gpu:master based on compute)
+  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-cpu:0.30.0 or quay.io/cortexlabs/onnx-predictor-gpu:0.30.0 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   shm_size: # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
diff --git a/docs/workloads/batch/predictors.md b/docs/workloads/batch/predictors.md
index bf6893722a..9b86801cac 100644
--- a/docs/workloads/batch/predictors.md
+++ b/docs/workloads/batch/predictors.md
@@ -143,7 +143,7 @@ class TensorFlowPredictor:
 ```
 
-Cortex provides a `tensorflow_client` to your Predictor's constructor. `tensorflow_client` is an instance of [TensorFlowClient](https://github.com/cortexlabs/cortex/tree/master/pkg/cortex/serve/cortex_internal/lib/client/tensorflow.py) that manages a connection to a TensorFlow Serving container to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `tensorflow_client.predict()` to make an inference with your exported TensorFlow model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
+Cortex provides a `tensorflow_client` to your Predictor's constructor. `tensorflow_client` is an instance of [TensorFlowClient](https://github.com/cortexlabs/cortex/tree/0.30/pkg/cortex/serve/cortex_internal/lib/client/tensorflow.py) that manages a connection to a TensorFlow Serving container to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `tensorflow_client.predict()` to make an inference with your exported TensorFlow model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
 
 When multiple models are defined using the Predictor's `models` field, the `tensorflow_client.predict()` method expects a second argument `model_name` which must hold the name of the model that you want to use for inference (for example: `self.client.predict(payload, "text-generator")`).
 
@@ -204,7 +204,7 @@ class ONNXPredictor:
 ```
 
-Cortex provides an `onnx_client` to your Predictor's constructor. `onnx_client` is an instance of [ONNXClient](https://github.com/cortexlabs/cortex/tree/master/pkg/cortex/serve/cortex_internal/lib/client/onnx.py) that manages an ONNX Runtime session to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `onnx_client.predict()` to make an inference with your exported ONNX model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
+Cortex provides an `onnx_client` to your Predictor's constructor. `onnx_client` is an instance of [ONNXClient](https://github.com/cortexlabs/cortex/tree/0.30/pkg/cortex/serve/cortex_internal/lib/client/onnx.py) that manages an ONNX Runtime session to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `onnx_client.predict()` to make an inference with your exported ONNX model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
 
 When multiple models are defined using the Predictor's `models` field, the `onnx_client.predict()` method expects a second argument `model_name` which must hold the name of the model that you want to use for inference (for example: `self.client.predict(model_input, "text-generator")`).
 
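A minimal batch `TensorFlowPredictor` sketch following the pattern the paragraphs above describe; the model name and the `batch_id` parameter are illustrative assumptions, not part of this patch:

```python
class TensorFlowPredictor:
    def __init__(self, tensorflow_client, config):
        # save the client as an instance variable, as the docs above recommend
        self.client = tensorflow_client

    def predict(self, payload, batch_id):  # batch_id parameter is an assumption here
        # with multiple models configured, the second argument selects the model
        return self.client.predict(payload, "text-generator")
```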
diff --git a/docs/workloads/dependencies/images.md b/docs/workloads/dependencies/images.md
index 00b664703a..7b826b8f7b 100644
--- a/docs/workloads/dependencies/images.md
+++ b/docs/workloads/dependencies/images.md
@@ -11,19 +11,19 @@ mkdir my-api && cd my-api && touch Dockerfile
 
 Cortex's base Docker images are listed below. Depending on the Cortex Predictor and compute type specified in your API configuration, choose one of these images to use as the base for your Docker image:
 
-* Python Predictor (CPU): `quay.io/cortexlabs/python-predictor-cpu:master`
+* Python Predictor (CPU): `quay.io/cortexlabs/python-predictor-cpu:0.30.0`
 * Python Predictor (GPU): choose one of the following:
-  * `quay.io/cortexlabs/python-predictor-gpu:master-cuda10.0-cudnn7`
-  * `quay.io/cortexlabs/python-predictor-gpu:master-cuda10.1-cudnn7`
-  * `quay.io/cortexlabs/python-predictor-gpu:master-cuda10.1-cudnn8`
-  * `quay.io/cortexlabs/python-predictor-gpu:master-cuda10.2-cudnn7`
-  * `quay.io/cortexlabs/python-predictor-gpu:master-cuda10.2-cudnn8`
-  * `quay.io/cortexlabs/python-predictor-gpu:master-cuda11.0-cudnn8`
-  * `quay.io/cortexlabs/python-predictor-gpu:master-cuda11.1-cudnn8`
-* Python Predictor (Inferentia): `quay.io/cortexlabs/python-predictor-inf:master`
-* TensorFlow Predictor (CPU, GPU, Inferentia): `quay.io/cortexlabs/tensorflow-predictor:master`
-* ONNX Predictor (CPU): `quay.io/cortexlabs/onnx-predictor-cpu:master`
-* ONNX Predictor (GPU): `quay.io/cortexlabs/onnx-predictor-gpu:master`
+  * `quay.io/cortexlabs/python-predictor-gpu:0.30.0-cuda10.0-cudnn7`
+  * `quay.io/cortexlabs/python-predictor-gpu:0.30.0-cuda10.1-cudnn7`
+  * `quay.io/cortexlabs/python-predictor-gpu:0.30.0-cuda10.1-cudnn8`
+  * `quay.io/cortexlabs/python-predictor-gpu:0.30.0-cuda10.2-cudnn7`
+  * `quay.io/cortexlabs/python-predictor-gpu:0.30.0-cuda10.2-cudnn8`
+  * `quay.io/cortexlabs/python-predictor-gpu:0.30.0-cuda11.0-cudnn8`
+  * `quay.io/cortexlabs/python-predictor-gpu:0.30.0-cuda11.1-cudnn8`
+* Python Predictor (Inferentia): `quay.io/cortexlabs/python-predictor-inf:0.30.0`
+* TensorFlow Predictor (CPU, GPU, Inferentia): `quay.io/cortexlabs/tensorflow-predictor:0.30.0`
+* ONNX Predictor (CPU): `quay.io/cortexlabs/onnx-predictor-cpu:0.30.0`
+* ONNX Predictor (GPU): `quay.io/cortexlabs/onnx-predictor-gpu:0.30.0`
 
 The sample `Dockerfile` below inherits from Cortex's Python CPU serving image, and installs 3 packages. `tree` is a system package and `pandas` and `rdkit` are Python packages.
 
@@ -31,7 +31,7 @@ The sample `Dockerfile` below inherits from Cortex's Python CPU serving image, a
 ```dockerfile
 # Dockerfile
 
-FROM quay.io/cortexlabs/python-predictor-cpu:master
+FROM quay.io/cortexlabs/python-predictor-cpu:0.30.0
 
 RUN apt-get update \
     && apt-get install -y tree \
@@ -49,7 +49,7 @@ If you need to upgrade the Python Runtime version on your image, you can follow
 ```Dockerfile
 # Dockerfile
 
-FROM quay.io/cortexlabs/python-predictor-cpu:master
+FROM quay.io/cortexlabs/python-predictor-cpu:0.30.0
 
 # upgrade python runtime version
 RUN conda update -n base -c defaults conda
diff --git a/docs/workloads/realtime/configuration.md b/docs/workloads/realtime/configuration.md
index c81ee14be8..2d7de02892 100644
--- a/docs/workloads/realtime/configuration.md
+++ b/docs/workloads/realtime/configuration.md
@@ -39,7 +39,7 @@ predictor:
   threads_per_process: # the number of threads per process (default: 1)
   config: # arbitrary dictionary passed to the constructor of the Predictor (optional)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:master, quay.io/cortexlabs/python-predictor-gpu:master-cuda10.2-cudnn8, or quay.io/cortexlabs/python-predictor-inf:master based on compute)
+  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:0.30.0, quay.io/cortexlabs/python-predictor-gpu:0.30.0-cuda10.2-cudnn8, or quay.io/cortexlabs/python-predictor-inf:0.30.0 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   shm_size: # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
@@ -74,8 +74,8 @@ predictor:
   threads_per_process: # the number of threads per process (default: 1)
   config: # arbitrary dictionary passed to the constructor of the Predictor (optional)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:master)
-  tensorflow_serving_image: # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-cpu:master, quay.io/cortexlabs/tensorflow-serving-gpu:master, or quay.io/cortexlabs/tensorflow-serving-inf:master based on compute)
+  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:0.30.0)
+  tensorflow_serving_image: # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-cpu:0.30.0, quay.io/cortexlabs/tensorflow-serving-gpu:0.30.0, or quay.io/cortexlabs/tensorflow-serving-inf:0.30.0 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   shm_size: # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
@@ -105,7 +105,7 @@ predictor:
   threads_per_process: # the number of threads per process (default: 1)
   config: # arbitrary dictionary passed to the constructor of the Predictor (optional)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-cpu:master or quay.io/cortexlabs/onnx-predictor-gpu:master based on compute)
+  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-cpu:0.30.0 or quay.io/cortexlabs/onnx-predictor-gpu:0.30.0 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   shm_size: # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
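The realtime configuration fields above map directly onto the `api_spec` dictionary accepted by the Python client; a minimal sketch with placeholder values:

```python
# placeholder RealtimeAPI spec mirroring the configuration reference above
api_spec = {
    "name": "my-api",
    "kind": "RealtimeAPI",
    "predictor": {
        "type": "python",
        "path": "predictor.py",           # PythonPredictor implementation
        "config": {"greeting": "hello"},  # passed to the predictor constructor
        "log_level": "info",              # "debug", "info", "warning", or "error"
    },
}
```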
diff --git a/docs/workloads/realtime/predictors.md b/docs/workloads/realtime/predictors.md
index a1b2e4ce73..efe96b3505 100644
--- a/docs/workloads/realtime/predictors.md
+++ b/docs/workloads/realtime/predictors.md
@@ -119,7 +119,7 @@ class PythonPredictor:
 ```
 
-When explicit model paths are specified in the Python predictor's API configuration, Cortex provides a `python_client` to your Predictor's constructor. `python_client` is an instance of [PythonClient](https://github.com/cortexlabs/cortex/tree/master/pkg/cortex/serve/cortex_internal/lib/client/python.py) that is used to load model(s) (it calls the `load_model()` method of your predictor, which must be defined when using explicit model paths). It should be saved as an instance variable in your Predictor, and your `predict()` function should call `python_client.get_model()` to load your model for inference. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
+When explicit model paths are specified in the Python predictor's API configuration, Cortex provides a `python_client` to your Predictor's constructor. `python_client` is an instance of [PythonClient](https://github.com/cortexlabs/cortex/tree/0.30/pkg/cortex/serve/cortex_internal/lib/client/python.py) that is used to load model(s) (it calls the `load_model()` method of your predictor, which must be defined when using explicit model paths). It should be saved as an instance variable in your Predictor, and your `predict()` function should call `python_client.get_model()` to load your model for inference. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
 
 When multiple models are defined using the Predictor's `models` field, the `python_client.get_model()` method expects an argument `model_name` which must hold the name of the model that you want to load (for example: `self.client.get_model("text-generator")`). There is also an optional second argument to specify the model version.
 
@@ -189,7 +189,7 @@ class TensorFlowPredictor:
 ```
 
-Cortex provides a `tensorflow_client` to your Predictor's constructor. `tensorflow_client` is an instance of [TensorFlowClient](https://github.com/cortexlabs/cortex/tree/master/pkg/cortex/serve/cortex_internal/lib/client/tensorflow.py) that manages a connection to a TensorFlow Serving container to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `tensorflow_client.predict()` to make an inference with your exported TensorFlow model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
+Cortex provides a `tensorflow_client` to your Predictor's constructor. `tensorflow_client` is an instance of [TensorFlowClient](https://github.com/cortexlabs/cortex/tree/0.30/pkg/cortex/serve/cortex_internal/lib/client/tensorflow.py) that manages a connection to a TensorFlow Serving container to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `tensorflow_client.predict()` to make an inference with your exported TensorFlow model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
 
 When multiple models are defined using the Predictor's `models` field, the `tensorflow_client.predict()` method expects a second argument `model_name` which must hold the name of the model that you want to use for inference (for example: `self.client.predict(payload, "text-generator")`). There is also an optional third argument to specify the model version.
 
@@ -261,7 +261,7 @@ class ONNXPredictor:
 ```
 
-Cortex provides an `onnx_client` to your Predictor's constructor. `onnx_client` is an instance of [ONNXClient](https://github.com/cortexlabs/cortex/tree/master/pkg/cortex/serve/cortex_internal/lib/client/onnx.py) that manages an ONNX Runtime session to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `onnx_client.predict()` to make an inference with your exported ONNX model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
+Cortex provides an `onnx_client` to your Predictor's constructor. `onnx_client` is an instance of [ONNXClient](https://github.com/cortexlabs/cortex/tree/0.30/pkg/cortex/serve/cortex_internal/lib/client/onnx.py) that manages an ONNX Runtime session to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `onnx_client.predict()` to make an inference with your exported ONNX model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
 
 When multiple models are defined using the Predictor's `models` field, the `onnx_client.predict()` method expects a second argument `model_name` which must hold the name of the model that you want to use for inference (for example: `self.client.predict(model_input, "text-generator")`). There is also an optional third argument to specify the model version.
 
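A minimal realtime `PythonPredictor` sketch tying the `python_client` paragraphs above together; the pickle file name and the loaded model's `predict()` call are assumptions for illustration:

```python
import pickle

class PythonPredictor:
    def __init__(self, python_client, config):
        self.client = python_client  # save the client as an instance variable

    def load_model(self, model_path):
        # called by python_client to (re)load a model; required with explicit model paths
        with open(model_path + "/model.pkl", "rb") as f:  # file name is an assumption
            return pickle.load(f)

    def predict(self, payload):
        model = self.client.get_model("text-generator")  # name from the `models` field
        return model.predict(payload)  # assumes an sklearn-style model object
```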
diff --git a/docs/workloads/task/configuration.md b/docs/workloads/task/configuration.md
index 779bafdf58..0d1b3525d8 100644
--- a/docs/workloads/task/configuration.md
+++ b/docs/workloads/task/configuration.md
@@ -12,7 +12,7 @@
   conda: # relative path to conda-packages.txt (default: conda-packages.txt)
   shell: # relative path to a shell script for system package installation (default: dependencies.sh)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Task (default: quay.io/cortexlabs/python-predictor-cpu:master, quay.io/cortexlabs/python-predictor-gpu:master-cuda10.2-cudnn8, or quay.io/cortexlabs/python-predictor-inf:master based on compute)
+  image: # docker image to use for the Task (default: quay.io/cortexlabs/python-predictor-cpu:0.30.0, quay.io/cortexlabs/python-predictor-gpu:0.30.0-cuda10.2-cudnn8, or quay.io/cortexlabs/python-predictor-inf:0.30.0 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   networking:
diff --git a/get-cli.sh b/get-cli.sh
index e9be9627fa..a35b7f5589 100755
--- a/get-cli.sh
+++ b/get-cli.sh
@@ -16,7 +16,7 @@
 
 set -e
 
-CORTEX_VERSION_BRANCH_STABLE=master
+CORTEX_VERSION_BRANCH_STABLE=0.30.0
 CORTEX_INSTALL_PATH="${CORTEX_INSTALL_PATH:-/usr/local/bin/cortex}"
 
 # replace ~ with the home directory path
diff --git a/manager/check_cortex_version.sh b/manager/check_cortex_version.sh
index 41db9ca9ef..148b5bb3fe 100755
--- a/manager/check_cortex_version.sh
+++ b/manager/check_cortex_version.sh
@@ -16,7 +16,7 @@
 
 set -e
 
-CORTEX_VERSION=master
+CORTEX_VERSION=0.30.0
 
 if [ "$CORTEX_VERSION" != "$CORTEX_CLI_VERSION" ]; then
   echo "error: your CLI version ($CORTEX_CLI_VERSION) doesn't match your Cortex manager image version ($CORTEX_VERSION); please update your CLI (pip install cortex==$CORTEX_VERSION), or update your Cortex manager image by modifying the value for \`image_manager\` in your cluster configuration file and running \`cortex cluster configure --config cluster.yaml\` (update other image paths in cluster.yaml as well if necessary)"
diff --git a/manager/debug.sh b/manager/debug.sh
index b35ae9aa22..5917362df5 100755
--- a/manager/debug.sh
+++ b/manager/debug.sh
@@ -16,7 +16,7 @@
 
 set +e
 
-CORTEX_VERSION_MINOR=master
+CORTEX_VERSION_MINOR=0.30
 
 debug_out_path="$1"
 mkdir -p "$(dirname "$debug_out_path")"
diff --git a/manager/debug_gcp.sh b/manager/debug_gcp.sh
index d3eefeb052..c18d281c0d 100755
--- a/manager/debug_gcp.sh
+++ b/manager/debug_gcp.sh
@@ -16,7 +16,7 @@
 
 set +e
 
-CORTEX_VERSION_MINOR=master
+CORTEX_VERSION_MINOR=0.30
 
 debug_out_path="$1"
 mkdir -p "$(dirname "$debug_out_path")"
diff --git a/manager/info.sh b/manager/info.sh
index 4dd7a83139..7288517915 100755
--- a/manager/info.sh
+++ b/manager/info.sh
@@ -16,7 +16,7 @@
 
 set -eo pipefail
 
-CORTEX_VERSION_MINOR=master
+CORTEX_VERSION_MINOR=0.30
 
 function get_operator_endpoint() {
   kubectl -n=istio-system get service ingressgateway-operator -o json | tr -d '[:space:]' | sed 's/.*{\"hostname\":\"\(.*\)\".*/\1/'
diff --git a/manager/info_gcp.sh b/manager/info_gcp.sh
index aa84b54254..6c58bb78d8 100755
--- a/manager/info_gcp.sh
+++ b/manager/info_gcp.sh
@@ -16,7 +16,7 @@
 
 set -eo pipefail
 
-CORTEX_VERSION_MINOR=master
+CORTEX_VERSION_MINOR=0.30
 
 function get_operator_endpoint() {
   kubectl -n=istio-system get service ingressgateway-operator -o json | tr -d '[:space:]' | sed 's/.*{\"ip\":\"\(.*\)\".*/\1/'
diff --git a/manager/install.sh b/manager/install.sh
index 087d09026b..5091931231 100755
--- a/manager/install.sh
+++ b/manager/install.sh
@@ -16,8 +16,8 @@
 
 set -eo pipefail
 
-export CORTEX_VERSION=master
-export CORTEX_VERSION_MINOR=master
+export CORTEX_VERSION=0.30.0
+export CORTEX_VERSION_MINOR=0.30
 EKSCTL_TIMEOUT=45m
 
 mkdir /workspace
diff --git a/manager/refresh.sh b/manager/refresh.sh
index da75db44fc..99ad518972 100755
--- a/manager/refresh.sh
+++ b/manager/refresh.sh
@@ -16,7 +16,7 @@
 
 set -e
 
-CORTEX_VERSION_MINOR=master
+CORTEX_VERSION_MINOR=0.30
 
 cluster_config_out_path="$1"
 mkdir -p "$(dirname "$cluster_config_out_path")"
diff --git a/pkg/consts/consts.go b/pkg/consts/consts.go
index cfa73aa373..888e8aa5a7 100644
--- a/pkg/consts/consts.go
+++ b/pkg/consts/consts.go
@@ -24,8 +24,8 @@ import (
 )
 
 var (
-	CortexVersion      = "master" // CORTEX_VERSION
-	CortexVersionMinor = "master" // CORTEX_VERSION_MINOR
+	CortexVersion      = "0.30.0" // CORTEX_VERSION
+	CortexVersionMinor = "0.30"   // CORTEX_VERSION_MINOR
 
 	SingleModelName = "_cortex_default"
 
diff --git a/pkg/cortex/client/cortex/client.py b/pkg/cortex/client/cortex/client.py
index d148904cd6..fbb56469ba 100644
--- a/pkg/cortex/client/cortex/client.py
+++ b/pkg/cortex/client/cortex/client.py
@@ -70,7 +70,7 @@ def create_api(
         Deploy an API.
 
         Args:
-            api_spec: A dictionary defining a single Cortex API. See https://docs.cortex.dev/v/master/ for schema.
+            api_spec: A dictionary defining a single Cortex API. See https://docs.cortex.dev/v/0.30/ for schema.
             predictor: A Cortex Predictor class implementation. Not required for TaskAPI/TrafficSplitter kinds.
             task: A callable class/function implementation. Not required for RealtimeAPI/BatchAPI/TrafficSplitter kinds.
             requirements: A list of PyPI dependencies that will be installed before the predictor class implementation is invoked.
diff --git a/pkg/cortex/client/cortex/consts.py b/pkg/cortex/client/cortex/consts.py
index 316ec3f5b3..1908506304 100644
--- a/pkg/cortex/client/cortex/consts.py
+++ b/pkg/cortex/client/cortex/consts.py
@@ -15,6 +15,6 @@
 
 # Change if PYTHONVERSION changes
 EXPECTED_PYTHON_VERSION = "3.6.9"
-CORTEX_VERSION = "master"  # CORTEX_VERSION
+CORTEX_VERSION = "0.30.0"  # CORTEX_VERSION
 CORTEX_TELEMETRY_SENTRY_DSN = "https://5cea3d2d67194d028f7191fcc6ebca14@sentry.io/1825326"
 CORTEX_TELEMETRY_SENTRY_ENVIRONMENT = "client"
diff --git a/pkg/cortex/client/setup.py b/pkg/cortex/client/setup.py
index 67f0e47f1e..948e704c84 100644
--- a/pkg/cortex/client/setup.py
+++ b/pkg/cortex/client/setup.py
@@ -76,7 +76,7 @@ def run(self):
 
 setup(
     name="cortex",
-    version="master",  # CORTEX_VERSION
+    version="0.30.0",  # CORTEX_VERSION
     description="Model serving at scale",
     author="cortex.dev",
     author_email="dev@cortex.dev",
diff --git a/pkg/cortex/serve/init/bootloader.sh b/pkg/cortex/serve/init/bootloader.sh
index 74cc214c1c..d1fcf69e08 100755
--- a/pkg/cortex/serve/init/bootloader.sh
+++ b/pkg/cortex/serve/init/bootloader.sh
@@ -17,7 +17,7 @@
 set -e
 
 # CORTEX_VERSION
-export EXPECTED_CORTEX_VERSION=master
+export EXPECTED_CORTEX_VERSION=0.30.0
 
 if [ "$CORTEX_VERSION" != "$EXPECTED_CORTEX_VERSION" ]; then
   echo "error: your Cortex operator version ($CORTEX_VERSION) doesn't match your predictor image version ($EXPECTED_CORTEX_VERSION); please update your predictor image by modifying the \`image\` field in your API configuration file (e.g. cortex.yaml) and re-running \`cortex deploy\`, or update your cluster by following the instructions at https://docs.cortex.dev/"
diff --git a/pkg/cortex/serve/setup.py b/pkg/cortex/serve/setup.py
index 1054cac25e..337ba4c07b 100644
--- a/pkg/cortex/serve/setup.py
+++ b/pkg/cortex/serve/setup.py
@@ -23,7 +23,7 @@
 
 setup(
     name="cortex-internal",
-    version="master",  # CORTEX_VERSION
+    version="0.30.0",  # CORTEX_VERSION
     description="Internal package for Cortex containers",
     author="cortex.dev",
     author_email="dev@cortex.dev",
diff --git a/test/e2e/setup.py b/test/e2e/setup.py
index a2a03350c3..9edc95cc97 100644
--- a/test/e2e/setup.py
+++ b/test/e2e/setup.py
@@ -24,7 +24,7 @@
 
 setup(
     name="e2e",
-    version="master",  # CORTEX_VERSION
+    version="0.30.0",  # CORTEX_VERSION
     packages=find_packages(exclude=["tests"]),
     url="https://github.com/cortexlabs/cortex",
     license="Apache License 2.0",
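Every file this patch touches marks its version string with a `CORTEX_VERSION` (or `CORTEX_VERSION_MINOR`) comment, visible above in consts.go, consts.py, and the setup.py files, so a bump like this can be sanity-checked mechanically. A hypothetical reviewer script, not part of this patch:

```python
# verify_version_bump.py - hypothetical helper for reviewing this kind of patch
import pathlib

EXPECTED = "0.30.0"
FILES = [
    "pkg/consts/consts.go",
    "pkg/cortex/client/cortex/consts.py",
    "pkg/cortex/client/setup.py",
    "pkg/cortex/serve/setup.py",
    "test/e2e/setup.py",
]

# flag any CORTEX_VERSION-tagged line that doesn't carry the expected version
for name in FILES:
    for i, line in enumerate(pathlib.Path(name).read_text().splitlines(), start=1):
        if "CORTEX_VERSION" in line and "MINOR" not in line:
            status = "ok" if EXPECTED in line else "STALE"
            print(f"{status}  {name}:{i}  {line.strip()}")
```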