diff --git a/build/build-image.sh b/build/build-image.sh
index d4b7923d1b..5dd6536f16 100755
--- a/build/build-image.sh
+++ b/build/build-image.sh
@@ -19,7 +19,7 @@ set -euo pipefail
 
 ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. >/dev/null && pwd)"
 
-CORTEX_VERSION=master
+CORTEX_VERSION=0.26.0
 
 image=$1
 dir="${ROOT}/images/${image/-slim}"
diff --git a/build/cli.sh b/build/cli.sh
index d77b7b5d65..4f40422e49 100755
--- a/build/cli.sh
+++ b/build/cli.sh
@@ -19,7 +19,7 @@ set -euo pipefail
 
 ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. >/dev/null && pwd)"
 
-CORTEX_VERSION=master
+CORTEX_VERSION=0.26.0
 
 arg1=${1:-""}
 upload="false"
diff --git a/build/push-image.sh b/build/push-image.sh
index 03e43ccc42..1266f02fac 100755
--- a/build/push-image.sh
+++ b/build/push-image.sh
@@ -17,7 +17,7 @@
 
 set -euo pipefail
 
-CORTEX_VERSION=master
+CORTEX_VERSION=0.26.0
 
 image=$1
diff --git a/docs/clients/install.md b/docs/clients/install.md
index da21628b65..5bb82cbb84 100644
--- a/docs/clients/install.md
+++ b/docs/clients/install.md
@@ -9,10 +9,10 @@
 pip install cortex
 ```
 
-To install or upgrade to a specific version (e.g. v0.25.0):
+To install or upgrade to a specific version (e.g. v0.26.0):
 
 ```bash
-pip install cortex==0.25.0
+pip install cortex==0.26.0
 ```
 
 To upgrade to the latest version:
@@ -25,8 +25,8 @@ pip install --upgrade cortex
 
 ```bash
-# For example to download CLI version 0.25.0 (Note the "v"):
-$ bash -c "$(curl -sS https://raw.githubusercontent.com/cortexlabs/cortex/v0.25.0/get-cli.sh)"
+# For example to download CLI version 0.26.0 (Note the "v"):
+$ bash -c "$(curl -sS https://raw.githubusercontent.com/cortexlabs/cortex/v0.26.0/get-cli.sh)"
 ```
 
 By default, the Cortex CLI is installed at `/usr/local/bin/cortex`. To install the executable elsewhere, export the `CORTEX_INSTALL_PATH` environment variable to your desired location before running the command above.
diff --git a/docs/clients/python.md b/docs/clients/python.md
index 534ab6643b..c300bcfac1 100644
--- a/docs/clients/python.md
+++ b/docs/clients/python.md
@@ -91,7 +91,7 @@ Deploy an API.
 
 **Arguments**:
 
-- `api_spec` - A dictionary defining a single Cortex API. See https://docs.cortex.dev/v/master/ for schema.
+- `api_spec` - A dictionary defining a single Cortex API. See https://docs.cortex.dev/v/0.26/ for schema.
- `predictor` - A Cortex Predictor class implementation. Not required when deploying a traffic splitter.
- `requirements` - A list of PyPI dependencies that will be installed before the predictor class implementation is invoked.
- `conda_packages` - A list of Conda dependencies that will be installed before the predictor class implementation is invoked.
diff --git a/docs/clusters/aws/install.md b/docs/clusters/aws/install.md
index c0040256aa..023f51aadb 100644
--- a/docs/clusters/aws/install.md
+++ b/docs/clusters/aws/install.md
@@ -89,17 +89,17 @@ The docker images used by the Cortex cluster can also be overridden, although th
 
 ```yaml
-image_operator: quay.io/cortexlabs/operator:master
-image_manager: quay.io/cortexlabs/manager:master
-image_downloader: quay.io/cortexlabs/downloader:master
-image_request_monitor: quay.io/cortexlabs/request-monitor:master
-image_cluster_autoscaler: quay.io/cortexlabs/cluster-autoscaler:master
-image_metrics_server: quay.io/cortexlabs/metrics-server:master
-image_inferentia: quay.io/cortexlabs/inferentia:master
-image_neuron_rtd: quay.io/cortexlabs/neuron-rtd:master
-image_nvidia: quay.io/cortexlabs/nvidia:master
-image_fluentd: quay.io/cortexlabs/fluentd:master
-image_statsd: quay.io/cortexlabs/statsd:master
-image_istio_proxy: quay.io/cortexlabs/istio-proxy:master
-image_istio_pilot: quay.io/cortexlabs/istio-pilot:master
+image_operator: quay.io/cortexlabs/operator:0.26.0
+image_manager: quay.io/cortexlabs/manager:0.26.0
+image_downloader: quay.io/cortexlabs/downloader:0.26.0
+image_request_monitor: quay.io/cortexlabs/request-monitor:0.26.0
+image_cluster_autoscaler: quay.io/cortexlabs/cluster-autoscaler:0.26.0
+image_metrics_server: quay.io/cortexlabs/metrics-server:0.26.0
+image_inferentia: quay.io/cortexlabs/inferentia:0.26.0
+image_neuron_rtd: quay.io/cortexlabs/neuron-rtd:0.26.0
+image_nvidia: quay.io/cortexlabs/nvidia:0.26.0
+image_fluentd: quay.io/cortexlabs/fluentd:0.26.0
+image_statsd: quay.io/cortexlabs/statsd:0.26.0
+image_istio_proxy: quay.io/cortexlabs/istio-proxy:0.26.0
+image_istio_pilot: quay.io/cortexlabs/istio-pilot:0.26.0
 ```
diff --git a/docs/clusters/gcp/install.md b/docs/clusters/gcp/install.md
index 6bcfed8153..a8344c27d2 100644
--- a/docs/clusters/gcp/install.md
+++ b/docs/clusters/gcp/install.md
@@ -51,11 +51,11 @@ The docker images used by the Cortex cluster can also be overridden, although th
 
 ```yaml
-image_operator: quay.io/cortexlabs/operator:master
-image_manager: quay.io/cortexlabs/manager:master
-image_downloader: quay.io/cortexlabs/downloader:master
-image_statsd: quay.io/cortexlabs/statsd:master
-image_istio_proxy: quay.io/cortexlabs/istio-proxy:master
-image_istio_pilot: quay.io/cortexlabs/istio-pilot:master
-image_pause: quay.io/cortexlabs/pause:master
+image_operator: quay.io/cortexlabs/operator:0.26.0
+image_manager: quay.io/cortexlabs/manager:0.26.0
+image_downloader: quay.io/cortexlabs/downloader:0.26.0
+image_statsd: quay.io/cortexlabs/statsd:0.26.0
+image_istio_proxy: quay.io/cortexlabs/istio-proxy:0.26.0
+image_istio_pilot: quay.io/cortexlabs/istio-pilot:0.26.0
+image_pause: quay.io/cortexlabs/pause:0.26.0
 ```
diff --git a/docs/workloads/batch/configuration.md b/docs/workloads/batch/configuration.md
index 3204333a21..7f1096bb67 100644
--- a/docs/workloads/batch/configuration.md
+++ b/docs/workloads/batch/configuration.md
@@ -11,7 +11,7 @@
   path: # path to a python file with a PythonPredictor class definition, relative to the Cortex root (required)
   config: # arbitrary dictionary passed to the constructor of the Predictor (can be overridden by config passed in job submission) (optional)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:master or quay.io/cortexlabs/python-predictor-gpu:master based on compute)
+  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:0.26.0 or quay.io/cortexlabs/python-predictor-gpu:0.26.0 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   shm_size: # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
@@ -46,8 +46,8 @@
   batch_interval: # the maximum amount of time to spend waiting for additional requests before running inference on the batch of requests
   config: # arbitrary dictionary passed to the constructor of the Predictor (can be overridden by config passed in job submission) (optional)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:master)
-  tensorflow_serving_image: # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-gpu:master or quay.io/cortexlabs/tensorflow-serving-cpu:master based on compute)
+  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:0.26.0)
+  tensorflow_serving_image: # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-gpu:0.26.0 or quay.io/cortexlabs/tensorflow-serving-cpu:0.26.0 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   shm_size: # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
@@ -77,7 +77,7 @@
   ...
   config: # arbitrary dictionary passed to the constructor of the Predictor (can be overridden by config passed in job submission) (optional)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-gpu:master or quay.io/cortexlabs/onnx-predictor-cpu:master based on compute)
+  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-gpu:0.26.0 or quay.io/cortexlabs/onnx-predictor-cpu:0.26.0 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   shm_size: # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
diff --git a/docs/workloads/batch/predictors.md b/docs/workloads/batch/predictors.md
index 2dbc76ac21..f1e1fdcbb7 100644
--- a/docs/workloads/batch/predictors.md
+++ b/docs/workloads/batch/predictors.md
@@ -143,7 +143,7 @@ class TensorFlowPredictor:
 ```
 
-Cortex provides a `tensorflow_client` to your Predictor's constructor. `tensorflow_client` is an instance of [TensorFlowClient](https://github.com/cortexlabs/cortex/tree/master/pkg/cortex/serve/cortex_internal/lib/client/tensorflow.py) that manages a connection to a TensorFlow Serving container to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `tensorflow_client.predict()` to make an inference with your exported TensorFlow model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
+Cortex provides a `tensorflow_client` to your Predictor's constructor. `tensorflow_client` is an instance of [TensorFlowClient](https://github.com/cortexlabs/cortex/tree/0.26/pkg/cortex/serve/cortex_internal/lib/client/tensorflow.py) that manages a connection to a TensorFlow Serving container to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `tensorflow_client.predict()` to make an inference with your exported TensorFlow model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
 
 When multiple models are defined using the Predictor's `models` field, the `tensorflow_client.predict()` method expects a second argument `model_name` which must hold the name of the model that you want to use for inference (for example: `self.client.predict(payload, "text-generator")`).
@@ -202,6 +202,6 @@ class ONNXPredictor:
 ```
 
-Cortex provides an `onnx_client` to your Predictor's constructor. `onnx_client` is an instance of [ONNXClient](https://github.com/cortexlabs/cortex/tree/master/pkg/cortex/serve/cortex_internal/lib/client/onnx.py) that manages an ONNX Runtime session to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `onnx_client.predict()` to make an inference with your exported ONNX model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
+Cortex provides an `onnx_client` to your Predictor's constructor. `onnx_client` is an instance of [ONNXClient](https://github.com/cortexlabs/cortex/tree/0.26/pkg/cortex/serve/cortex_internal/lib/client/onnx.py) that manages an ONNX Runtime session to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `onnx_client.predict()` to make an inference with your exported ONNX model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
 
 When multiple models are defined using the Predictor's `models` field, the `onnx_client.predict()` method expects a second argument `model_name` which must hold the name of the model that you want to use for inference (for example: `self.client.predict(model_input, "text-generator")`).
diff --git a/docs/workloads/dependencies/images.md b/docs/workloads/dependencies/images.md
index 4372e2be14..1f155cf13f 100644
--- a/docs/workloads/dependencies/images.md
+++ b/docs/workloads/dependencies/images.md
@@ -11,19 +11,19 @@ mkdir my-api && cd my-api && touch Dockerfile
 ```
 
 Cortex's base Docker images are listed below. Depending on the Cortex Predictor and compute type specified in your API configuration, choose one of these images to use as the base for your Docker image:
 
-* Python Predictor (CPU): `quay.io/cortexlabs/python-predictor-cpu-slim:master`
+* Python Predictor (CPU): `quay.io/cortexlabs/python-predictor-cpu-slim:0.26.0`
 * Python Predictor (GPU): choose one of the following:
-  * `quay.io/cortexlabs/python-predictor-gpu-slim:master-cuda10.0-cudnn7`
-  * `quay.io/cortexlabs/python-predictor-gpu-slim:master-cuda10.1-cudnn7`
-  * `quay.io/cortexlabs/python-predictor-gpu-slim:master-cuda10.1-cudnn8`
-  * `quay.io/cortexlabs/python-predictor-gpu-slim:master-cuda10.2-cudnn7`
-  * `quay.io/cortexlabs/python-predictor-gpu-slim:master-cuda10.2-cudnn8`
-  * `quay.io/cortexlabs/python-predictor-gpu-slim:master-cuda11.0-cudnn8`
-  * `quay.io/cortexlabs/python-predictor-gpu-slim:master-cuda11.1-cudnn8`
-* Python Predictor (Inferentia): `quay.io/cortexlabs/python-predictor-inf-slim:master`
-* TensorFlow Predictor (CPU, GPU, Inferentia): `quay.io/cortexlabs/tensorflow-predictor-slim:master`
-* ONNX Predictor (CPU): `quay.io/cortexlabs/onnx-predictor-cpu-slim:master`
-* ONNX Predictor (GPU): `quay.io/cortexlabs/onnx-predictor-gpu-slim:master`
+  * `quay.io/cortexlabs/python-predictor-gpu-slim:0.26.0-cuda10.0-cudnn7`
+  * `quay.io/cortexlabs/python-predictor-gpu-slim:0.26.0-cuda10.1-cudnn7`
+  * `quay.io/cortexlabs/python-predictor-gpu-slim:0.26.0-cuda10.1-cudnn8`
+  * `quay.io/cortexlabs/python-predictor-gpu-slim:0.26.0-cuda10.2-cudnn7`
+  * `quay.io/cortexlabs/python-predictor-gpu-slim:0.26.0-cuda10.2-cudnn8`
+  * `quay.io/cortexlabs/python-predictor-gpu-slim:0.26.0-cuda11.0-cudnn8`
+  * `quay.io/cortexlabs/python-predictor-gpu-slim:0.26.0-cuda11.1-cudnn8`
+* Python Predictor (Inferentia): `quay.io/cortexlabs/python-predictor-inf-slim:0.26.0`
+* TensorFlow Predictor (CPU, GPU, Inferentia): `quay.io/cortexlabs/tensorflow-predictor-slim:0.26.0`
+* ONNX Predictor (CPU): `quay.io/cortexlabs/onnx-predictor-cpu-slim:0.26.0`
+* ONNX Predictor (GPU): `quay.io/cortexlabs/onnx-predictor-gpu-slim:0.26.0`
 
 Note: the images listed above use the `-slim` suffix; Cortex's default API images are not `-slim`, since they have additional dependencies installed to cover common use cases. If you are building your own Docker image, starting with a `-slim` Predictor image will result in a smaller image size.
@@ -33,7 +33,7 @@ The sample `Dockerfile` below inherits from Cortex's Python CPU serving image, a
 
 ```dockerfile
 # Dockerfile
-FROM quay.io/cortexlabs/python-predictor-cpu-slim:master
+FROM quay.io/cortexlabs/python-predictor-cpu-slim:0.26.0
 
 RUN apt-get update \
   && apt-get install -y tree \
diff --git a/docs/workloads/realtime/configuration.md b/docs/workloads/realtime/configuration.md
index 907ca5e0ab..88c8a8d177 100644
--- a/docs/workloads/realtime/configuration.md
+++ b/docs/workloads/realtime/configuration.md
@@ -25,7 +25,7 @@
   threads_per_process: # the number of threads per process (default: 1)
   config: # arbitrary dictionary passed to the constructor of the Predictor (optional)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:master or quay.io/cortexlabs/python-predictor-gpu:master based on compute)
+  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/python-predictor-cpu:0.26.0 or quay.io/cortexlabs/python-predictor-gpu:0.26.0 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   shm_size: # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
@@ -81,8 +81,8 @@
   threads_per_process: # the number of threads per process (default: 1)
   config: # arbitrary dictionary passed to the constructor of the Predictor (optional)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:master)
-  tensorflow_serving_image: # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-gpu:master or quay.io/cortexlabs/tensorflow-serving-cpu:master based on compute)
+  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/tensorflow-predictor:0.26.0)
+  tensorflow_serving_image: # docker image to use for the TensorFlow Serving container (default: quay.io/cortexlabs/tensorflow-serving-gpu:0.26.0 or quay.io/cortexlabs/tensorflow-serving-cpu:0.26.0 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   shm_size: # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
@@ -133,7 +133,7 @@
   threads_per_process: # the number of threads per process (default: 1)
   config: # arbitrary dictionary passed to the constructor of the Predictor (optional)
   python_path: # path to the root of your Python folder that will be appended to PYTHONPATH (default: folder containing cortex.yaml)
-  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-gpu:master or quay.io/cortexlabs/onnx-predictor-cpu:master based on compute)
+  image: # docker image to use for the Predictor (default: quay.io/cortexlabs/onnx-predictor-gpu:0.26.0 or quay.io/cortexlabs/onnx-predictor-cpu:0.26.0 based on compute)
   env: # dictionary of environment variables
   log_level: # log level that can be "debug", "info", "warning" or "error" (default: "info")
   shm_size: # size of shared memory (/dev/shm) for sharing data between multiple processes, e.g. 64Mi or 1Gi (default: Null)
diff --git a/docs/workloads/realtime/predictors.md b/docs/workloads/realtime/predictors.md
index a409808c5e..5f4c1c0467 100644
--- a/docs/workloads/realtime/predictors.md
+++ b/docs/workloads/realtime/predictors.md
@@ -119,7 +119,7 @@ class PythonPredictor:
 ```
 
-When explicit model paths are specified in the Python predictor's API configuration, Cortex provides a `python_client` to your Predictor's constructor. `python_client` is an instance of [PythonClient](https://github.com/cortexlabs/cortex/tree/master/pkg/cortex/serve/cortex_internal/lib/client/python.py) that is used to load model(s) (it calls the `load_model()` method of your predictor, which must be defined when using explicit model paths). It should be saved as an instance variable in your Predictor, and your `predict()` function should call `python_client.get_model()` to load your model for inference. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
+When explicit model paths are specified in the Python predictor's API configuration, Cortex provides a `python_client` to your Predictor's constructor. `python_client` is an instance of [PythonClient](https://github.com/cortexlabs/cortex/tree/0.26/pkg/cortex/serve/cortex_internal/lib/client/python.py) that is used to load model(s) (it calls the `load_model()` method of your predictor, which must be defined when using explicit model paths). It should be saved as an instance variable in your Predictor, and your `predict()` function should call `python_client.get_model()` to load your model for inference. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
 
 When multiple models are defined using the Predictor's `models` field, the `python_client.get_model()` method expects an argument `model_name` which must hold the name of the model that you want to load (for example: `self.client.get_model("text-generator")`). There is also an optional second argument to specify the model version.
@@ -189,7 +189,7 @@ class TensorFlowPredictor:
 ```
 
-Cortex provides a `tensorflow_client` to your Predictor's constructor. `tensorflow_client` is an instance of [TensorFlowClient](https://github.com/cortexlabs/cortex/tree/master/pkg/cortex/serve/cortex_internal/lib/client/tensorflow.py) that manages a connection to a TensorFlow Serving container to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `tensorflow_client.predict()` to make an inference with your exported TensorFlow model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
+Cortex provides a `tensorflow_client` to your Predictor's constructor. `tensorflow_client` is an instance of [TensorFlowClient](https://github.com/cortexlabs/cortex/tree/0.26/pkg/cortex/serve/cortex_internal/lib/client/tensorflow.py) that manages a connection to a TensorFlow Serving container to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `tensorflow_client.predict()` to make an inference with your exported TensorFlow model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
 
 When multiple models are defined using the Predictor's `models` field, the `tensorflow_client.predict()` method expects a second argument `model_name` which must hold the name of the model that you want to use for inference (for example: `self.client.predict(payload, "text-generator")`). There is also an optional third argument to specify the model version.
@@ -259,7 +259,7 @@ class ONNXPredictor:
 ```
 
-Cortex provides an `onnx_client` to your Predictor's constructor. `onnx_client` is an instance of [ONNXClient](https://github.com/cortexlabs/cortex/tree/master/pkg/cortex/serve/cortex_internal/lib/client/onnx.py) that manages an ONNX Runtime session to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `onnx_client.predict()` to make an inference with your exported ONNX model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
+Cortex provides an `onnx_client` to your Predictor's constructor. `onnx_client` is an instance of [ONNXClient](https://github.com/cortexlabs/cortex/tree/0.26/pkg/cortex/serve/cortex_internal/lib/client/onnx.py) that manages an ONNX Runtime session to make predictions using your model. It should be saved as an instance variable in your Predictor, and your `predict()` function should call `onnx_client.predict()` to make an inference with your exported ONNX model. Preprocessing of the JSON payload and postprocessing of predictions can be implemented in your `predict()` function as well.
 
 When multiple models are defined using the Predictor's `models` field, the `onnx_client.predict()` method expects a second argument `model_name` which must hold the name of the model that you want to use for inference (for example: `self.client.predict(model_input, "text-generator")`). There is also an optional third argument to specify the model version.
diff --git a/get-cli.sh b/get-cli.sh
index c2e71920eb..5c834790eb 100755
--- a/get-cli.sh
+++ b/get-cli.sh
@@ -16,7 +16,7 @@
 
 set -e
 
-CORTEX_VERSION_BRANCH_STABLE=master
+CORTEX_VERSION_BRANCH_STABLE=0.26.0
 CORTEX_INSTALL_PATH="${CORTEX_INSTALL_PATH:-/usr/local/bin/cortex}"
 
 # replace ~ with the home directory path
diff --git a/manager/check_cortex_version.sh b/manager/check_cortex_version.sh
index 4d36e470e2..c471ba5df0 100755
--- a/manager/check_cortex_version.sh
+++ b/manager/check_cortex_version.sh
@@ -16,7 +16,7 @@
 
 set -e
 
-CORTEX_VERSION=master
+CORTEX_VERSION=0.26.0
 
 if [ "$CORTEX_VERSION" != "$CORTEX_CLI_VERSION" ]; then
   echo "error: your CLI version ($CORTEX_CLI_VERSION) doesn't match your Cortex manager image version ($CORTEX_VERSION); please update your CLI (pip install cortex==$CORTEX_VERSION), or update your Cortex manager image by modifying the value for \`image_manager\` in your cluster configuration file and running \`cortex cluster configure --config cluster.yaml\` (update other image paths in cluster.yaml as well if necessary)"
diff --git a/manager/debug.sh b/manager/debug.sh
index 0b292b5fed..49c2089499 100755
--- a/manager/debug.sh
+++ b/manager/debug.sh
@@ -16,7 +16,7 @@
 
 set +e
 
-CORTEX_VERSION_MINOR=master
+CORTEX_VERSION_MINOR=0.26
 
 debug_out_path="$1"
 mkdir -p "$(dirname "$debug_out_path")"
diff --git a/manager/debug_gcp.sh b/manager/debug_gcp.sh
index 5dbbfc3632..23bacec79a 100755
--- a/manager/debug_gcp.sh
+++ b/manager/debug_gcp.sh
@@ -16,7 +16,7 @@
 
 set +e
 
-CORTEX_VERSION_MINOR=master
+CORTEX_VERSION_MINOR=0.26
 
 debug_out_path="$1"
 mkdir -p "$(dirname "$debug_out_path")"
diff --git a/manager/info.sh b/manager/info.sh
index 0dc75895da..286c169116 100755
--- a/manager/info.sh
+++ b/manager/info.sh
@@ -16,7 +16,7 @@
 
 set -eo pipefail
 
-CORTEX_VERSION_MINOR=master
+CORTEX_VERSION_MINOR=0.26
 
 function get_operator_endpoint() {
   kubectl -n=istio-system get service ingressgateway-operator -o json | tr -d '[:space:]' | sed 's/.*{\"hostname\":\"\(.*\)\".*/\1/'
diff --git a/manager/info_gcp.sh b/manager/info_gcp.sh
index 218ae8f156..9621c4fcbf 100755
--- a/manager/info_gcp.sh
+++ b/manager/info_gcp.sh
@@ -16,7 +16,7 @@
 
 set -eo pipefail
 
-CORTEX_VERSION_MINOR=master
+CORTEX_VERSION_MINOR=0.26
 
 function get_operator_endpoint() {
   kubectl -n=istio-system get service ingressgateway-operator -o json | tr -d '[:space:]' | sed 's/.*{\"ip\":\"\(.*\)\".*/\1/'
diff --git a/manager/install.sh b/manager/install.sh
index 9bf8ae22c8..ff0265b656 100755
--- a/manager/install.sh
+++ b/manager/install.sh
@@ -16,8 +16,8 @@
 
 set -eo pipefail
 
-export CORTEX_VERSION=master
-export CORTEX_VERSION_MINOR=master
+export CORTEX_VERSION=0.26.0
+export CORTEX_VERSION_MINOR=0.26
 EKSCTL_TIMEOUT=45m
 
 mkdir /workspace
diff --git a/manager/refresh.sh b/manager/refresh.sh
index 42595008c4..7ce1128987 100755
--- a/manager/refresh.sh
+++ b/manager/refresh.sh
@@ -16,7 +16,7 @@
 
 set -e
 
-CORTEX_VERSION_MINOR=master
+CORTEX_VERSION_MINOR=0.26
 
 cluster_config_out_path="$1"
 mkdir -p "$(dirname "$cluster_config_out_path")"
diff --git a/pkg/consts/consts.go b/pkg/consts/consts.go
index 66af3e768a..bca1a44d51 100644
--- a/pkg/consts/consts.go
+++ b/pkg/consts/consts.go
@@ -24,8 +24,8 @@ import (
 )
 
 var (
-	CortexVersion      = "master" // CORTEX_VERSION
-	CortexVersionMinor = "master" // CORTEX_VERSION_MINOR
+	CortexVersion      = "0.26.0" // CORTEX_VERSION
+	CortexVersionMinor = "0.26" // CORTEX_VERSION_MINOR
 
 	SingleModelName = "_cortex_default"
diff --git a/pkg/cortex/client/cortex/client.py b/pkg/cortex/client/cortex/client.py
index e1af803e5a..67af9ead47 100644
--- a/pkg/cortex/client/cortex/client.py
+++ b/pkg/cortex/client/cortex/client.py
@@ -59,7 +59,7 @@ def create_api(
         Deploy an API.
 
         Args:
-            api_spec: A dictionary defining a single Cortex API. See https://docs.cortex.dev/v/master/ for schema.
+            api_spec: A dictionary defining a single Cortex API. See https://docs.cortex.dev/v/0.26/ for schema.
             predictor: A Cortex Predictor class implementation. Not required when deploying a traffic splitter.
             requirements: A list of PyPI dependencies that will be installed before the predictor class implementation is invoked.
             conda_packages: A list of Conda dependencies that will be installed before the predictor class implementation is invoked.
diff --git a/pkg/cortex/client/setup.py b/pkg/cortex/client/setup.py
index 25253854da..5fc6d7bb43 100644
--- a/pkg/cortex/client/setup.py
+++ b/pkg/cortex/client/setup.py
@@ -78,7 +78,7 @@ def run(self):
 
 setup(
     name="cortex",
-    version="master",  # CORTEX_VERSION
+    version="0.26.0",  # CORTEX_VERSION
     description="Run inference at scale",
     author="cortex.dev",
     author_email="dev@cortex.dev",
diff --git a/pkg/cortex/serve/init/bootloader.sh b/pkg/cortex/serve/init/bootloader.sh
index 3f12a3da01..f58ed0d94d 100755
--- a/pkg/cortex/serve/init/bootloader.sh
+++ b/pkg/cortex/serve/init/bootloader.sh
@@ -17,7 +17,7 @@
 set -e
 
 # CORTEX_VERSION
-export EXPECTED_CORTEX_VERSION=master
+export EXPECTED_CORTEX_VERSION=0.26.0
 
 if [ "$CORTEX_VERSION" != "$EXPECTED_CORTEX_VERSION" ]; then
   echo "error: your Cortex operator version ($CORTEX_VERSION) doesn't match your predictor image version ($EXPECTED_CORTEX_VERSION); please update your predictor image by modifying the \`image\` field in your API configuration file (e.g. cortex.yaml) and re-running \`cortex deploy\`, or update your cluster by following the instructions at https://docs.cortex.dev/"
diff --git a/pkg/cortex/serve/setup.py b/pkg/cortex/serve/setup.py
index 036c5faa53..1d733af597 100644
--- a/pkg/cortex/serve/setup.py
+++ b/pkg/cortex/serve/setup.py
@@ -23,7 +23,7 @@
 
 setup(
     name="cortex-internal",
-    version="master",  # CORTEX_VERSION
+    version="0.26.0",  # CORTEX_VERSION
     description="Internal package for Cortex containers",
     author="cortex.dev",
     author_email="dev@cortex.dev",