diff --git a/docker/ml-gpu.yml b/docker/ml-gpu.yml
index a5a60eca53..a6b33d8423 100644
--- a/docker/ml-gpu.yml
+++ b/docker/ml-gpu.yml
@@ -8,17 +8,8 @@ version: "3.9"
 
 services:
   triton:
-    # restart: $RESTART_POLICY
-    # This is a custom built of Triton with:
-    # - GRPC/HTTP support
-    # - GPU support
-    # - Tensorflow and ONNX support
-    # This allows us to reduce the image size
-    # Build with `python3 compose.py --backend tensorflow --backend
-    # onnxruntime`
-    # Release (23.04 version):
-    # https://github.com/triton-inference-server/server/releases/tag/v2.33.0
-    image: ghcr.io/openfoodfacts/triton:23.04-gpu
+    restart: $RESTART_POLICY
+    image: nvcr.io/nvidia/tritonserver:24.01-py3
     ports:
       - ${TRITON_EXPOSE_HTTP:-8000}:8000
       - ${TRITON_EXPOSE_GRPC:-8001}:8001
diff --git a/docker/monitor.yml b/docker/monitor.yml
index c4172eb0d5..b8d8d14a61 100644
--- a/docker/monitor.yml
+++ b/docker/monitor.yml
@@ -11,6 +11,7 @@ services:
     depends_on:
       - statsd
   statsd:
+    restart: $RESTART_POLICY
     image: prom/statsd-exporter:v0.22.2
     volumes:
       - ./statsd.conf:/statsd/statsd.conf
@@ -19,6 +20,7 @@
     ports:
       - ${STATSD_EXPOSE:-9102}:9102
   postgres_exporter:
+    restart: $RESTART_POLICY
     image: prometheuscommunity/postgres-exporter:v0.11.1
     environment:
       - DATA_SOURCE_NAME=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:5432/${POSTGRES_DB}?sslmode=disable