From acfed440b1789fb9df42a5c056582342c892abf2 Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Tue, 12 Nov 2024 09:00:29 -0700 Subject: [PATCH] Document the host flag in ramalama.conf file Apply suggestions from code review Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com> Signed-off-by: Daniel J Walsh --- docs/ramalama-serve.1.md | 2 +- docs/ramalama.conf | 4 ++++ docs/ramalama.conf.5.md | 8 ++++++-- ramalama/cli.py | 2 +- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/docs/ramalama-serve.1.md b/docs/ramalama-serve.1.md index 8844159..4181e5b 100644 --- a/docs/ramalama-serve.1.md +++ b/docs/ramalama-serve.1.md @@ -39,7 +39,7 @@ Generate specified configuration format for running the AI Model as a service show this help message and exit #### **--host**="0.0.0.0" -ip address to listen +IP address for llama.cpp to listen on. #### **--name**, **-n** Name of the container to run the Model in. diff --git a/docs/ramalama.conf b/docs/ramalama.conf index 34953cf..290a6f9 100644 --- a/docs/ramalama.conf +++ b/docs/ramalama.conf @@ -35,6 +35,10 @@ # #image = "quay.io/ramalama/ramalama:latest" +# IP address for llama.cpp to listen on. +# +#host = "0.0.0.0" + # Specify default port for services to listen on # #port = "8080" diff --git a/docs/ramalama.conf.5.md b/docs/ramalama.conf.5.md index 5ef658d..5b96a12 100644 --- a/docs/ramalama.conf.5.md +++ b/docs/ramalama.conf.5.md @@ -70,8 +70,12 @@ Image to be used when building and pushing --type=car models **engine**="podman" Run RamaLama using the specified container engine. -Valid options (Podman, Docker) -RAMALAMA_CONTAINER_ENGINE environment variable overrides this field. +Valid options are: Podman and Docker +This field can be overridden by the RAMALAMA_CONTAINER_ENGINE environment variable. + +**host**="0.0.0.0" + +IP address for llama.cpp to listen on. 
**image**="quay.io/ramalama/ramalama:latest" diff --git a/ramalama/cli.py b/ramalama/cli.py index 3d0a90b..2d13ec5 100644 --- a/ramalama/cli.py +++ b/ramalama/cli.py @@ -614,7 +614,7 @@ def serve_parser(subparsers): parser = subparsers.add_parser("serve", help="serve REST API on specified AI Model") parser.add_argument("--authfile", help="path of the authentication file") parser.add_argument("-d", "--detach", action="store_true", dest="detach", help="run the container in detached mode") - parser.add_argument("--host", default=config.get('host', "0.0.0.0"), help="ip address to listen") + parser.add_argument("--host", default=config.get('host', "0.0.0.0"), help="IP address for llama.cpp to listen on.") parser.add_argument("-n", "--name", dest="name", help="name of container in which the Model will be run") parser.add_argument( "-p", "--port", default=config.get('port', "8080"), help="port for AI Model server to listen on"