From 7ffce8d7ba0ce8493f262bbbee3feefee963e459 Mon Sep 17 00:00:00 2001
From: Alonso Guevara
Date: Thu, 29 Aug 2024 16:56:34 -0600
Subject: [PATCH] Fix img for autotune (#1060)

* Fix img for autotune

* Add line breaks to tune docs

* More line breaks
---
 .../next-release/patch-20240829223855375571.json |  4 ++++
 docsite/posts/prompt_tuning/auto_prompt_tuning.md | 10 +++++++---
 2 files changed, 11 insertions(+), 3 deletions(-)
 create mode 100644 .semversioner/next-release/patch-20240829223855375571.json

diff --git a/.semversioner/next-release/patch-20240829223855375571.json b/.semversioner/next-release/patch-20240829223855375571.json
new file mode 100644
index 0000000000..941b36f8ec
--- /dev/null
+++ b/.semversioner/next-release/patch-20240829223855375571.json
@@ -0,0 +1,4 @@
+{
+    "type": "patch",
+    "description": "Fix img for auto tune"
+}
diff --git a/docsite/posts/prompt_tuning/auto_prompt_tuning.md b/docsite/posts/prompt_tuning/auto_prompt_tuning.md
index 522ced51ef..c00d2a8900 100644
--- a/docsite/posts/prompt_tuning/auto_prompt_tuning.md
+++ b/docsite/posts/prompt_tuning/auto_prompt_tuning.md
@@ -11,7 +11,7 @@ GraphRAG provides the ability to create domain adapted prompts for the generatio
 These are generated by loading the inputs, splitting them into chunks (text units) and then running a series of LLM invocations and template substitutions to generate the final prompts. We suggest using the default values provided by the script, but in this page you'll find the detail of each in case you want to further explore and tweak the prompt tuning algorithm.

-Figure 1: Auto Tuning Conceptual Diagram.
+Figure 1: Auto Tuning Conceptual Diagram.

 Figure 1: Auto Tuning Conceptual Diagram.
@@ -26,7 +26,9 @@ Before running auto tuning make sure you have already initialized your workspace
 You can run the main script from the command line with various options:
 
 ```bash
-python -m graphrag.prompt_tune [--root ROOT] [--domain DOMAIN] [--method METHOD] [--limit LIMIT] [--language LANGUAGE] [--max-tokens MAX_TOKENS] [--chunk-size CHUNK_SIZE] [--n-subset-max N_SUBSET_MAX] [--k K] [--min-examples-required MIN_EXAMPLES_REQUIRED] [--no-entity-types] [--output OUTPUT]
+python -m graphrag.prompt_tune [--root ROOT] [--domain DOMAIN] [--method METHOD] [--limit LIMIT] [--language LANGUAGE] \
+[--max-tokens MAX_TOKENS] [--chunk-size CHUNK_SIZE] [--n-subset-max N_SUBSET_MAX] [--k K] \
+[--min-examples-required MIN_EXAMPLES_REQUIRED] [--no-entity-types] [--output OUTPUT]
 ```
 
 ## Command-Line Options
@@ -60,7 +62,9 @@ python -m graphrag.prompt_tune [--root ROOT] [--domain DOMAIN] [--method METHOD
 ## Example Usage
 
 ```bash
-python -m graphrag.prompt_tune --root /path/to/project --config /path/to/settings.yaml --domain "environmental news" --method random --limit 10 --language English --max-tokens 2048 --chunk-size 256 --min-examples-required 3 --no-entity-types --output /path/to/output
+python -m graphrag.prompt_tune --root /path/to/project --config /path/to/settings.yaml --domain "environmental news" \
+--method random --limit 10 --language English --max-tokens 2048 --chunk-size 256 --min-examples-required 3 \
+--no-entity-types --output /path/to/output
 ```
 
 or, with minimal configuration (suggested):
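
A note on the wrapped commands this patch introduces: a trailing backslash immediately before the newline is standard shell line continuation, so the multi-line form runs exactly like the original single-line command. A minimal sketch, reusing only the placeholder paths and flags already shown in the patch (not project-specific recommendations):

```bash
# Each trailing backslash joins the line with the next, so the shell sees one command,
# identical to writing it on a single line. No character may follow the backslash.
python -m graphrag.prompt_tune --root /path/to/project --config /path/to/settings.yaml \
    --domain "environmental news" \
    --method random --limit 10 --language English
```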