From c4177d22180da7c50912236d1ab28ec6b3f39cc6 Mon Sep 17 00:00:00 2001
From: Marco Massenzio
Date: Sat, 19 Aug 2023 15:57:40 -0700
Subject: [PATCH] Updated README Getting Started instructions

Provides guidance to avoid an error when downloading the pre-trained model
---
 .gitignore |  3 ++-
 README.md  | 16 ++++++++++++++--
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/.gitignore b/.gitignore
index 497d87e..71f5188 100644
--- a/.gitignore
+++ b/.gitignore
@@ -160,4 +160,5 @@ cython_debug/
 #.idea/
 
 data/
-wandb/
\ No newline at end of file
+wandb/
+.idea/
diff --git a/README.md b/README.md
index b800f42..b537159 100644
--- a/README.md
+++ b/README.md
@@ -37,6 +37,18 @@ First, we have to install all the libraries listed in `requirements.txt`
 ```bash
 pip install -r requirements.txt
 ```
+
+If you see this error:
+
+> OSError: bigcode/starcoder is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'
+> If this is a private repository, make sure to pass a token having permission to this repo with `use_auth_token` or log in with `huggingface-cli login` and pass `use_auth_token=True`.
+
+it means that you need to authenticate to the Hugging Face API to download the model: sign up for an account and accept the [T&C to use BigCode](https://huggingface.co/bigcode/starcoder); then [obtain an API token](https://huggingface.co/settings/tokens) and use it to log in with the CLI:
+
+```shell
+huggingface-cli login
+```
+
 ## Code generation
 
 The code generation pipeline is as follows
@@ -46,7 +58,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 checkpoint = "bigcode/starcoder"
 device = "cuda" # for GPU usage or "cpu" for CPU usage
 
-tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+tokenizer = AutoTokenizer.from_pretrained(checkpoint, use_auth_token=True)
 # to save memory consider using fp16 or bf16 by specifying torch_dtype=torch.float16 for example
 model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)
@@ -60,7 +72,7 @@ or
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
 checkpoint = "bigcode/starcoder"
 
-model = AutoModelForCausalLM.from_pretrained(checkpoint)
+model = AutoModelForCausalLM.from_pretrained(checkpoint, use_auth_token=True)
 tokenizer = AutoTokenizer.from_pretrained(checkpoint)
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)
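
The `huggingface-cli login` step the patch documents can also be done programmatically, which is convenient in notebooks or CI jobs where an interactive prompt is awkward. Below is a minimal sketch (not part of the patch itself) using the `huggingface_hub` library; the `HF_TOKEN` environment variable name is an assumption chosen here for illustration, not something the patch defines.

```python
# A minimal sketch of programmatic Hugging Face authentication, as an
# alternative to the interactive `huggingface-cli login` step.
# Assumes the API token obtained from https://huggingface.co/settings/tokens
# has been exported in an environment variable named HF_TOKEN
# (the variable name is an illustrative choice).
import os

from huggingface_hub import login

login(token=os.environ["HF_TOKEN"])
```

After `login()` succeeds, the token is cached locally just as with the CLI, so the `from_pretrained(checkpoint, use_auth_token=True)` calls in the patched README can pick it up.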