rugpt_xl.py
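"""Benchmark ruGPT-3 XL text generation.

Loads the ai-forever/rugpt3xl model through the ru-gpts RuGPT3XL wrapper,
generates a completion for a chat-style prompt, and prints the model load
time, generation time, and tokens per second.
"""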
import torch
import time
import os
from transformers import GPT2Tokenizer, __version__
if not os.path.exists("ru_gpts"):
    raise Exception("Folder `ru_gpts` not found! Please run `git clone https://github.com/EvilFreelancer/ru-gpts.git ru_gpts` first")
from ru_gpts.src.xl_wrapper import RuGPT3XL
name = 'ai-forever/rugpt3xl'
# Checking the version of transformers
print("Transformers version:", __version__)
# Start timing the model load
start_time = time.time()
# Loading the model and tokenizer
model = RuGPT3XL.from_pretrained(name, seq_len=1024)
tokenizer = GPT2Tokenizer.from_pretrained(name)
# Model load time
model_load_time = time.time() - start_time
# Sample texts
system_prompt = "### System:\nYou are an AI that follows instructions extremely well. Help as much as you can. Remember, be safe, and don't do anything illegal.\n\n"
message = "Write me a poem please"
prompt = f"{system_prompt}### User: {message}\n\n### Assistant: "
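# Note: the prompt follows an Orca-style chat template (### System / ### User /
# ### Assistant). rugpt3xl is a base language model, so this template is a
# convention of this script rather than a format the model was tuned on.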
# Start timing generation
generation_start_time = time.time()
# Mixed-precision inference on the GPU
with torch.autocast('cuda'):
    output = model.generate(
        prompt,
        max_length=1024,
        do_sample=True,           # sample rather than decode greedily
        top_k=20,                 # restrict sampling to the 20 most likely tokens
        top_p=0.95,               # nucleus sampling threshold
        repetition_penalty=1.1,   # mildly discourage repeated tokens
        early_stopping=False,
        num_beams=1,              # plain sampling, no beam search
        num_beam_groups=1,
        num_return_sequences=1,
        temperature=1.0,
    )
print(output)
generation_time = time.time() - generation_start_time # Generation time
tokens_per_second = len(tokenizer.encode(output[0])) / generation_time  # Tokens per second over the full returned sequence (prompt included)
# Print results
print(f"Model loading time: {model_load_time:.2f} seconds")
print(f"Generation time: {generation_time:.2f} seconds")
print(f"Tokens per second: {tokens_per_second:.1f}")
# time.sleep(10)
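# Example invocation (a sketch, assuming a CUDA GPU, the ru_gpts checkout
# described above, and the XL wrapper's DeepSpeed dependencies installed):
#   python rugpt_xl.py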