Commit
* plt update, test=model
* plt update, test=model
* plt update, test=model
* plt update testing reporter, test=model
* plt update testing reporter, test=model
* plt update testing reporter, test=model
* plt update test reporter, test=model
* plt update test reporter, test=model
* plt update test reporter, test=model
* plt update test reporter, test=model
* plt update test reporter, test=model
* plt update test reporter, test=model
* plt add pr_info
* plt fix ocr case, test=model
* plt fix ocr case, test=model
* plt add nlp case, test=model
* plt add nlp case, test=model
* plt add nlp case, test=model
* plt add nlp case, test=model
* plt add nlp case, test=model
* plt add more nlp case, test=model
* plt add more nlp case, test=model
* plt add nlp, test=model
* plt fix case, test=model
* fix plt builder data, test=model
* fix plt builder data, test=model
* fix plt builder data, test=model
* plt add nlp case, test=model
* plt add nlp case, test=model
* plt add nlp case, test=model
* plt add nlp case, test=model
* plt add nlp case, test=model
* plt add nlp case, test=model
* plt add nlp case, test=model
* plt add nlp case, test=model
* plt add nlp case, test=model
Showing 248 changed files with 6,001 additions and 12 deletions.
15 changes: 15 additions & 0 deletions
framework/e2e/PaddleLT_new/layerNLPcase/debug/case_bug/transformers/blenderbot/__init__.py
@@ -0,0 +1,15 @@
import os
import glob

# Directory containing this file
current_dir = os.path.dirname(__file__)

# Collect the paths of all .py files in this directory
py_files = glob.glob(os.path.join(current_dir, "*.py"))

# Dynamically import every .py file
for py_file in py_files:
    # File name without the .py extension
    module_name = os.path.basename(py_file)[:-3]
    # Import the module
    __import__("layerNLPcase.debug.case_bug.transformers.blenderbot." + module_name, globals(), locals(), [])
35 changes: 35 additions & 0 deletions
...new/layerNLPcase/debug/case_bug/transformers/blenderbot/blenderbot_model_blenderbot_3B.py
@@ -0,0 +1,35 @@
import paddle
import numpy as np
from paddlenlp.transformers import BlenderbotModel, BlenderbotTokenizer


def LayerCase():
    """Model zoo intermediate state"""
    model = BlenderbotModel.from_pretrained("blenderbot-3B")
    return model


def create_inputspec():
    inputspec = (
        paddle.static.InputSpec(shape=(-1, 15), dtype=paddle.float32, stop_gradient=False),
        paddle.static.InputSpec(shape=(-1, 15), dtype=paddle.float32, stop_gradient=False),
    )
    return inputspec


def create_tensor_inputs():
    tokenizer = BlenderbotTokenizer.from_pretrained("blenderbot-3B")
    inputs_dict = tokenizer(
        "My friends are cool but they eat too many carbs.", return_attention_mask=True, return_token_type_ids=False
    )
    inputs = tuple(paddle.to_tensor([v], stop_gradient=False) for (k, v) in inputs_dict.items())
    return inputs


def create_numpy_inputs():
    tokenizer = BlenderbotTokenizer.from_pretrained("blenderbot-3B")
    inputs_dict = tokenizer(
        "My friends are cool but they eat too many carbs.", return_attention_mask=True, return_token_type_ids=False
    )
    inputs = tuple(np.array([v]) for (k, v) in inputs_dict.items())
    return inputs
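Each model case file below follows the same four-function contract: LayerCase builds the pretrained network, create_inputspec declares static-graph input specs, and create_tensor_inputs / create_numpy_inputs tokenize a fixed sentence into Paddle tensors or NumPy arrays. A minimal, hedged sketch of how a harness might exercise such a case in dynamic-graph mode (assumed usage, not part of this diff; it downloads the blenderbot-3B weights):

# Hedged sketch of assumed harness code, not the PaddleLT engine itself.
import paddle
from layerNLPcase.debug.case_bug.transformers.blenderbot import (
    blenderbot_model_blenderbot_3B as case,
)

layer = case.LayerCase()              # BlenderbotModel with pretrained weights
inputs = case.create_tensor_inputs()  # (input_ids, attention_mask) as paddle tensors
layer.eval()
with paddle.no_grad():
    outputs = layer(*inputs)          # plain dynamic-graph forward pass
print(type(outputs))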
15 changes: 15 additions & 0 deletions
framework/e2e/PaddleLT_new/layerNLPcase/debug/case_bug/transformers/ernie/__init__.py
@@ -0,0 +1,15 @@
import os
import glob

# Directory containing this file
current_dir = os.path.dirname(__file__)

# Collect the paths of all .py files in this directory
py_files = glob.glob(os.path.join(current_dir, "*.py"))

# Dynamically import every .py file
for py_file in py_files:
    # File name without the .py extension
    module_name = os.path.basename(py_file)[:-3]
    # Import the module
    __import__("layerNLPcase.debug.case_bug.transformers.ernie." + module_name, globals(), locals(), [])
29 changes: 29 additions & 0 deletions
...w/layerNLPcase/debug/case_bug/transformers/ernie/ernie_model_ernie_3_0_tiny_mini_v2_en.py
@@ -0,0 +1,29 @@
import paddle
import numpy as np
from paddlenlp.transformers import ErnieModel, ErnieTokenizer


def LayerCase():
    """Model zoo intermediate state"""
    model = ErnieModel.from_pretrained('ernie-3.0-tiny-mini-v2-en')
    return model


def create_inputspec():
    inputspec = (
        paddle.static.InputSpec(shape=(-1, 15), dtype=paddle.float32, stop_gradient=False),
        paddle.static.InputSpec(shape=(-1, 15), dtype=paddle.float32, stop_gradient=False),
    )
    return inputspec


def create_tensor_inputs():
    tokenizer = ErnieTokenizer.from_pretrained('ernie-3.0-tiny-mini-v2-en')
    inputs_dict = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
    inputs = tuple(paddle.to_tensor([v], stop_gradient=False) for (k, v) in inputs_dict.items())
    return inputs


def create_numpy_inputs():
    tokenizer = ErnieTokenizer.from_pretrained('ernie-3.0-tiny-mini-v2-en')
    inputs_dict = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
    inputs = tuple(np.array([v]) for (k, v) in inputs_dict.items())
    return inputs
29 changes: 29 additions & 0 deletions
...rNLPcase/debug/case_bug/transformers/ernie/ernie_model_rocketqa_v1_marco_cross_encoder.py
@@ -0,0 +1,29 @@
import paddle
import numpy as np
from paddlenlp.transformers import ErnieModel, ErnieTokenizer


def LayerCase():
    """Model zoo intermediate state"""
    model = ErnieModel.from_pretrained('rocketqa-v1-marco-cross-encoder')
    return model


def create_inputspec():
    inputspec = (
        paddle.static.InputSpec(shape=(-1, 15), dtype=paddle.float32, stop_gradient=False),
        paddle.static.InputSpec(shape=(-1, 15), dtype=paddle.float32, stop_gradient=False),
    )
    return inputspec


def create_tensor_inputs():
    tokenizer = ErnieTokenizer.from_pretrained('rocketqa-v1-marco-cross-encoder')
    inputs_dict = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
    inputs = tuple(paddle.to_tensor([v], stop_gradient=False) for (k, v) in inputs_dict.items())
    return inputs


def create_numpy_inputs():
    tokenizer = ErnieTokenizer.from_pretrained('rocketqa-v1-marco-cross-encoder')
    inputs_dict = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
    inputs = tuple(np.array([v]) for (k, v) in inputs_dict.items())
    return inputs
29 changes: 29 additions & 0 deletions
...erNLPcase/debug/case_bug/transformers/ernie/ernie_model_rocketqa_v1_marco_para_encoder.py
@@ -0,0 +1,29 @@
import paddle
import numpy as np
from paddlenlp.transformers import ErnieModel, ErnieTokenizer


def LayerCase():
    """Model zoo intermediate state"""
    model = ErnieModel.from_pretrained('rocketqa-v1-marco-para-encoder')
    return model


def create_inputspec():
    inputspec = (
        paddle.static.InputSpec(shape=(-1, 15), dtype=paddle.float32, stop_gradient=False),
        paddle.static.InputSpec(shape=(-1, 15), dtype=paddle.float32, stop_gradient=False),
    )
    return inputspec


def create_tensor_inputs():
    tokenizer = ErnieTokenizer.from_pretrained('rocketqa-v1-marco-para-encoder')
    inputs_dict = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
    inputs = tuple(paddle.to_tensor([v], stop_gradient=False) for (k, v) in inputs_dict.items())
    return inputs


def create_numpy_inputs():
    tokenizer = ErnieTokenizer.from_pretrained('rocketqa-v1-marco-para-encoder')
    inputs_dict = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
    inputs = tuple(np.array([v]) for (k, v) in inputs_dict.items())
    return inputs
29 changes: 29 additions & 0 deletions
...rNLPcase/debug/case_bug/transformers/ernie/ernie_model_rocketqa_v1_marco_query_encoder.py
@@ -0,0 +1,29 @@
import paddle
import numpy as np
from paddlenlp.transformers import ErnieModel, ErnieTokenizer


def LayerCase():
    """Model zoo intermediate state"""
    model = ErnieModel.from_pretrained('rocketqa-v1-marco-query-encoder')
    return model


def create_inputspec():
    inputspec = (
        paddle.static.InputSpec(shape=(-1, 15), dtype=paddle.float32, stop_gradient=False),
        paddle.static.InputSpec(shape=(-1, 15), dtype=paddle.float32, stop_gradient=False),
    )
    return inputspec


def create_tensor_inputs():
    tokenizer = ErnieTokenizer.from_pretrained('rocketqa-v1-marco-query-encoder')
    inputs_dict = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
    inputs = tuple(paddle.to_tensor([v], stop_gradient=False) for (k, v) in inputs_dict.items())
    return inputs


def create_numpy_inputs():
    tokenizer = ErnieTokenizer.from_pretrained('rocketqa-v1-marco-query-encoder')
    inputs_dict = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
    inputs = tuple(np.array([v]) for (k, v) in inputs_dict.items())
    return inputs
15 changes: 15 additions & 0 deletions
framework/e2e/PaddleLT_new/layerNLPcase/debug/case_bug/transformers/fnet/__init__.py
@@ -0,0 +1,15 @@
import os
import glob

# Directory containing this file
current_dir = os.path.dirname(__file__)

# Collect the paths of all .py files in this directory
py_files = glob.glob(os.path.join(current_dir, "*.py"))

# Dynamically import every .py file
for py_file in py_files:
    # File name without the .py extension
    module_name = os.path.basename(py_file)[:-3]
    # Import the module
    __import__("layerNLPcase.debug.case_bug.transformers.fnet." + module_name, globals(), locals(), [])
30 changes: 30 additions & 0 deletions
...k/e2e/PaddleLT_new/layerNLPcase/debug/case_bug/transformers/fnet/fnet_model_fnet_large.py
@@ -0,0 +1,30 @@
import paddle
import numpy as np
from paddlenlp.transformers.fnet.modeling import FNetModel
from paddlenlp.transformers.fnet.tokenizer import FNetTokenizer


def LayerCase():
    """Model zoo intermediate state"""
    model = FNetModel.from_pretrained('fnet-large')
    return model


def create_inputspec():
    inputspec = (
        paddle.static.InputSpec(shape=(-1, 15), dtype=paddle.float32, stop_gradient=False),
        paddle.static.InputSpec(shape=(-1, 15), dtype=paddle.float32, stop_gradient=False),
    )
    return inputspec


def create_tensor_inputs():
    tokenizer = FNetTokenizer.from_pretrained('fnet-large')
    inputs_dict = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
    inputs = tuple(paddle.to_tensor([v], stop_gradient=False) for (k, v) in inputs_dict.items())
    return inputs


def create_numpy_inputs():
    tokenizer = FNetTokenizer.from_pretrained('fnet-large')
    inputs_dict = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
    inputs = tuple(np.array([v]) for (k, v) in inputs_dict.items())
    return inputs
15 changes: 15 additions & 0 deletions
framework/e2e/PaddleLT_new/layerNLPcase/debug/case_bug/transformers/gpt/__init__.py
@@ -0,0 +1,15 @@
import os
import glob

# Directory containing this file
current_dir = os.path.dirname(__file__)

# Collect the paths of all .py files in this directory
py_files = glob.glob(os.path.join(current_dir, "*.py"))

# Dynamically import every .py file
for py_file in py_files:
    # File name without the .py extension
    module_name = os.path.basename(py_file)[:-3]
    # Import the module
    __import__("layerNLPcase.debug.case_bug.transformers.gpt." + module_name, globals(), locals(), [])
29 changes: 29 additions & 0 deletions
framework/e2e/PaddleLT_new/layerNLPcase/debug/case_bug/transformers/gpt/gpt_model_gpt2_en.py
@@ -0,0 +1,29 @@
import paddle
import numpy as np
from paddlenlp.transformers import GPTModel, GPTTokenizer


def LayerCase():
    """Model zoo intermediate state"""
    model = GPTModel.from_pretrained('gpt2-en')
    return model


def create_inputspec():
    inputspec = (
        paddle.static.InputSpec(shape=(-1, 13), dtype=paddle.float32, stop_gradient=False),
    )
    return inputspec


def create_tensor_inputs():
    tokenizer = GPTTokenizer.from_pretrained('gpt2-en')
    inputs_dict = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!", return_token_type_ids=False)
    inputs = tuple(paddle.to_tensor([v], stop_gradient=False) for (k, v) in inputs_dict.items())
    return inputs


def create_numpy_inputs():
    tokenizer = GPTTokenizer.from_pretrained('gpt2-en')
    inputs_dict = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!", return_token_type_ids=False)
    inputs = tuple(np.array([v]) for (k, v) in inputs_dict.items())
    print("inputs.shape", inputs[0].shape)
    return inputs
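The create_inputspec hooks suggest these cases are also meant to drive dynamic-to-static conversion. A hedged sketch of that direction for the GPT case above, with the InputSpec tuple taken verbatim from the case file (assumed wiring; the actual PaddleLT engine is not shown in this diff):

# Hedged sketch: wrap the case's layer for dynamic-to-static conversion
# using the InputSpec tuple the case file declares.
import paddle
from layerNLPcase.debug.case_bug.transformers.gpt import gpt_model_gpt2_en as case

layer = case.LayerCase()
static_layer = paddle.jit.to_static(layer, input_spec=list(case.create_inputspec()))
# Tracing and export would happen later in the harness, for example:
# paddle.jit.save(static_layer, "./gpt2_en_case")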