diff --git a/models/PaddleNLP/CI/daily_case.sh b/models/PaddleNLP/CI/daily_case.sh index 0aa3ffc7f1..d86deae8a0 100644 --- a/models/PaddleNLP/CI/daily_case.sh +++ b/models/PaddleNLP/CI/daily_case.sh @@ -979,8 +979,6 @@ python -m paddle.distributed.launch run_cmrc2018.py \ --max_steps 1 \ --output_dir ./tmp >${log_path}/clue-mrc >>${log_path}/clue-mrc 2>&1 print_info $? clue-mrc -export http_proxy=${http_proxy}; -export https_proxy=${http_proxy} } taskflow (){ diff --git a/models_restruct/PaddleNLP/cases/applications^information_extraction^document.yaml b/models_restruct/PaddleNLP/cases/applications^information_extraction^document.yaml index a8604598b7..89301ec1c4 100644 --- a/models_restruct/PaddleNLP/cases/applications^information_extraction^document.yaml +++ b/models_restruct/PaddleNLP/cases/applications^information_extraction^document.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: applications/information_extraction/document + path: legacy/applications/information_extraction/document cmd: wget https://paddlenlp.bj.bcebos.com/datasets/tax.tar.gz && tar -zxvf tax.tar.gz && mv tax data && rm -f tax.tar.gz - name: label_studio - path: applications/information_extraction/document + path: legacy/applications/information_extraction/document cmd: python ../label_studio.py params: - --label_studio_file ./data/label_studio.json @@ -16,7 +16,7 @@ case: - --task_type ext - name: fintune - path: applications/information_extraction/document + path: legacy/applications/information_extraction/document cmd: python -u -m paddle.distributed.launch finetune.py params: - --device gpu @@ -49,7 +49,7 @@ case: eval: - name: evaluate - path: applications/information_extraction/document + path: legacy/applications/information_extraction/document cmd: python evaluate.py params: - --device 'gpu' @@ -73,11 +73,11 @@ case: train: - name: prepare - path: applications/information_extraction/document + path: legacy/applications/information_extraction/document cmd: wget https://paddlenlp.bj.bcebos.com/datasets/tax.tar.gz && tar -zxvf tax.tar.gz && ren tax data - name: label_studio - path: applications/information_extraction/document + path: legacy/applications/information_extraction/document cmd: python ../label_studio.py params: - --label_studio_file ./data/label_studio.json @@ -86,7 +86,7 @@ case: - --task_type ext - name: fintune - path: applications/information_extraction/document + path: legacy/applications/information_extraction/document cmd: python -u -m paddle.distributed.launch finetune.py params: - --device gpu @@ -119,7 +119,7 @@ case: eval: - name: evaluate - path: applications/information_extraction/document + path: legacy/applications/information_extraction/document cmd: python evaluate.py params: - --device 'gpu' diff --git a/models_restruct/PaddleNLP/cases/applications^information_extraction^text.yaml b/models_restruct/PaddleNLP/cases/applications^information_extraction^text.yaml index 883363330c..0a083e7ff4 100644 --- a/models_restruct/PaddleNLP/cases/applications^information_extraction^text.yaml +++ b/models_restruct/PaddleNLP/cases/applications^information_extraction^text.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: applications/information_extraction/text + path: legacy/applications/information_extraction/text cmd: wget https://bj.bcebos.com/paddlenlp/datasets/military.tar.gz && tar -xvf military.tar.gz && mv military data - name: label_studio - path: applications/information_extraction/text + path: legacy/applications/information_extraction/text cmd: python 
../label_studio.py params: - --label_studio_file ./data/label_studio.json @@ -17,7 +17,7 @@ case: - --task_type ext - name: fintune - path: applications/information_extraction/text + path: legacy/applications/information_extraction/text cmd: python -u -m paddle.distributed.launch finetune.py params: - --device gpu @@ -51,7 +51,7 @@ case: evaluation: "=" - name: data_distill - path: applications/information_extraction/text/data_distill + path: legacy/applications/information_extraction/text/data_distill cmd: python data_distill.py params: - --data_path ../data @@ -66,7 +66,7 @@ case: evaluation: "=" - name: distill_train_student - path: applications/information_extraction/text/data_distill + path: legacy/applications/information_extraction/text/data_distill cmd: python train.py params: - --task_type relation_extraction @@ -80,7 +80,7 @@ case: eval: - name: evaluate - path: applications/information_extraction/text + path: legacy/applications/information_extraction/text cmd: python evaluate.py params: - --model_path ./checkpoint/model_best @@ -94,7 +94,7 @@ case: evaluation: "=" - name: distill_evaluate_teacher - path: applications/information_extraction/text/data_distill + path: legacy/applications/information_extraction/text/data_distill cmd: python evaluate_teacher.py params: - --task_type relation_extraction @@ -103,7 +103,7 @@ case: - --model_path ../checkpoint/model_best - name: distill_evaluate_student - path: applications/information_extraction/text/data_distill + path: legacy/applications/information_extraction/text/data_distill cmd: python evaluate.py params: - --model_path ./checkpoint/model_39 @@ -119,11 +119,11 @@ case: train: - name: prepare - path: applications/information_extraction/text + path: legacy/applications/information_extraction/text cmd: wget https://bj.bcebos.com/paddlenlp/datasets/military.tar.gz && tar -xvf military.tar.gz && ren military data - name: label_studio - path: applications/information_extraction/text + path: legacy/applications/information_extraction/text cmd: python ../label_studio.py params: - --label_studio_file ./data/label_studio.json @@ -133,7 +133,7 @@ case: - --task_type ext - name: fintune - path: applications/information_extraction/text + path: legacy/applications/information_extraction/text cmd: python -u -m paddle.distributed.launch finetune.py params: - --device gpu @@ -167,7 +167,7 @@ case: evaluation: "=" - name: data_distill - path: applications/information_extraction/text/data_distill + path: legacy/applications/information_extraction/text/data_distill cmd: python data_distill.py params: - --data_path ../data @@ -182,7 +182,7 @@ case: evaluation: "=" - name: distill_train_student - path: applications/information_extraction/text/data_distill + path: legacy/applications/information_extraction/text/data_distill cmd: python train.py params: - --task_type relation_extraction @@ -196,7 +196,7 @@ case: eval: - name: evaluate - path: applications/information_extraction/text + path: legacy/applications/information_extraction/text cmd: python evaluate.py params: - --model_path ./checkpoint/model_best @@ -210,7 +210,7 @@ case: evaluation: "=" - name: distill_evaluate_teacher - path: applications/information_extraction/text/data_distill + path: legacy/applications/information_extraction/text/data_distill cmd: python evaluate_teacher.py params: - --task_type relation_extraction @@ -219,7 +219,7 @@ case: - --model_path ../checkpoint/model_best - name: distill_evaluate_student - path: applications/information_extraction/text/data_distill + path: 
legacy/applications/information_extraction/text/data_distill cmd: python evaluate.py params: - --model_path ./checkpoint/model_39 diff --git a/models_restruct/PaddleNLP/cases/applications^question_answering^unsupervised_qa.yaml b/models_restruct/PaddleNLP/cases/applications^question_answering^unsupervised_qa.yaml index af844a7320..4c376105df 100644 --- a/models_restruct/PaddleNLP/cases/applications^question_answering^unsupervised_qa.yaml +++ b/models_restruct/PaddleNLP/cases/applications^question_answering^unsupervised_qa.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: applications/question_answering/unsupervised_qa - cmd: mkdir data && cd data && wget https://paddlenlp.bj.bcebos.com/applications/unsupervised_qa/source_file.txt && wget https://paddlenlp.bj.bcebos.com/applications/unsupervised_qa/train.json && wget https://paddlenlp.bj.bcebos.com/applications/unsupervised_qa/dev.json + path: legacy/applications/question_answering/unsupervised_qa + cmd: mkdir data && cd data && wget https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/source_file.txt && wget https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/train.json && wget https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/dev.json - name: run_qa_pairs_generation - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python -u run_qa_pairs_generation.py params: - --source_file_path data/source_file.txt @@ -29,7 +29,7 @@ case: - --do_debug - name: run_data_preprocess_train - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python -u run_data_preprocess.py params: - --source_file_path data/train.json @@ -37,7 +37,7 @@ case: - --do_answer_prompt - name: run_data_preprocess_dev - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python -u run_data_preprocess.py params: - --source_file_path data/dev.json @@ -45,7 +45,7 @@ case: - --do_answer_prompt - name: answer_extraction_and_roundtrip_filtration_finetune - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python -u -m paddle.distributed.launch --log_dir log/answer_extraction finetune/answer_extraction_and_roundtrip_filtration/finetune.py params: - --train_path data/finetune/answer_extraction/train.json @@ -62,7 +62,7 @@ case: - --device gpu - name: question_generation_finetune - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python -u -m paddle.distributed.launch --log_dir log/question_generation finetune/question_generation/train.py params: - --train_file=data/finetune/question_generation/train.json @@ -89,7 +89,7 @@ case: - --device=gpu - name: filtration_finetune - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python -u -m paddle.distributed.launch --log_dir log/filtration finetune/answer_extraction_and_roundtrip_filtration/finetune.py params: - --train_path=data/finetune/filtration/train.json @@ -107,7 +107,7 @@ case: eval: - name: answer_extraction - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python finetune/answer_extraction_and_roundtrip_filtration/evaluate.py params: - 
--model_path=log/answer_extraction/checkpoints/model_best @@ -117,7 +117,7 @@ case: - --limit=0.01 - name: filtration - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python finetune/answer_extraction_and_roundtrip_filtration/evaluate.py params: - --model_path=log/filtration/checkpoints/model_best @@ -133,11 +133,11 @@ case: train: - name: prepare - path: applications/question_answering/unsupervised_qa - cmd: mkdir data && cd data && wget https://paddlenlp.bj.bcebos.com/applications/unsupervised_qa/source_file.txt && wget https://paddlenlp.bj.bcebos.com/applications/unsupervised_qa/train.json && wget https://paddlenlp.bj.bcebos.com/applications/unsupervised_qa/dev.json + path: legacy/applications/question_answering/unsupervised_qa + cmd: mkdir data && cd data && wget https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/source_file.txt && wget https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/train.json && wget https://paddlenlp.bj.bcebos.com/legacy/applications/unsupervised_qa/dev.json - name: run_qa_pairs_generation - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python -u run_qa_pairs_generation.py params: - --source_file_path data/source_file.txt @@ -159,7 +159,7 @@ case: - --do_debug - name: run_data_preprocess_train - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python -u run_data_preprocess.py params: - --source_file_path data/train.json @@ -167,7 +167,7 @@ case: - --do_answer_prompt - name: run_data_preprocess_dev - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python -u run_data_preprocess.py params: - --source_file_path data/dev.json @@ -175,7 +175,7 @@ case: - --do_answer_prompt - name: answer_extraction_and_roundtrip_filtration_finetune - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python -u -m paddle.distributed.launch --log_dir log/answer_extraction finetune/answer_extraction_and_roundtrip_filtration/finetune.py params: - --train_path data/finetune/answer_extraction/train.json @@ -192,7 +192,7 @@ case: - --device gpu - name: question_generation_finetune - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python -u -m paddle.distributed.launch --log_dir log/question_generation finetune/question_generation/train.py params: - --train_file=data/finetune/question_generation/train.json @@ -219,7 +219,7 @@ case: - --device=gpu - name: filtration_finetune - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python -u -m paddle.distributed.launch --log_dir log/filtration finetune/answer_extraction_and_roundtrip_filtration/finetune.py params: - --train_path=data/finetune/filtration/train.json @@ -237,7 +237,7 @@ case: eval: - name: answer_extraction_evaluate - path: applications/question_answering/unsupervised_qa + path: legacy/applications/question_answering/unsupervised_qa cmd: python finetune/answer_extraction_and_roundtrip_filtration/evaluate.py params: - --model_path=log/answer_extraction/checkpoints/model_best @@ -247,7 +247,7 @@ case: - --limit=0.01 - name: evaluate_filtration - path: applications/question_answering/unsupervised_qa 
+ path: legacy/applications/question_answering/unsupervised_qa cmd: python finetune/answer_extraction_and_roundtrip_filtration/evaluate.py params: - --model_path=log/filtration/checkpoints/model_best diff --git a/models_restruct/PaddleNLP/cases/applications^sentiment_analysis^ASO_analysis.yaml b/models_restruct/PaddleNLP/cases/applications^sentiment_analysis^ASO_analysis.yaml index b394ee9c6c..7a3b6ad4c5 100644 --- a/models_restruct/PaddleNLP/cases/applications^sentiment_analysis^ASO_analysis.yaml +++ b/models_restruct/PaddleNLP/cases/applications^sentiment_analysis^ASO_analysis.yaml @@ -3,20 +3,20 @@ case: train: - name: data_prepare - path: applications/sentiment_analysis/ASO_analysis/ + path: legacy/applications/sentiment_analysis/ASO_analysis/ cmd: mkdir data && cd data && wget https://bj.bcebos.com/v1/paddlenlp/data/ext_data.tar.gz && tar -xzvf ext_data.tar.gz && wget https://bj.bcebos.com/v1/paddlenlp/data/cls_data.tar.gz && tar -xzvf cls_data.tar.gz && wget https://paddle-qa.bj.bcebos.com/paddlenlp/aos_tes.txt - name: checkpoints_prepare_ext - path: applications/sentiment_analysis/ASO_analysis/ + path: legacy/applications/sentiment_analysis/ASO_analysis/ cmd: mkdir ./checkpoints/ && cd checkpoints && mkdir ext_checkpoints && cd ext_checkpoints && wget https://bj.bcebos.com/paddlenlp/models/best_ext.pdparams && mv best_ext.pdparams best.pdparams - name: checkpoints_prepare_cls - path: applications/sentiment_analysis/ASO_analysis/checkpoints + path: legacy/applications/sentiment_analysis/ASO_analysis/checkpoints cmd: mkdir cls_checkpoints && cd cls_checkpoints && wget https://bj.bcebos.com/paddlenlp/models/best_cls.pdparams && mv best_cls.pdparams best.pdparams eval: - name: eval - path: applications/sentiment_analysis/ASO_analysis/ + path: legacy/applications/sentiment_analysis/ASO_analysis/ cmd: python predict.py params: - --ext_model_path "./checkpoints/ext_checkpoints/best.pdparams" @@ -32,11 +32,11 @@ case: export: - name: extraction - path: applications/sentiment_analysis/ASO_analysis/ + path: legacy/applications/sentiment_analysis/ASO_analysis/ cmd: sh run_export_model.sh extraction - name: classification - path: applications/sentiment_analysis/ASO_analysis/ + path: legacy/applications/sentiment_analysis/ASO_analysis/ cmd: sh run_export_model.sh classification predict: skipped @@ -44,24 +44,24 @@ case: train: - name: data_prepare - path: applications/sentiment_analysis/ASO_analysis/ + path: legacy/applications/sentiment_analysis/ASO_analysis/ cmd: mkdir data && cd data && wget https://bj.bcebos.com/v1/paddlenlp/data/ext_data.tar.gz && tar -xzvf ext_data.tar.gz && wget https://bj.bcebos.com/v1/paddlenlp/data/cls_data.tar.gz && tar -xzvf cls_data.tar.gz && wget https://paddle-qa.bj.bcebos.com/paddlenlp/aos_tes.txt - name: mkdir_checkpoints - path: applications/sentiment_analysis/ASO_analysis/ + path: legacy/applications/sentiment_analysis/ASO_analysis/ cmd: mkdir checkpoints && cd checkpoints && mkdir ext_checkpoints cls_checkpoints - name: checkpoints_prepare_ext - path: applications/sentiment_analysis/ASO_analysis/checkpoints/ext_checkpoints + path: legacy/applications/sentiment_analysis/ASO_analysis/checkpoints/ext_checkpoints cmd: wget https://bj.bcebos.com/paddlenlp/models/best_ext.pdparams && ren best_ext.pdparams best.pdparams - name: checkpoints_prepare_cls - path: applications/sentiment_analysis/ASO_analysis/checkpoints/cls_checkpoints + path: legacy/applications/sentiment_analysis/ASO_analysis/checkpoints/cls_checkpoints cmd: wget 
https://bj.bcebos.com/paddlenlp/models/best_cls.pdparams && ren best_cls.pdparams best.pdparams eval: - name: eval - path: applications/sentiment_analysis/ASO_analysis/ + path: legacy/applications/sentiment_analysis/ASO_analysis/ cmd: python predict.py params: - --ext_model_path "./checkpoints/ext_checkpoints/best.pdparams" @@ -77,11 +77,11 @@ case: export: - name: extraction - path: applications/sentiment_analysis/ASO_analysis/ + path: legacy/applications/sentiment_analysis/ASO_analysis/ cmd: sh run_export_model.sh extraction - name: classification - path: applications/sentiment_analysis/ASO_analysis/ + path: legacy/applications/sentiment_analysis/ASO_analysis/ cmd: sh run_export_model.sh classification predict: skipped diff --git a/models_restruct/PaddleNLP/cases/applications^sentiment_analysis^ASO_analysis^extration.yaml b/models_restruct/PaddleNLP/cases/applications^sentiment_analysis^ASO_analysis^extration.yaml index b39ed26476..265ef75d16 100644 --- a/models_restruct/PaddleNLP/cases/applications^sentiment_analysis^ASO_analysis^extration.yaml +++ b/models_restruct/PaddleNLP/cases/applications^sentiment_analysis^ASO_analysis^extration.yaml @@ -3,7 +3,7 @@ case: train: - name: single - path: applications/sentiment_analysis/ASO_analysis/extraction + path: legacy/applications/sentiment_analysis/ASO_analysis/extraction cmd: python train.py params: - --train_path "../data/ext_data/train.txt" @@ -24,7 +24,7 @@ case: eval: - name: eval - path: applications/sentiment_analysis/ASO_analysis/extraction + path: legacy/applications/sentiment_analysis/ASO_analysis/extraction cmd: bash run_evaluate.sh infer: skipped export: skipped diff --git a/models_restruct/PaddleNLP/cases/applications^sentiment_analysis^ASO_analysis^pp_minilm.yaml b/models_restruct/PaddleNLP/cases/applications^sentiment_analysis^ASO_analysis^pp_minilm.yaml index 582776eb97..107bc7417d 100644 --- a/models_restruct/PaddleNLP/cases/applications^sentiment_analysis^ASO_analysis^pp_minilm.yaml +++ b/models_restruct/PaddleNLP/cases/applications^sentiment_analysis^ASO_analysis^pp_minilm.yaml @@ -3,7 +3,7 @@ case: train: - name: single - path: applications/sentiment_analysis/ASO_analysis/pp_minilm + path: legacy/applications/sentiment_analysis/ASO_analysis/pp_minilm cmd: python train.py params: - --base_model_name "ppminilm-6l-768h" @@ -25,19 +25,19 @@ case: eval: - name: eval - path: applications/sentiment_analysis/ASO_analysis/pp_minilm + path: legacy/applications/sentiment_analysis/ASO_analysis/pp_minilm cmd: bash run_evaluate.sh infer: skipped export: - name: eval - path: applications/sentiment_analysis/ASO_analysis/ + path: legacy/applications/sentiment_analysis/ASO_analysis/ cmd: sh run_export_model.sh pp_minilm predict: - name: predict - path: applications/sentiment_analysis/ASO_analysis/pp_minilm + path: legacy/applications/sentiment_analysis/ASO_analysis/pp_minilm cmd: bash run_quant.sh windows: diff --git a/models_restruct/PaddleNLP/cases/applications^text_summarize^pagesus.yaml b/models_restruct/PaddleNLP/cases/applications^text_summarize^pagesus.yaml index 43c0ec252b..8c08d5c21a 100644 --- a/models_restruct/PaddleNLP/cases/applications^text_summarize^pagesus.yaml +++ b/models_restruct/PaddleNLP/cases/applications^text_summarize^pagesus.yaml @@ -4,11 +4,11 @@ case: train: - name: prepare - path: applications/text_summarization/finetune + path: legacy/applications/text_summarization/finetune cmd: python run_prepare.py - name: train - path: applications/text_summarization/finetune + path: 
legacy/applications/text_summarization/finetune cmd: python -m paddle.distributed.launch train.py params: - --do_train @@ -16,7 +16,7 @@ case: infer: - name: trained - path: applications/text_summarization/finetune + path: legacy/applications/text_summarization/finetune cmd: python predict.py params: - --device gpu @@ -24,14 +24,14 @@ case: export: - name: trained - path: applications/text_summarization/finetune + path: legacy/applications/text_summarization/finetune cmd: python export_model.py params: - --export_output_dir ./inference_model/ predict: - name: trained - path: applications/text_summarization/finetune/deploy/paddle_inference/ + path: legacy/applications/text_summarization/finetune/deploy/paddle_inference/ cmd: python inference_pegasus.py params: - --inference_model_dir ../../inference_model/ @@ -41,17 +41,17 @@ case: train: - name: prepare - path: applications/text_summarization/finetune + path: legacy/applications/text_summarization/finetune cmd: python run_prepare.py - name: train - path: applications/text_summarization/finetune + path: legacy/applications/text_summarization/finetune cmd: python -m paddle.distributed.launch train.py eval: skipped infer: - name: trained - path: applications/text_summarization/finetune + path: legacy/applications/text_summarization/finetune cmd: python predict.py params: - --device gpu diff --git a/models_restruct/PaddleNLP/cases/applications^zero_shot_text_classification.yaml b/models_restruct/PaddleNLP/cases/applications^zero_shot_text_classification.yaml index ab41009ff9..fce7355cc0 100644 --- a/models_restruct/PaddleNLP/cases/applications^zero_shot_text_classification.yaml +++ b/models_restruct/PaddleNLP/cases/applications^zero_shot_text_classification.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: applications/zero_shot_text_classification + path: legacy/applications/zero_shot_text_classification cmd: wget https://bj.bcebos.com/paddlenlp/datasets/utc-medical.tar.gz && tar -xvf utc-medical.tar.gz && mv utc-medical data - name: label_studio - path: applications/zero_shot_text_classification + path: legacy/applications/zero_shot_text_classification cmd: python label_studio.py params: - --label_studio_file ./data/label_studio.json @@ -16,7 +16,7 @@ case: - --options ./data/label.txt - name: fintune - path: applications/zero_shot_text_classification + path: legacy/applications/zero_shot_text_classification cmd: python -u -m paddle.distributed.launch --gpus "0,1" run_train.py params: - --device gpu @@ -50,7 +50,7 @@ case: eval: - name: evaluate - path: applications/zero_shot_text_classification + path: legacy/applications/zero_shot_text_classification cmd: python run_eval.py params: - --model_path ./checkpoint/model_best @@ -71,11 +71,11 @@ case: train: - name: prepare - path: applications/zero_shot_text_classification + path: legacy/applications/zero_shot_text_classification cmd: wget https://bj.bcebos.com/paddlenlp/datasets/utc-medical.tar.gz && tar -xvf utc-medical.tar.gz && ren utc-medical data - name: label_studio - path: applications/zero_shot_text_classification + path: legacy/applications/zero_shot_text_classification cmd: python label_studio.py params: - --label_studio_file ./data/label_studio.json @@ -84,7 +84,7 @@ case: - --options ./data/label.txt - name: fintune - path: applications/zero_shot_text_classification + path: legacy/applications/zero_shot_text_classification cmd: python -u -m paddle.distributed.launch run_train.py params: - --device gpu @@ -118,7 +118,7 @@ case: eval: - name: evaluate - path: 
applications/zero_shot_text_classification + path: legacy/applications/zero_shot_text_classification cmd: python run_eval.py params: - --model_path ./checkpoint/model_best diff --git a/models_restruct/PaddleNLP/cases/examples^code_generation^codegen.yaml b/models_restruct/PaddleNLP/cases/examples^code_generation^codegen.yaml index 93d95498e4..44a73e991d 100644 --- a/models_restruct/PaddleNLP/cases/examples^code_generation^codegen.yaml +++ b/models_restruct/PaddleNLP/cases/examples^code_generation^codegen.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: examples/code_generation/codegen + path: legacy/examples/code_generation/codegen cmd: wget https://paddle-qa.bj.bcebos.com/paddlenlp/codegen.tar.gz && tar -xzvf codegen.tar.gz - name: multi - path: examples/code_generation/codegen + path: legacy/examples/code_generation/codegen cmd: python -m paddle.distributed.launch run_clm.py params: - --model_name_or_path Salesforce/codegen-350M-mono @@ -29,11 +29,11 @@ case: train: - name: prepare - path: examples/code_generation/codegen + path: legacy/examples/code_generation/codegen cmd: wget https://paddle-qa.bj.bcebos.com/paddlenlp/codegen.tar.gz && tar -xzvf codegen.tar.gz - name: single - path: examples/code_generation/codegen + path: legacy/examples/code_generation/codegen cmd: python -m paddle.distributed.launch run_clm.py params: - --model_name_or_path Salesforce/codegen-350M-mono diff --git a/models_restruct/PaddleNLP/cases/examples^dependency_parsing^ddparsing.yaml b/models_restruct/PaddleNLP/cases/examples^dependency_parsing^ddparsing.yaml index ce133cf406..a3143b3830 100644 --- a/models_restruct/PaddleNLP/cases/examples^dependency_parsing^ddparsing.yaml +++ b/models_restruct/PaddleNLP/cases/examples^dependency_parsing^ddparsing.yaml @@ -3,7 +3,7 @@ case: train: - name: LSTMEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python -m paddle.distributed.launch train.py params: - --device=gpu @@ -16,7 +16,7 @@ case: - --lstm_lr=0.002 - name: LSTMByWPEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python -m paddle.distributed.launch train.py params: - --device=gpu @@ -27,7 +27,7 @@ case: - --lstm_lr=0.002 - name: LSTMEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python -m paddle.distributed.launch train.py params: - --device=gpu @@ -39,7 +39,7 @@ case: eval: - name: LSTMEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python -m paddle.distributed.launch predict.py params: - --device=gpu @@ -50,7 +50,7 @@ case: - --infer_output_file=infer_output.conll - name: LSTMByWPEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python -m paddle.distributed.launch predict.py params: - --device=gpu @@ -61,7 +61,7 @@ case: - --infer_output_file=infer_output.conll - name: LSTMEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python -m paddle.distributed.launch predict.py params: - --device=gpu @@ -73,14 +73,14 @@ case: export: - name: LSTMByWPEncoder_MLPF_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python export_model.py --encoding_model=lstm-pe params: - --params_path=./ByWPEncoder/best.pdparams - 
--output_path=./ByWPEncoder/output - name: LSTMEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python export_model.py --encoding_model=ernie-gram-zh params: - --params_path=./ErnieEncoderbest.pdparams @@ -88,14 +88,14 @@ case: predict: - name: LSTMByWPEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python deploy/python/predict.py params: - --model_dir=./ByWPEncoder/output - --task_name=nlpcc13_evsam05_hit - name: LSTMEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python deploy/python/predict.py params: - --model_dir=./ErnieEncoder/output @@ -105,7 +105,7 @@ case: train: - name: LSTMEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python -m paddle.distributed.launch train.py params: - --device=gpu @@ -118,7 +118,7 @@ case: - --lstm_lr=0.002 - name: LSTMByWPEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python -m paddle.distributed.launch train.py params: - --device=gpu @@ -129,7 +129,7 @@ case: - --lstm_lr=0.002 - name: LSTMEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python -m paddle.distributed.launch train.py params: - --device=gpu @@ -141,7 +141,7 @@ case: eval: - name: LSTMEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python -m paddle.distributed.launch predict.py params: - --device=gpu @@ -152,7 +152,7 @@ case: - --infer_output_file=infer_output.conll - name: LSTMByWPEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python -m paddle.distributed.launch predict.py params: - --device=gpu @@ -163,7 +163,7 @@ case: - --infer_output_file=infer_output.conll - name: LSTMEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python -m paddle.distributed.launch predict.py params: - --device=gpu @@ -175,14 +175,14 @@ case: export: - name: LSTMByWPEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python export_model.py --encoding_model=lstm-pe params: - --params_path=./ByWPEncoder/best.pdparams - --output_path=./ByWPEncoder/output - name: LSTMEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python export_model.py --encoding_model=ernie-gram-zh params: - --params_path=./ErnieEncoderbest.pdparams @@ -190,14 +190,14 @@ case: predict: - name: LSTMByWPEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python deploy/python/predict.py params: - --model_dir=./ByWPEncoder/output - --task_name=nlpcc13_evsam05_hit - name: LSTMEncoder_MLP_BiAffine - path: examples/dependency_parsing/ddparser + path: legacy/examples/dependency_parsing/ddparser cmd: python deploy/python/predict.py params: - --model_dir=./ErnieEncoder/output diff --git a/models_restruct/PaddleNLP/cases/examples^few_shot^RGL.yaml b/models_restruct/PaddleNLP/cases/examples^few_shot^RGL.yaml index dd1804985e..4f5456252b 100644 --- 
a/models_restruct/PaddleNLP/cases/examples^few_shot^RGL.yaml +++ b/models_restruct/PaddleNLP/cases/examples^few_shot^RGL.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: examples/few_shot/RGL + path: legacy/examples/few_shot/RGL cmd: wget https://paddlenlp.bj.bcebos.com/datasets/k-shot-glue/rgl-k-shot.zip && unzip rgl-k-shot.zip - name: single - path: examples/few_shot/RGL + path: legacy/examples/few_shot/RGL cmd: python rgl.py params: - --dataset SST-2 @@ -31,11 +31,11 @@ case: train: - name: prepare - path: examples/few_shot/RGL + path: legacy/examples/few_shot/RGL cmd: wget https://paddlenlp.bj.bcebos.com/datasets/k-shot-glue/rgl-k-shot.zip && unzip rgl-k-shot.zip - name: train - path: examples/few_shot/RGL + path: legacy/examples/few_shot/RGL cmd: python rgl.py params: - --dataset SST-2 @@ -59,11 +59,11 @@ case: train: - name: prepare - path: examples/few_shot/RGL + path: legacy/examples/few_shot/RGL cmd: wget https://paddlenlp.bj.bcebos.com/datasets/k-shot-glue/rgl-k-shot.zip && unzip rgl-k-shot.zip - name: train - path: examples/few_shot/RGL + path: legacy/examples/few_shot/RGL cmd: python rgl.py params: - --dataset SST-2 @@ -88,11 +88,11 @@ case: train: - name: prepare - path: examples/few_shot/RGL + path: legacy/examples/few_shot/RGL cmd: wget https://paddlenlp.bj.bcebos.com/datasets/k-shot-glue/rgl-k-shot.zip && unzip rgl-k-shot.zip - name: single - path: examples/few_shot/RGL + path: legacy/examples/few_shot/RGL cmd: python rgl.py params: - --dataset SST-2 diff --git a/models_restruct/PaddleNLP/cases/examples^few_shot^efl.yaml b/models_restruct/PaddleNLP/cases/examples^few_shot^efl.yaml index e82512994f..c13b641395 100644 --- a/models_restruct/PaddleNLP/cases/examples^few_shot^efl.yaml +++ b/models_restruct/PaddleNLP/cases/examples^few_shot^efl.yaml @@ -4,7 +4,7 @@ case: train: - name: multi - path: examples/few_shot/efl + path: legacy/examples/few_shot/efl cmd: python -m paddle.distributed.launch run_train.py eval: skipped infer: skipped @@ -16,7 +16,7 @@ case: train: - name: single - path: examples/few_shot/efl + path: legacy/examples/few_shot/efl cmd: python -m paddle.distributed.launch run_train.py eval: skipped infer: skipped @@ -27,7 +27,7 @@ case: train: - name: train - path: examples/few_shot/efl + path: legacy/examples/few_shot/efl cmd: python run_train.py params: - --max_steps 2 @@ -43,7 +43,7 @@ case: train: - name: train - path: examples/few_shot/efl + path: legacy/examples/few_shot/efl cmd: python run_train.py params: - --max_steps 2 diff --git a/models_restruct/PaddleNLP/cases/examples^few_shot^p-tuning.yaml b/models_restruct/PaddleNLP/cases/examples^few_shot^p-tuning.yaml index ad1f0aef83..cea1627faf 100644 --- a/models_restruct/PaddleNLP/cases/examples^few_shot^p-tuning.yaml +++ b/models_restruct/PaddleNLP/cases/examples^few_shot^p-tuning.yaml @@ -4,7 +4,7 @@ case: train: - name: single - path: examples/few_shot/p-tuning + path: legacy/examples/few_shot/p-tuning cmd: python -m paddle.distributed.launch run_train.py eval: skipped infer: skipped @@ -16,7 +16,7 @@ case: train: - name: single - path: examples/few_shot/p-tuning + path: legacy/examples/few_shot/p-tuning cmd: python -m paddle.distributed.launch run_train.py eval: skipped infer: skipped @@ -27,7 +27,7 @@ case: train: - name: train - path: examples/few_shot/p-tuning + path: legacy/examples/few_shot/p-tuning cmd: python run_train.py params: - --max_steps 2 @@ -43,7 +43,7 @@ case: train: - name: train - path: examples/few_shot/p-tuning + path: legacy/examples/few_shot/p-tuning cmd: python 
run_train.py params: - --max_steps 2 diff --git a/models_restruct/PaddleNLP/cases/examples^few_shot^pet.yaml b/models_restruct/PaddleNLP/cases/examples^few_shot^pet.yaml index 949d14b9fc..a651acb53d 100644 --- a/models_restruct/PaddleNLP/cases/examples^few_shot^pet.yaml +++ b/models_restruct/PaddleNLP/cases/examples^few_shot^pet.yaml @@ -4,7 +4,7 @@ case: train: - name: multi - path: examples/few_shot/pet + path: legacy/examples/few_shot/pet cmd: python -m paddle.distributed.launch run_train.py eval: skipped infer: skipped @@ -16,7 +16,7 @@ case: train: - name: multi - path: examples/few_shot/pet + path: legacy/examples/few_shot/pet cmd: python -m paddle.distributed.launch run_train.py eval: skipped infer: skipped diff --git a/models_restruct/PaddleNLP/cases/examples^language_model^bloom.yaml b/models_restruct/PaddleNLP/cases/examples^language_model^bloom.yaml index 23ddb944cc..e307c38bae 100644 --- a/models_restruct/PaddleNLP/cases/examples^language_model^bloom.yaml +++ b/models_restruct/PaddleNLP/cases/examples^language_model^bloom.yaml @@ -3,7 +3,7 @@ case: train: - name: multi - path: examples/language_model/bloom + path: legacy/examples/language_model/bloom cmd: python -m paddle.distributed.launch --log_dir our_log finetune_generation.py params: - --model_name_or_path bigscience/bloom-560m @@ -35,7 +35,7 @@ case: eval: - name: eval - path: examples/language_model/bloom + path: legacy/examples/language_model/bloom cmd: python -m paddle.distributed.launch predict_generation.py params: - --model_name_or_path checkpoints/bloom-560m/checkpoint-20 @@ -44,7 +44,7 @@ case: export: - name: export - path: examples/language_model/bloom + path: legacy/examples/language_model/bloom cmd: export_generation_model.py params: - --model_name_or_path ./save @@ -52,7 +52,7 @@ case: predict: - name: predict - path: examples/language_model/bloom + path: legacy/examples/language_model/bloom cmd: infer_generation.py params: - --model_dir inference/ --model_prefix bloom diff --git a/models_restruct/PaddleNLP/cases/examples^language_model^chatglm.yaml b/models_restruct/PaddleNLP/cases/examples^language_model^chatglm.yaml index c32d185e9a..53b1ebca4d 100644 --- a/models_restruct/PaddleNLP/cases/examples^language_model^chatglm.yaml +++ b/models_restruct/PaddleNLP/cases/examples^language_model^chatglm.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: examples/language_model/chatglm - cmd: wget https://paddlenlp.bj.bcebos.com/datasets/examples/AdvertiseGen.tar.gz && tar -xzvf AdvertiseGen.tar.gz + path: legacy/examples/language_model/chatglm + cmd: wget https://paddlenlp.bj.bcebos.com/datasets/legacy/examples/AdvertiseGen.tar.gz && tar -xzvf AdvertiseGen.tar.gz - name: multi - path: examples/language_model/chatglm + path: legacy/examples/language_model/chatglm cmd: python -m paddle.distributed.launch --gpus finetune_generation.py params: - --model_name_or_path THUDM/chatglm-6b @@ -36,7 +36,7 @@ case: eval: - name: eval - path: examples/language_model/chatglm + path: legacy/examples/language_model/chatglm cmd: python predict_generation.py params: - --model_name_or_path ./checkpoints/chatglm-6b @@ -44,7 +44,7 @@ case: export: - name: export - path: examples/language_model/chatglm + path: legacy/examples/language_model/chatglm cmd: export_generation_model.py params: - --model_name_or_path ./checkpoints/chatglm-6b diff --git a/models_restruct/PaddleNLP/cases/examples^language_model^convbert.yaml b/models_restruct/PaddleNLP/cases/examples^language_model^convbert.yaml index 547fdbed5f..a264aac8cc 100644 --- 
a/models_restruct/PaddleNLP/cases/examples^language_model^convbert.yaml +++ b/models_restruct/PaddleNLP/cases/examples^language_model^convbert.yaml @@ -3,7 +3,7 @@ case: train: - name: single - path: examples/language_model/convbert + path: legacy/examples/language_model/convbert cmd: python -u run_glue.py params: - --model_type convbert @@ -27,7 +27,7 @@ case: train: - name: single - path: examples/language_model/convbert + path: legacy/examples/language_model/convbert cmd: python -u run_glue.py params: - --model_type convbert diff --git a/models_restruct/PaddleNLP/cases/examples^language_model^glm.yaml b/models_restruct/PaddleNLP/cases/examples^language_model^glm.yaml index b041f693e0..3bb115d624 100644 --- a/models_restruct/PaddleNLP/cases/examples^language_model^glm.yaml +++ b/models_restruct/PaddleNLP/cases/examples^language_model^glm.yaml @@ -3,7 +3,7 @@ case: train: - name: multi - path: examples/language_model/glm + path: legacy/examples/language_model/glm cmd: python finetune_generation.py params: - --model_name_or_path THUDM/glm-large-chinese @@ -36,7 +36,7 @@ case: eval: - name: eval - path: examples/language_model/glm + path: legacy/examples/language_model/glm cmd: python predict_generation.py params: - --model_name_or_path ./checkpoints/glm-large-chinesee @@ -44,7 +44,7 @@ case: export: - name: export - path: examples/language_model/glm + path: legacy/examples/language_model/glm cmd: export_generation_model.py params: - --model_name_or_path ./checkpoints/glm-large-chinese diff --git a/models_restruct/PaddleNLP/cases/examples^language_model^llama.yaml b/models_restruct/PaddleNLP/cases/examples^language_model^llama.yaml index 04aace9ebe..79d2c44596 100644 --- a/models_restruct/PaddleNLP/cases/examples^language_model^llama.yaml +++ b/models_restruct/PaddleNLP/cases/examples^language_model^llama.yaml @@ -3,7 +3,7 @@ case: train: - name: multi - path: examples/language_model/llama + path: legacy/examples/language_model/llama cmd: python -u -m paddle.distributed.fleet.launch finetune_generation.py params: - --model_name_or_path facebook/tiny-random-llama @@ -31,7 +31,7 @@ case: eval: - name: eval - path: examples/language_model/llama + path: legacy/examples/language_model/llama cmd: python predict_generation.py params: - --model_name_or_path ./checkpoints/ @@ -39,7 +39,7 @@ case: export: - name: export - path: examples/language_model/llama + path: legacy/examples/language_model/llama cmd: export_generation_model.py params: - --model_path checkpoints/ @@ -47,7 +47,7 @@ case: predict: - name: export - path: examples/language_model/llama + path: legacy/examples/language_model/llama cmd: python infer_generation.py params: - --model_dir inference diff --git a/models_restruct/PaddleNLP/cases/examples^language_model^luke.yaml b/models_restruct/PaddleNLP/cases/examples^language_model^luke.yaml index 2c8bfa0367..beb50ea8c0 100644 --- a/models_restruct/PaddleNLP/cases/examples^language_model^luke.yaml +++ b/models_restruct/PaddleNLP/cases/examples^language_model^luke.yaml @@ -3,15 +3,15 @@ case: train: - name: prepare - path: examples/language_model/luke + path: legacy/examples/language_model/luke cmd: mkdir data && cd data && wget https://data.deepai.org/squad1.1.zip --no-check-certificate - name: prepare_data - path: examples/language_model/luke/data + path: legacy/examples/language_model/luke/data cmd: unzip squad1.1.zip && mv train-v1.1.json train.json && mv dev-v1.1.json dev.json - name: run_squad - path: examples/language_model/luke + path: legacy/examples/language_model/luke cmd: 
python -m paddle.distributed.launch run_squad.py params: - --model_type luke @@ -27,7 +27,7 @@ case: - --save_step 2 - name: run_open_entity.py - path: examples/language_model/luke + path: legacy/examples/language_model/luke cmd: python -m paddle.distributed.launch run_open_entity.py params: - --model_type luke-large diff --git a/models_restruct/PaddleNLP/cases/examples^language_model^megatronbert.yaml b/models_restruct/PaddleNLP/cases/examples^language_model^megatronbert.yaml index a41e1e1431..906c7f53f2 100644 --- a/models_restruct/PaddleNLP/cases/examples^language_model^megatronbert.yaml +++ b/models_restruct/PaddleNLP/cases/examples^language_model^megatronbert.yaml @@ -3,7 +3,7 @@ case: train: - name: run_squad - path: examples/language_model/luke + path: legacy/examples/language_model/luke cmd: python -m paddle.distributed.launch run_squad.py params: - --model_type luke @@ -19,7 +19,7 @@ case: - --save_step 2 - name: run_open_entity - path: examples/language_model/luke + path: legacy/examples/language_model/luke cmd: python -m paddle.distributed.launch run_open_entity.py params: - --model_type luke-large diff --git a/models_restruct/PaddleNLP/cases/examples^language_model^rembert.yaml b/models_restruct/PaddleNLP/cases/examples^language_model^rembert.yaml index 48868c54fc..8823b4633b 100644 --- a/models_restruct/PaddleNLP/cases/examples^language_model^rembert.yaml +++ b/models_restruct/PaddleNLP/cases/examples^language_model^rembert.yaml @@ -3,7 +3,7 @@ case: train: - name: run_thucnews - path: examples/language_model/roformer + path: legacy/examples/language_model/roformer cmd: python -m paddle.distributed.launch run_thucnews.py params: - --model_type roformer diff --git a/models_restruct/PaddleNLP/cases/examples^language_model^roformer.yaml b/models_restruct/PaddleNLP/cases/examples^language_model^roformer.yaml index cf661a2951..77e1afbc82 100644 --- a/models_restruct/PaddleNLP/cases/examples^language_model^roformer.yaml +++ b/models_restruct/PaddleNLP/cases/examples^language_model^roformer.yaml @@ -3,7 +3,7 @@ case: train: - name: run_thucnews - path: examples/language_model/roformer + path: legacy/examples/language_model/roformer cmd: python -m paddle.distributed.launch run_thucnews.py params: - --model_type roformer @@ -20,7 +20,7 @@ case: - --max_steps 2 - name: run_cail2019_scm - path: examples/language_model/roformer + path: legacy/examples/language_model/roformer cmd: python -m paddle.distributed.launch run_cail2019_scm.py params: - --model_type roformer_mean_pooling @@ -44,7 +44,7 @@ case: train: - name: run_thucnews - path: examples/language_model/roformer + path: legacy/examples/language_model/roformer cmd: python -m paddle.distributed.launch run_thucnews.py params: - --model_type roformer @@ -61,7 +61,7 @@ case: - --max_steps 2 - name: run_cail2019_scm - path: examples/language_model/roformer + path: legacy/examples/language_model/roformer cmd: python -m paddle.distributed.launch run_cail2019_scm.py params: - --model_type roformer_mean_pooling diff --git a/models_restruct/PaddleNLP/cases/examples^language_model^t5.yaml b/models_restruct/PaddleNLP/cases/examples^language_model^t5.yaml index 7905b0c641..eaaae0c53d 100644 --- a/models_restruct/PaddleNLP/cases/examples^language_model^t5.yaml +++ b/models_restruct/PaddleNLP/cases/examples^language_model^t5.yaml @@ -3,7 +3,7 @@ case: train: - name: train_rte - path: examples/language_model/t5 + path: legacy/examples/language_model/t5 cmd: python run_glue.py params: - --model_name_or_path t5-base @@ -22,7 +22,7 @@ case: - 
--output_dir outputs/rte/ - name: trainer_finetune_rte - path: examples/language_model/t5 + path: legacy/examples/language_model/t5 cmd: python -m paddle.distributed.launch run_glue_trainer.py params: - --model_name_or_path t5-base @@ -43,7 +43,7 @@ case: - --max_steps 2 - name: trainer_finetune_cluewsc2020 - path: examples/language_model/t5 + path: legacy/examples/language_model/t5 cmd: python -m paddle.distributed.launch run_glue_trainer.py params: - --model_name_or_path Langboat/mengzi-t5-base-mt @@ -68,7 +68,7 @@ case: train: - name: train_rte - path: examples/language_model/t5 + path: legacy/examples/language_model/t5 cmd: python run_glue.py params: - --model_name_or_path t5-base @@ -87,7 +87,7 @@ case: - --output_dir outputs/rte/ - name: trainer_finetune_rte - path: examples/language_model/t5 + path: legacy/examples/language_model/t5 cmd: python -m paddle.distributed.launch run_glue_trainer.py params: - --model_name_or_path t5-base @@ -108,7 +108,7 @@ case: - --max_steps 2 - name: trainer_finetune_cluewsc2020 - path: examples/language_model/t5 + path: legacy/examples/language_model/t5 cmd: python -m paddle.distributed.launch run_glue_trainer.py params: - --model_name_or_path Langboat/mengzi-t5-base-mt diff --git a/models_restruct/PaddleNLP/cases/examples^language_model^xlm.yaml b/models_restruct/PaddleNLP/cases/examples^language_model^xlm.yaml index e87173ddf6..552fd4501e 100644 --- a/models_restruct/PaddleNLP/cases/examples^language_model^xlm.yaml +++ b/models_restruct/PaddleNLP/cases/examples^language_model^xlm.yaml @@ -3,15 +3,15 @@ case: train: - name: clone_kytea - path: examples/language_model/xlm + path: legacy/examples/language_model/xlm cmd: git clone https://github.com/neubig/kytea.git - name: build_kytea - path: examples/language_model/xlm/kytea + path: legacy/examples/language_model/xlm/kytea cmd: autoreconf -i && ./configure --prefix=$HOME/local && make && make install - name: train_xnli - path: examples/language_model/xlm + path: legacy/examples/language_model/xlm cmd: python xnli_train.py params: - --batch_size 8 @@ -23,7 +23,7 @@ case: eval: - name: eval_xnli - path: examples/language_model/xlm + path: legacy/examples/language_model/xlm cmd: python xnli_eval.py params: - --batch_size 8 diff --git a/models_restruct/PaddleNLP/cases/examples^lexical_analysis.yaml b/models_restruct/PaddleNLP/cases/examples^lexical_analysis.yaml index fb7e48cba6..a620545de5 100644 --- a/models_restruct/PaddleNLP/cases/examples^lexical_analysis.yaml +++ b/models_restruct/PaddleNLP/cases/examples^lexical_analysis.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python download.py --data_dir ./ - name: train - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python -m paddle.distributed.launch train.py params: - --data_dir ./lexical_analysis_dataset_tiny @@ -20,7 +20,7 @@ case: eval: - name: eval - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python predict.py params: - --data_dir ./lexical_analysis_dataset_tiny @@ -31,7 +31,7 @@ case: export: - name: export - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python export_model.py params: - --data_dir=./lexical_analysis_dataset_tiny @@ -40,7 +40,7 @@ case: predict: - name: predict - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python deploy/predict.py params: - --model_file=infer_model/static_graph_params.pdmodel @@ -51,11 +51,11 
@@ case: train: - name: prepare - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python download.py --data_dir ./ - name: train - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python -m paddle.distributed.launch train.py params: - --data_dir ./lexical_analysis_dataset_tiny @@ -68,7 +68,7 @@ case: eval: - name: eval - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python predict.py params: - --data_dir ./lexical_analysis_dataset_tiny @@ -79,7 +79,7 @@ case: export: - name: export - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python export_model.py params: - --data_dir=./lexical_analysis_dataset_tiny @@ -88,7 +88,7 @@ case: predict: - name: predict - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python deploy/predict.py params: - --model_file=infer_model/static_graph_params.pdmodel @@ -99,11 +99,11 @@ case: train: - name: prepare - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python download.py --data_dir ./ - name: train - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ params: cmd: python -m paddle.distributed.launch train.py - --data_dir ./lexical_analysis_dataset_tiny @@ -116,7 +116,7 @@ case: eval: - name: eval - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python predict.py params: - --data_dir ./lexical_analysis_dataset_tiny @@ -127,7 +127,7 @@ case: export: - name: export - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python export_model.py - --data_dir=./lexical_analysis_dataset_tiny - --params_path=./save_dir/model_15.pdparams @@ -135,7 +135,7 @@ case: predict: - name: predict - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python deploy/predict.py params: - --model_file=infer_model/static_graph_params.pdmodel @@ -146,11 +146,11 @@ case: train: - name: prepare - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python download.py --data_dir ./ - name: train - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python -m paddle.distributed.launch train.py params: - --data_dir ./lexical_analysis_dataset_tiny @@ -163,7 +163,7 @@ case: eval: - name: eval - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python predict.py params: - --data_dir ./lexical_analysis_dataset_tiny @@ -174,7 +174,7 @@ case: export: - name: export - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python export_model.py params: - --data_dir=./lexical_analysis_dataset_tiny @@ -183,7 +183,7 @@ case: predict: - name: predict - path: examples/lexical_analysis/ + path: legacy/examples/lexical_analysis/ cmd: python deploy/predict.py params: - --model_file=infer_model/static_graph_params.pdmodel diff --git a/models_restruct/PaddleNLP/cases/examples^machine_reading_comprehension^SQuAD.yaml b/models_restruct/PaddleNLP/cases/examples^machine_reading_comprehension^SQuAD.yaml index 42b6a8a477..df0ceaa512 100644 --- a/models_restruct/PaddleNLP/cases/examples^machine_reading_comprehension^SQuAD.yaml +++ b/models_restruct/PaddleNLP/cases/examples^machine_reading_comprehension^SQuAD.yaml @@ -3,7 +3,7 @@ case: train: - name: pretrain - path: examples/machine_reading_comprehension/SQuAD/ + path: legacy/examples/machine_reading_comprehension/SQuAD/ cmd: python -m 
paddle.distributed.launch run_squad.py params: - --model_type bert @@ -26,7 +26,7 @@ case: export: - name: export - path: examples/machine_reading_comprehension/SQuAD/ + path: legacy/examples/machine_reading_comprehension/SQuAD/ cmd: python -u ./export_model.py params: - --model_type bert @@ -35,7 +35,7 @@ case: predict: - name: export - path: examples/machine_reading_comprehension/SQuAD/ + path: legacy/examples/machine_reading_comprehension/SQuAD/ cmd: python -u deploy/python/predict.py params: - --model_type bert diff --git a/models_restruct/PaddleNLP/cases/examples^machine_translation^seq2seq.yaml b/models_restruct/PaddleNLP/cases/examples^machine_translation^seq2seq.yaml index 62d69e28e2..35164208f3 100644 --- a/models_restruct/PaddleNLP/cases/examples^machine_translation^seq2seq.yaml +++ b/models_restruct/PaddleNLP/cases/examples^machine_translation^seq2seq.yaml @@ -3,7 +3,7 @@ case: train: - name: pretrain - path: examples/machine_translation/seq2seq/ + path: legacy/examples/machine_translation/seq2seq/ cmd: python train.py params: - --num_layers 2 @@ -19,7 +19,7 @@ case: eval: - name: eval - path: examples/machine_translation/seq2seq/ + path: legacy/examples/machine_translation/seq2seq/ cmd: python predict.py params: - --num_layers 2 @@ -36,7 +36,7 @@ case: export: - name: export - path: examples/machine_translation/seq2seq/ + path: legacy/examples/machine_translation/seq2seq/ cmd: python export_model.py params: - --num_layers 2 @@ -51,7 +51,7 @@ case: predict: - name: predict - path: examples/machine_translation/seq2seq/deploy/python + path: legacy/examples/machine_translation/seq2seq/deploy/python cmd: python infer.py params: - --export_path ../../infer_model/model diff --git a/models_restruct/PaddleNLP/cases/examples^machine_translation^transformer.yaml b/models_restruct/PaddleNLP/cases/examples^machine_translation^transformer.yaml index bd5d3911b6..0b936772b0 100644 --- a/models_restruct/PaddleNLP/cases/examples^machine_translation^transformer.yaml +++ b/models_restruct/PaddleNLP/cases/examples^machine_translation^transformer.yaml @@ -3,11 +3,11 @@ case: train: - name: download - path: examples/machine_translation/transformer + path: legacy/examples/machine_translation/transformer cmd: wget -q https://paddle-qa.bj.bcebos.com/paddlenlp/WMT14.en-de.partial.tar.gz && tar -xzvf WMT14.en-de.partial.tar.gz - name: pretrain - path: examples/machine_translation/transformer + path: legacy/examples/machine_translation/transformer cmd: python train.py params: - --config ./configs/transformer.base.yaml @@ -20,7 +20,7 @@ case: export: - name: export - path: examples/machine_translation/transformer + path: legacy/examples/machine_translation/transformer cmd: python export_model.py params: - --config ./configs/transformer.base.yaml @@ -29,7 +29,7 @@ case: predict: - name: predict - path: examples/machine_translation/transformer + path: legacy/examples/machine_translation/transformer cmd: python ./deploy/python/inference.py params: - --config ./configs/transformer.base.yaml diff --git a/models_restruct/PaddleNLP/cases/examples^model_compression^ofa.yaml b/models_restruct/PaddleNLP/cases/examples^model_compression^ofa.yaml index 8b2b68606f..96f51c7909 100644 --- a/models_restruct/PaddleNLP/cases/examples^model_compression^ofa.yaml +++ b/models_restruct/PaddleNLP/cases/examples^model_compression^ofa.yaml @@ -3,7 +3,7 @@ case: train: - name: run_glue - path: examples/benchmark/glue/ + path: legacy/examples/benchmark/glue/ cmd: python -u ./run_glue.py params: - --model_type bert @@ -20,7 +20,7 @@ 
case: - --device gpu - name: run_glue_ofa - path: examples/model_compression/ofa/ + path: legacy/examples/model_compression/ofa/ cmd: python -m paddle.distributed.launch run_glue_ofa.py params: - --model_type bert @@ -46,7 +46,7 @@ case: train: - name: run_glue - path: examples/benchmark/glue/ + path: legacy/examples/benchmark/glue/ cmd: python -u ./run_glue.py params: - --model_type bert @@ -63,7 +63,7 @@ case: - --device gpu - name: run_glue_ofa - path: examples/model_compression/ofa/ + path: legacy/examples/model_compression/ofa/ cmd: python -m paddle.distributed.launch run_glue_ofa.py params: - --model_type bert diff --git a/models_restruct/PaddleNLP/cases/examples^multimodal^layoutlm.yaml b/models_restruct/PaddleNLP/cases/examples^multimodal^layoutlm.yaml index 1629074160..055b547e74 100644 --- a/models_restruct/PaddleNLP/cases/examples^multimodal^layoutlm.yaml +++ b/models_restruct/PaddleNLP/cases/examples^multimodal^layoutlm.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: examples/multimodal/layoutlm + path: legacy/examples/multimodal/layoutlm cmd: wget https://bj.bcebos.com/v1/paddlenlp/datasets/FUNSD.zip && unzip FUNSD.zip - name: single - path: examples/multimodal/layoutlm + path: legacy/examples/multimodal/layoutlm cmd: python train_funsd.py params: - -data_dir "./data/" diff --git a/models_restruct/PaddleNLP/cases/examples^multimodal^layoutxlm.yaml b/models_restruct/PaddleNLP/cases/examples^multimodal^layoutxlm.yaml index 24dd5638ff..6a99d0051c 100644 --- a/models_restruct/PaddleNLP/cases/examples^multimodal^layoutxlm.yaml +++ b/models_restruct/PaddleNLP/cases/examples^multimodal^layoutxlm.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: examples/multimodal/layoutlm + path: legacy/examples/multimodal/layoutlm cmd: wget https://bj.bcebos.com/v1/paddlenlp/datasets/FUNSD.zip && unzip FUNSD.zip - name: single - path: examples/multimodal/layoutlm + path: legacy/examples/multimodal/layoutlm cmd: python train_funsd.py params: - --data_dir "./data/" diff --git a/models_restruct/PaddleNLP/cases/examples^question_generation^t5.yaml b/models_restruct/PaddleNLP/cases/examples^question_generation^t5.yaml index 5178878098..cff80d157d 100644 --- a/models_restruct/PaddleNLP/cases/examples^question_generation^t5.yaml +++ b/models_restruct/PaddleNLP/cases/examples^question_generation^t5.yaml @@ -3,7 +3,7 @@ case: train: - name: multi - path: examples/question_generation/t5 + path: legacy/examples/question_generation/t5 cmd: python -m paddle.distributed.launch train.py params: - --model_name_or_path=t5-base @@ -17,7 +17,7 @@ case: eval: - name: eval - path: examples/question_generation/t5 + path: legacy/examples/question_generation/t5 cmd: python predict.py params: - --model_name_or_path=./checkpoints/model_2 @@ -39,7 +39,7 @@ case: train: - name: single - path: examples/question_generation/t5 + path: legacy/examples/question_generation/t5 cmd: python -m paddle.distributed.launch train.py params: - --model_name_or_path=t5-base @@ -53,7 +53,7 @@ case: eval: - name: eval - path: examples/question_generation/t5 + path: legacy/examples/question_generation/t5 cmd: python predict.py params: - --model_name_or_path=./checkpoints/model_2 diff --git a/models_restruct/PaddleNLP/cases/examples^question_generation^unimo-text.yaml b/models_restruct/PaddleNLP/cases/examples^question_generation^unimo-text.yaml index b117f20318..18e99ae5a6 100644 --- a/models_restruct/PaddleNLP/cases/examples^question_generation^unimo-text.yaml +++ 
b/models_restruct/PaddleNLP/cases/examples^question_generation^unimo-text.yaml @@ -3,7 +3,7 @@ case: train: - name: multi - path: examples/question_generation/unimo-text + path: legacy/examples/question_generation/unimo-text cmd: python -m paddle.distributed.launch --log_dir ./unimo/finetune/log train.py params: - --dataset_name=dureader_qg @@ -29,7 +29,7 @@ case: eval: - name: eval - path: examples/question_generation/unimo-text + path: legacy/examples/question_generation/unimo-text cmd: python predict.py params: - --model_name_or_path=./unimo/finetune/checkpoints/model_best @@ -47,7 +47,7 @@ case: export: - name: eval - path: examples/question_generation/unimo-text + path: legacy/examples/question_generation/unimo-text cmd: python export_model.py params: - --model_name_or_path ./unimo/finetune/checkpoints/model_best @@ -59,7 +59,7 @@ case: train: - name: single - path: examples/question_generation/unimo-text + path: legacy/examples/question_generation/unimo-text cmd: python -m paddle.distributed.launch --log_dir ./unimo/finetune/log train.py params: - --dataset_name=dureader_qg @@ -85,7 +85,7 @@ case: eval: - name: eval - path: examples/question_generation/unimo-text + path: legacy/examples/question_generation/unimo-text cmd: python predict.py params: - --model_name_or_path=./unimo/finetune/checkpoints/model_best @@ -103,7 +103,7 @@ case: export: - name: eval - path: examples/question_generation/unimo-text + path: legacy/examples/question_generation/unimo-text cmd: python export_model.py params: - --model_name_or_path ./unimo/finetune/checkpoints/model_best diff --git a/models_restruct/PaddleNLP/cases/examples^sentiment_analysis^textcnn.yaml b/models_restruct/PaddleNLP/cases/examples^sentiment_analysis^textcnn.yaml index 62e07ad288..9070bba355 100644 --- a/models_restruct/PaddleNLP/cases/examples^sentiment_analysis^textcnn.yaml +++ b/models_restruct/PaddleNLP/cases/examples^sentiment_analysis^textcnn.yaml @@ -3,15 +3,15 @@ case: train: - name: prepare_data - path: examples/sentiment_analysis/textcnn + path: legacy/examples/sentiment_analysis/textcnn cmd: wget https://bj.bcebos.com/paddlenlp/datasets/RobotChat.tar.gz && tar xvf RobotChat.tar.gz - name: prepare_models - path: examples/sentiment_analysis/textcnn + path: legacy/examples/sentiment_analysis/textcnn cmd: wget https://bj.bcebos.com/paddlenlp/robot_chat_word_dict.txt && wget https://bj.bcebos.com/paddlenlp/models/textcnn.pdparams - name: pretrain - path: examples/sentiment_analysis/textcnn + path: legacy/examples/sentiment_analysis/textcnn cmd: python -m paddle.distributed.launch train.py params: - --vocab_path=./robot_chat_word_dict.txt @@ -27,7 +27,7 @@ case: export: - name: export - path: examples/sentiment_analysis/textcnn + path: legacy/examples/sentiment_analysis/textcnn cmd: python export_model.py params: - --vocab_path=./robot_chat_word_dict.txt @@ -36,7 +36,7 @@ case: predict: - name: predict - path: examples/sentiment_analysis/textcnn + path: legacy/examples/sentiment_analysis/textcnn cmd: python predict.py params: - --vocab_path=./robot_chat_word_dict.txt diff --git a/models_restruct/PaddleNLP/cases/examples^text_correction^ernie-csc.yaml b/models_restruct/PaddleNLP/cases/examples^text_correction^ernie-csc.yaml index c76b985037..8b26f3651e 100644 --- a/models_restruct/PaddleNLP/cases/examples^text_correction^ernie-csc.yaml +++ b/models_restruct/PaddleNLP/cases/examples^text_correction^ernie-csc.yaml @@ -3,18 +3,18 @@ case: train: - name: prepare_dowdnload_data - path: examples/text_correction/ernie-csc + path: 
legacy/examples/text_correction/ernie-csc cmd: python download.py params: - --data_dir ./extra_train_ds/ - --url https://github.com/wdimmy/Automatic-Corpus-Generation/raw/master/corpus/train.sgml - name: trans_xml_txt - path: examples/text_correction/ernie-csc + path: legacy/examples/text_correction/ernie-csc cmd: python change_sgml_to_txt.py -i extra_train_ds/train.sgml -o extra_train_ds/train.txt - name: pretrain - path: examples/text_correction/ernie-csc + path: legacy/examples/text_correction/ernie-csc cmd: python -m paddle.distributed.launch train.py params: - --batch_size 8 @@ -30,7 +30,7 @@ case: export: - name: export - path: examples/text_correction/ernie-csc + path: legacy/examples/text_correction/ernie-csc cmd: python export_model.py params: - --params_path ./checkpoints/best_model.pdparams @@ -38,7 +38,7 @@ case: predict: - name: predict - path: examples/text_correction/ernie-csc + path: legacy/examples/text_correction/ernie-csc cmd: python export_model.py params: - --model_file infer_model/static_graph_params.pdmodel diff --git a/models_restruct/PaddleNLP/cases/examples^text_generation^ctrl.yaml b/models_restruct/PaddleNLP/cases/examples^text_generation^ctrl.yaml index 317ab6513a..4a75eb28f0 100644 --- a/models_restruct/PaddleNLP/cases/examples^text_generation^ctrl.yaml +++ b/models_restruct/PaddleNLP/cases/examples^text_generation^ctrl.yaml @@ -3,7 +3,7 @@ case: train: - name: demo - path: examples/text_generation/ctrl + path: legacy/examples/text_generation/ctrl cmd: python demo.py eval: skipped infer: skipped diff --git a/models_restruct/PaddleNLP/cases/examples^text_generation^opt.yaml b/models_restruct/PaddleNLP/cases/examples^text_generation^opt.yaml index 9aae6ae2c1..3bb3b64596 100644 --- a/models_restruct/PaddleNLP/cases/examples^text_generation^opt.yaml +++ b/models_restruct/PaddleNLP/cases/examples^text_generation^opt.yaml @@ -3,7 +3,7 @@ case: train: - name: demo - path: examples/text_generation/opt + path: legacy/examples/text_generation/opt cmd: python demo.py eval: skipped infer: skipped @@ -14,7 +14,7 @@ case: train: - name: demo - path: examples/text_generation/opt + path: legacy/examples/text_generation/opt cmd: python demo.py eval: skipped infer: skipped diff --git a/models_restruct/PaddleNLP/cases/examples^text_generation^reformer.yaml b/models_restruct/PaddleNLP/cases/examples^text_generation^reformer.yaml index 22d0f78ad5..5db0b4342d 100644 --- a/models_restruct/PaddleNLP/cases/examples^text_generation^reformer.yaml +++ b/models_restruct/PaddleNLP/cases/examples^text_generation^reformer.yaml @@ -3,7 +3,7 @@ case: train: - name: demo - path: examples/text_generation/reformer + path: legacy/examples/text_generation/reformer cmd: python demo.py eval: skipped infer: skipped @@ -14,7 +14,7 @@ case: train: - name: demo - path: examples/text_generation/reformer + path: legacy/examples/text_generation/reformer cmd: python demo.py eval: skipped infer: skipped diff --git a/models_restruct/PaddleNLP/cases/examples^text_graph^erniesage.yaml b/models_restruct/PaddleNLP/cases/examples^text_graph^erniesage.yaml index 987ed22281..5312223197 100644 --- a/models_restruct/PaddleNLP/cases/examples^text_graph^erniesage.yaml +++ b/models_restruct/PaddleNLP/cases/examples^text_graph^erniesage.yaml @@ -3,20 +3,20 @@ case: train: - name: prepare - path: examples/text_graph/erniesage + path: legacy/examples/text_graph/erniesage cmd: python ./preprocessing/dump_graph.py params: - --conf ./config/erniesage_link_prediction.yaml - name: train - path: examples/text_graph/erniesage + 
path: legacy/examples/text_graph/erniesage cmd: python -m paddle.distributed.launch link_prediction.py params: - --conf ./config/erniesage_link_prediction.yaml eval: - name: train - path: examples/text_graph/erniesage + path: legacy/examples/text_graph/erniesage cmd: python -m paddle.distributed.launch link_prediction.py params: - --conf ./config/erniesage_link_prediction.yaml @@ -29,20 +29,20 @@ case: train: - name: prepare - path: examples/text_graph/erniesage + path: legacy/examples/text_graph/erniesage cmd: python ./preprocessing/dump_graph.py params: - --conf ./config/erniesage_link_prediction.yaml - name: train - path: examples/text_graph/erniesage + path: legacy/examples/text_graph/erniesage cmd: python -m paddle.distributed.launch link_prediction.py params: - --conf ./config/erniesage_link_prediction.yaml eval: - name: train - path: examples/text_graph/erniesage + path: legacy/examples/text_graph/erniesage cmd: python -m paddle.distributed.launch link_prediction.py params: - --conf ./config/erniesage_link_prediction.yaml diff --git a/models_restruct/PaddleNLP/cases/examples^text_matching^sentence_transformers.yaml b/models_restruct/PaddleNLP/cases/examples^text_matching^sentence_transformers.yaml index 573e67d4a1..5ce9148cb3 100644 --- a/models_restruct/PaddleNLP/cases/examples^text_matching^sentence_transformers.yaml +++ b/models_restruct/PaddleNLP/cases/examples^text_matching^sentence_transformers.yaml @@ -3,7 +3,7 @@ case: train: - name: train - path: examples/text_matching/sentence_transformers + path: legacy/examples/text_matching/sentence_transformers cmd: python -m paddle.distributed.launch train.py params: - --device gpu @@ -12,7 +12,7 @@ case: eval: - name: eval - path: examples/text_matching/sentence_transformers + path: legacy/examples/text_matching/sentence_transformers cmd: python predict.py params: - --device gpu @@ -25,7 +25,7 @@ case: train: - name: train - path: examples/text_matching/sentence_transformers + path: legacy/examples/text_matching/sentence_transformers cmd: python -m paddle.distributed.launch train.py params: - --device gpu @@ -34,7 +34,7 @@ case: eval: - name: train - path: examples/text_matching/sentence_transformers + path: legacy/examples/text_matching/sentence_transformers cmd: python predict.py params: - --device gpu diff --git a/models_restruct/PaddleNLP/cases/examples^text_matching^simnet.yaml b/models_restruct/PaddleNLP/cases/examples^text_matching^simnet.yaml index 5c67004797..09e793aec2 100644 --- a/models_restruct/PaddleNLP/cases/examples^text_matching^simnet.yaml +++ b/models_restruct/PaddleNLP/cases/examples^text_matching^simnet.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: examples/text_matching/simnet + path: legacy/examples/text_matching/simnet cmd: wget https://bj.bcebos.com/paddlenlp/data/simnet_vocab.txt - name: train - path: examples/text_matching/simnet + path: legacy/examples/text_matching/simnet cmd: python -m paddle.distributed.launch train.py params: - --vocab_path="./simnet_vocab.txt" @@ -20,7 +20,7 @@ case: eval: - name: train - path: examples/text_matching/simnet + path: legacy/examples/text_matching/simnet cmd: python predict.py params: - --vocab_path="./simnet_vocab.txt" @@ -35,11 +35,11 @@ case: train: - name: prepare - path: examples/text_matching/simnet + path: legacy/examples/text_matching/simnet cmd: wget https://bj.bcebos.com/paddlenlp/data/simnet_vocab.txt - name: train - path: examples/text_matching/simnet + path: legacy/examples/text_matching/simnet cmd: python -m paddle.distributed.launch train.py 
params: - --vocab_path="./simnet_vocab.txt" @@ -52,7 +52,7 @@ case: eval: - name: train - path: examples/text_matching/simnet + path: legacy/examples/text_matching/simnet cmd: python predict.py params: - --vocab_path="./simnet_vocab.txt" diff --git a/models_restruct/PaddleNLP/cases/examples^text_summarization^bart.yaml b/models_restruct/PaddleNLP/cases/examples^text_summarization^bart.yaml index c3f977db42..a3a475ce35 100644 --- a/models_restruct/PaddleNLP/cases/examples^text_summarization^bart.yaml +++ b/models_restruct/PaddleNLP/cases/examples^text_summarization^bart.yaml @@ -3,7 +3,7 @@ case: train: - name: multi - path: examples/text_summarization/bart + path: legacy/examples/text_summarization/bart cmd: python -m paddle.distributed.launch run_summarization.py params: - --dataset_name=cnn_dailymail @@ -20,7 +20,7 @@ case: predict: - name: faster - path: examples/text_summarization/bart + path: legacy/examples/text_summarization/bart cmd: python generate.py params: - --dataset_name=cnn_dailymail diff --git a/models_restruct/PaddleNLP/cases/examples^text_to_knowledge^nptag.yaml b/models_restruct/PaddleNLP/cases/examples^text_to_knowledge^nptag.yaml index 398968172b..25f2d2c3a0 100644 --- a/models_restruct/PaddleNLP/cases/examples^text_to_knowledge^nptag.yaml +++ b/models_restruct/PaddleNLP/cases/examples^text_to_knowledge^nptag.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: examples/text_to_knowledge/nptag/ + path: legacy/examples/text_to_knowledge/nptag/ cmd: wget -q https://paddlenlp.bj.bcebos.com/paddlenlp/datasets/nptag_dataset.tar.gz && tar -zxvf nptag_dataset.tar.gz - name: pretrain - path: examples/text_to_knowledge/nptag/ + path: legacy/examples/text_to_knowledge/nptag/ cmd: python -m paddle.distributed.launch train.py params: - --batch_size 64 @@ -20,7 +20,7 @@ case: eval: - name: eval - path: examples/text_to_knowledge/nptag/ + path: legacy/examples/text_to_knowledge/nptag/ cmd: python -m paddle.distributed.launch predict.py params: - --device=gpu @@ -29,7 +29,7 @@ case: export: - name: export - path: examples/text_to_knowledge/nptag/ + path: legacy/examples/text_to_knowledge/nptag/ cmd: python export_model.py params: - --params_path=./output/model_100/model_state.pdparams @@ -37,7 +37,7 @@ case: predict: - name: predict - path: examples/text_to_knowledge/nptag/ + path: legacy/examples/text_to_knowledge/nptag/ cmd: python deploy/python/predict.py params: - --model_dir=./export diff --git a/models_restruct/PaddleNLP/cases/llm^llama_convergence_dy2st.yaml b/models_restruct/PaddleNLP/cases/llm^llama_convergence_dy2st.yaml index ce6f197003..33e955b3c9 100644 --- a/models_restruct/PaddleNLP/cases/llm^llama_convergence_dy2st.yaml +++ b/models_restruct/PaddleNLP/cases/llm^llama_convergence_dy2st.yaml @@ -25,7 +25,7 @@ case: - --learning_rate 0.00001 - --min_learning_rate 0.000005 - --lr_scheduler_type "cosine" - - --max_steps 3000 + - --max_steps 3001 - --save_steps 3000 - --weight_decay 0.01 - --warmup_ratio 0.01 diff --git a/models_restruct/PaddleNLP/cases/llm^llama_convergence_ir.yaml b/models_restruct/PaddleNLP/cases/llm^llama_convergence_ir.yaml index f46ade2955..fac2094a13 100644 --- a/models_restruct/PaddleNLP/cases/llm^llama_convergence_ir.yaml +++ b/models_restruct/PaddleNLP/cases/llm^llama_convergence_ir.yaml @@ -25,7 +25,7 @@ case: - --learning_rate 0.00001 - --min_learning_rate 0.000005 - --lr_scheduler_type "cosine" - - --max_steps 3000 + - --max_steps 3001 - --save_steps 3000 - --weight_decay 0.01 - --warmup_ratio 0.01 diff --git 
a/models_restruct/PaddleNLP/cases/model_zoo^bert-fastdepoly.yaml b/models_restruct/PaddleNLP/cases/model_zoo^bert-fastdepoly.yaml index 5505bd5ef3..e8726767a1 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^bert-fastdepoly.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^bert-fastdepoly.yaml @@ -7,14 +7,14 @@ case: predict: - name: fastdepoly-cpu - path: model_zoo/bert/deploy/python + path: legacy/model_zoo/bert/deploy/python cmd: python seq_cls_infer.py params: - --model_dir ../../infer_model/ - --device cpu --backend paddle - name: fastdepoly-gpu - path: model_zoo/bert/deploy/python + path: legacy/model_zoo/bert/deploy/python cmd: python seq_cls_infer.py params: - --model_dir ../../infer_model/ diff --git a/models_restruct/PaddleNLP/cases/model_zoo^bert.yaml b/models_restruct/PaddleNLP/cases/model_zoo^bert.yaml index 0be9ca9cb9..1eb8b9015a 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^bert.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^bert.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: wget -q https://paddle-qa.bj.bcebos.com/paddlenlp/bert.tar.gz && tar -xzvf bert.tar.gz - name: pretrain - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python -m paddle.distributed.launch run_pretrain.py params: - --model_type bert @@ -28,7 +28,7 @@ case: evaluation: "=" - name: fintune_glue - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python -m paddle.distributed.launch run_glue_trainer.py params: - --model_name_or_path bert-base-uncased @@ -53,7 +53,7 @@ case: evaluation: "=" - name: single_dy2st - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: cd data && wget -q https://bj.bcebos.com/paddlenlp/datasets/benchmark_wikicorpus_en_seqlen128.tar && tar -xf benchmark_wikicorpus_en_seqlen128.tar && cd ..; python run_pretrain.py params: - --max_predictions_per_seq 20 @@ -76,7 +76,7 @@ case: - --amp_level O2 - name: multi_dy2st - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: cd data && wget -q https://bj.bcebos.com/paddlenlp/datasets/benchmark_wikicorpus_en_seqlen128.tar && tar -xf benchmark_wikicorpus_en_seqlen128.tar && cd ..; python -m paddle.distributed.launch run_pretrain.py params: - --max_predictions_per_seq 20 @@ -102,7 +102,7 @@ case: export: - name: export - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python -u ./export_model.py params: - --model_type bert @@ -114,11 +114,11 @@ case: train: - name: prepare - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: wget -q https://paddle-qa.bj.bcebos.com/paddlenlp/bert.tar.gz && tar -xzvf bert.tar.gz - name: pretrain - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python -m paddle.distributed.launch run_pretrain.py params: - --model_type bert @@ -138,7 +138,7 @@ case: evaluation: "=" - name: fintune_glue - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python -m paddle.distributed.launch run_glue.py params: - --model_type bert @@ -160,7 +160,7 @@ case: export: - name: export - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python -u ./export_model.py params: - --model_type bert @@ -169,7 +169,7 @@ case: predict: - name: predict - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python -u ./predict_glue.py params: - --task_name SST2 @@ -179,7 +179,7 @@ case: - --max_seq_length 128 - name: predict_sample_data_SST2 - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python -u ./predict_glue.py params: - --task_name SST2 diff --git 
a/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_dy2st.yaml b/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_dy2st.yaml index 708d8b5fca..ab3264a3d3 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_dy2st.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_dy2st.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare_datasets_seqlen128 - path: model_zoo/bert/ + path: legacy/model_zoo/bert/ cmd: wget https://paddle-qa.bj.bcebos.com/paddlenlp/hdf5_lower_case_1_seq_len_128_max_pred_20_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5.tar.gz && tar -xzvf hdf5_lower_case_1_seq_len_128_max_pred_20_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5.tar.gz - name: dy2st_prim_cinn - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python run_pretrain.py params: - --max_predictions_per_seq 20 @@ -31,7 +31,7 @@ case: - --cinn True - name: dy2st_baseline - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python run_pretrain.py params: - --max_predictions_per_seq 20 diff --git a/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_dy2st_daily.yaml b/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_dy2st_daily.yaml index f682ec6b0b..d547bbb47a 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_dy2st_daily.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_dy2st_daily.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare_datasets_seqlen128 - path: model_zoo/bert/data + path: legacy/model_zoo/bert/data cmd: wget -q https://bj.bcebos.com/paddlenlp/datasets/benchmark_wikicorpus_en_seqlen128.tar && tar -xf benchmark_wikicorpus_en_seqlen128.tar - name: dy2st_baseline - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python run_pretrain.py params: - --max_predictions_per_seq 20 @@ -30,7 +30,7 @@ case: - --amp_level O2 - name: dy2st_cinn - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python run_pretrain.py params: - --max_predictions_per_seq 20 @@ -54,7 +54,7 @@ case: - --cinn True - name: dy2st_prim - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python run_pretrain.py params: - --max_predictions_per_seq 20 @@ -77,7 +77,7 @@ case: - --amp_level O2 - name: dy2st_prim_cinn - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python run_pretrain.py params: - --max_predictions_per_seq 20 diff --git a/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_dy2st_debug.yaml b/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_dy2st_debug.yaml index 1ef30fc424..2bfd2621e6 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_dy2st_debug.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_dy2st_debug.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare_datasets_seqlen128 - path: model_zoo/bert/data + path: legacy/model_zoo/bert/data cmd: wget -q https://bj.bcebos.com/paddlenlp/datasets/benchmark_wikicorpus_en_seqlen128.tar && tar -xf benchmark_wikicorpus_en_seqlen128.tar - name: dy2st_prim_cinn - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python run_pretrain.py params: - --max_predictions_per_seq 20 diff --git a/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_ir.yaml b/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_ir.yaml index a09499d446..5abe787b89 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_ir.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^bert_convergence_ir.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare_datasets_seqlen128 - path: 
model_zoo/bert/ + path: legacy/model_zoo/bert/ cmd: wget https://paddle-qa.bj.bcebos.com/paddlenlp/hdf5_lower_case_1_seq_len_128_max_pred_20_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5.tar.gz && tar -xzvf hdf5_lower_case_1_seq_len_128_max_pred_20_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5.tar.gz - name: baseline - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python run_pretrain.py params: - --max_predictions_per_seq 20 @@ -31,7 +31,7 @@ case: - --cinn True - name: ir - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python run_pretrain.py params: - --max_predictions_per_seq 20 diff --git a/models_restruct/PaddleNLP/cases/model_zoo^bert_ernie_gpt_convergence.yaml b/models_restruct/PaddleNLP/cases/model_zoo^bert_ernie_gpt_convergence.yaml index 88f506f46e..82dac8f43b 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^bert_ernie_gpt_convergence.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^bert_ernie_gpt_convergence.yaml @@ -3,11 +3,11 @@ case: train: - name: bert_prepare - path: model_zoo/bert/data + path: legacy/model_zoo/bert/data cmd: wget -q https://bj.bcebos.com/paddlenlp/datasets/benchmark_wikicorpus_en_seqlen128.tar && tar -xf benchmark_wikicorpus_en_seqlen128.tar - name: bert_pretrain - path: model_zoo/bert + path: legacy/model_zoo/bert cmd: python run_pretrain.py params: - --max_predictions_per_seq 20 @@ -52,11 +52,11 @@ case: - --num_workers=0 - name: gpt_prepare - path: model_zoo/gpt + path: legacy/model_zoo/gpt cmd: mkdir data && cd data && wget https://paddlenlp.bj.bcebos.com/models/transformers/gpt/data/gpt_en_dataset_300m_idx.npz && wget https://paddlenlp.bj.bcebos.com/models/transformers/gpt/data/gpt_en_dataset_300m_ids.npy - name: gpt_pretrain - path: model_zoo/gpt + path: legacy/model_zoo/gpt cmd: python run_pretrain.py params: - --model_type gpt diff --git a/models_restruct/PaddleNLP/cases/model_zoo^bert_static.yaml b/models_restruct/PaddleNLP/cases/model_zoo^bert_static.yaml index ebb43a1cef..07279b13e3 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^bert_static.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^bert_static.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: model_zoo/bert/static + path: legacy/model_zoo/bert/static cmd: wget -q https://paddle-qa.bj.bcebos.com/paddlenlp/bert_static.tar.gz && tar -xzvf bert_static.tar.gz - name: pretrain - path: model_zoo/bert/static + path: legacy/model_zoo/bert/static cmd: python -m paddle.distributed.launch run_pretrain.py params: - --model_type bert @@ -26,7 +26,7 @@ case: evaluation: "=" - name: fintune - path: model_zoo/bert/static + path: legacy/model_zoo/bert/static cmd: python -m paddle.distributed.launch run_glue.py params: - --model_type bert @@ -48,7 +48,7 @@ case: predict: - name: predict - path: model_zoo/bert/static + path: legacy/model_zoo/bert/static cmd: python -u ./predict_glue.py params: - --task_name SST-2 diff --git a/models_restruct/PaddleNLP/cases/model_zoo^electra.yaml b/models_restruct/PaddleNLP/cases/model_zoo^electra.yaml index 89a3f3f03d..31220310fa 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^electra.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^electra.yaml @@ -3,7 +3,7 @@ case: train: - name: prepare - path: model_zoo/electra + path: legacy/legacy/model_zoo/electra cmd: wget -q https://paddle-qa.bj.bcebos.com/paddlenlp/BookCorpus.tar.gz && tar -xzvf BookCorpus.tar.gz result: exit_code: @@ -12,7 +12,7 @@ case: evaluation: "=" - name: pretrain - path: model_zoo/electra + path: legacy/legacy/model_zoo/electra cmd: 
python -u ./run_pretrain.py params: - --model_type electra @@ -37,13 +37,13 @@ case: evaluation: "=" - name: get_fintune_model - path: model_zoo/electra + path: legacy/legacy/model_zoo/electra cmd: python -u ./get_ft_model.py params: - --model_dir ./pretrain_model/model_1.pdparams/ - name: fintune_pretarined - path: model_zoo/electra + path: legacy/legacy/model_zoo/electra cmd: python -u ./run_glue.py params: - --model_type electra @@ -60,7 +60,7 @@ case: - --device gpu - name: fintune_local - path: model_zoo/electra + path: legacy/legacy/model_zoo/electra cmd: python -u ./run_glue.py params: - --model_type electra @@ -81,7 +81,7 @@ case: export: - name: export - path: model_zoo/electra + path: legacy/legacy/model_zoo/electra cmd: python -u ./export_model.py params: - --input_model_dir ./SST-2/sst-2_ft_model_1.pdparams/ @@ -90,7 +90,7 @@ case: predict: - name: predict - path: model_zoo/electra/deploy/python + path: legacy/legacy/model_zoo/electra/deploy/python cmd: python -u ./predict.py params: - --model_file ../../electra-deploy.pdmodel @@ -104,7 +104,7 @@ case: train: - name: prepare - path: model_zoo/electra + path: legacy/legacy/model_zoo/electra cmd: wget -q https://paddle-qa.bj.bcebos.com/paddlenlp/BookCorpus.tar.gz && tar -xzvf BookCorpus.tar.gz result: exit_code: @@ -113,7 +113,7 @@ case: evaluation: "=" - name: pretrain - path: model_zoo/electra + path: legacy/legacy/model_zoo/electra cmd: python -u ./run_pretrain.py params: - --model_type electra @@ -138,13 +138,13 @@ case: evaluation: "=" - name: get_fintune_model - path: model_zoo/electra + path: legacy/legacy/model_zoo/electra cmd: python -u ./get_ft_model.py params: - --model_dir ./pretrain_model/model_1.pdparams/ - name: fintune_pretarined - path: model_zoo/electra + path: legacy/legacy/model_zoo/electra cmd: python -u ./run_glue.py params: - --model_type electra @@ -161,7 +161,7 @@ case: - --device gpu - name: fintune_local - path: model_zoo/electra + path: legacy/legacy/model_zoo/electra cmd: python -u ./run_glue.py params: - --model_type electra @@ -182,7 +182,7 @@ case: export: - name: export - path: model_zoo/electra + path: legacy/legacy/model_zoo/electra cmd: python -u ./export_model.py params: - --input_model_dir ./SST-2/sst-2_ft_model_1.pdparams/ @@ -191,7 +191,7 @@ case: predict: - name: predict - path: model_zoo/electra/deploy/python + path: legacy/legacy/model_zoo/electra/deploy/python cmd: python -u ./predict.py params: - --model_file ../../electra-deploy.pdmodel diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-1.0.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-1.0.yaml index 6fdeb78e30..687028e163 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^ernie-1.0.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-1.0.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: model_zoo/ernie-1.0/ + path: legacy/model_zoo/ernie-1.0/ cmd: mkdir data && cd data && wget -q https://paddlenlp.bj.bcebos.com/models/transformers/data_tools/wudao_200g_sample_ernie-1.0-base-zh_ids.npy && wget -q https://paddlenlp.bj.bcebos.com/models/transformers/data_tools/wudao_200g_sample_ernie-1.0-base-zh_idx.npz - name: pretrain - path: model_zoo/ernie-1.0/ + path: legacy/model_zoo/ernie-1.0/ cmd: python -m paddle.distributed.launch --log_dir ./log run_pretrain.py params: - --model_type "ernie" diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-1.0_static.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-1.0_static.yaml index bedb0ba4f1..3cdc1f7d34 100644 --- 
a/models_restruct/PaddleNLP/cases/model_zoo^ernie-1.0_static.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-1.0_static.yaml @@ -3,7 +3,7 @@ case: train: - name: static - path: model_zoo/ernie-1.0/ + path: legacy/model_zoo/ernie-1.0/ cmd: python -u -m paddle.distributed.launch --log_dir "./log" run_pretrain_static.py params: - --model_type "ernie" diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-3.0-fastdepoly.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-3.0-fastdepoly.yaml index f953f18ec8..054ff2a05c 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^ernie-3.0-fastdepoly.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-3.0-fastdepoly.yaml @@ -3,21 +3,21 @@ case: train: - name: FD-GPU - path: model_zoo/ernie-3.0/deploy/python/ + path: legacy/model_zoo/ernie-3.0/deploy/python/ cmd: python seq_cls_infer.py params: - --model_dir ../../best_models/afqmc/export - --device gpu --backend paddle - name: FD-CPU - path: model_zoo/ernie-3.0/deploy/python/ + path: legacy/model_zoo/ernie-3.0/deploy/python/ cmd: python seq_cls_infer.py params: - --model_dir ../../best_models/afqmc/export - --device cpu --backend paddle - name: Qunt-GPU - path: model_zoo/ernie-3.0/deploy/python/ + path: legacy/model_zoo/ernie-3.0/deploy/python/ cmd: python seq_cls_infer.py params: - --model_dir ../../best_models/afqmc/width_mult_0.75/mse16_1/ @@ -26,7 +26,7 @@ case: - --model_prefix int8 - name: Qunt-CPU - path: model_zoo/ernie-3.0/deploy/python/ + path: legacy/model_zoo/ernie-3.0/deploy/python/ cmd: python seq_cls_infer.py params: - --model_dir ../../best_models/afqmc/width_mult_0.75/mse16_1/ diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-3.0.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-3.0.yaml index d252627169..e7a321555d 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^ernie-3.0.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-3.0.yaml @@ -3,7 +3,7 @@ case: train: - name: seq_cls - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python run_seq_cls.py params: - --model_name_or_path ernie-3.0-medium-zh @@ -18,7 +18,7 @@ case: - --save_step 2 - name: token_cls - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python run_token_cls.py params: - --model_name_or_path ernie-3.0-medium-zh @@ -33,7 +33,7 @@ case: - --save_step 2 - name: qa - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python run_qa.py params: - --model_name_or_path ernie-3.0-medium-zh @@ -48,7 +48,7 @@ case: - --save_step 2 - name: compress_seq_cls - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python compress_seq_cls.py params: - --model_name_or_path best_models/afqmc/ @@ -57,7 +57,7 @@ case: - --config configs/default.yml - name: compress_token_cls - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python compress_token_cls.py params: - --model_name_or_path best_models/msra_ner/ @@ -66,7 +66,7 @@ case: - --config configs/default.yml - name: compress_qa - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python compress_qa.py params: - --model_name_or_path best_models/cmrc2018/ @@ -76,7 +76,7 @@ case: eval: - name: seq_cls - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python run_seq_cls.py params: - --model_name_or_path best_models/afqmc/ @@ -91,7 +91,7 @@ case: evaluation: "=" - name: token_cls - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python run_token_cls.py params: - --model_name_or_path 
best_models/msra_ner/ @@ -106,7 +106,7 @@ case: evaluation: "=" - name: qa - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python run_qa.py params: - --model_name_or_path best_models/cmrc2018/ @@ -126,7 +126,7 @@ case: train: - name: seq_cls - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python run_seq_cls.py params: - --model_name_or_path ernie-3.0-medium-zh @@ -145,7 +145,7 @@ case: evaluation: "=" - name: token_cls - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python run_token_cls.py params: - --model_name_or_path ernie-3.0-medium-zh @@ -165,7 +165,7 @@ case: evaluation: "=" - name: qa - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python run_qa.py params: - --model_name_or_path best_models/cmrc2018/ @@ -182,7 +182,7 @@ case: evaluation: "=" - name: compress_seq_cls - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python compress_seq_cls.py params: - --model_name_or_path best_models/afqmc/ @@ -196,7 +196,7 @@ case: evaluation: "=" - name: compress_token_cls - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python compress_token_cls.py params: - --model_name_or_path best_models/msra_ner/ @@ -210,7 +210,7 @@ case: evaluation: "=" - name: compress_qa - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python compress_qa.py params: - --model_name_or_path best_models/cmrc2018/ @@ -225,7 +225,7 @@ case: eval: - name: seq_cls - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python run_seq_cls.py params: - --model_name_or_path best_models/afqmc/ @@ -240,7 +240,7 @@ case: evaluation: "=" - name: token_cls - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python run_token_cls.py params: - --model_name_or_path best_models/msra_ner/ @@ -255,7 +255,7 @@ case: evaluation: "=" - name: qa - path: model_zoo/ernie-3.0/ + path: legacy/model_zoo/ernie-3.0/ cmd: python run_qa.py params: - --model_name_or_path best_models/cmrc2018/ diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-doc.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-doc.yaml index b393ffde85..1949127a57 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^ernie-doc.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-doc.yaml @@ -3,7 +3,7 @@ case: train: - name: classifier - path: model_zoo/ernie-doc + path: legacy/model_zoo/ernie-doc cmd: python -m paddle.distributed.launch --log_dir hyp run_classifier.py params: - --epochs 15 @@ -16,7 +16,7 @@ case: - --output_dir hyp - name: mrc - path: model_zoo/ernie-doc + path: legacy/model_zoo/ernie-doc cmd: python -m paddle.distributed.launch --log_dir cmrc2018 run_mrc.py params: - --batch_size 2 @@ -29,7 +29,7 @@ case: - --output_dir cmrc2018 - name: mcq - path: model_zoo/ernie-doc/ + path: legacy/model_zoo/ernie-doc/ cmd: python -m paddle.distributed.launch --log_dir c3 run_mcq.py params: - --learning_rate 6.5e-5 @@ -39,7 +39,7 @@ case: - --output_dir c3 - name: semantic_matching - path: model_zoo/ernie-doc/ + path: legacy/model_zoo/ernie-doc/ cmd: python -m paddle.distributed.launch --log_dir cail run_semantic_matching.py params: - --epochs 1 @@ -51,7 +51,7 @@ case: - --output_dir cail - name: sequence_labeling - path: model_zoo/ernie-doc/ + path: legacy/model_zoo/ernie-doc/ cmd: python -m paddle.distributed.launch --log_dir msra run_sequence_labeling.py params: - --learning_rate 3e-5 @@ -66,7 +66,7 @@ case: evaluation: "=" - name: dureader_robust - path: model_zoo/ernie-doc/ + path: 
legacy/model_zoo/ernie-doc/ cmd: python -m paddle.distributed.launch --log_dir dureader_robust run_mrc.py params: - --model_name_or_path ernie-doc-base-zh @@ -89,7 +89,7 @@ case: train: - name: classifier - path: model_zoo/ernie-doc + path: legacy/model_zoo/ernie-doc cmd: python -m paddle.distributed.launch --log_dir hyp run_classifier.py params: - --epochs 15 @@ -107,7 +107,7 @@ case: evaluation: "=" - name: mrc - path: model_zoo/ernie-doc + path: legacy/model_zoo/ernie-doc cmd: python -m paddle.distributed.launch --log_dir cmrc2018 run_mrc.py params: - --batch_size 2 @@ -125,7 +125,7 @@ case: evaluation: "=" - name: mcq - path: model_zoo/ernie-doc/ + path: legacy/model_zoo/ernie-doc/ cmd: python -m paddle.distributed.launch --log_dir c3 run_mcq.py params: - --learning_rate 6.5e-5 @@ -140,7 +140,7 @@ case: evaluation: "=" - name: semantic_matching - path: model_zoo/ernie-doc/ + path: legacy/model_zoo/ernie-doc/ cmd: python -m paddle.distributed.launch --log_dir cail run_semantic_matching.py params: - --epochs 1 @@ -157,7 +157,7 @@ case: evaluation: "=" - name: sequence_labeling - path: model_zoo/ernie-doc/ + path: legacy/model_zoo/ernie-doc/ cmd: python -m paddle.distributed.launch --log_dir msra run_sequence_labeling.py params: - --learning_rate 3e-5 @@ -173,7 +173,7 @@ case: evaluation: "=" - name: dureader_robust - path: model_zoo/ernie-doc/ + path: legacy/model_zoo/ernie-doc/ cmd: python -m paddle.distributed.launch --log_dir dureader_robust run_mrc.py params: - --model_name_or_path ernie-doc-base-zh diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-gen.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-gen.yaml index ba759c6bf9..f4b9ba6ee1 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^ernie-gen.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-gen.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: model_zoo/ernie-gen + path: legacy/model_zoo/ernie-gen cmd: wget --no-check-certificate https://bj.bcebos.com/paddlenlp/datasets/poetry.tar.gz && tar xvf poetry.tar.gz - name: pretrain - path: model_zoo/ernie-gen + path: legacy/model_zoo/ernie-gen cmd: python -m paddle.distributed.launch train.py params: - --model_name_or_path ernie-1.0 @@ -29,7 +29,7 @@ case: eval: - name: evaluate - path: model_zoo/ernie-gen + path: legacy/model_zoo/ernie-gen cmd: python -u ./eval.py params: - --model_name_or_path ernie-1.0 @@ -41,7 +41,7 @@ case: infer: - name: evaluate - path: model_zoo/ernie-gen + path: legacy/model_zoo/ernie-gen cmd: python -u ./predict.py params: - --model_name_or_path ernie-1.0 @@ -57,11 +57,11 @@ case: train: - name: prepare - path: model_zoo/ernie-gen + path: legacy/model_zoo/ernie-gen cmd: wget --no-check-certificate https://bj.bcebos.com/paddlenlp/datasets/poetry.tar.gz && tar xvf poetry.tar.gz - name: pretrain - path: model_zoo/ernie-gen + path: legacy/model_zoo/ernie-gen cmd: python -m paddle.distributed.launch train.py params: - --model_name_or_path ernie-1.0 @@ -83,7 +83,7 @@ case: eval: - name: evaluate - path: model_zoo/ernie-gen + path: legacy/model_zoo/ernie-gen cmd: python -u ./eval.py params: - --model_name_or_path ernie-1.0 @@ -95,7 +95,7 @@ case: infer: - name: evaluate - path: model_zoo/ernie-gen + path: legacy/model_zoo/ernie-gen cmd: python -u ./predict.py params: - --model_name_or_path ernie-1.0 diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-health.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-health.yaml index 4339e1e9b3..e855e4fbb5 100644 --- 
a/models_restruct/PaddleNLP/cases/model_zoo^ernie-health.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-health.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: model_zoo/ernie-health + path: legacy/model_zoo/ernie-health cmd: python preprocess.py --input_path ./raw_data/ --output_file ./data/samples --tokenize_tool lac --num_worker 8 - name: pretrain - path: model_zoo/ernie-health + path: legacy/model_zoo/ernie-health cmd: python -m paddle.distributed.launch run_pretrain.py params: - --input_dir ./data @@ -29,7 +29,7 @@ case: evaluation: "=" - name: trainer - path: model_zoo/ernie-health + path: legacy/model_zoo/ernie-health cmd: python -u -m paddle.distributed.launch run_pretrain_trainer.py params: - --input_dir "./data" diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-health_cblue.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-health_cblue.yaml index 9bfb18f398..e4b705fdda 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^ernie-health_cblue.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-health_cblue.yaml @@ -3,7 +3,7 @@ case: train: - name: classification - path: model_zoo/ernie-health/cblue + path: legacy/model_zoo/ernie-health/cblue cmd: python -m paddle.distributed.launch train_classification.py params: - --dataset CHIP-CDN-2C @@ -22,7 +22,7 @@ case: evaluation: "=" - name: ner - path: model_zoo/ernie-health/cblue + path: legacy/model_zoo/ernie-health/cblue cmd: python -m paddle.distributed.launch train_ner.py params: - --batch_size 32 @@ -40,7 +40,7 @@ case: evaluation: "=" - name: spo - path: model_zoo/ernie-health/cblue + path: legacy/model_zoo/ernie-health/cblue cmd: python -m paddle.distributed.launch train_spo.py params: - --batch_size 12 @@ -61,7 +61,7 @@ case: export: - name: classification - path: model_zoo/ernie-health/cblue + path: legacy/model_zoo/ernie-health/cblue cmd: python export_model.py params: - --train_dataset CHIP-CDN-2C @@ -74,7 +74,7 @@ case: evaluation: "=" - name: ner - path: model_zoo/ernie-health/cblue + path: legacy/model_zoo/ernie-health/cblue cmd: python export_model.py params: - --train_dataset CMeIE @@ -87,7 +87,7 @@ case: evaluation: "=" - name: spo - path: model_zoo/ernie-health/cblue + path: legacy/model_zoo/ernie-health/cblue cmd: python export_model.py params: - --train_dataset CMeEE diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-layout.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-layout.yaml index 06a5440fdb..8fa72837cd 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^ernie-layout.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-layout.yaml @@ -3,7 +3,7 @@ case: train: - name: classification - path: model_zoo/ernie-layout + path: legacy/model_zoo/ernie-layout cmd: python -u run_cls.py params: - --model_name_or_path ernie-layoutx-base-uncased @@ -39,7 +39,7 @@ case: evaluation: "=" - name: ner - path: model_zoo/ernie-layout + path: legacy/model_zoo/ernie-layout cmd: python -u run_ner.py params: - --model_name_or_path ernie-layoutx-base-uncased @@ -73,7 +73,7 @@ case: evaluation: "=" - name: mrc - path: model_zoo/ernie-layout + path: legacy/model_zoo/ernie-layout cmd: python -u run_mrc.py params: - --model_name_or_path ernie-layoutx-base-uncased @@ -114,7 +114,7 @@ case: export: - name: classification - path: model_zoo/ernie-layout + path: legacy/model_zoo/ernie-layout cmd: python export_model.py params: - --task_type cls @@ -122,7 +122,7 @@ case: - --output_path ./cls_export - name: mrc - path: model_zoo/ernie-layout + path: 
legacy/model_zoo/ernie-layout cmd: python export_model.py params: - --task_type mrc @@ -135,7 +135,7 @@ case: evaluation: "=" - name: ner - path: model_zoo/ernie-layout + path: legacy/model_zoo/ernie-layout cmd: python export_model.py params: - --task_type ner @@ -149,11 +149,11 @@ case: predict: - name: prepare - path: model_zoo/ernie-layout/deploy/python + path: legacy/model_zoo/ernie-layout/deploy/python cmd: wget https://bj.bcebos.com/paddlenlp/datasets/document_intelligence/images.zip && unzip images.zip - name: classification - path: model_zoo/ernie-layout/deploy/python + path: legacy/model_zoo/ernie-layout/deploy/python cmd: python infer.py params: - --model_path_prefix ../../cls_export/inference @@ -162,7 +162,7 @@ case: - --batch_size 8 - name: ner - path: model_zoo/ernie-layout/deploy/python + path: legacy/model_zoo/ernie-layout/deploy/python cmd: python infer.py params: - --model_path_prefix ../../ner_export/inference @@ -171,7 +171,7 @@ case: - --batch_size 8 - name: mrc - path: model_zoo/ernie-layout/deploy/python + path: legacy/model_zoo/ernie-layout/deploy/python cmd: python infer.py params: - --model_path_prefix ../../mrc_export/inference @@ -183,7 +183,7 @@ case: train: - name: classification - path: model_zoo/ernie-layout + path: legacy/model_zoo/ernie-layout cmd: python -u run_cls.py params: - --model_name_or_path ernie-layoutx-base-uncased @@ -219,7 +219,7 @@ case: evaluation: "=" - name: ner - path: model_zoo/ernie-layout + path: legacy/model_zoo/ernie-layout cmd: python -u run_ner.py params: - --model_name_or_path ernie-layoutx-base-uncased @@ -253,7 +253,7 @@ case: evaluation: "=" - name: mrc - path: model_zoo/ernie-layout + path: legacy/model_zoo/ernie-layout cmd: python -u run_mrc.py params: - --model_name_or_path ernie-layoutx-base-uncased @@ -294,7 +294,7 @@ case: export: - name: classification - path: model_zoo/ernie-layout + path: legacy/model_zoo/ernie-layout cmd: python export_model.py params: - --task_type cls @@ -302,7 +302,7 @@ case: - --output_path ./cls_export - name: mrc - path: model_zoo/ernie-layout + path: legacy/model_zoo/ernie-layout cmd: python export_model.py params: - --task_type mrc @@ -315,7 +315,7 @@ case: evaluation: "=" - name: ner - path: model_zoo/ernie-layout + path: legacy/model_zoo/ernie-layout cmd: python export_model.py params: - --task_type ner @@ -329,11 +329,11 @@ case: predict: - name: prepare - path: model_zoo/ernie-layout/deploy/python + path: legacy/model_zoo/ernie-layout/deploy/python cmd: wget https://bj.bcebos.com/paddlenlp/datasets/document_intelligence/images.zip && unzip images.zip - name: classification - path: model_zoo/ernie-layout/deploy/python + path: legacy/model_zoo/ernie-layout/deploy/python cmd: python infer.py params: - --model_path_prefix ../../cls_export/inference @@ -342,7 +342,7 @@ case: - --batch_size 8 - name: ner - path: model_zoo/ernie-layout/deploy/python + path: legacy/model_zoo/ernie-layout/deploy/python cmd: python infer.py params: - --model_path_prefix ../../ner_export/inference @@ -351,7 +351,7 @@ case: - --batch_size 8 - name: mrc - path: model_zoo/ernie-layout/deploy/python + path: legacy/model_zoo/ernie-layout/deploy/python cmd: python infer.py params: - --model_path_prefix ../../mrc_export/inference diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-m-fastdepoly.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-m-fastdepoly.yaml index 8fa3a7b1da..cf6da55563 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^ernie-m-fastdepoly.yaml +++ 
b/models_restruct/PaddleNLP/cases/model_zoo^ernie-m-fastdepoly.yaml @@ -3,7 +3,7 @@ case: train: - name: FD-GPU - path: model_zoo/ernie-m/deploy/python + path: legacy/model_zoo/ernie-m/deploy/python cmd: python seq_cls_infer.py params: - --model_dir ../../finetuned_models/export/model @@ -11,7 +11,7 @@ case: - --backend paddle - name: FD-CPU - path: model_zoo/ernie-m/deploy/python + path: legacy/model_zoo/ernie-m/deploy/python cmd: python seq_cls_infer.py params: - --model_dir ../../finetuned_models/export/ diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-m.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-m.yaml index dc96d6b474..90b13f2335 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^ernie-m.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-m.yaml @@ -3,7 +3,7 @@ case: train: - name: run_classifier_clt - path: model_zoo/ernie-m/ + path: legacy/model_zoo/ernie-m/ cmd: python -m paddle.distributed.launch --log_dir output_clt run_classifier.py params: - --do_train @@ -29,7 +29,7 @@ case: evaluation: "=" - name: run_classifier_tta - path: model_zoo/ernie-m/ + path: legacy/model_zoo/ernie-m/ cmd: python -m paddle.distributed.launch --log_dir output_tta run_classifier.py params: - --do_train diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-tiny-fastdeploy.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-tiny-fastdeploy.yaml index 1765e40a1f..0e5531b1a2 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^ernie-tiny-fastdeploy.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-tiny-fastdeploy.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: model_zoo/ernie-tiny/data + path: legacy/model_zoo/ernie-tiny/data cmd: wget https://paddle-qa.bj.bcebos.com/paddlenlp/ernie-tiny.tar.gz && tar -xzvf ernie-tiny.tar.gz && cp -r ernie-tiny/* ./ - name: fintune - path: model_zoo/ernie-tiny/ + path: legacy/model_zoo/ernie-tiny/ cmd: python run_train.py params: - --device gpu @@ -39,7 +39,7 @@ case: evaluation: "=" - name: compress - path: model_zoo/ernie-tiny/ + path: legacy/model_zoo/ernie-tiny/ cmd: python run_train.py params: - --do_compress @@ -75,7 +75,7 @@ case: eval: - name: eval - path: model_zoo/ernie-tiny/ + path: legacy/model_zoo/ernie-tiny/ cmd: python run_eval.py params: - --device gpu @@ -98,51 +98,51 @@ case: predict: - name: deploy_python_prepare - path: model_zoo/ernie-tiny/output/BS64_LR5e-5_EPOCHS30/ + path: legacy/model_zoo/ernie-tiny/output/BS64_LR5e-5_EPOCHS30/ cmd: cp -r width_mult_0.75/* ./ - name: deploy_python_install_fastdeploy - path: model_zoo/ernie-tiny/output/BS64_LR5e-5_EPOCHS30/ + path: legacy/model_zoo/ernie-tiny/output/BS64_LR5e-5_EPOCHS30/ cmd: python -m pip install fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html - name: fastdeploy_python_gpu - path: model_zoo/ernie-tiny/deploy/python + path: legacy/model_zoo/ernie-tiny/deploy/python cmd: python infer_demo.py --device gpu --backend paddle --model_dir ../../output/BS64_LR5e-5_EPOCHS30 --slot_label_path ../../data/slot_label.txt --intent_label_path ../../data/intent_label.txt - name: fastdeploy_python_cpu - path: model_zoo/ernie-tiny/deploy/python + path: legacy/model_zoo/ernie-tiny/deploy/python cmd: python infer_demo.py --device cpu --backend paddle --model_dir ../../output/BS64_LR5e-5_EPOCHS30 --slot_label_path ../../data/slot_label.txt --intent_label_path ../../data/intent_label.txt - name: deploy_python_compress_gpu - path: model_zoo/ernie-tiny/deploy/python + path: legacy/model_zoo/ernie-tiny/deploy/python cmd: python 
infer_demo.py --device gpu --backend paddle_tensorrt --model_prefix int8 --model_dir ../../output/BS64_LR5e-5_EPOCHS30/ --slot_label_path ../../data/slot_label.txt --intent_label_path ../../data/intent_label.txt - name: deploy_python_compress_cpu - path: model_zoo/ernie-tiny/deploy/python + path: legacy/model_zoo/ernie-tiny/deploy/python cmd: python infer_demo.py --device cpu --backend paddle_tensorrt --model_prefix int8 --model_dir ../../output/BS64_LR5e-5_EPOCHS30/ --slot_label_path ../../data/slot_label.txt --intent_label_path ../../data/intent_label.txt - name: deploy_cpp_prepare - path: model_zoo/ernie-tiny/deploy/cpp + path: legacy/model_zoo/ernie-tiny/deploy/cpp cmd: wget -q https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-gpu-1.0.2.tgz && tar -xzvf fastdeploy-linux-x64-gpu-1.0.2.tgz && mkdir build - name: deploy_cpp_build - path: model_zoo/ernie-tiny/deploy/cpp/build + path: legacy/model_zoo/ernie-tiny/deploy/cpp/build cmd: cmake .. -DFASTDEPLOY_INSTALL_DIR=./fastdeploy-linux-x64-gpu-1.0.2 && make -j - name: deploy_cpp_gpu - path: model_zoo/ernie-tiny/deploy/cpp/build + path: legacy/model_zoo/ernie-tiny/deploy/cpp/build cmd: ./infer_demo --device gpu --backend paddle --model_dir ../../../output/BS64_LR5e-5_EPOCHS30 --slot_label_path ../../../data/slot_label.txt --intent_label_path ../../../data/intent_label.txt - name: deploy_cpp_cpu - path: model_zoo/ernie-tiny/deploy/cpp/build + path: legacy/model_zoo/ernie-tiny/deploy/cpp/build cmd: ./infer_demo --device cpu --backend paddle --model_dir ../../../output/BS64_LR5e-5_EPOCHS30 --slot_label_path ../../../data/slot_label.txt --intent_label_path ../../../data/intent_label.txt - name: deploy_cpp_compress_gpu - path: model_zoo/ernie-tiny/deploy/cpp/build + path: legacy/model_zoo/ernie-tiny/deploy/cpp/build cmd: ./infer_demo --device gpu --backend paddle_tensorrt --model_prefix int8 --model_dir ../../../output/BS64_LR5e-5_EPOCHS30 --slot_label_path ../../../data/slot_label.txt --intent_label_path ../../../data/intent_label.txt - name: deploy_cpp_compress_cpu - path: model_zoo/ernie-tiny/deploy/cpp/build + path: legacy/model_zoo/ernie-tiny/deploy/cpp/build cmd: ./infer_demo --device cpu --backend paddle_tensorrt --model_prefix int8 --model_dir ../../../output/BS64_LR5e-5_EPOCHS30 --slot_label_path ../../../data/slot_label.txt --intent_label_path ../../../data/intent_label.txt windows: diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-tiny.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-tiny.yaml index f3855922f0..0d735e7de6 100644 --- a/models_restruct/PaddleNLP/cases/model_zoo^ernie-tiny.yaml +++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-tiny.yaml @@ -3,11 +3,11 @@ case: train: - name: prepare - path: model_zoo/ernie-tiny/data + path: legacy/model_zoo/ernie-tiny/data cmd: wget https://paddle-qa.bj.bcebos.com/paddlenlp/ernie-tiny.tar.gz && tar -xzvf ernie-tiny.tar.gz && cp -r ernie-tiny/* ./ - name: fintune - path: model_zoo/ernie-tiny/ + path: legacy/model_zoo/ernie-tiny/ cmd: python run_train.py params: - --device gpu @@ -39,7 +39,7 @@ case: evaluation: "=" - name: compress - path: model_zoo/ernie-tiny/ + path: legacy/model_zoo/ernie-tiny/ cmd: python run_train.py params: - --do_compress @@ -75,7 +75,7 @@ case: eval: - name: eval - path: model_zoo/ernie-tiny/ + path: legacy/model_zoo/ernie-tiny/ cmd: python run_eval.py params: - --device gpu diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-vil2.0.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-vil2.0.yaml index 
diff --git a/models_restruct/PaddleNLP/cases/model_zoo^ernie-vil2.0.yaml b/models_restruct/PaddleNLP/cases/model_zoo^ernie-vil2.0.yaml
index b5d86ee2ac..71572fcedb 100644
--- a/models_restruct/PaddleNLP/cases/model_zoo^ernie-vil2.0.yaml
+++ b/models_restruct/PaddleNLP/cases/model_zoo^ernie-vil2.0.yaml
@@ -3,11 +3,11 @@ case:
     train:
       - name: prepare
-        path: model_zoo/ernie-vil2.0
+        path: legacy/model_zoo/ernie-vil2.0
         cmd: wget https://paddlenlp.bj.bcebos.com/tests/Flickr30k-CN-small.zip && unzip Flickr30k-CN-small.zip
       - name: multi
-        path: model_zoo/ernie-vil2.0
+        path: legacy/model_zoo/ernie-vil2.0
         cmd: python -u -m paddle.distributed.launch --log_dir train_log run_finetune.py
         params:
           - --output_dir output_pd
@@ -32,7 +32,7 @@ case:
     eval:
       - name: eval
-        path: model_zoo/ernie-vil2.0
+        path: legacy/model_zoo/ernie-vil2.0
         cmd: python -u extract_features.py
         params:
           - --extract-image-feats
@@ -46,7 +46,7 @@ case:
     infer:
       - name: infer_make_topk
-        path: model_zoo/ernie-vil2.0
+        path: legacy/model_zoo/ernie-vil2.0
         cmd: python -u extract_features.py
         params:
           - --image-feats="Flickr30k-CN/valid_imgs.img_feat.jsonl"
@@ -56,14 +56,14 @@ case:
           - --output="$Flickr30k-CN/valid_predictions.jsonl"
       - name: infer_transform_ir_annotation_to_tr
-        path: model_zoo/ernie-vil2.0
+        path: legacy/model_zoo/ernie-vil2.0
         cmd: python utils/transform_ir_annotation_to_tr.py
         params:
           - --input Flickr30k-CN/valid_texts.jsonl
     export:
       - name: export
-        path: model_zoo/ernie-vil2.0
+        path: legacy/model_zoo/ernie-vil2.0
         cmd: export_model.py
         params:
           - --model_path=output_pd/checkpoint-2/
diff --git a/models_restruct/PaddleNLP/cases/model_zoo^gpt.yaml b/models_restruct/PaddleNLP/cases/model_zoo^gpt.yaml
index 6d4ed11bb1..255325b0cd 100644
--- a/models_restruct/PaddleNLP/cases/model_zoo^gpt.yaml
+++ b/models_restruct/PaddleNLP/cases/model_zoo^gpt.yaml
@@ -3,15 +3,15 @@ case:
     train:
       - name: data_tools
-        path: model_zoo/ernie-1.0/data_tools
+        path: legacy/model_zoo/ernie-1.0/data_tools
         cmd: sed -i "s/python3/python/g" Makefile && sed -i "s/python-config/python3.7m-config/g" Makefile
       - name: prepare_data
-        path: model_zoo/gpt
+        path: legacy/model_zoo/gpt
         cmd: mkdir pre_data && cd pre_data && wget -q https://bj.bcebos.com/paddlenlp/models/transformers/gpt/data/gpt_en_dataset_300m_ids.npy && wget -q https://bj.bcebos.com/paddlenlp/models/transformers/gpt/data/gpt_en_dataset_300m_idx.npz
       - name: pretrain
-        path: model_zoo/gpt
+        path: legacy/model_zoo/gpt
         cmd: python -m paddle.distributed.launch run_pretrain.py
         params:
           - --model_name_or_path "__internal_testing__/gpt"
@@ -33,7 +33,7 @@ case:
             evaluation: "="
       - name: single_dy2st
-        path: model_zoo/gpt
+        path: legacy/model_zoo/gpt
         cmd: mkdir data && cd data && wget https://paddlenlp.bj.bcebos.com/models/transformers/gpt/data/gpt_en_dataset_300m_idx.npz && wget https://paddlenlp.bj.bcebos.com/models/transformers/gpt/data/gpt_en_dataset_300m_ids.npy && cd ..; python run_pretrain.py
         params:
           - --model_type gpt
@@ -52,7 +52,7 @@ case:
           - --seed 100
       - name: multi_dy2st
-        path: model_zoo/gpt
+        path: legacy/model_zoo/gpt
         cmd: mkdir data && cd data && wget https://paddlenlp.bj.bcebos.com/models/transformers/gpt/data/gpt_en_dataset_300m_idx.npz && wget https://paddlenlp.bj.bcebos.com/models/transformers/gpt/data/gpt_en_dataset_300m_ids.npy && cd ..; python -m paddle.distributed.launch run_pretrain.py
         params:
           - --model_type gpt
@@ -75,7 +75,7 @@ case:
     export:
       - name: export
-        path: model_zoo/gpt
+        path: legacy/model_zoo/gpt
         cmd: python export_model.py
         params:
           - --model_type=gpt
@@ -84,7 +84,7 @@ case:
     predict:
       - name: predict
-        path: model_zoo/gpt/deploy/python/
+        path: legacy/model_zoo/gpt/deploy/python/
         cmd: python inference.py
         params:
           - --model_type gpt
diff --git a/models_restruct/PaddleNLP/cases/model_zoo^gpt_convergence_dy2st.yaml b/models_restruct/PaddleNLP/cases/model_zoo^gpt_convergence_dy2st.yaml
index 836dd69047..b1f6224ab5 100644
--- a/models_restruct/PaddleNLP/cases/model_zoo^gpt_convergence_dy2st.yaml
+++ b/models_restruct/PaddleNLP/cases/model_zoo^gpt_convergence_dy2st.yaml
@@ -3,12 +3,12 @@ case:
     train:
       - name: datasets
-        path: model_zoo/gpt-3/tasks/gpt
+        path: legacy/model_zoo/gpt-3/tasks/gpt
         cmd: mkdir data && cd data && wget https://bj.bcebos.com/paddlenlp/models/transformers/llama/data/llama_openwebtext_100k_ids.npy && wget https://bj.bcebos.com/paddlenlp/models/transformers/llama/data/llama_openwebtext_100k_idx.npz
       - name: dy2st_baseline
-        path: model_zoo/gpt-3/tasks/gpt
+        path: legacy/model_zoo/gpt-3/tasks/gpt
         cmd: GLOG_vmodule=generated_vjp=4 python train_pir.py
         params:
           - -c ../../ppfleetx/configs/nlp/gpt/pretrain_gpt_345M_single_card.yaml
@@ -19,7 +19,7 @@ case:
       - name: dy2st_pir
-        path: model_zoo/gpt-3/tasks/gpt
+        path: legacy/model_zoo/gpt-3/tasks/gpt
         cmd: GLOG_vmodule=generated_vjp=4 python train_pir.py
         params:
           - -c ../../ppfleetx/configs/nlp/gpt/pretrain_gpt_345M_single_card.yaml
@@ -29,7 +29,7 @@ case:
           - -o Engine.max_steps=5000
       - name: dy2st_pir_prim
-        path: model_zoo/gpt-3/tasks/gpt
+        path: legacy/model_zoo/gpt-3/tasks/gpt
         cmd: GLOG_vmodule=generated_vjp=4 python train_pir.py
         params:
           - -c ../../ppfleetx/configs/nlp/gpt/pretrain_gpt_345M_single_card.yaml
diff --git a/models_restruct/PaddleNLP/cases/model_zoo^gpt_convergence_ir.yaml b/models_restruct/PaddleNLP/cases/model_zoo^gpt_convergence_ir.yaml
index d129ba2343..b3f5ca85f4 100644
--- a/models_restruct/PaddleNLP/cases/model_zoo^gpt_convergence_ir.yaml
+++ b/models_restruct/PaddleNLP/cases/model_zoo^gpt_convergence_ir.yaml
@@ -3,12 +3,12 @@ case:
     train:
      - name: datasets
-        path: model_zoo/gpt-3/tasks/gpt
+        path: legacy/model_zoo/gpt-3/tasks/gpt
         cmd: mkdir data && cd data && wget https://bj.bcebos.com/paddlenlp/models/transformers/llama/data/llama_openwebtext_100k_ids.npy && wget https://bj.bcebos.com/paddlenlp/models/transformers/llama/data/llama_openwebtext_100k_idx.npz
       - name: baseline
-        path: model_zoo/gpt-3/tasks/gpt
+        path: legacy/model_zoo/gpt-3/tasks/gpt
         cmd: python train_pir.py
         params:
           - -c ../../ppfleetx/configs/nlp/gpt/pretrain_gpt_345M_single_card.yaml
@@ -19,7 +19,7 @@ case:
       - name: ir
-        path: model_zoo/gpt-3/tasks/gpt
+        path: legacy/model_zoo/gpt-3/tasks/gpt
         cmd: python train_pir.py
         params:
           - -c ../../ppfleetx/configs/nlp/gpt/pretrain_gpt_345M_single_card.yaml
diff --git a/models_restruct/PaddleNLP/cases/model_zoo^gpt_static.yaml b/models_restruct/PaddleNLP/cases/model_zoo^gpt_static.yaml
index a4dfbc36b8..085260a8b7 100644
--- a/models_restruct/PaddleNLP/cases/model_zoo^gpt_static.yaml
+++ b/models_restruct/PaddleNLP/cases/model_zoo^gpt_static.yaml
@@ -3,11 +3,11 @@ case:
     train:
       - name: prepare
-        path: model_zoo/gpt
+        path: legacy/model_zoo/gpt
         cmd: mkdir data && cd data && wget https://paddlenlp.bj.bcebos.com/models/transformers/gpt/data/gpt_en_dataset_300m_ids.npy && wget https://paddlenlp.bj.bcebos.com/models/transformers/gpt/data/gpt_en_dataset_300m_idx.npz
       - name: pretrain
-        path: model_zoo/gpt
+        path: legacy/model_zoo/gpt
         cmd: python -m paddle.distributed.launch run_pretrain_static.py
         params:
           - --model_name_or_path __internal_testing__/gpt
diff --git a/models_restruct/PaddleNLP/cases/model_zoo^tinybert.yaml b/models_restruct/PaddleNLP/cases/model_zoo^tinybert.yaml
index ed0fbb8e7d..fa902aeca5 100644
--- a/models_restruct/PaddleNLP/cases/model_zoo^tinybert.yaml
+++ b/models_restruct/PaddleNLP/cases/model_zoo^tinybert.yaml
@@ -15,7 +15,7 @@ case:
           - --max_steps 2
           - --logging_steps 1
           - --save_steps 2
-          - --output_dir ../../../model_zoo/tinybert/SST-2/
+          - --output_dir ../../../legacy/model_zoo/tinybert/SST-2/
           - --device gpu
         result:
           loss:
@@ -24,7 +24,7 @@ case:
             evaluation: "="
       - name: intermediate_distill
-        path: model_zoo/tinybert/
+        path: legacy/model_zoo/tinybert/
         cmd: python task_distill.py
         params:
           - --model_type tinybert
@@ -50,7 +50,7 @@ case:
            evaluation: "="
       - name: distill_ped
-        path: model_zoo/tinybert/
+        path: legacy/model_zoo/tinybert/
         cmd: python task_distill.py
         params:
           - --model_type tinybert
diff --git a/models_restruct/PaddleNLP/cases/model_zoo^uie.yaml b/models_restruct/PaddleNLP/cases/model_zoo^uie.yaml
index 47903cedac..f0aeb41ac7 100644
--- a/models_restruct/PaddleNLP/cases/model_zoo^uie.yaml
+++ b/models_restruct/PaddleNLP/cases/model_zoo^uie.yaml
@@ -3,11 +3,11 @@ case:
     train:
       - name: prepare
-        path: model_zoo/uie
+        path: legacy/model_zoo/uie
         cmd: mkdir data && cd data && wget https://bj.bcebos.com/paddlenlp/datasets/uie/doccano_ext.json
       - name: doccano
-        path: model_zoo/uie
+        path: legacy/model_zoo/uie
         cmd: python doccano.py
         params:
           - --doccano_file ./data/doccano_ext.json
@@ -17,7 +17,7 @@ case:
           - --schema_lang ch
       - name: multi
-        path: model_zoo/uie
+        path: legacy/model_zoo/uie
         cmd: python -u -m paddle.distributed.launch finetune.py
         params:
           - --device gpu
@@ -53,7 +53,7 @@ case:
     eval:
       - name: eval
-        path: model_zoo/uie
+        path: legacy/model_zoo/uie
         cmd: python evaluate.py
         params:
           - --model_path ./checkpoint/model_best
@@ -68,11 +68,11 @@ case:
     train:
       - name: prepare
-        path: model_zoo/uie
+        path: legacy/model_zoo/uie
         cmd: mkdir data && cd data && wget https://bj.bcebos.com/paddlenlp/datasets/uie/doccano_ext.json
       - name: doccano
-        path: model_zoo/uie
+        path: legacy/model_zoo/uie
         cmd: python doccano.py
         params:
           - --doccano_file ./data/doccano_ext.json
@@ -82,7 +82,7 @@ case:
           - --schema_lang ch
       - name: finetune
-        path: model_zoo/uie
+        path: legacy/model_zoo/uie
         cmd: python -u -m paddle.distributed.launch finetune.py
         params:
           - --device gpu
@@ -118,7 +118,7 @@ case:
     eval:
       - name: eval
-        path: model_zoo/uie
+        path: legacy/model_zoo/uie
         cmd: python evaluate.py
         params:
           - --model_path ./checkpoint/model_best
diff --git a/models_restruct/PaddleNLP/diy_build/PaddleNLP_Build.py b/models_restruct/PaddleNLP/diy_build/PaddleNLP_Build.py
index be194555fc..b123909d31 100644
--- a/models_restruct/PaddleNLP/diy_build/PaddleNLP_Build.py
+++ b/models_restruct/PaddleNLP/diy_build/PaddleNLP_Build.py
@@ -58,18 +58,19 @@ def build_paddlenlp(self):
         path_now = os.getcwd()
         platform = self.system
         paddle_whl = self.paddle_whl
-        os.environ["no_proxy"] = "bcebos.com,baidu.com,baidu-int.com,org.cn"
         print("set timeout as:", os.environ["timeout"])
         print("set no_proxy as:", os.environ["no_proxy"])
         if platform == "linux" or platform == "linux_convergence":
             os.system("python -m pip install -U setuptools -i https://mirror.baidu.com/pypi/simple")
+            os.system("python -m pip install setuptools_scm -i https://mirror.baidu.com/pypi/simple")
             os.system("python -m pip install nltk h5py")
+            os.system("python -m pip install --user -r requirements.txt -i https://mirror.baidu.com/pypi/simple")
             os.system("python -m pip install --user -r requirements_nlp.txt -i https://mirror.baidu.com/pypi/simple")
             os.system("python -m pip uninstall protobuf -y")
             os.system("python -m pip uninstall protobuf -y")
             os.system("python -m pip uninstall protobuf -y")
-            os.system("python -m pip install protobuf==3.20.2")
+            os.system("python -m pip install protobuf==3.20.2 -i https://mirror.baidu.com/pypi/simple")
             # os.system("python -m pip install {}".format(paddle_whl))  # install paddle for lac requirement paddle>=1.6
         else:
             os.system("python -m pip install --user -r requirements_win.txt -i https://mirror.baidu.com/pypi/simple")
diff --git a/models_restruct/PaddleNLP/tools/PaddleNLP_ALL_PTS_list b/models_restruct/PaddleNLP/tools/PaddleNLP_ALL_PTS_list
index 277f9fc950..4db893a79e 100644
--- a/models_restruct/PaddleNLP/tools/PaddleNLP_ALL_PTS_list
+++ b/models_restruct/PaddleNLP/tools/PaddleNLP_ALL_PTS_list
@@ -1,8 +1,6 @@
 applications^information_extraction^document.yaml
 applications^information_extraction^text.yaml
 applications^question_answering^unsupervised_qa.yaml
-applications^sentiment_analysis^ASO_analysis.yaml
-applications^sentiment_analysis^ASO_analysis^extration.yaml
 examples^code_generation^codegen.yaml
 examples^few_shot^RGL.yaml
 examples^lexical_analysis.yaml
diff --git a/models_restruct/PaddleNLP/tools/PaddleNLP_ALL_list b/models_restruct/PaddleNLP/tools/PaddleNLP_ALL_list
index 671805f957..45ebdfac8e 100644
--- a/models_restruct/PaddleNLP/tools/PaddleNLP_ALL_list
+++ b/models_restruct/PaddleNLP/tools/PaddleNLP_ALL_list
@@ -1,5 +1,3 @@
-applications^sentiment_analysis^ASO_analysis^pp_minilm.yaml
-applications^text_summarize^pagesus.yaml
 examples^dependency_parsing^ddparsing.yaml
 examples^language_model^bloom.yaml
 examples^language_model^chatglm.yaml
diff --git a/models_restruct/PaddleNLP/tools/PaddleNLP_P0CUDA11_list b/models_restruct/PaddleNLP/tools/PaddleNLP_P0CUDA11_list
index 7a3526c59e..c799daa7f9 100644
--- a/models_restruct/PaddleNLP/tools/PaddleNLP_P0CUDA11_list
+++ b/models_restruct/PaddleNLP/tools/PaddleNLP_P0CUDA11_list
@@ -1,8 +1,6 @@
 applications^information_extraction^document.yaml
 applications^information_extraction^text.yaml
 applications^question_answering^unsupervised_qa.yaml
-applications^sentiment_analysis^ASO_analysis.yaml
-applications^sentiment_analysis^ASO_analysis^extration.yaml
 applications^zero_shot_text_classification.yaml
 examples^code_generation^codegen.yaml
 examples^few_shot^RGL.yaml
diff --git a/models_restruct/PaddleNLP/tools/PaddleNLP_P0PY37_list b/models_restruct/PaddleNLP/tools/PaddleNLP_P0PY37_list
index 545fb25a82..32d0f843ab 100644
--- a/models_restruct/PaddleNLP/tools/PaddleNLP_P0PY37_list
+++ b/models_restruct/PaddleNLP/tools/PaddleNLP_P0PY37_list
@@ -1,8 +1,6 @@
 applications^information_extraction^document.yaml
 applications^information_extraction^text.yaml
 applications^question_answering^unsupervised_qa.yaml
-applications^sentiment_analysis^ASO_analysis.yaml
-applications^sentiment_analysis^ASO_analysis^extration.yaml
 applications^zero_shot_text_classification.yaml
 examples^code_generation^codegen.yaml
 examples^few_shot^RGL.yaml
diff --git a/models_restruct/PaddleNLP/tools/PaddleNLP_P0PY38_list b/models_restruct/PaddleNLP/tools/PaddleNLP_P0PY38_list
index 545fb25a82..32d0f843ab 100644
--- a/models_restruct/PaddleNLP/tools/PaddleNLP_P0PY38_list
+++ b/models_restruct/PaddleNLP/tools/PaddleNLP_P0PY38_list
@@ -1,8 +1,6 @@
 applications^information_extraction^document.yaml
 applications^information_extraction^text.yaml
 applications^question_answering^unsupervised_qa.yaml
-applications^sentiment_analysis^ASO_analysis.yaml
-applications^sentiment_analysis^ASO_analysis^extration.yaml
 applications^zero_shot_text_classification.yaml
 examples^code_generation^codegen.yaml
 examples^few_shot^RGL.yaml
diff --git a/models_restruct/PaddleNLP/tools/PaddleNLP_P0_PTS_list b/models_restruct/PaddleNLP/tools/PaddleNLP_P0_PTS_list
index 3b66035ec2..8fd970a404 100644
--- a/models_restruct/PaddleNLP/tools/PaddleNLP_P0_PTS_list
+++ b/models_restruct/PaddleNLP/tools/PaddleNLP_P0_PTS_list
@@ -1,8 +1,6 @@
 applications^information_extraction^document.yaml
 applications^information_extraction^text.yaml
 applications^question_answering^unsupervised_qa.yaml
-applications^sentiment_analysis^ASO_analysis.yaml
-applications^sentiment_analysis^ASO_analysis^extration.yaml
 examples^code_generation^codegen.yaml
 examples^lexical_analysis.yaml
 model_zoo^electra.yaml
diff --git a/models_restruct/PaddleNLP/tools/PaddleNLP_P0_list b/models_restruct/PaddleNLP/tools/PaddleNLP_P0_list
index 545fb25a82..32d0f843ab 100644
--- a/models_restruct/PaddleNLP/tools/PaddleNLP_P0_list
+++ b/models_restruct/PaddleNLP/tools/PaddleNLP_P0_list
@@ -1,8 +1,6 @@
 applications^information_extraction^document.yaml
 applications^information_extraction^text.yaml
 applications^question_answering^unsupervised_qa.yaml
-applications^sentiment_analysis^ASO_analysis.yaml
-applications^sentiment_analysis^ASO_analysis^extration.yaml
 applications^zero_shot_text_classification.yaml
 examples^code_generation^codegen.yaml
 examples^few_shot^RGL.yaml
diff --git a/models_restruct/PaddleNLP/tools/model_all_list b/models_restruct/PaddleNLP/tools/model_all_list
index 68528ab802..4a8e02cbe8 100644
--- a/models_restruct/PaddleNLP/tools/model_all_list
+++ b/models_restruct/PaddleNLP/tools/model_all_list
@@ -1,8 +1,5 @@
 applications^information_extraction^text.yaml
 applications^question_answering^unsupervised_qa.yaml
-applications^sentiment_analysis^ASO_analysis.yaml
-applications^sentiment_analysis^ASO_analysis^extration.yaml
-applications^sentiment_analysis^ASO_analysis^pp_minilm.yaml
 applications^text_summarize^pagesus.yaml
 applications^zero_shot_text_classification.yaml
 examples^few_shot^efl.yaml