// Jenkinsfile (forked from NVIDIA/NeMo)
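//
// CI pipeline for this NeMo fork. All stages run inside the
// nvcr.io/nvidia/pytorch:20.01-py3 container on a node with two GPUs.
// Tests are tiered: L0 unit/integration tests, L1 system tests, and
// L2 example-level training and inference smoke tests.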
pipeline {
agent {
docker {
image 'nvcr.io/nvidia/pytorch:20.01-py3'
args '--device=/dev/nvidia0 --gpus all --user 0:128 -v /home/TestData:/home/TestData -v $HOME/.cache/torch:/root/.cache/torch --shm-size=8g'
}
}
options {
timeout(time: 1, unit: 'HOURS')
disableConcurrentBuilds()
}
stages {
stage('PyTorch version') {
steps {
sh 'python -c "import torch; print(torch.__version__)"'
}
}
stage('NVIDIA-SMI') {
steps {
sh 'nvidia-smi'
}
}
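// 'bc' is installed below because later stages use it to compare metrics
// (loss, F1, WER) extracted from training logs against pass/fail thresholds.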
stage('Install test requirements') {
steps {
sh 'apt-get update && apt-get install -y bc && pip install -r requirements/requirements_test.txt'
}
}
stage('Code formatting checks') {
steps {
sh 'python setup.py style'
}
}
stage('Documentation check') {
steps {
sh './reinstall.sh && pytest -m docs'
}
}
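// Test suites are selected with pytest markers (unit / integration / system);
// anything marked 'skipduringci' is excluded from the GPU runs, and the --cpu
// variants are gated to master and to change requests that target master.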
stage('L0: Unit Tests GPU') {
steps {
sh 'pytest -m "unit and not skipduringci"'
}
}
stage('L0: Unit Tests CPU') {
when {
anyOf{
branch 'master'
changeRequest target: 'master'
}
}
steps {
sh 'pytest -m unit --cpu'
}
}
stage('L0: Integration Tests GPU') {
steps {
sh 'pytest -s -m "integration and not skipduringci"'
}
}
stage('L0: Integration Tests CPU') {
when {
anyOf{
branch 'master'
changeRequest target: 'master'
}
}
steps {
sh 'pytest -s -m integration --cpu'
}
}
stage('L1: System Tests GPU') {
steps {
sh 'pytest -m "system and not skipduringci"'
}
}
stage('L1: System Tests CPU') {
when {
anyOf{
branch 'master'
changeRequest target: 'master'
}
}
steps {
sh 'pytest -m system --cpu'
}
}
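// L2 stages exercise the example scripts end to end. They run only on master
// or on a change request, fail fast, and pin CUDA_VISIBLE_DEVICES so that the
// parallel branches split the node's two GPUs between them.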
stage('L2: Parallel Stage1 GPU') {
when {
anyOf{
branch 'master'
changeRequest()
}
}
failFast true
parallel {
stage('Simplest test') {
steps {
sh 'cd examples/start_here && CUDA_VISIBLE_DEVICES=0 python simplest_example.py'
}
}
stage ('Chatbot test') {
steps {
sh 'cd examples/start_here && CUDA_VISIBLE_DEVICES=1 python chatbot_example.py'
}
}
}
}
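// Pattern for the training smoke tests below: run a short training job, pull the
// final metric for rank 0 out of log_globalrank-0_localrank-0.txt with
// grep/awk/egrep, compare it against a threshold with bc (exiting non-zero on
// failure), then remove the work_dir and any cached test data the run created.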
stage('L2: Parallel NLP-BERT pretraining') {
when {
anyOf{
branch 'master'
changeRequest()
}
}
failFast true
parallel {
stage('BERT on the fly preprocessing') {
steps {
sh 'cd examples/nlp/language_modeling && CUDA_VISIBLE_DEVICES=0 python bert_pretraining.py --amp_opt_level O1 --train_data /home/TestData/nlp/wikitext-2/train.txt --eval_data /home/TestData/nlp/wikitext-2/valid.txt --work_dir outputs/bert_lm/wikitext2 --batch_size 64 --lr 0.01 --lr_policy CosineAnnealing --lr_warmup_proportion 0.05 --vocab_size 3200 --hidden_size 768 --intermediate_size 3072 --num_hidden_layers 6 --num_attention_heads 12 --hidden_act "gelu" --save_step_freq 200 data_text --num_iters=300 --tokenizer sentence-piece --sample_size 10000000 --mask_probability 0.15 --short_seq_prob 0.1 --dataset_name wikitext-2'
sh 'cd examples/nlp/language_modeling && LOSS=$(cat outputs/bert_lm/wikitext2/log_globalrank-0_localrank-0.txt | grep "Loss" |tail -n 1| awk \'{print \$7}\' | egrep -o "[0-9.]+" ) && echo $LOSS && if [ $(echo "$LOSS < 8.0" | bc -l) -eq 1 ]; then echo "SUCCESS" && exit 0; else echo "FAILURE" && exit 1; fi'
sh 'rm -rf examples/nlp/language_modeling/outputs/bert_lm/wikitext2 && rm -rf /home/TestData/nlp/wikitext-2/*.pkl && rm -rf /home/TestData/nlp/wikitext-2/bert'
}
}
stage('BERT offline preprocessing') {
steps {
sh 'cd examples/nlp/language_modeling && CUDA_VISIBLE_DEVICES=1 python bert_pretraining.py --amp_opt_level O1 --train_data /home/TestData/nlp/wiki_book_mini/training --eval_data /home/TestData/nlp/wiki_book_mini/evaluation --work_dir outputs/bert_lm/wiki_book --batch_size 8 --config_file /home/TestData/nlp/bert_configs/uncased_L-12_H-768_A-12.json --save_step_freq 200 --num_gpus 1 --batches_per_step 1 --lr_policy SquareRootAnnealing --beta2 0.999 --beta1 0.9 --lr_warmup_proportion 0.01 --optimizer adam_w --weight_decay 0.01 --lr 0.875e-4 data_preprocessed --num_iters 300'
sh 'cd examples/nlp/language_modeling && LOSS=$(cat outputs/bert_lm/wiki_book/log_globalrank-0_localrank-0.txt | grep "Loss" |tail -n 1| awk \'{print \$7}\' | egrep -o "[0-9.]+" ) && echo $LOSS && if [ $(echo "$LOSS < 15.0" | bc -l) -eq 1 ]; then echo "SUCCESS" && exit 0; else echo "FAILURE" && exit 1; fi'
sh 'rm -rf examples/nlp/language_modeling/outputs/bert_lm/wiki_book'
}
}
}
}
stage('L2: Parallel NLP Examples 1') {
when {
anyOf{
branch 'master'
changeRequest()
}
}
failFast true
parallel {
stage ('Text Classification with BERT Test') {
steps {
sh 'cd examples/nlp/text_classification && CUDA_VISIBLE_DEVICES=0 python text_classification_with_bert.py --pretrained_model_name bert-base-uncased --num_epochs=1 --max_seq_length=50 --data_dir=/home/TestData/nlp/retail/ --eval_file_prefix=dev --batch_size=10 --num_train_samples=-1 --do_lower_case --work_dir=outputs'
sh 'rm -rf examples/nlp/text_classification/outputs'
}
}
stage ('Dialogue State Tracking - TRADE - Multi-GPUs') {
steps {
sh 'rm -rf /home/TestData/nlp/multiwoz2.1/vocab.pkl'
sh 'cd examples/nlp/dialogue_state_tracking && CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 dialogue_state_tracking_trade.py --batch_size=10 --eval_batch_size=10 --num_train_samples=-1 --num_eval_samples=-1 --num_epochs=1 --dropout=0.2 --eval_file_prefix=test --num_gpus=2 --lr=0.001 --grad_norm_clip=10 --work_dir=outputs --data_dir=/home/TestData/nlp/multiwoz2.1'
sh 'rm -rf examples/nlp/dialogue_state_tracking/outputs'
sh 'rm -rf /home/TestData/nlp/multiwoz2.1/vocab.pkl'
}
}
stage ('GLUE Benchmark Test') {
steps {
sh 'cd examples/nlp/glue_benchmark && CUDA_VISIBLE_DEVICES=1 python glue_benchmark_with_bert.py --data_dir /home/TestData/nlp/glue_fake/MRPC --pretrained_model_name bert-base-uncased --work_dir glue_output --save_step_freq -1 --num_epochs 1 --task_name mrpc --batch_size 2 --no_data_cache'
sh 'rm -rf examples/nlp/glue_benchmark/glue_output'
}
}
stage ('TRADE-Rule-based-DPM/NLG') {
steps {
sh 'cd examples/nlp/dialogue_state_tracking && python rule_based_policy_multiwoz.py --data_dir /home/TestData/nlp/multiwoz2.1/pm_nlg \
--encoder_ckpt /home/TestData/nlp/multiwoz2.1/pm_nlg/ckpts/EncoderRNN-EPOCH-10.pt \
--decoder_ckpt /home/TestData/nlp/multiwoz2.1/pm_nlg/ckpts/TRADEGenerator-EPOCH-10.pt'
}
}
}
}
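// The training/inference pairs below locate the timestamped run directory created
// during training via DATE_F=$(ls <work_dir>) and point the matching *_infer.py
// script at the checkpoints inside it.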
stage('L2: Parallel NLP Examples 2') {
when {
anyOf{
branch 'master'
changeRequest()
}
}
failFast true
parallel {
stage('Token Classification Training/Inference Test') {
steps {
sh 'cd examples/nlp/token_classification && CUDA_VISIBLE_DEVICES=0 python token_classification.py --data_dir /home/TestData/nlp/token_classification_punctuation/ --batch_size 2 --num_epochs 1 --save_epoch_freq 1 --work_dir token_classification_output --pretrained_model_name bert-base-uncased'
sh 'cd examples/nlp/token_classification && DATE_F=$(ls token_classification_output/) && CUDA_VISIBLE_DEVICES=0 python token_classification_infer.py --checkpoint_dir token_classification_output/$DATE_F/checkpoints/ --labels_dict /home/TestData/nlp/token_classification_punctuation/label_ids.csv --pretrained_model_name bert-base-uncased'
sh 'rm -rf examples/nlp/token_classification/token_classification_output'
}
}
stage('Megatron finetuning Token Classification Training/Inference Test') {
steps {
sh 'cd examples/nlp/token_classification && CUDA_VISIBLE_DEVICES=0 python token_classification.py --data_dir /home/TestData/nlp/token_classification_punctuation/ --batch_size 2 --num_epochs 1 --save_epoch_freq 1 --work_dir megatron_output --pretrained_model_name megatron-bert-345m-uncased'
sh 'cd examples/nlp/token_classification && DATE_F=$(ls megatron_output/) && CUDA_VISIBLE_DEVICES=0 python token_classification_infer.py --checkpoint_dir megatron_output/$DATE_F/checkpoints/ --labels_dict /home/TestData/nlp/token_classification_punctuation/label_ids.csv --pretrained_model_name megatron-bert-345m-uncased'
sh 'rm -rf examples/nlp/token_classification/megatron_output'
}
}
stage ('Punctuation and Classification Training/Inference Test') {
steps {
sh 'cd examples/nlp/token_classification && CUDA_VISIBLE_DEVICES=1 python punctuation_capitalization.py \
--data_dir /home/TestData/nlp/token_classification_punctuation/ --work_dir punctuation_output --save_epoch_freq 1 \
--num_epochs 1 --save_step_freq -1 --batch_size 2'
sh 'cd examples/nlp/token_classification && DATE_F=$(ls punctuation_output/) && DATA_DIR="/home/TestData/nlp/token_classification_punctuation" && CUDA_VISIBLE_DEVICES=1 python punctuation_capitalization_infer.py --checkpoint_dir punctuation_output/$DATE_F/checkpoints/ --punct_labels_dict $DATA_DIR/punct_label_ids.csv --capit_labels_dict $DATA_DIR/capit_label_ids.csv'
sh 'rm -rf examples/nlp/token_classification/punctuation_output'
}
}
stage('SGD Test') {
steps {
sh 'cd examples/nlp/dialogue_state_tracking && CUDA_VISIBLE_DEVICES=0 python dialogue_state_tracking_sgd.py \
--data_dir /home/TestData/nlp/sgd/ --schema_embedding_dir /home/TestData/nlp/sgd/embeddings/ --eval_dataset dev \
--dialogues_example_dir /home/TestData/nlp/sgd/dialogue_example_dir/ --work_dir sgd_output --task debug_sample \
--num_epochs 1 --save_epoch_freq=0 --no_overwrite_schema_emb_files --no_overwrite_dial_files'
sh 'rm -rf examples/nlp/dialogue_state_tracking/sgd_output'
}
}
}
}
stage('L2: Parallel NLP-Squad') {
when {
anyOf{
branch 'master'
changeRequest()
}
}
failFast true
parallel {
stage('BERT Squad v1.1') {
steps {
sh 'cd examples/nlp/question_answering && CUDA_VISIBLE_DEVICES=0 python question_answering_squad.py --no_data_cache --amp_opt_level O1 --train_file /home/TestData/nlp/squad_mini/v1.1/train-v1.1.json --eval_file /home/TestData/nlp/squad_mini/v1.1/dev-v1.1.json --work_dir outputs/squadv1 --batch_size 8 --save_step_freq 200 --max_steps 50 --train_step_freq 5 --lr_policy WarmupAnnealing --lr 5e-5 --do_lower_case --pretrained_model_name bert-base-uncased --optimizer adam_w'
sh 'cd examples/nlp/question_answering && FSCORE=$(cat outputs/squadv1/log_globalrank-0_localrank-0.txt | grep "f1" |tail -n 1 |egrep -o "[0-9.]+"|tail -n 1 ) && echo $FSCORE && if [ $(echo "$FSCORE > 10.0" | bc -l) -eq 1 ]; then echo "SUCCESS" && exit 0; else echo "FAILURE" && exit 1; fi'
sh 'rm -rf examples/nlp/question_answering/outputs/squadv1 && rm -rf /home/TestData/nlp/squad_mini/v1.1/*cache*'
}
}
stage('BERT Squad v2.0') {
steps {
sh 'cd examples/nlp/question_answering && CUDA_VISIBLE_DEVICES=1 python question_answering_squad.py --no_data_cache --amp_opt_level O1 --train_file /home/TestData/nlp/squad_mini/v2.0/train-v2.0.json --eval_file /home/TestData/nlp/squad_mini/v2.0/dev-v2.0.json --work_dir outputs/squadv2 --batch_size 8 --save_step_freq 200 --train_step_freq 2 --max_steps 10 --lr_policy WarmupAnnealing --lr 1e-5 --do_lower_case --version_2_with_negative --pretrained_model_name bert-base-uncased --optimizer adam_w'
sh 'cd examples/nlp/question_answering && FSCORE=$(cat outputs/squadv2/log_globalrank-0_localrank-0.txt | grep "f1" |tail -n 1 |egrep -o "[0-9.]+"|tail -n 1 ) && echo $FSCORE && if [ $(echo "$FSCORE > 40.0" | bc -l) -eq 1 ]; then echo "SUCCESS" && exit 0; else echo "FAILURE" && exit 1; fi'
sh 'rm -rf examples/nlp/question_answering/outputs/squadv2 && rm -rf /home/TestData/nlp/squad_mini/v2.0/*cache*'
}
}
}
}
stage('L2: Parallel NLP-Examples 3') {
when {
anyOf{
branch 'master'
changeRequest()
}
}
failFast true
parallel {
stage('asr_processing') {
steps {
sh 'cd examples/nlp/asr_postprocessor && CUDA_VISIBLE_DEVICES=0 python asr_postprocessor.py --data_dir=/home/TestData/nlp/asr_postprocessor/pred_real --restore_from=/home/TestData/nlp/asr_postprocessor/bert-base-uncased_decoder.pt --max_steps=25 --batch_size=64'
sh 'cd examples/nlp/asr_postprocessor && WER=$(cat outputs/asr_postprocessor/log_globalrank-0_localrank-0.txt | grep "Validation WER" | tail -n 1 | egrep -o "[0-9.]+" | tail -n 1) && echo $WER && if [ $(echo "$WER < 25.0" | bc -l) -eq 1 ]; then echo "SUCCESS" && exit 0; else echo "FAILURE" && exit 1; fi'
sh 'rm -rf examples/nlp/asr_postprocessor/outputs'
}
}
stage('Roberta Squad v1.1') {
steps {
sh 'cd examples/nlp/question_answering && CUDA_VISIBLE_DEVICES=1 python question_answering_squad.py --no_data_cache --amp_opt_level O1 --train_file /home/TestData/nlp/squad_mini/v1.1/train-v1.1.json --eval_file /home/TestData/nlp/squad_mini/v1.1/dev-v1.1.json --work_dir outputs/squadv1_roberta --batch_size 5 --save_step_freq 200 --max_steps 50 --train_step_freq 5 --lr_policy WarmupAnnealing --lr 1e-5 --pretrained_model_name roberta-base --optimizer adam_w'
sh 'cd examples/nlp/question_answering && FSCORE=$(cat outputs/squadv1_roberta/log_globalrank-0_localrank-0.txt | grep "f1" |tail -n 1 |egrep -o "[0-9.]+"|tail -n 1 ) && echo $FSCORE && if [ $(echo "$FSCORE > 7.0" | bc -l) -eq 1 ]; then echo "SUCCESS" && exit 0; else echo "FAILURE" && exit 1; fi'
sh 'rm -rf examples/nlp/question_answering/outputs/squadv1_roberta && rm -rf /home/TestData/nlp/squad_mini/v1.1/*cache*'
}
}
}
}
stage('L2: NLP-Intent Detection/Slot Tagging Examples - Multi-GPU') {
when {
anyOf{
branch 'master'
changeRequest()
}
}
failFast true
steps {
sh 'cd examples/nlp/intent_detection_slot_tagging && CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 joint_intent_slot_with_bert.py --num_gpus=2 --pretrained_model_name=bert-base-uncased --num_epochs=1 --max_seq_length=50 --data_dir=/home/TestData/nlp/retail/ --eval_file_prefix=dev --batch_size=10 --num_train_samples=-1 --do_lower_case --work_dir=outputs_joint_intent_slot'
sh 'cd examples/nlp/intent_detection_slot_tagging && DATE_F=$(ls outputs_joint_intent_slot/) && CHECKPOINT_DIR=outputs_joint_intent_slot/$DATE_F/checkpoints/ && CUDA_VISIBLE_DEVICES=0 python joint_intent_slot_infer.py --checkpoint_dir $CHECKPOINT_DIR --pretrained_model_name=bert-base-uncased --eval_file_prefix=dev --data_dir=/home/TestData/nlp/retail/ --batch_size=10'
sh 'cd examples/nlp/intent_detection_slot_tagging && DATE_F=$(ls outputs_joint_intent_slot/) && CHECKPOINT_DIR=outputs_joint_intent_slot/$DATE_F/checkpoints/ && CUDA_VISIBLE_DEVICES=0 python joint_intent_slot_infer_b1.py --data_dir=/home/TestData/nlp/retail/ --pretrained_model_name=bert-base-uncased --checkpoint_dir $CHECKPOINT_DIR --query="how much is it?"'
sh 'rm -rf examples/nlp/intent_detection_slot_tagging/outputs_joint_intent_slot'
}
}
stage('L2: NLP-NMT Example') {
when {
anyOf{
branch 'master'
changeRequest()
}
}
failFast true
steps {
sh 'cd examples/nlp/neural_machine_translation/ && CUDA_VISIBLE_DEVICES=0 python machine_translation_tutorial.py --max_steps 100'
sh 'rm -rf examples/nlp/neural_machine_translation/outputs'
}
}
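// ASR inference checks: the wer_target/wer_tolerance flags should make
// speech2text_infer.py fail the stage if the measured WER on the
// librivox-dev-other manifest exceeds the allowed target.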
stage('L2: Parallel Stage QuartzNet/JasperNet inference') {
when {
anyOf{
branch 'master'
changeRequest()
}
}
failFast true
parallel {
stage('QuartzNet inference') {
steps {
sh 'cd examples/asr && CUDA_VISIBLE_DEVICES=0 python speech2text_infer.py --asr_model=QuartzNet15x5-En --dataset=/home/TestData/librispeech/librivox-dev-other.json --wer_target=0.1060 --wer_tolerance=1.01'
}
}
stage('JasperNet inference') {
steps {
sh 'cd examples/asr && CUDA_VISIBLE_DEVICES=1 python speech2text_infer.py --asr_model=JasperNet10x5-En --dataset=/home/TestData/librispeech/librivox-dev-other.json --wer_target=0.1041 --wer_tolerance=1.01'
}
}
}
}
stage('L2: Parallel Stage Jasper / GAN') {
when {
anyOf{
branch 'master'
changeRequest()
}
}
failFast true
parallel {
// stage('Jasper AN4 O1') {
// steps {
// sh 'cd examples/asr && CUDA_VISIBLE_DEVICES=0 python jasper_an4.py --amp_opt_level=O1 --num_epochs=35 --test_after_training --work_dir=O1'
// }
// }
stage('GAN O2') {
steps {
sh 'cd examples/image && CUDA_VISIBLE_DEVICES=0 python gan.py --amp_opt_level=O2 --num_epochs=3 --train_dataset=/home/TestData/'
}
}
stage('Jasper AN4 O2') {
steps {
sh 'cd examples/asr && CUDA_VISIBLE_DEVICES=1 python jasper_an4.py --amp_opt_level=O2 --num_epochs=35 --test_after_training --work_dir=O2 --train_dataset=/home/TestData/an4_dataset/an4_train.json --eval_datasets=/home/TestData/an4_dataset/an4_val.json --do_not_eval_at_start --eval_freq 1000'
}
}
}
}
// stage('Parallel Stage GAN') {
// failFast true
// parallel {
// stage('GAN O1') {
// steps {
// sh 'cd examples/image && CUDA_VISIBLE_DEVICES=0 python gan.py --amp_opt_level=O1 --num_epochs=3'
// }
// }
// }
// }
stage('L2: Multi-GPU Jasper test') {
when {
anyOf{
branch 'master'
changeRequest()
}
}
failFast true
steps {
sh 'cd examples/asr && CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 jasper_an4.py --num_epochs=40 --batch_size=24 --work_dir=multi_gpu --test_after_training --train_dataset=/home/TestData/an4_dataset/an4_train.json --eval_datasets=/home/TestData/an4_dataset/an4_val.json --do_not_eval_at_start --eval_freq 1000'
}
}
// stage('L2: TTS Tests') {
// when {
// anyOf{
// branch 'master'
// changeRequest()
// }
// }
// failFast true
// steps {
// sh 'cd examples/tts && CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 tacotron2.py --num_epochs=4 --model_config=configs/tacotron2.yaml --train_dataset=/home/TestData/an4_dataset/an4_train.json --amp_opt_level=O1 --eval_datasets=/home/TestData/an4_dataset/an4_val.json --eval_freq=100 --do_not_eval_at_start --decoder_force --eval_batch_size=48 --random_seed=0'
// sh 'cd examples/tts && TTS_CHECKPOINT_DIR=$(ls | grep "Tacotron2") && echo $TTS_CHECKPOINT_DIR && LOSS=$(cat $TTS_CHECKPOINT_DIR/log_globalrank-0_localrank-0.txt | grep -o -E "Loss an4_val[ :0-9.]+" | grep -o -E "[0-9.]+" | tail -n 1) && echo $LOSS && if [ $(echo "$LOSS - 4.344909191131592 < 0.1" | bc -l) -eq 1 ]; then echo "SUCCESS" && exit 0; else echo "FAILURE" && exit 1; fi'
// // sh 'cd examples/tts && TTS_CHECKPOINT_DIR=$(ls | grep "Tacotron2") && cp ../asr/multi_gpu/checkpoints/* $TTS_CHECKPOINT_DIR/checkpoints'
// // sh 'CUDA_VISIBLE_DEVICES=0 python tacotron2_an4_test.py --model_config=configs/tacotron2.yaml --eval_dataset=/home/TestData/an4_dataset/an4_train.json --jasper_model_config=../asr/configs/jasper_an4.yaml --load_dir=$TTS_CHECKPOINT_DIR/checkpoints'
// }
// }
}
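// Steps run as root inside the container (--user 0:128), so make the workspace
// world-writable before cleanWs() wipes it.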
post {
always {
sh "chmod -R 777 ."
cleanWs()
}
}
}