From a890d78ba2e84eecd1f58b4ffa5acf4d3d2fc835 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 20 Jan 2024 14:39:32 +0000 Subject: [PATCH] Added backup urls for bert-pytorch --- .../app-mlperf-inference-reference/_cm.yaml | 1 - .../script/get-ml-model-bert-large-squad/_cm.json | 15 +++++++++++++-- .../script/run-mlperf-inference-app/customize.py | 4 ++++ 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/cm-mlops/script/app-mlperf-inference-reference/_cm.yaml b/cm-mlops/script/app-mlperf-inference-reference/_cm.yaml index e889a38688..627733300e 100644 --- a/cm-mlops/script/app-mlperf-inference-reference/_cm.yaml +++ b/cm-mlops/script/app-mlperf-inference-reference/_cm.yaml @@ -151,7 +151,6 @@ deps: CM_MODEL: - 3d-unet-99 - 3d-unet-99.9 - - resnet50 ## resnet50 and 3d-unet need both onnxruntime and onnxruntime_gpu on cuda - tags: get,generic-python-lib,_onnxruntime diff --git a/cm-mlops/script/get-ml-model-bert-large-squad/_cm.json b/cm-mlops/script/get-ml-model-bert-large-squad/_cm.json index aae31756fb..91fb87d70b 100644 --- a/cm-mlops/script/get-ml-model-bert-large-squad/_cm.json +++ b/cm-mlops/script/get-ml-model-bert-large-squad/_cm.json @@ -93,7 +93,7 @@ }, "onnx,fp32,armi": { "env": { - "CM_PACKAGE_URL": "https://armi.in/files/model.onnx1", + "CM_PACKAGE_URL": "https://armi.in/files/model.onnx", "CM_PACKAGE_URL1": "https://zenodo.org/record/3733910/files/model.onnx" } }, @@ -172,7 +172,12 @@ }, "pytorch,fp32,zenodo": { "env": { - "CM_ML_MODEL_F1": "90.874", + "CM_PACKAGE_URL": "https://zenodo.org/record/3733896/files/model.pytorch" } }, + "pytorch,fp32,armi": { + "env": { + "CM_PACKAGE_URL": "https://armi.in/files/fp32/model.pytorch", + "CM_PACKAGE_URL1": "https://zenodo.org/record/3733896/files/model.pytorch" } }, @@ -186,6 +191,12 @@ "CM_PACKAGE_URL": "https://zenodo.org/record/4792496/files/pytorch_model.bin" } }, + "pytorch,int8,armi": { + "env": { + "CM_PACKAGE_URL": "https://armi.in/files/int8/pytorch_model.bin", + "CM_PACKAGE_URL1": 
"https://zenodo.org/record/4792496/files/pytorch_model.bin" + } + }, "onnxruntime": { "base": [ "onnx" diff --git a/cm-mlops/script/run-mlperf-inference-app/customize.py b/cm-mlops/script/run-mlperf-inference-app/customize.py index 11c417f426..545df50f98 100644 --- a/cm-mlops/script/run-mlperf-inference-app/customize.py +++ b/cm-mlops/script/run-mlperf-inference-app/customize.py @@ -163,6 +163,8 @@ def preprocess(i): r = cm.access(ii) if r['return'] > 0: return r + if env.get('CM_MLPERF_SKIP_RUN', '') != '': + del(env['CM_MLPERF_SKIP_RUN']) if 'CM_MLPERF_RESULTS_DIR' in r['new_env']: env['CM_MLPERF_RESULTS_DIR'] = r['new_env']['CM_MLPERF_RESULTS_DIR'] if 'CM_MLPERF_BACKEND' in r['new_env']: @@ -181,6 +183,8 @@ def preprocess(i): copy.deepcopy(add_deps_recursive), 'adr': copy.deepcopy(adr), 'ad': ad, 'v': verbose, 'print_env': print_env, 'print_deps': print_deps}) if r['return'] > 0: return r + if env.get('CM_MLPERF_SKIP_RUN', '') != '': + del(env['CM_MLPERF_SKIP_RUN']) return {'return':0}