Commit dd4d03c
Changes to run bert for intel-mlperf-inference
arjunsuresh committed Jan 13, 2024
1 parent 96cd7c8
Showing 10 changed files with 178 additions and 230 deletions.
1 change: 1 addition & 0 deletions cm-mlops/script/get-conda/_cm.json
@@ -14,6 +14,7 @@
"+PATH",
"+LD_LIBRARY_PATH",
"CM_CONDA_PREFIX",
"CONDA_PREFIX",
"CM_CONDA_BIN_PATH",
"CM_CONDA_BIN_WITH_PATH",
"CM_CONDA_LIB_PATH"
1 change: 1 addition & 0 deletions cm-mlops/script/get-conda/customize.py
@@ -72,6 +72,7 @@ def postprocess(i):

conda_prefix = os.path.dirname(conda_bin_path)
env['CM_CONDA_PREFIX'] = conda_prefix
env['CONDA_PREFIX'] = conda_prefix

conda_lib_path = os.path.join(conda_prefix, "lib")

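Taken together, the two get-conda changes export conda's standard CONDA_PREFIX alongside the CM-specific CM_CONDA_PREFIX, so downstream tools that expect an activated conda environment can locate the prefix. A minimal sketch of the resulting postprocess logic, assuming the conda binary directory has already been resolved into CM_CONDA_BIN_PATH earlier in the function:

    import os

    def postprocess(i):
        env = i['env']
        conda_bin_path = env['CM_CONDA_BIN_PATH']       # e.g. /opt/conda/bin
        conda_prefix = os.path.dirname(conda_bin_path)  # -> /opt/conda
        env['CM_CONDA_PREFIX'] = conda_prefix
        env['CONDA_PREFIX'] = conda_prefix  # new: mirrors what `conda activate` sets
        env['CM_CONDA_LIB_PATH'] = os.path.join(conda_prefix, 'lib')
        return {'return': 0}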
102 changes: 2 additions & 100 deletions cm-mlops/script/install-onednn-from.src/_cm.json
@@ -84,107 +84,9 @@
"base": [
"tag.v2.6"
],
"deps": [
{
"names": [
"conda"
],
"tags": "get,conda,_name.bert-pt"
},
{
"tags": "install,pytorch,from.src,_for-intel-mlperf-inference"
},
{
"names": [
"conda-package",
"python3"
],
"tags": "get,generic,conda-package,_package.python",
"version": "3.8"
},
{
"names": [
"conda-package",
"ncurses"
],
"tags": "get,generic,conda-package,_package.ncurses,_source.conda-forge"
},
{
"names": [
"conda-package",
"ninja"
],
"tags": "get,generic,conda-package,_package.ninja"
},
{
"names": [
"conda-package",
"cmake"
],
"tags": "get,generic,conda-package,_package.cmake"
},
{
"names": [
"conda-package",
"mkl"
],
"tags": "get,generic,conda-package,_package.mkl,_source.intel",
"version": "2023.1.0"
},
{
"names": [
"conda-package",
"mkl-include"
],
"tags": "get,generic,conda-package,_package.mkl-include,_source.intel",
"version": "2023.1.0"
},
{
"names": [
"conda-package",
"intel-openmp"
],
"tags": "get,generic,conda-package,_package.intel-openmp,_source.intel",
"version": "2023.1.0"
},
{
"names": [
"conda-package",
"llvm-openmp"
],
"tags": "get,generic,conda-package,_package.llvm-openmp,_source.conda-forge"
},
{
"names": [
"conda-package",
"wheel"
],
"tags": "get,generic,conda-package,_package.wheel,_source.conda-forge"
},
{
"names": [
"conda-package",
"setuptools"
],
"tags": "get,generic,conda-package,_package.setuptools,_source.conda-forge"
},
{
"names": [
"conda-package",
"future"
],
"tags": "get,generic,conda-package,_package.future,_source.conda-forge"
},
{
"names": [
"conda-package",
"libstdcxx-ng"
],
"tags": "get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge"
}
],
"env": {
"CM_CONDA_ENV": "yes"
"CM_CONDA_ENV": "yes",
"CM_FOR_INTEL_MLPERF_INFERENCE": "yes"
}
},
"repo.#": {
4 changes: 4 additions & 0 deletions cm-mlops/script/install-onednn-from.src/customize.py
@@ -13,6 +13,10 @@ def preprocess(i):
run_cmd=""

env['CM_RUN_CMD'] = run_cmd
env['CM_ONEDNN_INSTALLED_PATH'] = os.path.join(os.getcwd(), "onednn")

if env.get('CM_FOR_INTEL_MLPERF_INFERENCE', '') == "yes":
i['run_script_input']['script_name'] = "run-intel-mlperf-inference"

automation = i['automation']

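The preprocess hook now records the oneDNN install location and, when CM_FOR_INTEL_MLPERF_INFERENCE is set, points the automation at a dedicated native script. A rough sketch of how that script_name override is presumably consumed downstream (assumed behavior, not the actual CM internals):

    import os
    import subprocess

    def run_native_script(run_script_input):
        # 'script_name' defaults to 'run'; the preprocess above overrides it
        # to 'run-intel-mlperf-inference' for the Intel MLPerf inference build
        name = run_script_input.get('script_name', 'run')
        script = os.path.join(run_script_input['path'], name + '.sh')
        return subprocess.call(['bash', script])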
@@ -10,17 +10,8 @@ rm -rf build
pwd
wget -nc --no-check-certificate https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/bert-99/pytorch-cpu/patches/onednnv2_6.patch
if [ "${?}" != "0" ]; then exit 1; fi
git apply onednnv2_6.patch
if [ "${?}" != "0" ]; then exit 1; fi
pip install -r requirements.txt

mkdir build
pushd build
cmake -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -DBUILD_TPPS_INTREE=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="$(dirname $(python3 -c 'import torch; print(torch.__file__)'));../cmake/Modules" -GNinja -DUSERCP=ON ..
ninja
popd
cmd="git apply onednnv2_6.patch"

#cmd="${CM_RUN_CMD}"
echo ${cmd}
eval ${cmd}

131 changes: 72 additions & 59 deletions cm-mlops/script/reproduce-mlperf-inference-intel/_cm.yaml
@@ -2,6 +2,7 @@
alias: reproduce-mlperf-inference-intel
uid: c05a90433bb04cc1
cache: false
can_force_cache: true

automation_alias: script
automation_uid: 5b4e0237da074764
@@ -63,15 +64,6 @@ new_state_keys:
- mlperf-inference-implementation
- CM_SUT_*

# Env keys which are exposed to higher level scripts
new_env_keys:
- CM_MLPERF_*
- CM_DATASET_*
- CM_HW_NAME
- CM_ML_MODEL_*
- CM_MAX_EXAMPLES
- CM_IMAGENET_ACCURACY_DTYPE
- CM_SQUAD_ACCURACY_DTYPE


# Dependencies on other CM scripts
@@ -93,11 +85,11 @@ deps:
names:
- mlperf-logging

- tags: get,conda,_name.bert-pt
- tags: get,generic,conda-package,_package.python
version: "3.8"
- tags: get,generic-sys-util,_numactl
- tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge
- tags: get,onednn,from.src,_for-intel-mlperf-inference

########################################################################
# Install ResNet50 model (ONNX) and ImageNet
@@ -127,25 +119,6 @@ deps:



########################################################################
# Install bert dependencies

- enable_if_env:
CM_MODEL:
- bert-99
- bert-99.9
names:
- bert-vocab
tags: get,squad-vocab

- enable_if_env:
CM_MODEL:
- bert-99
- bert-99.9
names:
- squad-tokenized
tags: get,dataset,tokenized,squad,_raw

########################################################################
# Install OpenImages

@@ -169,33 +142,13 @@ deps:
########################################################################
# Install MLPerf inference dependencies

# Download MLPerf inference source
- tags: get,mlcommons,inference,src
names:
- inference-src

# Download MLPerf inference loadgen
- tags: get,mlcommons,inference,loadgen
names:
- inference-loadgen

# Creates user conf for given SUT
- tags: generate,user-conf,mlperf,inference
names:
- user-conf-generator

- tags: get,mlperf,inference,results
version: v3.1


# Post dependencies to run this app including for power measurement
post_deps:

- names:
- compile-program
tags: compile,cpp-program
skip_if_env:
CM_MLPERF_SKIP_RUN:
- yes

- names:
- runner
- mlperf-runner
@@ -224,7 +177,7 @@ variations:
CM_MLPERF_BACKEND_LIB_NAMESPEC: pytorch
deps:
- tags: get,pytorch,from.src,_for-intel-mlperf-inference
- tags: get,onednn,from.src,_for-intel-mlperf-inference
- tags: install,onednn,from.src,_for-intel-mlperf-inference



@@ -273,7 +226,6 @@ variations:

bert_:
deps:
- tags: get,generic-python-lib,_onnx
- tags: install,transformers,from.src,_for-intel-mlperf-inference
env:
CM_BENCHMARK: STANDALONE_BERT
@@ -283,20 +235,20 @@


standalone:
group: run-mode
group: network-mode
default: true
env:
CM_RUN_MODE: standalone
CM_MLPERF_NETWORK_RUN_MODE: standalone

network-server:
group: run-mode
group: network-mode
env:
CM_RUN_MODE: network-server
CM_MLPERF_NETWORK_RUN_MODE: network-server

network-client:
group: run-mode
group: network-run-mode
env:
CM_RUN_MODE: network-client
CM_MLPERF_NETWORK_RUN_MODE: network-client

bert_,network-server:
env:
@@ -332,6 +284,67 @@ variations:
CM_MODEL_BATCH_SIZE: "#"
#CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "activation_count.#"

build_harness:
group: run-mode
deps:
- tags: get,generic-sys-util,_rsync
- tags: install,llvm,from.src,_for-intel-mlperf-inference
env:
CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: build_harness
new_env_keys:
- CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH
- DATA_PATH

build_harness,bert_:
deps:
- tags: get,dataset,original,squad
names:
- squad-original
- tags: get,ml-model,bert-large,_pytorch,_int8
names:
- bert-large
- ml-model
- tags: get,generic-python-lib,_package.tokenization


run_harness:
group: run-mode
default: true
deps:
- tags: reproduce,mlperf,inference,intel,harness,_build_harness
inherit_variation_tags: true
names:
- build-harness
skip_inherit_variation_groups:
- run-mode
- device-info
force_cache: true

# Download MLPerf inference source
- tags: get,mlcommons,inference,src
names:
- inference-src

# Creates user conf for given SUT
- tags: generate,user-conf,mlperf,inference
names:
- user-conf-generator

env:
CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: run_harness

# Env keys which are exposed to higher level scripts
new_env_keys:
- CM_MLPERF_*
- CM_DATASET_*
- CM_HW_NAME
- CM_ML_MODEL_*
- CM_MAX_EXAMPLES
- CM_IMAGENET_ACCURACY_DTYPE
- CM_SQUAD_ACCURACY_DTYPE



maxq:
group: power-mode
env:
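As wired above, selecting _run_harness (the default run-mode) first runs the same script with _build_harness, inheriting the active variation tags except those in skip_inherit_variation_groups, and caches the result. An illustrative top-level invocation through the CM Python API (the model variation tag _bert-99 is shown purely as an example; the exact tags depend on the target model and device):

    import cmind

    # Run the harness; the _run_harness variation pulls in a cached
    # _build_harness invocation of the same script as a dependency
    r = cmind.access({
        'action': 'run',
        'automation': 'script',
        'tags': 'reproduce,mlperf,inference,intel,harness,_bert-99,_run_harness'
    })
    if r['return'] > 0:
        raise RuntimeError(r.get('error', 'CM script failed'))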
[Diffs for the remaining changed files are not shown.]
