Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feature/validation steps #100

Closed
wants to merge 14 commits into from
Closed
Show file tree
Hide file tree
Changes from 9 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 19 additions & 2 deletions build/bnn-pynq/build.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,8 @@
"cnv-w2a2",
]

verif_en = os.getenv("VERIFICATION_EN", "0")

# which platforms to build the networks for
zynq_platforms = ["Pynq-Z1", "Ultra96", "ZCU104"]
alveo_platforms = ["U250"]
Expand Down Expand Up @@ -93,8 +95,23 @@ def platform_to_shell(platform):
% model_name,
)
model_file = "models/%s.onnx" % model_name
# launch FINN compiler to build
build.build_dataflow_cfg(model_file, cfg)

if verif_en == "1":
# Build the model with verification
import sys

sys.path.append(os.path.abspath(os.getenv("FINN_EXAMPLES_ROOT") + "/ci/"))
from verification_funcs import init_verif, verify_build_output

cfg.verify_steps, cfg.verify_input_npy, cfg.verify_expected_output_npy = init_verif(
model_name
)
build.build_dataflow_cfg(model_file, cfg)
verify_build_output(cfg, model_name)
else:
# Build the model without verification
build.build_dataflow_cfg(model_file, cfg)

# copy bitfiles into release dir if found
bitfile_gen_dir = cfg.output_dir + "/bitfile"
files_to_check_and_copy = [
Expand Down
27 changes: 22 additions & 5 deletions build/cybersecurity-mlp/build.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,11 @@
import shutil
from custom_steps import custom_step_mlp_export

# Define model name
model_name = "unsw_nb15-mlp-w2a2"

verif_en = os.getenv("VERIFICATION_EN", "0")

# Which platforms to build the networks for
zynq_platforms = ["Pynq-Z1", "Ultra96", "ZCU104"]
alveo_platforms = []
Expand All @@ -51,9 +56,6 @@ def platform_to_shell(platform):
raise Exception("Unknown platform, can't determine ShellFlowType")


# Define model name
model_name = "unsw_nb15-mlp-w2a2"

# Create a release dir, used for finn-examples release packaging
os.makedirs("release", exist_ok=True)

Expand Down Expand Up @@ -86,14 +88,29 @@ def platform_to_shell(platform):
build_cfg.DataflowOutputType.ESTIMATE_REPORTS,
build_cfg.DataflowOutputType.BITFILE,
build_cfg.DataflowOutputType.DEPLOYMENT_PACKAGE,
build_cfg.DataflowOutputType.STITCHED_IP,
jmonks-amd marked this conversation as resolved.
Show resolved Hide resolved
],
save_intermediate_models=True,
jmonks-amd marked this conversation as resolved.
Show resolved Hide resolved
)

# Export MLP model to FINN-ONNX
model = custom_step_mlp_export(model_name)
# Launch FINN compiler to generate bitfile
build.build_dataflow_cfg(model, cfg)
if verif_en == "1":
# Build the model with verification
import sys

sys.path.append(os.path.abspath(os.getenv("FINN_EXAMPLES_ROOT") + "/ci/"))
from verification_funcs import init_verif, verify_build_output

cfg.verify_steps, cfg.verify_input_npy, cfg.verify_expected_output_npy = init_verif(
model_name
)
build.build_dataflow_cfg(model, cfg)
verify_build_output(cfg, model_name)
else:
# Build the model without verification
build.build_dataflow_cfg(model, cfg)

# Copy bitfiles into release dir if found
bitfile_gen_dir = cfg.output_dir + "/bitfile"
filtes_to_check_and_copy = ["finn-accel.bit", "finn-accel.hwh", "finn-accel.xclbin"]
Expand Down
139 changes: 98 additions & 41 deletions build/gtsrb/build.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,20 +28,39 @@

import finn.builder.build_dataflow as build
import finn.builder.build_dataflow_config as build_cfg
from finn.util.basic import alveo_default_platform
from finn.builder.build_dataflow_config import default_build_dataflow_steps
from qonnx.transformation.insert_topk import InsertTopK
from finn.builder.build_dataflow_steps import build_dataflow_step_lookup
from qonnx.core.datatype import DataType
from qonnx.core.modelwrapper import ModelWrapper
import os
import shutil
import numpy as np
import onnx
from onnx import helper as oh

models = [
"cnv_1w1a_gtsrb",
]

model_name = "cnv_1w1a_gtsrb"
model_file = "models/%s.onnx" % model_name

verif_en = os.getenv("VERIFICATION_EN", "0")

# which platforms to build the networks for
zynq_platforms = ["Pynq-Z1"]
platforms_to_build = zynq_platforms
alveo_platforms = []
platforms_to_build = zynq_platforms + alveo_platforms


def custom_step_update_model(model, cfg):
    """Reload the model from ``model_file`` with its ONNX opset forced to 11.

    NOTE(review): the ``model`` argument is ignored — the function re-reads the
    module-level ``model_file`` from disk instead of transforming the model it
    was handed. Confirm this is intentional (it discards any changes made by
    earlier build steps); candidate for relocation into finn/qonnx as a shared
    helper.

    :param model: incoming model from the previous build step (currently unused)
    :param cfg: DataflowBuildConfig for this build (unused)
    :return: ModelWrapper around the reloaded model with opset_import version 11
    """
    # Opset 11 is required for the TopK node inserted later in the flow.
    op = onnx.OperatorSetIdProto()
    op.version = 11
    load_model = onnx.load(model_file)
    update_model = onnx.helper.make_model(load_model.graph, opset_imports=[op])
    model_ref = ModelWrapper(update_model)

    return model_ref


def custom_step_add_preproc(model, cfg):
Expand All @@ -63,16 +82,32 @@ def custom_step_add_preproc(model, cfg):
model.graph.node[1].input[0] = new_in_name
# set input dtype to uint8
model.set_tensor_datatype(in_name, DataType["UINT8"])

return model


# Append a TopK (k=1) node to the network output so the accelerator
# emits the predicted Top-1 class index directly.
def step_preprocess(model, cfg):
    return model.transform(InsertTopK(k=1))


custom_build_steps = [custom_step_add_preproc] + default_build_dataflow_steps
build_dataflow_step_lookup["step_preprocess_InsertTopK"] = step_preprocess

custom_build_steps = (
[custom_step_update_model]
+ [custom_step_add_preproc]
+ ["step_preprocess_InsertTopK"]
+ default_build_dataflow_steps
)


# determine which shell flow to use for a given platform
# determine which shell flow to use for a given platform
def platform_to_shell(platform):
    """Map a board name to its FINN ShellFlowType (Zynq vs. Alveo)."""
    if platform in zynq_platforms:
        return build_cfg.ShellFlowType.VIVADO_ZYNQ
    if platform in alveo_platforms:
        return build_cfg.ShellFlowType.VITIS_ALVEO
    raise Exception("Unknown platform, can't determine ShellFlowType")

Expand All @@ -82,45 +117,67 @@ def platform_to_shell(platform):

for platform_name in platforms_to_build:
shell_flow_type = platform_to_shell(platform_name)
vitis_platform = None
if shell_flow_type == build_cfg.ShellFlowType.VITIS_ALVEO:
vitis_platform = alveo_default_platform[platform_name]
# for Alveo, use the Vitis platform name as the release name
# e.g. xilinx_u250_xdma_201830_2
release_platform_name = vitis_platform
else:
vitis_platform = None
# for Zynq, use the board name as the release name
# e.g. ZCU104
release_platform_name = platform_name
# for Zynq, use the board name as the release name
# e.g. ZCU104
release_platform_name = platform_name
# release_platform_name = platform_name
jmonks-amd marked this conversation as resolved.
Show resolved Hide resolved
platform_dir = "release/%s" % release_platform_name
os.makedirs(platform_dir, exist_ok=True)
for model_name in models:
# set up the build configuration for this model
cfg = build_cfg.DataflowBuildConfig(
output_dir="output_%s_%s" % (model_name, release_platform_name),
target_fps=3000,
synth_clk_period_ns=10.0,
board=platform_name,
steps=custom_build_steps,
folding_config_file="folding_config/gtsrb_folding_config.json",
shell_flow_type=shell_flow_type,
vitis_platform=vitis_platform,
generate_outputs=[
build_cfg.DataflowOutputType.ESTIMATE_REPORTS,
build_cfg.DataflowOutputType.STITCHED_IP,
build_cfg.DataflowOutputType.RTLSIM_PERFORMANCE,
build_cfg.DataflowOutputType.BITFILE,
build_cfg.DataflowOutputType.DEPLOYMENT_PACKAGE,
build_cfg.DataflowOutputType.PYNQ_DRIVER,
],
specialize_layers_config_file="specialize_layers_config/gtsrb_specialize_layers.json",
# set up the build configuration for this model
cfg = build_cfg.DataflowBuildConfig(
output_dir="output_%s_%s" % (model_name, release_platform_name),
target_fps=3000,
synth_clk_period_ns=10.0,
board=platform_name,
steps=custom_build_steps,
# folding_config_file="folding_config/cnv_gtsrb_folding_config.json",
jmonks-amd marked this conversation as resolved.
Show resolved Hide resolved
specialize_layers_config_file="specialize_layers_config/gtsrb_specialize_layers.json",
shell_flow_type=shell_flow_type,
vitis_platform=vitis_platform,
generate_outputs=[
build_cfg.DataflowOutputType.ESTIMATE_REPORTS,
build_cfg.DataflowOutputType.STITCHED_IP,
build_cfg.DataflowOutputType.RTLSIM_PERFORMANCE,
build_cfg.DataflowOutputType.BITFILE,
build_cfg.DataflowOutputType.DEPLOYMENT_PACKAGE,
build_cfg.DataflowOutputType.PYNQ_DRIVER,
],
)
# launch FINN compiler to build
if verif_en == "1":
# Build the model with verification
import sys

sys.path.append(os.path.abspath(os.getenv("FINN_EXAMPLES_ROOT") + "/ci/"))
from verification_funcs import init_verif, verify_build_output

cfg.verify_steps, cfg.verify_input_npy, cfg.verify_expected_output_npy = init_verif(
model_name
)
model_file = "models/%s.onnx" % model_name
# launch FINN compiler to build
build.build_dataflow_cfg(model_file, cfg)
# copy bitfiles into release dir if found
bitfile_gen_dir = cfg.output_dir + "/bitfile"
files_to_check_and_copy = [
"finn-accel.bit",
"finn-accel.hwh",
"finn-accel.xclbin",
]
for f in files_to_check_and_copy:
src_file = bitfile_gen_dir + "/" + f
dst_file = platform_dir + "/" + f.replace("finn-accel", model_name)
if os.path.isfile(src_file):
shutil.copy(src_file, dst_file)
verify_build_output(cfg, model_name)
else:
# Build the model without verification
build.build_dataflow_cfg(model_file, cfg)

# copy bitfiles into release dir if found
bitfile_gen_dir = cfg.output_dir + "/bitfile"
files_to_check_and_copy = [
"finn-accel.bit",
"finn-accel.hwh",
"finn-accel.xclbin",
]
for f in files_to_check_and_copy:
src_file = bitfile_gen_dir + "/" + f
dst_file = platform_dir + "/" + f.replace("finn-accel", model_name)
if os.path.isfile(src_file):
shutil.copy(src_file, dst_file)
35 changes: 21 additions & 14 deletions build/kws/build.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,11 @@
import os
import shutil

model_name = "MLP_W3A3_python_speech_features_pre-processing_QONNX"
model_file = "models/" + model_name + ".onnx"

verif_en = os.getenv("VERIFICATION_EN", "0")


# Inject the preprocessing step into FINN to enable json serialization later on
def step_preprocess(model: ModelWrapper, cfg: DataflowBuildConfig):
Expand All @@ -60,15 +65,6 @@ def step_preprocess(model: ModelWrapper, cfg: DataflowBuildConfig):
build_cfg.DataflowOutputType.BITFILE,
build_cfg.DataflowOutputType.DEPLOYMENT_PACKAGE,
]
verification_steps = [
build_cfg.VerificationStepType.QONNX_TO_FINN_PYTHON,
build_cfg.VerificationStepType.TIDY_UP_PYTHON,
build_cfg.VerificationStepType.STREAMLINED_PYTHON,
build_cfg.VerificationStepType.FOLDED_HLS_CPPSIM,
]

model_name = "MLP_W3A3_python_speech_features_pre-processing_QONNX"
model_file = "models/" + model_name + ".onnx"

# Change the ONNX opset from version 9 to 11, which adds support for the TopK node
model = ModelWrapper(model_file)
Expand All @@ -89,7 +85,6 @@ def step_preprocess(model: ModelWrapper, cfg: DataflowBuildConfig):
# Configure build
cfg = build_cfg.DataflowBuildConfig(
# steps=estimate_steps, generate_outputs=estimate_outputs,
verify_steps=verification_steps,
steps=build_steps,
generate_outputs=build_outputs,
output_dir=last_output_dir,
Expand All @@ -98,11 +93,24 @@ def step_preprocess(model: ModelWrapper, cfg: DataflowBuildConfig):
board=platform_name,
shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ,
stitched_ip_gen_dcp=True,
verify_save_full_context=True,
specialize_layers_config_file="specialize_layers_config/kws_specialize_layers.json",
)
# Build the model
build.build_dataflow_cfg(model_file, cfg)

if verif_en == "1":
# Build the model with verification
import sys

sys.path.append(os.path.abspath(os.getenv("FINN_EXAMPLES_ROOT") + "/ci/"))
from verification_funcs import init_verif, verify_build_output

cfg.verify_steps, cfg.verify_input_npy, cfg.verify_expected_output_npy = init_verif(
model_name
)
build.build_dataflow_cfg(model_file, cfg)
verify_build_output(cfg, model_name)
else:
# Build the model without verification
build.build_dataflow_cfg(model_file, cfg)

# copy bitfiles and runtime weights into release dir if found
bitfile_gen_dir = cfg.output_dir + "/bitfile"
Expand All @@ -117,7 +125,6 @@ def step_preprocess(model: ModelWrapper, cfg: DataflowBuildConfig):
if os.path.isfile(src_file):
shutil.copy(src_file, dst_file)


# Export quantized inputs
print("Quantizing validation dataset.")
parent_model = ModelWrapper(last_output_dir + "/intermediate_models/dataflow_parent.onnx")
Expand Down
Binary file removed build/kws/expected_output.npy
Binary file not shown.
Binary file removed build/kws/input.npy
Binary file not shown.
Loading
Loading