You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
/*** Copyright (C) 2019-2021 Xilinx, Inc** Licensed under the Apache License, Version 2.0 (the "License"). You may* not use this file except in compliance with the License. A copy of the* License is located at** http://www.apache.org/licenses/LICENSE-2.0** Unless required by applicable law or agreed to in writing, software* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the* License for the specific language governing permissions and limitations* under the License.*/
#include<hls_vector.h>
#include<hls_stream.h>
#include"assert.h"
#include<iostream>
#include<string.h>
#include"myproject.h"
#include"parameters.h"voidmyproject(
hls::stream<input_t> &input_1,
hls::stream<result_t> &layer5_out
) {
// hls-fpga-machine-learning insert IO// #pragma HLS INTERFACE axis port=input_1,layer5_out
#pragma HLS DATAFLOW
#ifndef __SYNTHESIS__
staticbool loaded_weights = false;
if (!loaded_weights) {
// hls-fpga-machine-learning insert load weights
nnet::load_weights_from_txt<model_default_t, 512>(w6, "w6.txt");
nnet::load_weights_from_txt<model_default_t, 32>(b6, "b6.txt");
nnet::load_weights_from_txt<model_default_t, 1024>(w7, "w7.txt");
nnet::load_weights_from_txt<model_default_t, 32>(b7, "b7.txt");
loaded_weights = true;
}
#endif// ****************************************// NETWORK INSTANTIATION// ****************************************// hls-fpga-machine-learning insert layers
hls::stream<layer6_t> layer6_out("layer6_out");
#pragma HLS STREAM variable=layer6_out depth=2048
nnet::pointwise_conv_2d_cl<input_t, layer6_t, config6>(input_1, layer6_out, w6, b6); // layer_0
hls::stream<layer3_t> layer3_out("layer3_out");
#pragma HLS STREAM variable=layer3_out depth=2048
nnet::relu<layer6_t, layer3_t, ReLU_config3>(layer6_out, layer3_out); // layer_1
hls::stream<layer7_t> layer7_out("layer7_out");
#pragma HLS STREAM variable=layer7_out depth=2048
nnet::pointwise_conv_2d_cl<layer3_t, layer7_t, config7>(layer3_out, layer7_out, w7, b7); // layer_2
nnet::relu<layer7_t, result_t, ReLU_config5>(layer7_out, layer5_out); // layer_3
}
extern"C" {
voidvadd(float* in1, // Read-Only Vector 1float *out_r // Output Result
){
#pragma HLS INTERFACE m_axi port = in1 bundle = gmem0
// #pragma HLS INTERFACE m_axi port = in2 bundle = gmem1//#pragma HLS INTERFACE m_axi port = out_r bundle = gmem1float in_buffer[28*28*3];
// copymemcpy(in_buffer,in1,28*28*3*sizeof(float));
hls::stream<input_t> input("input");
// hls::stream<result_t> layer_out("layer_out");
hls::stream<result_t> out("out");
size_t i_pack = 0;
input_t dst_pack;
for(int i = 0; i < 28*28*3; i++){
dst_pack[i_pack++] = typenameinput_t::value_type(in_buffer[i]);
if(input_t::size == i_pack){
i_pack = 0;
input.write(dst_pack);
}
}
myproject(input, out);
int write_i = 0;
float out_buffer[result_t::size];
for(int i = 0; i < 32 / result_t::size; i++){
result_t res_pack = out.read();
for(int j = 0; j < result_t::size ; j++){
out_buffer[write_i] = res_pack[j];
out_r[write_i++] = out_buffer[write_i];
}
}
}
}
I used Vitis 2021.1 to package the code I generated into an xclbin kernel file, but when I run it, it runs indefinitely without finishing. Why is this happening? Do you have any examples that run successfully?
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
-
I used Vitis 2021.1 to package the code I generated into an xclbin kernel file, but when I run it, it runs indefinitely without finishing. Why is this happening? Do you have any examples that run successfully?
Try using PYNQ to load and execute the xclbin.
Beta Was this translation helpful? Give feedback.
All reactions